repo_name
stringlengths 5
100
| ref
stringlengths 12
67
| path
stringlengths 4
244
| copies
stringlengths 1
8
| content
stringlengths 0
1.05M
⌀ |
|---|---|---|---|---|
wengole/eveonline-assistant
|
refs/heads/master
|
eveonline-assistant/plans/__init__.py
|
12133432
| |
robclark/chromium
|
refs/heads/master
|
tools/json_schema_compiler/highlighters/__init__.py
|
12133432
| |
kcpawan/django
|
refs/heads/master
|
tests/migration_test_data_persistence/__init__.py
|
12133432
| |
gigitux/lollypop
|
refs/heads/master
|
src/selectionlist.py
|
2
|
# Copyright (c) 2014-2015 Cedric Bellegarde <cedric.bellegarde@adishatz.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk, Gdk, GLib, GObject, Pango
from cgi import escape
from lollypop.utils import format_artist_name
from lollypop.define import Type, Lp
class SelectionPopover(Gtk.Popover):
    """
    Non-modal OSD popover that shows a single line of bold text
    (used as a scroll-position indicator).
    """

    def __init__(self):
        """
        Build the popover: a fixed 100x50, OSD-styled, non-modal
        popover containing one centered label.
        """
        Gtk.Popover.__init__(self)
        self.set_modal(False)
        self.set_property('width-request', 100)
        self.set_property('height-request', 50)
        self.get_style_context().add_class('osd-popover')
        label = Gtk.Label()
        label.set_property('halign', Gtk.Align.CENTER)
        label.set_property('valign', Gtk.Align.CENTER)
        label.show()
        self.add(label)
        self._label = label

    def set_text(self, text):
        """
        Show @text in large bold markup.
        @param text as string
        """
        self._label.set_markup('<span size="large"><b>%s</b></span>' % text)

    def do_grab_focus(self):
        """
        Never take keyboard focus away from the list.
        """
        pass
class MotionEvent:
    """
    Remember the coordinates of the last pointer-motion event.
    """
    # Last known pointer position, in widget coordinates
    x = 0.0
    y = 0.0
class SelectionList(Gtk.ScrolledWindow):
    """
    A scrollable tree-view list for artists/genres.

    Model columns: 0 = item id (int), 1 = display name (str),
    2 = icon name (str). Emits 'item-selected' with the item id when
    the cursor changes, and 'populated' after populate() finishes.
    """
    __gsignals__ = {
        'item-selected': (GObject.SignalFlags.RUN_FIRST, None, (int,)),
        'populated': (GObject.SignalFlags.RUN_FIRST, None, ()),
    }

    def __init__(self):
        """
        Init Selection list ui: build model/view from the GResource UI
        file, add a text+icon column, and hook motion/scroll handlers
        for the scroll-position popover.
        """
        Gtk.ScrolledWindow.__init__(self)
        self.set_policy(Gtk.PolicyType.NEVER,
                        Gtk.PolicyType.AUTOMATIC)
        self._last_motion_event = MotionEvent()
        self._previous_motion_y = 0.0
        self._timeout = None  # GLib timeout id used to hide the popover
        self._to_select_id = Type.NONE  # id to select once it appears in model
        self._updating = False  # Sort disabled if False
        self._is_artists = False  # for string translation
        self._popover = SelectionPopover()
        builder = Gtk.Builder()
        builder.add_from_resource('/org/gnome/Lollypop/SelectionList.ui')
        builder.connect_signals(self)
        self._model = builder.get_object('model')
        self._model.set_sort_column_id(0, Gtk.SortType.ASCENDING)
        self._model.set_sort_func(0, self._sort_items)
        self._view = builder.get_object('view')
        self._view.set_row_separator_func(self._row_separator_func)
        # Text renderer ellipsizes long names; pixbuf renderer shows icon
        self._renderer0 = Gtk.CellRendererText()
        self._renderer0.set_property('ellipsize-set', True)
        self._renderer0.set_property('ellipsize', Pango.EllipsizeMode.END)
        renderer1 = Gtk.CellRendererPixbuf()
        column = Gtk.TreeViewColumn('')
        column.pack_start(self._renderer0, True)
        column.pack_start(renderer1, False)
        column.add_attribute(self._renderer0, 'text', 1)
        column.add_attribute(renderer1, 'icon-name', 2)
        column.set_expand(True)
        column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
        self._view.append_column(column)
        self._view.connect('motion_notify_event', self._on_motion_notify)
        self._view.set_property('has_tooltip', True)
        self.add(self._view)
        adj = self.get_vadjustment()
        adj.connect('value_changed', self._on_scroll)

    def mark_as_artists(self, is_artists):
        """
        Mark list as artists list (affects icon choice and popover text)
        @param is_artists as bool
        """
        self._is_artists = is_artists

    def is_marked_as_artists(self):
        """
        Return True if list is marked as artists
        @return bool
        """
        return self._is_artists

    def populate(self, values):
        """
        Populate view with values, then emit 'populated'
        @param values as [(int, str)], will be deleted
        @thread safe
        """
        # Sorting (see _sort_items) is only enabled while _updating is
        # True, and only when the model already holds rows.
        if len(self._model) > 0:
            self._updating = True
        self._add_values(values)
        self.emit('populated')
        self._updating = False

    def remove(self, object_id):
        """
        Remove first row whose id matches @object_id
        @param object_id as int
        """
        for item in self._model:
            if item[0] == object_id:
                self._model.remove(item.iter)
                break

    def add_value(self, value):
        """
        Add item to list (sorted into place)
        @param value as (int, str)
        """
        self._updating = True
        self._add_value(value)
        self._updating = False

    def update_values(self, values):
        """
        Update view with values: drop rows absent from @values
        (devices excepted) and append rows not yet present
        @param values as [(int, str)]
        @thread safe
        """
        self._updating = True
        # Remove not found items but not devices
        value_ids = set([v[0] for v in values])
        for item in self._model:
            if item[0] > Type.DEVICES and not item[0] in value_ids:
                self._model.remove(item.iter)
        # Add items which are not already in the list
        item_ids = set([i[0] for i in self._model])
        for value in values:
            if not value[0] in item_ids:
                self._add_value(value)
        self._updating = False

    def get_value(self, object_id):
        """
        Return display name for id, '' if not found
        @param object_id as int
        @return value as string
        """
        for item in self._model:
            if item[0] == object_id:
                return item[1]
        return ''

    def will_be_selected(self):
        """
        Return True if list will select an item on populate
        @return selected as bool
        """
        return self._to_select_id != Type.NONE

    def select_id(self, object_id):
        """
        Select row with @object_id; if not present yet, remember it
        and select it when it gets added (see _add_value)
        @param object_id as int
        """
        self._to_select_id = Type.NONE
        try:
            selected = None
            for item in self._model:
                if item[0] == object_id:
                    selected = item.iter
            # Select later
            if selected is None:
                self._to_select_id = object_id
            else:
                path = self._model.get_path(selected)
                self._view.set_cursor(path, None, False)
        except:
            # Defer selection on any model/view error
            self._to_select_id = object_id

    def get_selected_id(self):
        """
        Get id at current cursor position
        @return id as int (Type.NONE if nothing selected)
        """
        selected_id = Type.NONE
        (path, column) = self._view.get_cursor()
        if path is not None:
            iterator = self._model.get_iter(path)
            if iterator is not None:
                selected_id = self._model.get_value(iterator, 0)
        return selected_id

    def clear(self):
        """
        Clear treeview
        """
        self._updating = True
        self._model.clear()
        self._updating = False

#######################
# PRIVATE             #
#######################
    def _add_value(self, value):
        """
        Add value to the model and honor a pending deferred selection
        @param value as [int, str]
        @thread safe
        """
        self._model.append([value[0],
                            value[1],
                            self._get_icon_name(value[0])])
        if value[0] == self._to_select_id:
            self.select_id(self._to_select_id)

    def _add_values(self, values):
        """
        Add values to the list
        @param values as [(int, str)]
        @thread safe
        """
        for value in values:
            self._add_value(value)

    def _get_icon_name(self, object_id):
        """
        Return symbolic icon name for id
        @param object_id as int
        @return icon name as str ('' when no match)
        """
        # NOTE(review): the `object_id < Type.DEVICES` branch is tested
        # before RANDOMS/LOVED/NEVER; if those constants are smaller than
        # Type.DEVICES their branches are unreachable — verify the Type
        # constant ordering in lollypop.define.
        icon = ''
        if object_id >= 0:
            icon = 'go-next-symbolic'
        elif object_id == Type.POPULARS:
            icon = 'starred-symbolic'
        elif object_id == Type.PLAYLISTS:
            icon = 'emblem-documents-symbolic'
        elif object_id == Type.ALL:
            if self._is_artists:
                icon = 'media-optical-cd-audio-symbolic'
            else:
                icon = 'avatar-default-symbolic'
        elif object_id == Type.COMPILATIONS:
            icon = 'system-users-symbolic'
        elif object_id == Type.RECENTS:
            icon = 'document-open-recent-symbolic'
        elif object_id == Type.RADIOS:
            icon = 'audio-input-microphone-symbolic'
        elif object_id < Type.DEVICES:
            icon = 'multimedia-player-symbolic'
        elif object_id == Type.RANDOMS:
            icon = 'media-playlist-shuffle-symbolic'
        elif object_id == Type.LOVED:
            icon = 'emblem-favorite-symbolic'
        elif object_id == Type.NEVER:
            icon = 'document-new-symbolic'
        return icon

    def _sort_items(self, model, itera, iterb, data):
        """
        Sort model: static (negative-id) entries first by id, then
        case-insensitive artist-name order.
        Only active while self._updating is True.
        """
        # NOTE(review): a GtkTreeIterCompareFunc is expected to return a
        # negative/zero/positive int; this returns bools (0/1), which
        # loses the "a before b" (-1) case — confirm against the GTK docs.
        if not self._updating:
            return False
        a_index = model.get_value(itera, 0)
        b_index = model.get_value(iterb, 0)
        a = format_artist_name(model.get_value(itera, 1))
        b = format_artist_name(model.get_value(iterb, 1))
        # Static vs static
        if a_index < 0 and b_index < 0:
            return a_index < b_index
        # Static entries always on top
        elif b_index < 0:
            return True
        # Static entries always on top
        if a_index < 0:
            return False
        # String comparison for non static
        else:
            return a.lower() > b.lower()

    def _row_separator_func(self, model, iterator):
        """
        Draw a separator for Type.SEPARATOR rows
        @param model as Gtk.TreeModel
        @param iterator as Gtk.TreeIter
        """
        return model.get_value(iterator, 0) == Type.SEPARATOR

    def _on_cursor_changed(self, view):
        """
        Forward "cursor-changed" as "item-selected" with item id as arg
        (connected via the Builder UI file)
        @param view as Gtk.TreeView
        """
        selected_id = self.get_selected_id()
        if not self._updating and selected_id != Type.NONE:
            self._to_select_id = Type.NONE
            self.emit('item-selected', selected_id)

    def _on_focus_in_event(self, widget, event):
        """
        Disable global shortcuts while the list has focus
        @param widget as Gtk.Widget
        @param event as Gdk.Event
        """
        Lp.window.enable_global_shorcuts(False)

    def _on_focus_out_event(self, widget, event):
        """
        Re-enable global shortcuts when focus leaves the list
        @param widget as Gtk.Widget
        @param event as Gdk.Event
        """
        Lp.window.enable_global_shorcuts(True)

    def _on_leave_event(self, widget=None, event=None):
        """
        Hide popover (also used as the timeout callback armed in
        _on_motion_notify)
        @param widget as Gtk.Widget
        @param event as Gdk.Event
        """
        self._popover.hide()
        self._timeout = None

    def _on_motion_notify(self, widget, event):
        """
        Record motion coordinates and (re)arm the popover hide timeout
        @param widget as Gtk.Widget
        @param event as Gdk.Event
        """
        if self._timeout is None:
            self._timeout = GLib.timeout_add(500,
                                             self._on_leave_event)
        if event.x < 0.0 or event.y < 0.0:
            return
        self._last_motion_event.x = event.x
        self._last_motion_event.y = event.y

    def _on_scroll(self, adj):
        """
        Show a popover with the first letter of the row under the
        pointer while scrolling a long list
        @param adj as Gtk.Adjustment
        """
        # Only show if scrolled window is huge
        if adj.get_upper() < adj.get_page_size() * 3:
            return
        if self._last_motion_event is None:
            return
        # Cancel the pending hide while actively scrolling
        if self._timeout is not None:
            GLib.source_remove(self._timeout)
            self._timeout = None
        dest_row = self._view.get_dest_row_at_pos(self._last_motion_event.x,
                                                  self._last_motion_event.y)
        if dest_row is None:
            return
        row = dest_row[0]
        if row is None:
            return
        row_iter = self._model.get_iter(row)
        # Skip static entries (negative ids)
        if row_iter is None or self._model.get_value(row_iter, 0) < 0:
            return
        text = self._model.get_value(row_iter, 1)
        if text:
            if self._is_artists:
                text = format_artist_name(text)
            self._popover.set_text(" %s " % text[0].upper())
            self._popover.set_relative_to(self)
            # Point at the right edge, level with the pointer
            r = Gdk.Rectangle()
            r.x = self.get_allocated_width()
            r.y = self._last_motion_event.y
            r.width = 1
            r.height = 1
            self._popover.set_pointing_to(r)
            self._popover.set_position(Gtk.PositionType.RIGHT)
            self._popover.show()

    def _on_query_tooltip(self, widget, x, y, keyboard, tooltip):
        """
        Show full-name tooltip only when the cell text is ellipsized
        @param widget as Gtk.Widget
        @param x as int
        @param y as int
        @param keyboard as bool
        @param tooltip as Gtk.Tooltip
        """
        if keyboard:
            return True
        (exists, tx, ty, model, path, i) = self._view.get_tooltip_context(
            x,
            y,
            False)
        if exists:
            # Lay the text out at the renderer's width to detect ellipsis
            ctx = self._view.get_pango_context()
            layout = Pango.Layout.new(ctx)
            iterator = self._model.get_iter(path)
            if iterator is not None:
                text = self._model.get_value(iterator, 1)
                column = self._view.get_column(0)
                (position, width) = column.cell_get_position(self._renderer0)
                layout.set_ellipsize(Pango.EllipsizeMode.END)
                layout.set_width(Pango.units_from_double(width - 8))
                layout.set_text(text, -1)
                if layout.is_ellipsized():
                    tooltip.set_markup(escape(text))
                    return True
        return False
|
caseyrollins/osf.io
|
refs/heads/develop
|
tests/test_webtests.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functional tests using WebTest."""
import datetime as dt
import httplib as http
import logging
import unittest
import markupsafe
import mock
import pytest
from nose.tools import * # flake8: noqa (PEP8 asserts)
import re
from addons.wiki.utils import to_mongo_key
from framework.auth import exceptions as auth_exc
from framework.auth.core import Auth
from tests.base import OsfTestCase
from tests.base import fake
from osf_tests.factories import (
fake_email,
AuthUserFactory,
NodeFactory,
PreprintFactory,
PreprintProviderFactory,
PrivateLinkFactory,
ProjectFactory,
RegistrationFactory,
SubjectFactory,
UserFactory,
UnconfirmedUserFactory,
UnregUserFactory,
)
from addons.wiki.models import WikiPage, WikiVersion
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
from website import settings, language
from addons.osfstorage.models import OsfStorageFile
from website.util import web_url_for, api_url_for
from api_tests import utils as test_utils
logging.getLogger('website.project.model').setLevel(logging.ERROR)
def assert_in_html(member, container, **kwargs):
    """Assert that *member*, escaped the way markupsafe renders HTML,
    occurs in *container*."""
    return assert_in(markupsafe.escape(member), container, **kwargs)
def assert_not_in_html(member, container, **kwargs):
    """Assert that *member*, escaped the way markupsafe renders HTML,
    does NOT occur in *container*."""
    return assert_not_in(markupsafe.escape(member), container, **kwargs)
class TestDisabledUser(OsfTestCase):
    """Profile pages of disabled accounts respond with HTTP 410 Gone."""

    def setUp(self):
        super(TestDisabledUser, self).setUp()
        self.user = UserFactory()
        self.user.set_password('Korben Dallas')
        self.user.is_disabled = True
        self.user.save()

    def test_profile_disabled_returns_401(self):
        # NOTE(review): name says 401 but the assertion (and the view,
        # per this test) uses 410 Gone — the name is stale.
        res = self.app.get(self.user.url, expect_errors=True)
        assert_equal(res.status_code, 410)
class TestAnUnregisteredUser(OsfTestCase):
    """Anonymous visitors are redirected away from authenticated pages."""

    def test_cant_see_profile_if_not_logged_in(self):
        url = web_url_for('profile_view')
        res = self.app.get(url)
        res = res.follow()
        # First hop is a permanent redirect toward the login flow
        assert_equal(res.status_code, 301)
        assert_in('/login/', res.headers['Location'])
@pytest.mark.enable_bookmark_creation
@pytest.mark.enable_quickfiles_creation
class TestAUser(OsfTestCase):
    """Functional tests driving the site as a logged-in user: login
    redirects, dashboard, project permission buttons, wiki pages, and
    profile pages."""

    def setUp(self):
        super(TestAUser, self).setUp()
        self.user = AuthUserFactory()
        self.auth = self.user.auth

    def test_can_see_profile_url(self):
        res = self.app.get(self.user.url).maybe_follow()
        assert_in(self.user.url, res)

    # `GET /login/` without parameters is redirected to `/dashboard/` page which has `@must_be_logged_in` decorator
    # if user is not logged in, she/he is further redirected to CAS login page
    def test_is_redirected_to_cas_if_not_logged_in_at_login_page(self):
        res = self.app.get('/login/').follow()
        assert_equal(res.status_code, 302)
        location = res.headers.get('Location')
        assert_in('login?service=', location)

    def test_is_redirected_to_dashboard_if_already_logged_in_at_login_page(self):
        res = self.app.get('/login/', auth=self.user.auth)
        assert_equal(res.status_code, 302)
        assert 'dashboard' in res.headers.get('Location')

    def test_register_page(self):
        res = self.app.get('/register/')
        assert_equal(res.status_code, 200)

    def test_is_redirected_to_dashboard_if_already_logged_in_at_register_page(self):
        res = self.app.get('/register/', auth=self.user.auth)
        assert_equal(res.status_code, 302)
        assert 'dashboard' in res.headers.get('Location')

    def test_sees_projects_in_her_dashboard(self):
        # the user already has a project
        project = ProjectFactory(creator=self.user)
        project.add_contributor(self.user)
        project.save()
        res = self.app.get('/myprojects/', auth=self.user.auth)
        assert_in('Projects', res)  # Projects heading

    def test_does_not_see_osffiles_in_user_addon_settings(self):
        res = self.app.get('/settings/addons/', auth=self.auth, auto_follow=True)
        assert_not_in('OSF Storage', res)

    def test_sees_osffiles_in_project_addon_settings(self):
        project = ProjectFactory(creator=self.user)
        project.add_contributor(
            self.user,
            permissions=['read', 'write', 'admin'],
            save=True)
        res = self.app.get('/{0}/addons/'.format(project._primary_key), auth=self.auth, auto_follow=True)
        assert_in('OSF Storage', res)

    def test_sees_correct_title_on_dashboard(self):
        # User goes to dashboard
        res = self.app.get('/myprojects/', auth=self.auth, auto_follow=True)
        title = res.html.title.string
        assert_equal('OSF | My Projects', title)

    def test_can_see_make_public_button_if_admin(self):
        # User is a contributor on a project
        project = ProjectFactory()
        project.add_contributor(
            self.user,
            permissions=['read', 'write', 'admin'],
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_in('Make Public', res)

    def test_cant_see_make_public_button_if_not_admin(self):
        # User is a contributor on a project
        project = ProjectFactory()
        project.add_contributor(
            self.user,
            permissions=['read', 'write'],
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_not_in('Make Public', res)

    def test_can_see_make_private_button_if_admin(self):
        # User is a contributor on a project
        project = ProjectFactory(is_public=True)
        project.add_contributor(
            self.user,
            permissions=['read', 'write', 'admin'],
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_in('Make Private', res)

    def test_cant_see_make_private_button_if_not_admin(self):
        # User is a contributor on a project
        project = ProjectFactory(is_public=True)
        project.add_contributor(
            self.user,
            permissions=['read', 'write'],
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_not_in('Make Private', res)

    def test_sees_logs_on_a_project(self):
        project = ProjectFactory(is_public=True)
        # User goes to the project's page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        # Can see log event
        assert_in('created', res)

    def test_no_wiki_content_message(self):
        project = ProjectFactory(creator=self.user)
        # Goes to project's wiki, where there is no content
        res = self.app.get('/{0}/wiki/home/'.format(project._primary_key), auth=self.auth)
        # Sees a message indicating no content
        assert_in('Add important information, links, or images here to describe your project.', res)
        # Sees that edit panel is open by default when home wiki has no content
        assert_in('panelsUsed: ["view", "menu", "edit"]', res)

    def test_wiki_content(self):
        project = ProjectFactory(creator=self.user)
        wiki_page_name = 'home'
        wiki_content = 'Kittens'
        wiki_page = WikiFactory(
            user=self.user,
            node=project,
        )
        wiki = WikiVersionFactory(
            wiki_page=wiki_page,
            content=wiki_content
        )
        res = self.app.get('/{0}/wiki/{1}/'.format(
            project._primary_key,
            wiki_page_name,
        ), auth=self.auth)
        assert_not_in('Add important information, links, or images here to describe your project.', res)
        assert_in(wiki_content, res)
        assert_in('panelsUsed: ["view", "menu"]', res)

    def test_wiki_page_name_non_ascii(self):
        project = ProjectFactory(creator=self.user)
        non_ascii = to_mongo_key('WöRlÐé')
        WikiPage.objects.create_for_node(project, 'WöRlÐé', 'new content', Auth(self.user))
        wv = WikiVersion.objects.get_for_node(project, non_ascii)
        # Python 2: the mongo key is a bytestring, decode before comparing
        assert wv.wiki_page.page_name.upper() == non_ascii.decode('utf-8').upper()

    def test_noncontributor_cannot_see_wiki_if_no_content(self):
        user2 = UserFactory()
        # user2 creates a public project and adds no wiki content
        project = ProjectFactory(creator=user2, is_public=True)
        # self navigates to project
        res = self.app.get(project.url).maybe_follow()
        # Should not see wiki widget (since non-contributor and no content)
        assert_not_in('Add important information, links, or images here to describe your project.', res)

    def test_wiki_does_not_exist(self):
        project = ProjectFactory(creator=self.user)
        res = self.app.get('/{0}/wiki/{1}/'.format(
            project._primary_key,
            'not a real page yet',
        ), auth=self.auth, expect_errors=True)
        assert_in('Add important information, links, or images here to describe your project.', res)

    def test_sees_own_profile(self):
        res = self.app.get('/profile/', auth=self.auth)
        td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
        td2 = td1.find_next_sibling('td')
        assert_equal(td2.text, self.user.display_absolute_url)

    def test_sees_another_profile(self):
        user2 = UserFactory()
        res = self.app.get(user2.url, auth=self.auth)
        td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
        td2 = td1.find_next_sibling('td')
        assert_equal(td2.text, user2.display_absolute_url)
@pytest.mark.enable_bookmark_creation
class TestComponents(OsfTestCase):
    """Component pages: parent title display and per-permission access
    to the settings (delete, commenting) sections."""

    def setUp(self):
        super(TestComponents, self).setUp()
        self.user = AuthUserFactory()
        self.consolidate_auth = Auth(user=self.user)
        self.project = ProjectFactory(creator=self.user)
        self.project.add_contributor(contributor=self.user, auth=self.consolidate_auth)
        # A non-project component
        self.component = NodeFactory(
            category='hypothesis',
            creator=self.user,
            parent=self.project,
        )
        self.component.save()
        # Toggle privacy to generate log entries on the component
        self.component.set_privacy('public', self.consolidate_auth)
        self.component.set_privacy('private', self.consolidate_auth)
        self.project.save()
        self.project_url = self.project.web_url_for('view_project')

    def test_sees_parent(self):
        res = self.app.get(self.component.url, auth=self.user.auth).maybe_follow()
        parent_title = res.html.find_all('h2', class_='node-parent-title')
        assert_equal(len(parent_title), 1)
        assert_in(self.project.title, parent_title[0].text)  # Bs4 will handle unescaping HTML here

    def test_delete_project(self):
        res = self.app.get(
            self.component.url + 'settings/',
            auth=self.user.auth
        ).maybe_follow()
        assert_in(
            'Delete {0}'.format(self.component.project_or_component),
            res
        )

    def test_cant_delete_project_if_not_admin(self):
        non_admin = AuthUserFactory()
        self.component.add_contributor(
            non_admin,
            permissions=['read', 'write'],
            auth=self.consolidate_auth,
            save=True,
        )
        res = self.app.get(
            self.component.url + 'settings/',
            auth=non_admin.auth
        ).maybe_follow()
        assert_not_in(
            'Delete {0}'.format(self.component.project_or_component),
            res
        )

    def test_can_configure_comments_if_admin(self):
        res = self.app.get(
            self.component.url + 'settings/',
            auth=self.user.auth,
        ).maybe_follow()
        assert_in('Commenting', res)

    def test_cant_configure_comments_if_not_admin(self):
        non_admin = AuthUserFactory()
        self.component.add_contributor(
            non_admin,
            permissions=['read', 'write'],
            auth=self.consolidate_auth,
            save=True,
        )
        res = self.app.get(
            self.component.url + 'settings/',
            auth=non_admin.auth
        ).maybe_follow()
        assert_not_in('Commenting', res)

    def test_components_should_have_component_list(self):
        res = self.app.get(self.component.url, auth=self.user.auth)
        assert_in('Components', res)
@pytest.mark.enable_bookmark_creation
class TestPrivateLinkView(OsfTestCase):
    """View-only (private) links: anonymization of contributors and
    suppression of the view-only warning banner for read-only users."""

    def setUp(self):
        super(TestPrivateLinkView, self).setUp()
        self.user = AuthUserFactory()  # Is NOT a contributor
        self.project = ProjectFactory(is_public=False)
        self.link = PrivateLinkFactory(anonymous=True)
        self.link.nodes.add(self.project)
        self.link.save()
        self.project_url = self.project.web_url_for('view_project')

    def test_anonymous_link_hide_contributor(self):
        res = self.app.get(self.project_url, {'view_only': self.link.key})
        assert_in('Anonymous Contributors', res.body)
        assert_not_in(self.user.fullname, res)

    def test_anonymous_link_hides_citations(self):
        res = self.app.get(self.project_url, {'view_only': self.link.key})
        assert_not_in('Citation:', res)

    def test_no_warning_for_read_only_user_with_valid_link(self):
        link2 = PrivateLinkFactory(anonymous=False)
        link2.nodes.add(self.project)
        link2.save()
        self.project.add_contributor(
            self.user,
            permissions=['read'],
            save=True,
        )
        res = self.app.get(self.project_url, {'view_only': link2.key},
                           auth=self.user.auth)
        assert_not_in(
            'is being viewed through a private, view-only link. '
            'Anyone with the link can view this project. Keep '
            'the link safe.',
            res.body
        )

    def test_no_warning_for_read_only_user_with_invalid_link(self):
        self.project.add_contributor(
            self.user,
            permissions=['read'],
            save=True,
        )
        res = self.app.get(self.project_url, {'view_only': 'not_valid'},
                           auth=self.user.auth)
        assert_not_in(
            'is being viewed through a private, view-only link. '
            'Anyone with the link can view this project. Keep '
            'the link safe.',
            res.body
        )
@pytest.mark.enable_bookmark_creation
@pytest.mark.enable_quickfiles_creation
class TestMergingAccounts(OsfTestCase):
    """Merging two user accounts: contributor lists and profile alerts
    reflect the merge."""

    def setUp(self):
        super(TestMergingAccounts, self).setUp()
        self.user = UserFactory.build()
        self.user.fullname = "tess' test string"
        self.user.set_password('science')
        self.user.save()
        self.dupe = UserFactory.build()
        self.dupe.set_password('example')
        self.dupe.save()

    def test_merged_user_is_not_shown_as_a_contributor(self):
        project = ProjectFactory(is_public=True)
        # Both the master and dupe are contributors
        project.add_contributor(self.dupe, log=False)
        project.add_contributor(self.user, log=False)
        project.save()
        # At the project page, both are listed as contributors
        res = self.app.get(project.url).maybe_follow()
        assert_in_html(self.user.fullname, res)
        assert_in_html(self.dupe.fullname, res)
        # The accounts are merged
        self.user.merge_user(self.dupe)
        self.user.save()
        # Now only the master user is shown at the project page
        res = self.app.get(project.url).maybe_follow()
        assert_in_html(self.user.fullname, res)
        assert_true(self.dupe.is_merged)
        assert_not_in(self.dupe.fullname, res)

    def test_merged_user_has_alert_message_on_profile(self):
        # Master merges dupe
        self.user.merge_user(self.dupe)
        self.user.save()
        # At the dupe user's profile there is an alert message at the top
        # indicating that the user is merged
        res = self.app.get('/profile/{0}/'.format(self.dupe._primary_key)).maybe_follow()
        assert_in('This account has been merged', res)
@pytest.mark.enable_bookmark_creation
class TestShortUrls(OsfTestCase):
    """Short URLs and deep URLs render the same page body for projects,
    components, and wiki pages."""

    def setUp(self):
        super(TestShortUrls, self).setUp()
        self.user = AuthUserFactory()
        self.auth = self.user.auth
        self.consolidate_auth = Auth(user=self.user)
        self.project = ProjectFactory(creator=self.user)
        # A non-project component
        self.component = NodeFactory(parent=self.project, category='hypothesis', creator=self.user)
        # Hack: Add some logs to component; should be unnecessary pending
        # improvements to factories from @rliebz
        self.component.set_privacy('public', auth=self.consolidate_auth)
        self.component.set_privacy('private', auth=self.consolidate_auth)
        self.wiki = WikiFactory(
            user=self.user,
            node=self.component,
        )

    def _url_to_body(self, url):
        # Fetch @url (following redirects) and return the normalized body
        return self.app.get(
            url,
            auth=self.auth
        ).maybe_follow(
            auth=self.auth,
        ).normal_body

    def test_project_url(self):
        assert_equal(
            self._url_to_body(self.project.deep_url),
            self._url_to_body(self.project.url),
        )

    def test_component_url(self):
        assert_equal(
            self._url_to_body(self.component.deep_url),
            self._url_to_body(self.component.url),
        )

    def test_wiki_url(self):
        assert_equal(
            self._url_to_body(self.wiki.deep_url),
            self._url_to_body(self.wiki.url),
        )
@pytest.mark.enable_bookmark_creation
@pytest.mark.enable_implicit_clean
class TestClaiming(OsfTestCase):
    """Unregistered-contributor claiming flow: names shown, password
    setup on the claim page, and per-project display names."""

    def setUp(self):
        super(TestClaiming, self).setUp()
        self.referrer = AuthUserFactory()
        self.project = ProjectFactory(creator=self.referrer, is_public=True)

    def test_correct_name_shows_in_contributor_list(self):
        name1, email = fake.name(), fake_email()
        UnregUserFactory(fullname=name1, email=email)
        name2, email = fake.name(), fake_email()
        # Added with different name
        self.project.add_unregistered_contributor(fullname=name2,
                                                  email=email, auth=Auth(self.referrer))
        self.project.save()
        res = self.app.get(self.project.url, auth=self.referrer.auth)
        # Correct name is shown
        assert_in_html(name2, res)
        assert_not_in(name1, res)

    def test_user_can_set_password_on_claim_page(self):
        name, email = fake.name(), fake_email()
        new_user = self.project.add_unregistered_contributor(
            email=email,
            fullname=name,
            auth=Auth(self.referrer)
        )
        self.project.save()
        claim_url = new_user.get_claim_url(self.project._primary_key)
        res = self.app.get(claim_url)
        self.project.reload()
        assert_in('Set Password', res)
        form = res.forms['setPasswordForm']
        #form['username'] = new_user.username #Removed as long as E-mail can't be updated.
        form['password'] = 'killerqueen'
        form['password2'] = 'killerqueen'
        res = form.submit().follow()
        new_user.reload()
        assert_true(new_user.check_password('killerqueen'))

    def test_sees_is_redirected_if_user_already_logged_in(self):
        name, email = fake.name(), fake_email()
        new_user = self.project.add_unregistered_contributor(
            email=email,
            fullname=name,
            auth=Auth(self.referrer)
        )
        self.project.save()
        existing = AuthUserFactory()
        claim_url = new_user.get_claim_url(self.project._primary_key)
        # a user is already logged in
        res = self.app.get(claim_url, auth=existing.auth, expect_errors=True)
        assert_equal(res.status_code, 302)

    def test_unregistered_users_names_are_project_specific(self):
        name1, name2, email = fake.name(), fake.name(), fake_email()
        project2 = ProjectFactory(creator=self.referrer)
        # different projects use different names for the same unreg contributor
        self.project.add_unregistered_contributor(
            email=email,
            fullname=name1,
            auth=Auth(self.referrer)
        )
        self.project.save()
        project2.add_unregistered_contributor(
            email=email,
            fullname=name2,
            auth=Auth(self.referrer)
        )
        project2.save()
        self.app.authenticate(*self.referrer.auth)
        # Each project displays a different name in the contributor list
        res = self.app.get(self.project.url)
        assert_in_html(name1, res)
        res2 = self.app.get(project2.url)
        assert_in_html(name2, res2)

    @unittest.skip('as long as E-mails cannot be changed')
    def test_cannot_set_email_to_a_user_that_already_exists(self):
        reg_user = UserFactory()
        name, email = fake.name(), fake_email()
        new_user = self.project.add_unregistered_contributor(
            email=email,
            fullname=name,
            auth=Auth(self.referrer)
        )
        self.project.save()
        # Goes to claim url and successfully claims account
        claim_url = new_user.get_claim_url(self.project._primary_key)
        res = self.app.get(claim_url)
        self.project.reload()
        assert_in('Set Password', res)
        form = res.forms['setPasswordForm']
        # Fills out an email that is the username of another user
        form['username'] = reg_user.username
        form['password'] = 'killerqueen'
        form['password2'] = 'killerqueen'
        res = form.submit().maybe_follow(expect_errors=True)
        assert_in(
            language.ALREADY_REGISTERED.format(email=reg_user.username),
            res
        )

    def test_correct_display_name_is_shown_at_claim_page(self):
        original_name = fake.name()
        unreg = UnregUserFactory(fullname=original_name)
        different_name = fake.name()
        new_user = self.project.add_unregistered_contributor(
            email=unreg.username,
            fullname=different_name,
            auth=Auth(self.referrer),
        )
        self.project.save()
        claim_url = new_user.get_claim_url(self.project._primary_key)
        res = self.app.get(claim_url)
        # Correct name (different_name) should be on page
        assert_in_html(different_name, res)
class TestConfirmingEmail(OsfTestCase):
    """Email confirmation and the update_user API: users may not modify
    another user's emails, and used confirmation links error out."""

    def setUp(self):
        super(TestConfirmingEmail, self).setUp()
        self.user = UnconfirmedUserFactory()
        self.confirmation_url = self.user.get_confirmation_url(
            self.user.username,
            external=False,
        )
        self.confirmation_token = self.user.get_confirmation_token(
            self.user.username
        )

    def test_cannot_remove_another_user_email(self):
        user1 = AuthUserFactory()
        user2 = AuthUserFactory()
        url = api_url_for('update_user')
        header = {'id': user1.username, 'emails': [{'address': user1.username}]}
        res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_cannnot_make_primary_email_for_another_user(self):
        user1 = AuthUserFactory()
        user2 = AuthUserFactory()
        email = 'test@cos.io'
        user1.emails.create(address=email)
        user1.save()
        url = api_url_for('update_user')
        header = {'id': user1.username,
                  'emails': [{'address': user1.username, 'primary': False, 'confirmed': True},
                             {'address': email, 'primary': True, 'confirmed': True}
                             ]}
        res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_cannnot_add_email_for_another_user(self):
        user1 = AuthUserFactory()
        user2 = AuthUserFactory()
        email = 'test@cos.io'
        url = api_url_for('update_user')
        header = {'id': user1.username,
                  'emails': [{'address': user1.username, 'primary': True, 'confirmed': True},
                             {'address': email, 'primary': False, 'confirmed': False}
                             ]}
        res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_error_page_if_confirm_link_is_used(self):
        self.user.confirm_email(self.confirmation_token)
        self.user.save()
        # Re-using the already-consumed confirmation link must fail
        res = self.app.get(self.confirmation_url, expect_errors=True)
        assert_in(auth_exc.InvalidTokenError.message_short, res)
        assert_equal(res.status_code, http.BAD_REQUEST)
@pytest.mark.enable_implicit_clean
@pytest.mark.enable_bookmark_creation
class TestClaimingAsARegisteredUser(OsfTestCase):
    """A registered user claiming an unregistered contributor's spot by
    re-entering their password."""

    def setUp(self):
        super(TestClaimingAsARegisteredUser, self).setUp()
        self.referrer = AuthUserFactory()
        self.project = ProjectFactory(creator=self.referrer, is_public=True)
        name, email = fake.name(), fake_email()
        self.user = self.project.add_unregistered_contributor(
            fullname=name,
            email=email,
            auth=Auth(user=self.referrer)
        )
        self.project.save()

    def test_claim_user_registered_with_correct_password(self):
        reg_user = AuthUserFactory()  # NOTE: AuthUserFactory sets password as 'queenfan86'
        url = self.user.get_claim_url(self.project._primary_key)
        # Follow to password re-enter page
        res = self.app.get(url, auth=reg_user.auth).follow(auth=reg_user.auth)
        # verify that the "Claim Account" form is returned
        assert_in('Claim Contributor', res.body)
        form = res.forms['claimContributorForm']
        form['password'] = 'queenfan86'
        res = form.submit(auth=reg_user.auth)
        res = res.follow(auth=reg_user.auth)
        self.project.reload()
        self.user.reload()
        # user is now a contributor to the project
        assert_in(reg_user, self.project.contributors)
        # the unregistered user (self.user) is removed as a contributor, and their
        assert_not_in(self.user, self.project.contributors)
        # unclaimed record for the project has been deleted
        assert_not_in(self.project, self.user.unclaimed_records)
@pytest.mark.enable_implicit_clean
class TestExplorePublicActivity(OsfTestCase):
    """Tests for the public activity (explore) page.

    Fix: setUp mutates the module-level ``settings`` values for popular
    links but previously never restored them, leaking state into other
    tests; originals are now saved in setUp and restored in tearDown.
    """

    def setUp(self):
        super(TestExplorePublicActivity, self).setUp()
        self.project = ProjectFactory(is_public=True)
        self.registration = RegistrationFactory(project=self.project)
        self.private_project = ProjectFactory(title='Test private project')
        self.popular_project = ProjectFactory(is_public=True)
        self.popular_registration = RegistrationFactory(project=self.project, is_public=True)

        # Remember the originals so tearDown can undo the mutations below.
        self._original_settings = (
            settings.POPULAR_LINKS_NODE,
            settings.NEW_AND_NOTEWORTHY_LINKS_NODE,
            settings.POPULAR_LINKS_REGISTRATIONS,
        )

        # Add project to new and noteworthy projects
        self.new_and_noteworthy_links_node = ProjectFactory(is_public=True)
        self.new_and_noteworthy_links_node._id = settings.NEW_AND_NOTEWORTHY_LINKS_NODE
        self.new_and_noteworthy_links_node.add_pointer(self.project, auth=Auth(self.new_and_noteworthy_links_node.creator), save=True)

        # Set up popular projects and registrations
        self.popular_links_node = ProjectFactory(is_public=True)
        settings.POPULAR_LINKS_NODE = self.popular_links_node._id
        self.popular_links_node.add_pointer(self.popular_project, auth=Auth(self.popular_links_node.creator), save=True)

        self.popular_links_registrations = ProjectFactory(is_public=True)
        settings.POPULAR_LINKS_REGISTRATIONS = self.popular_links_registrations._id
        self.popular_links_registrations.add_pointer(self.popular_registration, auth=Auth(self.popular_links_registrations.creator), save=True)

    def tearDown(self):
        # Restore the globals mutated in setUp so they cannot leak.
        (settings.POPULAR_LINKS_NODE,
         settings.NEW_AND_NOTEWORTHY_LINKS_NODE,
         settings.POPULAR_LINKS_REGISTRATIONS) = self._original_settings
        super(TestExplorePublicActivity, self).tearDown()

    def test_explore_page_loads_when_settings_not_configured(self):
        """The activity page renders even when the settings point nowhere."""
        old_settings_values = settings.POPULAR_LINKS_NODE, settings.NEW_AND_NOTEWORTHY_LINKS_NODE, settings.POPULAR_LINKS_REGISTRATIONS
        settings.POPULAR_LINKS_NODE = 'notanode'
        settings.NEW_AND_NOTEWORTHY_LINKS_NODE = 'alsototallywrong'
        settings.POPULAR_LINKS_REGISTRATIONS = 'nopenope'
        try:
            url = self.project.web_url_for('activity')
            res = self.app.get(url)
            assert_equal(res.status_code, 200)
        finally:
            # Robustness fix: restore even when the request/assertion fails.
            settings.POPULAR_LINKS_NODE, settings.NEW_AND_NOTEWORTHY_LINKS_NODE, settings.POPULAR_LINKS_REGISTRATIONS = old_settings_values

    def test_new_and_noteworthy_and_popular_nodes_show_in_explore_activity(self):
        """Noteworthy and popular projects appear; private projects do not."""
        url = self.project.web_url_for('activity')
        res = self.app.get(url)
        assert_equal(res.status_code, 200)

        # New and Noteworthy
        assert_in(str(self.project.title), res)
        assert_in(str(self.project.created.date()), res)
        assert_in(str(self.registration.title), res)
        assert_in(str(self.registration.registered_date.date()), res)
        assert_not_in(str(self.private_project.title), res)

        # Popular Projects and Registrations
        assert_in(str(self.popular_project.title), res)
        assert_in(str(self.popular_project.created.date()), res)
        assert_in(str(self.popular_registration.title), res)
        assert_in(str(self.popular_registration.registered_date.date()), res)
class TestResendConfirmation(OsfTestCase):
    """End-to-end tests for the 'resend confirmation email' page.

    Fix: the throttling test submitted ``self.unconfirmed_user.email``
    while every other test in this class uses
    ``self.unconfirmed_user.unconfirmed_emails[0]``; it now uses the same
    attribute for consistency.
    """

    def setUp(self):
        super(TestResendConfirmation, self).setUp()
        self.unconfirmed_user = UnconfirmedUserFactory()
        self.confirmed_user = UserFactory()
        self.get_url = web_url_for('resend_confirmation_get')
        self.post_url = web_url_for('resend_confirmation_post')

    # test that resend confirmation page loads correctly
    def test_resend_confirmation_get(self):
        res = self.app.get(self.get_url)
        assert_equal(res.status_code, 200)
        assert_in('Resend Confirmation', res.body)
        assert_in('resendForm', res.forms)

    # test that unconfirmed user can receive resend confirmation email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_can_receive_resend_confirmation_email(self, mock_send_mail):
        # load resend confirmation page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['resendForm']
        form['email'] = self.unconfirmed_user.unconfirmed_emails[0]
        res = form.submit()

        # check email, request and response
        assert_true(mock_send_mail.called)
        assert_equal(res.status_code, 200)
        assert_equal(res.request.path, self.post_url)
        assert_in_html('If there is an OSF account', res)

    # test that confirmed user cannot receive resend confirmation email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_receive_resend_confirmation_email_1(self, mock_send_mail):
        # load resend confirmation page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['resendForm']
        form['email'] = self.confirmed_user.emails.first().address
        res = form.submit()

        # check email, request and response
        assert_false(mock_send_mail.called)
        assert_equal(res.status_code, 200)
        assert_equal(res.request.path, self.post_url)
        assert_in_html('has already been confirmed', res)

    # test that non-existing user cannot receive resend confirmation email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_receive_resend_confirmation_email_2(self, mock_send_mail):
        # load resend confirmation page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['resendForm']
        form['email'] = 'random@random.com'
        res = form.submit()

        # check email, request and response
        assert_false(mock_send_mail.called)
        assert_equal(res.status_code, 200)
        assert_equal(res.request.path, self.post_url)
        assert_in_html('If there is an OSF account', res)

    # test that user cannot submit resend confirmation request too quickly
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_resend_confirmation_twice_quickly(self, mock_send_mail):
        # load resend confirmation page and submit email twice in a row
        res = self.app.get(self.get_url)
        form = res.forms['resendForm']
        # Consistency fix: use the same address attribute as the other tests.
        form['email'] = self.unconfirmed_user.unconfirmed_emails[0]
        res = form.submit()
        res = form.submit()

        # second submission is throttled
        assert_equal(res.status_code, 200)
        assert_in_html('Please wait', res)
class TestForgotPassword(OsfTestCase):
    """End-to-end tests for the 'forgot password' page and reset-email flow."""

    def setUp(self):
        super(TestForgotPassword, self).setUp()
        self.user = UserFactory()
        self.auth_user = AuthUserFactory()
        self.get_url = web_url_for('forgot_password_get')
        self.post_url = web_url_for('forgot_password_post')
        # Clear any pre-existing reset token so the assertions below start
        # from a known-empty state.
        self.user.verification_key_v2 = {}
        self.user.save()

    # log users out before they land on forgot password page
    def test_forgot_password_logs_out_user(self):
        # visit forgot password link while another user is logged in
        res = self.app.get(self.get_url, auth=self.auth_user.auth)
        # check redirection to CAS logout
        assert_equal(res.status_code, 302)
        location = res.headers.get('Location')
        assert_not_in('reauth', location)
        assert_in('logout?service=', location)
        assert_in('forgotpassword', location)

    # test that forgot password page is loaded correctly
    def test_get_forgot_password(self):
        res = self.app.get(self.get_url)
        assert_equal(res.status_code, 200)
        assert_in('Forgot Password', res.body)
        assert_in('forgotPasswordForm', res.forms)

    # test that existing user can receive reset password email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_can_receive_reset_password_email(self, mock_send_mail):
        # load forgot password page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['forgotPasswordForm']
        form['forgot_password-email'] = self.user.username
        res = form.submit()

        # check mail was sent
        assert_true(mock_send_mail.called)
        # check http 200 response
        assert_equal(res.status_code, 200)
        # check request URL is /forgotpassword
        assert_equal(res.request.path, self.post_url)
        # check push notification
        assert_in_html('If there is an OSF account', res)
        assert_not_in_html('Please wait', res)

        # check verification_key_v2 is set
        self.user.reload()
        assert_not_equal(self.user.verification_key_v2, {})

    # test that non-existing user cannot receive reset password email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_receive_reset_password_email(self, mock_send_mail):
        # load forgot password page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['forgotPasswordForm']
        form['forgot_password-email'] = 'fake' + self.user.username
        res = form.submit()

        # check mail was not sent
        assert_false(mock_send_mail.called)
        # check http 200 response
        assert_equal(res.status_code, 200)
        # check request URL is /forgotpassword
        assert_equal(res.request.path, self.post_url)
        # check push notification: same message as the success case, so an
        # attacker cannot probe which addresses have accounts
        assert_in_html('If there is an OSF account', res)
        assert_not_in_html('Please wait', res)

        # check verification_key_v2 is not set
        self.user.reload()
        assert_equal(self.user.verification_key_v2, {})

    # test that a deactivated user cannot receive reset password email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_not_active_user_no_reset_password_email(self, mock_send_mail):
        self.user.disable_account()
        self.user.save()

        # load forgot password page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['forgotPasswordForm']
        form['forgot_password-email'] = self.user.username
        res = form.submit()

        # check mail was not sent
        assert_false(mock_send_mail.called)
        # check http 200 response
        assert_equal(res.status_code, 200)
        # check request URL is /forgotpassword
        assert_equal(res.request.path, self.post_url)
        # check push notification
        assert_in_html('If there is an OSF account', res)
        assert_not_in_html('Please wait', res)

        # check verification_key_v2 is not set
        self.user.reload()
        assert_equal(self.user.verification_key_v2, {})

    # test that user cannot submit forgot password request too quickly
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_reset_password_twice_quickly(self, mock_send_mail):
        # load forgot password page and submit email twice in a row
        res = self.app.get(self.get_url)
        form = res.forms['forgotPasswordForm']
        form['forgot_password-email'] = self.user.username
        res = form.submit()
        res = form.submit()

        # check http 200 response
        assert_equal(res.status_code, 200)
        # check push notification: second submission is throttled
        assert_in_html('Please wait', res)
        assert_not_in_html('If there is an OSF account', res)
@unittest.skip('Public projects/components are dynamically loaded now.')
class TestAUserProfile(OsfTestCase):
    """Profile-page rendering of a user's public projects and components.

    NOTE(review): the whole class is skipped because the profile page now
    loads projects dynamically; kept for reference.
    """

    def setUp(self):
        OsfTestCase.setUp(self)

        self.user = AuthUserFactory()
        self.me = AuthUserFactory()
        self.project = ProjectFactory(creator=self.me, is_public=True, title=fake.bs())
        self.component = NodeFactory(creator=self.me, parent=self.project, is_public=True, title=fake.bs())

    # regression test for https://github.com/CenterForOpenScience/osf.io/issues/2623
    def test_has_public_projects_and_components(self):
        # I go to my own profile
        url = web_url_for('profile_view_id', uid=self.me._primary_key)
        # I see the title of both my project and component
        res = self.app.get(url, auth=self.me.auth)
        assert_in_html(self.component.title, res)
        assert_in_html(self.project.title, res)

        # Another user can also see my public project and component
        url = web_url_for('profile_view_id', uid=self.me._primary_key)
        # I see the title of both my project and component
        res = self.app.get(url, auth=self.user.auth)
        assert_in_html(self.component.title, res)
        assert_in_html(self.project.title, res)

    def test_shows_projects_with_many_contributors(self):
        # My project has many contributors
        for _ in range(5):
            user = UserFactory()
            self.project.add_contributor(user, auth=Auth(self.project.creator), save=True)

        # I go to my own profile
        url = web_url_for('profile_view_id', uid=self.me._primary_key)
        res = self.app.get(url, auth=self.me.auth)
        # I see '3 more' as a link
        assert_in('3 more', res)

        res = res.click('3 more')
        assert_equal(res.request.path, self.project.url)

    def test_has_no_public_projects_or_components_on_own_profile(self):
        # User goes to their profile
        url = web_url_for('profile_view_id', uid=self.user._id)
        res = self.app.get(url, auth=self.user.auth)

        # user has no public components/projects
        assert_in('You have no public projects', res)
        assert_in('You have no public components', res)

    def test_user_no_public_projects_or_components(self):
        # I go to other user's profile
        url = web_url_for('profile_view_id', uid=self.user._id)
        # User has no public components/projects
        res = self.app.get(url, auth=self.me.auth)
        assert_in('This user has no public projects', res)
        assert_in('This user has no public components', res)

    # regression test
    def test_does_not_show_registrations(self):
        project = ProjectFactory(creator=self.user)
        component = NodeFactory(parent=project, creator=self.user, is_public=False)
        # User has a registration with public components
        reg = RegistrationFactory(project=component.parent_node, creator=self.user, is_public=True)
        for each in reg.nodes:
            each.is_public = True
            each.save()
        # I go to other user's profile
        url = web_url_for('profile_view_id', uid=self.user._id)
        # Registration does not appear on profile
        res = self.app.get(url, auth=self.me.auth)
        assert_in('This user has no public components', res)
        assert_not_in(reg.title, res)
        assert_not_in(reg.nodes[0].title, res)
@pytest.mark.enable_bookmark_creation
class TestPreprintBannerView(OsfTestCase):
    """When does the 'preprint made private' banner show on a project page?

    The banner should appear only when a *published* preprint's supplemental
    project has been made private.
    """

    # Fragment of the banner message checked by every assertion below.
    BANNER_FRAGMENT = 'has a preprint, but has been made Private. Make your preprint discoverable by making this'

    def setUp(self):
        super(TestPreprintBannerView, self).setUp()

        self.admin = AuthUserFactory()
        self.provider_one = PreprintProviderFactory()
        self.provider_two = PreprintProviderFactory()
        self.project_one = ProjectFactory(creator=self.admin, is_public=True)
        self.project_two = ProjectFactory(creator=self.admin, is_public=True)
        self.project_three = ProjectFactory(creator=self.admin, is_public=True)
        self.subject_one = SubjectFactory()
        self.subject_two = SubjectFactory()
        self.file_one = test_utils.create_test_file(self.project_one, self.admin, 'mgla.pdf')
        self.file_two = test_utils.create_test_file(self.project_two, self.admin, 'saor.pdf')
        self.published_preprint = PreprintFactory(
            creator=self.admin,
            filename='mgla.pdf',
            provider=self.provider_one,
            subjects=[[self.subject_one._id]],
            project=self.project_one,
            is_published=True,
        )
        self.unpublished_preprint = PreprintFactory(
            creator=self.admin,
            filename='saor.pdf',
            provider=self.provider_two,
            subjects=[[self.subject_two._id]],
            project=self.project_two,
            is_published=False,
        )

    def _view_project(self, project):
        # GET the project overview page as the admin user.
        return self.app.get(project.web_url_for('view_project'), auth=self.admin.auth)

    def _make_private(self, project):
        # Flip a project to private and persist the change.
        project.is_public = False
        project.save()

    def test_public_project_published_preprint(self):
        res = self._view_project(self.project_one)
        assert_not_in(self.BANNER_FRAGMENT, res.body)

    def test_private_project_published_preprint(self):
        self._make_private(self.project_one)
        res = self._view_project(self.project_one)
        assert_in(self.BANNER_FRAGMENT, res.body)

    def test_public_project_unpublished_preprint(self):
        res = self._view_project(self.project_two)
        assert_not_in(self.BANNER_FRAGMENT, res.body)

    def test_private_project_unpublished_preprint(self):
        # No banner for preprints that were never published.
        self._make_private(self.project_two)
        res = self._view_project(self.project_two)
        assert_not_in(self.BANNER_FRAGMENT, res.body)

    def test_public_project_no_preprint(self):
        res = self._view_project(self.project_three)
        assert_not_in(self.BANNER_FRAGMENT, res.body)

    def test_private_project_no_preprint(self):
        self._make_private(self.project_three)
        res = self._view_project(self.project_three)
        assert_not_in(self.BANNER_FRAGMENT, res.body)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
|
michalskrivanek/cockpit
|
refs/heads/master
|
test/common/testlib.py
|
2
|
# -*- coding: utf-8 -*-
# This file is part of Cockpit.
#
# Copyright (C) 2013 Red Hat, Inc.
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.
"""
Tools for writing Cockpit test cases.
"""
from time import sleep
import argparse
import errno
import subprocess
import os
import select
import shutil
import socket
import sys
import traceback
import random
import re
import json
import tempfile
import time
import unittest
import tap
import testvm
TEST_DIR = os.path.normpath(os.path.dirname(os.path.realpath(os.path.join(__file__, ".."))))
BOTS_DIR = os.path.normpath(os.path.join(TEST_DIR, "..", "bots"))
os.environ["PATH"] = "{0}:{1}:{2}".format(os.environ.get("PATH"), BOTS_DIR, TEST_DIR)
# Public API re-exported by "from testlib import *" in test scripts.
__all__ = (
    # Test definitions
    'test_main',
    'arg_parser',
    'Browser',
    'MachineCase',
    'skipImage',
    'Error',
    'sit',
    'wait',
    'opts',
    'TEST_DIR',
)

# Command line options
#
# Defaults here are overwritten from parsed command-line arguments
# elsewhere; the rest of this module reads them as globals.
opts = argparse.Namespace()
opts.sit = False           # pause on failure so machines can be inspected (see sitter() in MachineCase.setUp)
opts.trace = False         # verbose progress output
opts.attachments = None    # directory that attach() moves artifacts into
opts.revision = None
opts.address = None        # run against a fixed machine address instead of provisioning VMs
opts.jobs = 1
opts.fetch = True          # pull missing VM images in new_machine()
def attach(filename):
    """Move *filename* into the configured attachments directory.

    Does nothing when no attachments directory is configured, when the
    source file does not exist, or when an attachment with the same name
    is already present (existing attachments are never overwritten).
    """
    target_dir = opts.attachments
    if not target_dir:
        return
    destination = os.path.join(target_dir, os.path.basename(filename))
    if not os.path.exists(filename):
        return
    if os.path.exists(destination):
        return
    shutil.move(filename, destination)
class Browser:
    """High-level driver for a PhantomJS browser talking to a Cockpit host.

    DOM queries and manipulation go through ph_* JavaScript helper
    functions which the Phantom harness injects into the page.
    """

    def __init__(self, address, label, port=None):
        # "host:port" addresses carry an explicit port; plain addresses
        # default to Cockpit's standard port 9090.  An explicit port
        # argument overrides either.
        if ":" in address:
            (self.address, unused, self.port) = address.rpartition(":")
        else:
            self.address = address
            self.port = 9090
        if port is not None:
            self.port = port
        self.default_user = "admin"
        self.label = label
        self.phantom = Phantom("en_US.utf8")
        self.password = "foobar"

    def title(self):
        # Title of the currently loaded document.
        return self.phantom.eval('document.title')

    def open(self, href, cookie=None):
        """
        Load a page into the browser.

        Arguments:
          href: Either the path of a Cockpit page such as "/dashboard"
            (expanded to a full URL on this browser's address and port),
            or a full URL.
          cookie: Optional cookie to install before opening the page.

        Raises:
          Error: When a timeout occurs waiting for the page to load.
        """
        if href.startswith("/"):
            href = "http://%s:%s%s" % (self.address, self.port, href)

        def tryopen(hard=False):
            try:
                self.phantom.kill()
                if cookie is not None:
                    self.phantom.cookies(cookie)
                self.phantom.open(href)
                return True
            except:
                if hard:
                    raise
                return False

        tries = 0
        # Keep restarting a flaky browser; re-raise after 20 failed attempts.
        while not tryopen(tries >= 20):
            print "Restarting browser..."
            sleep(0.1)
            tries = tries + 1

    def reload(self):
        # Wait until every embedded frame reports data-loaded, then reload.
        self.switch_to_top()
        self.wait_js_cond("ph_select('iframe.container-frame').every(function (e) { return e.getAttribute('data-loaded'); })")
        self.phantom.reload()

    def expect_load(self):
        # Wait for the next page load to complete.
        self.phantom.expect_load()

    def switch_to_frame(self, name):
        self.phantom.switch_frame(name)

    def switch_to_top(self):
        self.phantom.switch_top()

    def upload_file(self, selector, file):
        self.phantom.upload_file(selector, file)

    def eval_js(self, code):
        # Evaluate JavaScript in the page and return its result.
        return self.phantom.eval(code)

    def call_js_func(self, func, *args):
        # Call a JavaScript function with JSON-quoted arguments.
        return self.phantom.eval("%s(%s)" % (func, ','.join(map(jsquote, args))))

    def cookie(self, name):
        # Return the value of the named cookie, or None when not set.
        cookies = self.phantom.cookies()
        for c in cookies:
            if c['name'] == name:
                return c['value']
        return None

    def go(self, hash, host="localhost"):
        # if not hash.startswith("/@"):
        #    hash = "/@" + host + hash
        self.call_js_func('ph_go', hash)

    def click(self, selector, force=False):
        self.call_js_func('ph_click', selector, force)

    def val(self, selector):
        return self.call_js_func('ph_val', selector)

    def set_val(self, selector, val):
        self.call_js_func('ph_set_val', selector, val)

    def text(self, selector):
        return self.call_js_func('ph_text', selector)

    def attr(self, selector, attr):
        return self.call_js_func('ph_attr', selector, attr)

    def set_attr(self, selector, attr, val):
        # The JavaScript helper expects the strings 'true'/'false'.
        self.call_js_func('ph_set_attr', selector, attr, val and 'true' or 'false')

    def set_checked(self, selector, val):
        self.call_js_func('ph_set_checked', selector, val)

    def focus(self, selector):
        self.call_js_func('ph_focus', selector)

    def key_press(self, keys):
        return self.phantom.keys('keypress', keys)

    def wait_timeout(self, timeout):
        """Temporarily raise the wait timeout; use as a context manager."""
        browser = self

        class WaitParamsRestorer():
            def __init__(self, timeout):
                self.timeout = timeout

            def __enter__(self):
                pass

            def __exit__(self, type, value, traceback):
                # Restore the original timeout on exit.
                browser.phantom.timeout = self.timeout

        r = WaitParamsRestorer(self.phantom.timeout)
        self.phantom.timeout = max(timeout, self.phantom.timeout)
        return r

    def wait(self, predicate):
        # Poll the predicate until it returns a truthy value, guarded by
        # the phantom timeout; returns that value.
        self.arm_timeout()
        while True:
            val = predicate()
            if val:
                self.disarm_timeout()
                return val
            self.wait_checkpoint()

    def inject_js(self, code):
        self.phantom.do(code);

    def wait_js_cond(self, cond):
        # Wait until the JavaScript condition string evaluates truthy.
        return self.phantom.wait(cond)

    def wait_js_func(self, func, *args):
        return self.phantom.wait("%s(%s)" % (func, ','.join(map(jsquote, args))))

    def is_present(self, selector):
        return self.call_js_func('ph_is_present', selector)

    def wait_present(self, selector):
        return self.wait_js_func('ph_is_present', selector)

    def wait_not_present(self, selector):
        return self.wait_js_func('!ph_is_present', selector)

    def is_visible(self, selector):
        return self.call_js_func('ph_is_visible', selector)

    def wait_visible(self, selector):
        return self.wait_js_func('ph_is_visible', selector)

    def wait_val(self, selector, val):
        return self.wait_js_func('ph_has_val', selector, val)

    def wait_not_val(self, selector, val):
        return self.wait_js_func('!ph_has_val', selector, val)

    def wait_attr(self, selector, attr, val):
        return self.wait_js_func('ph_has_attr', selector, attr, val)

    def wait_not_attr(self, selector, attr, val):
        return self.wait_js_func('!ph_has_attr', selector, attr, val)

    def wait_not_visible(self, selector):
        return self.wait_js_func('!ph_is_visible', selector)

    def wait_in_text(self, selector, text):
        return self.wait_js_func('ph_in_text', selector, text)

    def wait_not_in_text(self, selector, text):
        return self.wait_js_func('!ph_in_text', selector, text)

    def wait_text(self, selector, text):
        return self.wait_js_func('ph_text_is', selector, text)

    def wait_text_not(self, selector, text):
        return self.wait_js_func('!ph_text_is', selector, text)

    def wait_popup(self, id):
        """Wait for a popup to open.

        Arguments:
            id: The 'id' attribute of the popup.
        """
        self.wait_visible('#' + id);

    def wait_popdown(self, id):
        """Wait for a popup to close.

        Arguments:
            id: The 'id' attribute of the popup.
        """
        self.wait_not_visible('#' + id)

    def arm_timeout(self):
        # Phantom takes its timeout in milliseconds.
        return self.phantom.arm_timeout(self.phantom.timeout * 1000)

    def disarm_timeout(self):
        return self.phantom.disarm_timeout()

    def wait_checkpoint(self):
        return self.phantom.wait_checkpoint()

    def dialog_complete(self, sel, button=".btn-primary", result="hide"):
        """Click a dialog button and verify how the dialog completes.

        result "hide" expects the dialog to close successfully; "fail"
        expects it to stay open and show an error.
        """
        self.click(sel + " " + button)
        self.wait_not_present(sel + " .dialog-wait-ct")

        dialog_visible = self.call_js_func('ph_is_visible', sel)
        if result == "hide":
            if dialog_visible:
                raise AssertionError(sel + " dialog did not complete and close")
        elif result == "fail":
            if not dialog_visible:
                raise AssertionError(sel + " dialog is closed no failures present")
            dialog_error = self.call_js_func('ph_is_present', sel + " .dialog-error")
            if not dialog_error:
                raise AssertionError(sel + " dialog has no errors")
        else:
            raise Error("invalid dialog result argument: " + result)

    def dialog_cancel(self, sel, button=".btn[data-dismiss='modal']"):
        self.click(sel + " " + button)
        self.wait_not_visible(sel)

    def enter_page(self, path, host=None, reconnect=True):
        """Wait for a page to become current and switch into its frame.

        Arguments:
            path: The identifier of the page. This is a string starting with "/"
            host: Host whose frame to enter; defaults to "localhost".
            reconnect: On timeout, retry once via the #machine-reconnect
                button before giving up.
        """
        assert path.startswith("/")
        if host:
            frame = host + path
        else:
            frame = "localhost" + path
        frame = "cockpit1:" + frame

        self.switch_to_top()
        while True:
            try:
                self.wait_present("iframe.container-frame[name='%s'][data-loaded]" % frame)
                self.wait_not_visible(".curtains-ct")
                self.wait_visible("iframe.container-frame[name='%s']" % frame)
                break
            except Error, ex:
                if reconnect and ex.msg.startswith('timeout'):
                    reconnect = False
                    if self.is_present("#machine-reconnect"):
                        self.click("#machine-reconnect", True)
                        self.wait_not_visible(".curtains-ct")
                        continue
                # Re-raise with the original traceback (Python 2 syntax).
                exc_info = sys.exc_info()
                raise exc_info[0], exc_info[1], exc_info[2]

        self.switch_to_frame(frame)
        self.wait_present("body")
        self.wait_visible("body")

    def leave_page(self):
        self.switch_to_top()

    def wait_action_btn(self, sel, entry):
        self.wait_text(sel + ' button:first-child', entry);

    def click_action_btn(self, sel, entry=None):
        # We don't need to open the menu, it's enough to simulate a
        # click on the invisible button.
        if entry:
            self.click(sel + ' a:contains("%s")' % entry, True);
        else:
            self.click(sel + ' button:first-child');

    def login_and_go(self, path=None, user=None, host=None, authorized=True):
        """Fill in the login form, submit it, and enter the given page."""
        if user is None:
            user = self.default_user
        href = path
        if not href:
            href = "/"
        if host:
            href = "/@" + host + href
        self.open(href)
        self.wait_visible("#login")
        self.set_val('#login-user-input', user)
        self.set_val('#login-password-input', self.password)
        self.set_checked('#authorized-input', authorized)

        self.click('#login-button')
        self.expect_load()
        self.wait_present('#content')
        self.wait_visible('#content')
        if path:
            self.enter_page(path.split("#")[0], host=host)

    def logout(self):
        self.switch_to_top()
        self.wait_present("#navbar-dropdown")
        self.wait_visible("#navbar-dropdown")
        self.click("#navbar-dropdown")
        self.click('#go-logout')
        self.expect_load()

    def relogin(self, path=None, user=None, authorized=None):
        # Log out and back in, optionally re-entering the given page.
        if user is None:
            user = self.default_user
        self.logout()
        self.wait_visible("#login")
        self.set_val("#login-user-input", user)
        self.set_val("#login-password-input", self.password)
        if authorized is not None:
            self.set_checked('#authorized-input', authorized)
        self.click('#login-button')
        self.expect_load()
        self.wait_present('#content')
        self.wait_visible('#content')
        if path:
            if path.startswith("/@"):
                host = path[2:].split("/")[0]
            else:
                host = None
            self.enter_page(path.split("#")[0], host=host)

    def snapshot(self, title, label=None):
        """Take a snapshot of the current screen and save it as a PNG and HTML.

        Arguments:
            title: Used for the filename.
        """
        if self.phantom and self.phantom.valid:
            filename = "{0}-{1}.png".format(label or self.label, title)
            self.phantom.show(filename)
            attach(filename)
            filename = "{0}-{1}.html".format(label or self.label, title)
            self.phantom.dump(filename)
            attach(filename)

    def copy_js_log(self, title, label=None):
        """Copy the current javascript log"""
        if self.phantom and self.phantom.valid:
            filename = "{0}-{1}.js.log".format(label or self.label, title)
            self.phantom.dump_log(filename)
            attach(filename)

    def kill(self):
        self.phantom.kill()
class MachineCase(unittest.TestCase):
machine = None
machines = { }
machine_class = None
browser = None
network = None
# provision is a dictionary of dictionaries, one for each additional machine to be created, e.g.:
# provision = { 'openshift' : { 'image': 'openshift', 'memory_mb': 1024 } }
# These will be instantiated during setUp, and replaced with machine objects
provision = None
    def label(self):
        # Derive a short label for this test from its unittest id, e.g.
        # "check_foo.TestFoo.testBar" -> "TestFoo-testBar".
        (unused, sep, label) = self.id().partition(".")
        return label.replace(".", "-")
    def new_machine(self, image=testvm.DEFAULT_IMAGE, forward={ }, **kwargs):
        """Create (but do not boot) a test machine and register its cleanup.

        With --address, connect to that fixed machine instead of
        provisioning a VM; otherwise create a VirtMachine (or
        self.machine_class) on this test's virtual network.
        """
        # NOTE(review): testvm is already imported at module level; this
        # local re-import is redundant but harmless.
        # NOTE(review): 'forward={ }' is a mutable default argument; it is
        # only read here, but consider 'forward=None' for safety.
        import testvm
        machine_class = self.machine_class
        if opts.address:
            # A fixed address supports only a single plain machine.
            if machine_class or forward:
                raise unittest.SkipTest("Cannot run this test when specific machine address is specified")
            machine = testvm.Machine(address=opts.address, image=image, verbose=opts.trace)
            self.addCleanup(lambda: machine.disconnect())
        else:
            if not machine_class:
                machine_class = testvm.VirtMachine
            # Lazily create one shared virtual network per test case.
            if not self.network:
                network = testvm.VirtNetwork()
                self.addCleanup(lambda: network.kill())
                self.network = network
            networking = self.network.host(restrict=True, forward=forward)
            machine = machine_class(verbose=opts.trace, networking=networking, image=image, **kwargs)
            # Pull the VM image on demand unless fetching is disabled.
            if opts.fetch and not os.path.exists(machine.image_file):
                machine.pull(machine.image_file)
            self.addCleanup(lambda: machine.kill())
        return machine
    def new_browser(self, machine=None):
        # Create a Browser pointed at the given machine (default: the
        # primary machine) and arrange for it to be killed during cleanup.
        if machine is None:
            machine = self.machine
        label = self.label() + "-" + machine.label
        browser = Browser(machine.web_address, label=label, port=machine.web_port)
        self.addCleanup(lambda: browser.kill())
        return browser
    def checkSuccess(self):
        # True when this test has, so far, recorded no errors, failures,
        # unexpected successes, or skips; also False when no result is
        # currently being tracked.
        if not self.currentResult:
            return False
        for error in self.currentResult.errors:
            if self == error[0]:
                return False
        for failure in self.currentResult.failures:
            if self == failure[0]:
                return False
        for success in self.currentResult.unexpectedSuccesses:
            if self == success:
                return False
        for skipped in self.currentResult.skipped:
            if self == skipped[0]:
                return False
        return True
    def run(self, result=None):
        """Run the test, retrying on RetryError and tracking the result.

        The result object is stashed in self.currentResult so that
        checkSuccess() and the cleanup hooks can inspect it mid-run.
        """
        # NOTE(review): RetryError is defined elsewhere in this module.
        orig_result = result

        # We need a result to intercept, so create one here
        if result is None:
            result = self.defaultTestResult()
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()

        self.currentResult = result

        # Here's the loop to actually retry running the test. It's an awkward
        # place for this loop, since it only applies to MachineCase based
        # TestCases. However for the time being there is no better place for it.
        #
        # Policy actually dictates retries. The number here is an upper bound to
        # prevent endless retries if Policy.check_retry is buggy.
        max_retry_hard_limit = 10
        for retry in range(0, max_retry_hard_limit):
            try:
                super(MachineCase, self).run(result)
            except RetryError, ex:
                assert retry < max_retry_hard_limit
                sys.stderr.write("{0}\n".format(ex))
                # Back off progressively longer with each retry.
                sleep(retry * 10)
            else:
                break

        self.currentResult = None

        # Standard book keeping that we have to do
        if orig_result is None:
            stopTestRun = getattr(result, 'stopTestRun', None)
            if stopTestRun is not None:
                stopTestRun()
    def setUp(self):
        """Provision and boot the machines for this test, plus one browser.

        self.provision maps machine keys to option dictionaries; when unset,
        a single default machine ('machine1') is created.  The first machine
        becomes self.machine and gets the browser.
        """
        if opts.address and self.provision is not None:
            raise unittest.SkipTest("Cannot provision multiple machines if a specific machine address is specified")

        self.machine = None
        self.browser = None
        self.machines = { }
        provision = self.provision or { 'machine1': { } }

        # First create all machines, wait for them later
        for key in sorted(provision.keys()):
            options = provision[key].copy()
            # address/dns/dhcp are applied after boot, not at construction.
            if 'address' in options:
                del options['address']
            if 'dns' in options:
                del options['dns']
            if 'dhcp' in options:
                del options['dhcp']
            machine = self.new_machine(**options)
            self.machines[key] = machine
            if not self.machine:
                self.machine = machine
            if opts.trace:
                print "Starting {0} {1}".format(key, machine.label)
            machine.start()

        def sitter():
            # With --sit, pause on failure so the machines can be inspected.
            if opts.sit and not self.checkSuccess():
                self.currentResult.printErrors()
                sit(self.machines)
        self.addCleanup(sitter)

        # Now wait for the other machines to be up
        for key in self.machines.keys():
            machine = self.machines[key]
            machine.wait_boot()
            address = provision[key].get("address")
            if address is not None:
                machine.set_address(address)
            dns = provision[key].get("dns")
            if address or dns:
                machine.set_dns(dns)
            dhcp = provision[key].get("dhcp", False)
            if dhcp:
                machine.dhcp_server()

        if self.machine:
            self.browser = self.new_browser()
        # tearDown removes this unconditionally.
        self.tmpdir = tempfile.mkdtemp()

        def intercept():
            # Capture diagnostics (screenshot, logs, journal, cores)
            # before cleanup when the test failed.
            if not self.checkSuccess():
                self.snapshot("FAIL")
                self.copy_js_log("FAIL")
                self.copy_journal("FAIL")
                self.copy_cores("FAIL")
        self.addCleanup(intercept)
def tearDown(self):
    """Scan the journal for unexpected messages and remove the scratch dir.

    The journal is only checked when the test passed and the machine is
    still reachable; a failed test already captured its logs via cleanups.
    """
    if self.checkSuccess() and self.machine.ssh_reachable:
        self.check_journal_messages()
    shutil.rmtree(self.tmpdir)
def login_and_go(self, path=None, user=None, host=None, authorized=True):
self.machine.start_cockpit(host)
self.browser.login_and_go(path, user=user, host=host, authorized=authorized)
# Regular expressions for journal lines that are expected or harmless.
# check_journal_messages() only accepts a journal line when one of these
# patterns matches the *entire* message (re.match anchored by a full-match
# comparison), so patterns must cover the whole line.
allowed_messages = [
    # This is a failed login, which happens every time
    "Returning error-response 401 with reason `Sorry'",

    # Reauth stuff
    '.*Reauthorizing unix-user:.*',
    '.*user .* was reauthorized.*',

    # Happens when the user logs out during reauthorization
    "Error executing command as another user: Not authorized",
    "This incident has been reported.",

    # Reboots are ok
    "-- Reboot --",

    # Sometimes D-Bus goes away before us during shutdown
    "Lost the name com.redhat.Cockpit on the session message bus",
    "GLib-GIO:ERROR:gdbusobjectmanagerserver\\.c:.*:g_dbus_object_manager_server_emit_interfaces_.*: assertion failed \\(error == NULL\\): The connection is closed \\(g-io-error-quark, 18\\)",
    "Error sending message: The connection is closed",

    # Will go away with glib 2.43.2
    ".*: couldn't write web output: Error sending data: Connection reset by peer",

    # pam_lastlog outdated complaints
    ".*/var/log/lastlog: No such file or directory",

    # ssh messages may be dropped when closing
    '10.*: dropping message while waiting for child to exit',

    # SELinux messages to ignore
    "(audit: )?type=1403 audit.*",
    "(audit: )?type=1404 audit.*",

    # https://bugzilla.redhat.com/show_bug.cgi?id=1298157
    "(audit: )?type=1400 .*granted.*comm=\"tuned\".*",

    # https://bugzilla.redhat.com/show_bug.cgi?id=1298171
    "(audit: )?type=1400 .*denied.*comm=\"iptables\".*name=\"xtables.lock\".*",

    # https://bugzilla.redhat.com/show_bug.cgi?id=1386624
    ".*type=1400 .*denied { name_bind } for.*dhclient.*",

    # https://bugzilla.redhat.com/show_bug.cgi?id=1419263
    ".*type=1400 .*denied { write } for.*firewalld.*__pycache__.*",

    # https://bugzilla.redhat.com/show_bug.cgi?id=1242656
    "(audit: )?type=1400 .*denied.*comm=\"cockpit-ws\".*name=\"unix\".*dev=\"proc\".*",
    "(audit: )?type=1400 .*denied.*comm=\"ssh-transport-c\".*name=\"unix\".*dev=\"proc\".*",
    "(audit: )?type=1400 .*denied.*comm=\"cockpit-ssh\".*name=\"unix\".*dev=\"proc\".*",

    # https://bugzilla.redhat.com/show_bug.cgi?id=1374820
    "(audit: )?type=1400 .*denied.*comm=\"systemd\" path=\"/run/systemd/inaccessible/blk\".*",

    # SELinux fighting with systemd: https://bugzilla.redhat.com/show_bug.cgi?id=1253319
    "(audit: )?type=1400 audit.*systemd-journal.*path=2F6D656D66643A73642D73797374656D642D636F726564756D202864656C6574656429",

    # SELinux and plymouth: https://bugzilla.redhat.com/show_bug.cgi?id=1427884
    "(audit: )?type=1400 audit.*connectto.*plymouth.*unix_stream_socket.*",

    # SELinux and nfs-utils fighting: https://bugzilla.redhat.com/show_bug.cgi?id=1447854
    ".*type=1400 .*denied { execute } for.*sm-notify.*init_t.*",

    # SELinux prevents agetty from being executed by systemd: https://bugzilla.redhat.com/show_bug.cgi?id=1449569
    ".*type=1400 .*denied { execute } for.*agetty.*init_t.*",

    # apparmor loading
    "(audit: )?type=1400.*apparmor=\"STATUS\".*",

    # apparmor noise
    "(audit: )?type=1400.*apparmor=\"ALLOWED\".*",

    # Messages from systemd libraries when they are in debug mode
    'Successfully loaded SELinux database in.*',
    'calling: info',
    'Sent message type=method_call sender=.*',
    'Got message type=method_return sender=.*',

    # HACK: https://github.com/systemd/systemd/pull/1758
    'Error was encountered while opening journal files:.*',
    'Failed to get data: Cannot assign requested address',

    # HACK https://bugzilla.redhat.com/show_bug.cgi?id=1461893
    # selinux errors while logging in via ssh
    'type=1401 audit(.*): op=security_compute_av reason=bounds .* tclass=process perms=transition.*',

    # Various operating systems see this from time to time
    "Journal file.*truncated, ignoring file.",
]
def allow_journal_messages(self, *patterns):
    """Don't fail if the journal contains an entry matching the given regexps.

    Each pattern must match an entire journal line; see allowed_messages.
    """
    for p in patterns:
        self.allowed_messages.append(p)
def allow_hostkey_messages(self):
    """Ignore journal noise caused by connecting to hosts with unknown SSH keys."""
    self.allow_journal_messages('.*: .* host key for server is not known: .*',
                                '.*: refusing to connect to unknown host: .*',
                                '.*: failed to retrieve resource: hostkey-unknown')
def allow_restart_journal_messages(self):
    """Ignore journal noise expected when cockpit or the machine restarts mid-test."""
    self.allow_journal_messages(".*Connection reset by peer.*",
                                ".*Broken pipe.*",
                                "g_dbus_connection_real_closed: Remote peer vanished with error: Underlying GIOStream returned 0 bytes on an async read \\(g-io-error-quark, 0\\). Exiting.",
                                "connection unexpectedly closed by peer",
                                "peer did not close io when expected",
                                # HACK: https://bugzilla.redhat.com/show_bug.cgi?id=1141137
                                "localhost: bridge program failed: Child process killed by signal 9",
                                "request timed out, closing",
                                "PolicyKit daemon disconnected from the bus.",
                                ".*couldn't create polkit session subject: No session for pid.*",
                                "We are no longer a registered authentication agent.",
                                ".*: failed to retrieve resource: terminated",
                                # HACK: https://bugzilla.redhat.com/show_bug.cgi?id=1253319
                                'audit:.*denied.*2F6D656D66643A73642D73797374656D642D636F726564756D202864656C.*',
                                'audit:.*denied.*comm="systemd-user-se".*nologin.*',
                                'localhost: dropping message while waiting for child to exit',
                                '.*: GDBus.Error:org.freedesktop.PolicyKit1.Error.Failed: .*',
                                '.*g_dbus_connection_call_finish_internal.*G_IS_DBUS_CONNECTION.*',
                                )
def allow_authorize_journal_messages(self):
    """Ignore journal noise from failed sudo/polkit reauthorization attempts."""
    self.allow_journal_messages("cannot reauthorize identity.*:.*unix-user:admin.*",
                                ".*: pam_authenticate failed: Authentication failure",
                                ".*is not in the sudoers file.  This incident will be reported.",
                                ".*: a password is required",
                                "user user was reauthorized",
                                "sudo: unable to resolve host .*",
                                ".*: sorry, you must have a tty to run sudo",
                                ".*/pkexec: bridge exited",
                                "We trust you have received the usual lecture from the local System",
                                "Administrator. It usually boils down to these three things:",
                                "#1\) Respect the privacy of others.",
                                "#2\) Think before you type.",
                                "#3\) With great power comes great responsibility.",
                                ".*Sorry, try again.",
                                ".*incorrect password attempt.*")
def check_journal_messages(self, machine=None):
    """Check for unexpected journal entries.

    Scans the cockpit-ws/cockpit-bridge journal plus SELinux audit records
    and raises Error on the first message not covered by allowed_messages.
    On mismatch, the journal/logs/cores are captured before raising.
    """
    machine = machine or self.machine
    syslog_ids = [ "cockpit-ws", "cockpit-bridge" ]
    messages = machine.journal_messages(syslog_ids, 5)
    messages += machine.audit_messages("14") # 14xx is selinux
    all_found = True
    first = None
    for m in messages:
        # remove leading/trailing whitespace
        m = m.strip()
        found = False
        for p in self.allowed_messages:
            # A pattern only clears the message if it matches it entirely.
            match = re.match(p, m)
            if match and match.group(0) == m:
                found = True
                break
        if not found:
            print "Unexpected journal message '%s'" % m
            all_found = False
            if not first:
                first = m
    if not all_found:
        self.copy_js_log("FAIL")
        self.copy_journal("FAIL")
        self.copy_cores("FAIL")
        raise Error(first)
def snapshot(self, title, label=None):
    """Take a snapshot of the current screen and save it as a PNG.

    Arguments:
        title: Used for the filename.
        label: Optional prefix overriding the test's own label.
    """
    # No browser means no screen to snapshot (e.g. provision-only tests).
    if self.browser is not None:
        self.browser.snapshot(title, label)
def copy_js_log(self, title, label=None):
    """Save the browser's JavaScript console log, if a browser exists."""
    if self.browser is not None:
        self.browser.copy_js_log(title, label)
def copy_journal(self, title, label=None):
    """Extract the full journal from every reachable machine and attach it."""
    for name, m in self.machines.iteritems():
        if m.ssh_reachable:
            log = "%s-%s-%s.log" % (label or self.label(), m.label, title)
            with open(log, "w") as fp:
                m.execute("journalctl", stdout=fp)
            print "Journal extracted to %s" % (log)
            attach(log)
def copy_cores(self, title, label=None):
    """Download systemd core dumps from every reachable machine and attach them."""
    for name, m in self.machines.iteritems():
        if m.ssh_reachable:
            directory = "%s-%s-%s.core" % (label or self.label(), m.label, title)
            dest = os.path.abspath(directory)
            m.download_dir("/var/lib/systemd/coredump", dest)
            # rmdir succeeds only when no cores were downloaded; ENOTEMPTY
            # means there are cores worth attaching.
            # NOTE(review): any other OSError (e.g. EACCES) is silently
            # swallowed here — presumably intentional best-effort; confirm.
            try:
                os.rmdir(dest)
            except OSError, ex:
                if ex.errno == errno.ENOTEMPTY:
                    print "Core dumps downloaded to %s" % (dest)
                    attach(dest)
some_failed = False
def jsquote(str):
    """Return the given value quoted as a JavaScript (JSON) literal."""
    quoted = json.dumps(str)
    return quoted
# See phantom-driver for the methods that are defined
# See phantom-driver for the methods that are defined
class Phantom:
    """Proxy to a PhantomJS driver subprocess speaking line-delimited JSON.

    Any non-underscore attribute access returns a callable that sends the
    corresponding command to the driver and returns its result.
    """

    def __init__(self, lang=None):
        self.lang = lang            # value for LC_ALL in the driver process
        self.timeout = 60           # per-command timeout, seconds
        self.valid = False          # True while a driver process is believed alive
        self._driver = None         # subprocess.Popen handle, started lazily

    def __getattr__(self, name):
        # Turn phantom.foo(a, b) into a driver invocation of "foo".
        if not name.startswith("_"):
            return lambda *args: self._invoke(name, *args)
        raise AttributeError

    def _invoke(self, name, *args):
        """Send one command to the driver and return its decoded result."""
        if not self._driver:
            self.start()
        if opts.trace:
            print "-> {0}({1})".format(name, repr(args)[1:-2])
        # One JSON document per line; embedded newlines would break framing.
        line = json.dumps({
            "cmd": name,
            "args": args,
            "timeout": self.timeout * 1000
        }).replace("\n", " ") + "\n"
        self._driver.stdin.write(line)
        line = self._driver.stdout.readline()
        # EOF means the driver died; kill it so the next call restarts it.
        if not line:
            self.kill()
            raise Error("PhantomJS or driver broken")
        try:
            res = json.loads(line)
        except:
            print line.strip()
            raise
        if 'error' in res:
            if opts.trace:
                print "<- raise", res['error']
            raise Error(res['error'])
        if 'result' in res:
            if opts.trace:
                print "<-", repr(res['result'])
            return res['result']
        raise Error("unexpected: " + line.strip())

    def start(self):
        """Launch the PhantomJS driver subprocess."""
        environ = os.environ.copy()
        if self.lang:
            environ["LC_ALL"] = self.lang
        path = os.path.dirname(__file__)
        command = [
            "%s/phantom-command" % path,
            "%s/phantom-driver.js" % path,
            "%s/sizzle.js" % path,
            "%s/phantom-lib.js" % path
        ]
        self.valid = True
        self._driver = subprocess.Popen(command, env=environ,
                                        stdout=subprocess.PIPE,
                                        stdin=subprocess.PIPE, close_fds=True)

    def kill(self):
        """Terminate the driver subprocess and mark this proxy invalid."""
        self.valid = False
        if self._driver:
            self._driver.terminate()
            self._driver.wait()
            self._driver = None
def skipImage(reason, *args):
    """Decorator factory: skip the test when the current image is in *args*.

    Returns unittest.skip(...) for affected images and an identity
    decorator otherwise.
    """
    if testvm.DEFAULT_IMAGE not in args:
        # Current image is unaffected: leave the test untouched.
        return lambda func: func
    return unittest.skip("{0}: {1}".format(testvm.DEFAULT_IMAGE, reason))
class Policy(object):
    """Decides whether a test failure is a known issue or worth retrying.

    check_issue() consults the external "image-naughty" tool; check_retry()
    recognizes persistent harness/framework flakes that have no impact on
    real-world users.
    """

    def __init__(self, retryable=True):
        # When False, check_retry() always answers False.
        self.retryable = retryable

    def normalize_traceback(self, trace):
        """Strip directories from file paths so tracebacks compare stably."""
        # All file paths converted to basename
        return re.sub(r'File "[^"]*/([^/"]+)"', 'File "\\1"', trace.strip())

    def check_issue(self, trace):
        """Return the known-issue identifier for *trace*, or "" if none.

        Pipes the traceback through the "image-naughty" helper; a missing
        helper is not an error, any other OSError is reported to stderr.
        """
        cmd = [ "image-naughty", testvm.DEFAULT_IMAGE ]
        try:
            proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            (output, error) = proc.communicate(str(trace))
        # FIX: use the "except ... as" form (PEP 3110); valid on Python 2.6+
        # and required on Python 3, unlike the old comma syntax.
        except OSError as ex:
            if getattr(ex, 'errno', 0) != errno.ENOENT:
                sys.stderr.write("Couldn't check known issue: {0}\n".format(str(ex)))
            output = ""
        return output

    def check_retry(self, trace, tries):
        """Return True when *trace* is a recognized harness flake worth retrying."""
        # Never try more than five times
        if not self.retryable or tries >= 5:
            return False

        # We check for persistent but test harness or framework specific
        # failures that otherwise cause flakiness and false positives.
        #
        # The things we check here must:
        # * have no impact on users of Cockpit in the real world
        # * be things we tried to resolve in other ways. This is a last resort
        #
        trace = self.normalize_traceback(trace)

        # HACK: An issue in phantomjs and QtWebkit
        # http://stackoverflow.com/questions/35337304/qnetworkreply-network-access-is-disabled-in-qwebview
        # https://github.com/owncloud/client/issues/3600
        # https://github.com/ariya/phantomjs/issues/14789
        if "PhantomJS or driver broken" in trace:
            return True

        # HACK: A race issue in phantomjs that happens randomly
        # https://github.com/ariya/phantomjs/issues/12750
        if "Resource Error: Operation canceled" in trace:
            return True

        # HACK: Interacting with sshd during boot is not always predictable
        # We're using an implementation detail of the server as our "way in" for testing.
        # This often has to do with sshd being restarted for some reason
        if "SSH master process exited with code: 255" in trace:
            return True

        # HACK: Intermittently the new libvirt machine won't get an IP address
        # or SSH will completely fail to start. We've tried various approaches
        # to minimize this, but it happens every 100,000 tests or so
        if "Failure: Unable to reach machine " in trace:
            return True

        # HACK: For when the verify machine runs out of available processes
        # We should retry this test process
        if "self.pid = os.fork()\nOSError: [Errno 11] Resource temporarily unavailable" in trace:
            return True

        return False
class TestResult(tap.TapResult):
    """TAP test result that consults a Policy for known issues and retries.

    maybeIgnore() converts failures matching a known issue into skips and
    raises RetryError for failures the policy deems retryable, rewinding
    the TAP numbering so the retry reuses the same slot.
    """

    def __init__(self, stream, descriptions, verbosity):
        # Set by the runner; None means no known-issue/retry handling.
        self.policy = None
        # stream/descriptions are accepted for unittest runner compatibility
        # but TapResult only consumes the verbosity.
        super(TestResult, self).__init__(verbosity)

    def maybeIgnore(self, test, err):
        """Return True when *err* was absorbed (skip); may raise RetryError."""
        string = self._exc_info_to_string(err, test)
        if self.policy:
            issue = self.policy.check_issue(string)
            if issue:
                self.addSkip(test, "Known issue #{0}".format(issue))
                return True
            tries = getattr(test, "retryCount", 1)
            if self.policy.check_retry(string, tries):
                # Rewind the TAP counter so the retried test keeps its number.
                self.offset -= 1
                setattr(test, "retryCount", tries + 1)
                test.doCleanups()
                raise RetryError("Retrying due to failure of test harness or framework")
        return False

    def addError(self, test, err):
        if not self.maybeIgnore(test, err):
            super(TestResult, self).addError(test, err)

    def addFailure(self, test, err):
        if not self.maybeIgnore(test, err):
            # FIX: previously delegated to addError(), which recorded
            # assertion failures as errors; report them as failures.
            super(TestResult, self).addFailure(test, err)

    def startTest(self, test):
        # Emit a TAP comment banner identifying the test about to run.
        sys.stdout.write("# {0}\n# {1}\n#\n".format('-' * 70, str(test)))
        sys.stdout.flush()
        super(TestResult, self).startTest(test)

    def stopTest(self, test):
        sys.stdout.write("\n")
        sys.stdout.flush()
        super(TestResult, self).stopTest(test)
class OutputBuffer(object):
    """Collects the piped stdout/stderr of forked child test processes.

    push() registers a child's read fd; drain() moves any available data
    into per-fd buffers; pop() finishes a child, reading everything that
    remains and returning the accumulated text.
    """

    def __init__(self):
        self.poll = select.poll()
        self.buffers = { }   # fd -> accumulated output string
        self.fds = { }       # child pid -> read fd

    def drain(self):
        """Read whatever output is currently available without blocking long."""
        # NOTE(review): the for/else makes this perform at most one poll()
        # round per call — "break" runs whenever the for completes, i.e.
        # always.  Presumably intentional (drain once, don't spin); confirm.
        while self.fds:
            for p in self.poll.poll(1000):
                data = os.read(p[0], 1024)
                if data == "":
                    # EOF: the child closed its end; stop polling this fd.
                    self.poll.unregister(p[0])
                else:
                    self.buffers[p[0]] += data
            else:
                break

    def push(self, pid, fd):
        """Start tracking output of child *pid* arriving on *fd*."""
        self.poll.register(fd, select.POLLIN)
        self.fds[pid] = fd
        self.buffers[fd] = ""

    def pop(self, pid):
        """Stop tracking *pid*; read remaining output, close fd, return text."""
        fd = self.fds.pop(pid)
        buffer = self.buffers.pop(fd)
        try:
            self.poll.unregister(fd)
        except KeyError:
            # Already unregistered when drain() saw EOF.
            pass
        # Blocking reads are fine here: the child has exited, so the pipe
        # will reach EOF.
        while True:
            data = os.read(fd, 1024)
            if data == "":
                break
            buffer += data
        os.close(fd)
        return buffer
class TapRunner(object):
    """Runs each test case in a forked child process, emitting TAP output.

    With jobs > 1, children run concurrently and their output is buffered
    via OutputBuffer so interleaved writes don't corrupt the TAP stream.
    """
    resultclass = TestResult

    def __init__(self, verbosity=1, jobs=1, thorough=False):
        self.stream = unittest.runner._WritelnDecorator(sys.stderr)
        self.verbosity = verbosity
        # thorough=True disables the known-issue/retry Policy.
        self.thorough = thorough
        self.jobs = jobs

    def runOne(self, test, offset):
        """Run a single test (inside the forked child); return success bool."""
        result = TestResult(self.stream, False, self.verbosity)
        # offset keeps TAP numbering correct across child processes.
        result.offset = offset
        if not self.thorough:
            result.policy = Policy()
        try:
            test(result)
        except KeyboardInterrupt:
            return False
        except:
            sys.stderr.write("Unexpected exception while running {0}\n".format(test))
            traceback.print_exc(file=sys.stderr)
            return False
        else:
            result.printErrors()
            return result.wasSuccessful()

    def run(self, testable):
        """Fork a child per test, cap concurrency at self.jobs, return failure count."""
        tap.TapResult.plan(testable)
        count = testable.countTestCases()

        # For statistics
        start = time.time()

        pids = set()        # live child pids
        options = 0         # waitpid flags; WNOHANG only when buffering
        buffer = None
        if self.jobs > 1:
            buffer = OutputBuffer()
            options = os.WNOHANG
        offset = 0
        failures = { "count": 0 }

        def join_some(n):
            # Reap children until at most n remain; accumulate failures.
            while len(pids) > n:
                if buffer:
                    buffer.drain()
                try:
                    (pid, code) = os.waitpid(-1, options)
                except KeyboardInterrupt:
                    sys.exit(255)
                if pid:
                    if buffer:
                        sys.stdout.write(buffer.pop(pid))
                    pids.remove(pid)
                    # Killed by signal counts as one failure; otherwise the
                    # exit status is the child's failure count.
                    if code & 0xff:
                        failed = 1
                    else:
                        failed = (code >> 8) & 0xff
                    failures["count"] += failed

        for test in testable:
            # Block until a job slot is free.
            join_some(self.jobs - 1)

            # Fork off a child process for each test
            if buffer:
                (rfd, wfd) = os.pipe()

            # Flush before fork so buffered output isn't duplicated in the child.
            sys.stdout.flush()
            sys.stderr.flush()
            pid = os.fork()
            if not pid:
                # Child: redirect stdout/stderr into the pipe when buffering.
                if buffer:
                    os.dup2(wfd, 1)
                    os.dup2(wfd, 2)
                # Reseed so children don't share the parent's PRNG state.
                random.seed()
                if self.runOne(test, offset):
                    sys.exit(0)
                else:
                    sys.exit(1)

            # The parent process
            pids.add(pid)
            if buffer:
                os.close(wfd)
                buffer.push(pid, rfd)
            offset += test.countTestCases()

        # Wait for the remaining subprocesses
        join_some(0)

        # Report on the results
        duration = int(time.time() - start)
        hostname = socket.gethostname().split(".")[0]
        details = "[{0}s on {1}]".format(duration, hostname)
        count = failures["count"]
        if count:
            sys.stdout.write("# {0} TESTS FAILED {1}\n".format(count, details))
        else:
            sys.stdout.write("# TESTS PASSED {0}\n".format(details))
        return count
def arg_parser():
    """Build and return the common command line parser for Cockpit tests."""
    p = argparse.ArgumentParser(description='Run Cockpit test(s)')
    p.add_argument('-j', '--jobs', dest="jobs", type=int,
                   default=os.environ.get("TEST_JOBS", 1), help="Number of concurrent jobs")
    p.add_argument('-v', '--verbose', dest="verbosity", action='store_const',
                   const=2, help='Verbose output')
    p.add_argument('-t', "--trace", dest='trace', action='store_true',
                   help='Trace machine boot and commands')
    p.add_argument('-q', '--quiet', dest='verbosity', action='store_const',
                   const=0, help='Quiet output')
    p.add_argument('--thorough', dest='thorough', action='store_true',
                   help='Thorough mode, no skipping known issues')
    p.add_argument('-s', "--sit", dest='sit', action='store_true',
                   help="Sit and wait after test failure")
    p.add_argument('--nonet', dest="fetch", action="store_false",
                   help="Don't go online to download images or data")
    p.add_argument('tests', nargs='*')
    # Default verbosity between --quiet (0) and --verbose (2).
    p.set_defaults(verbosity=1, fetch=True)
    return p
def test_main(options=None, suite=None, attachments=None, **kwargs):
    """
    Run all test cases, as indicated by arguments.

    If no arguments are given on the command line, all test cases are
    executed.  Otherwise only the given test cases are run.

    Arguments:
        options: Pre-parsed argparse namespace; None means parse sys.argv
                 (standalone mode, which also calls sys.exit()).
        suite: Predefined test suite; mutually exclusive with naming tests
               on the command line.
        attachments: Default directory for test attachments; overridden by
                     the TEST_ATTACHMENTS environment variable.
    """
    global opts

    # Turn off python stdout buffering
    sys.stdout.flush()
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)

    standalone = options is None
    parser = arg_parser()
    parser.add_argument('--machine', dest="address", action="store",
                        default=None, help="Run this test against an already running machine")

    if standalone:
        options = parser.parse_args()

    # Have to copy into opts due to python globals across modules
    for (key, value) in vars(options).items():
        setattr(opts, key, value);

    if opts.sit and opts.jobs > 1:
        # FIX: corrected typo in the user-facing message ("avalible").
        parser.error("the -s or --sit argument not available with multiple jobs")

    opts.address = getattr(opts, "address", None)
    opts.attachments = os.environ.get("TEST_ATTACHMENTS", attachments)
    if opts.attachments and not os.path.exists(opts.attachments):
        os.makedirs(opts.attachments)

    import __main__
    if len(opts.tests) > 0:
        if suite:
            parser.error("tests may not be specified when running a predefined test suite")
        suite = unittest.TestLoader().loadTestsFromNames(opts.tests, module=__main__)
    elif not suite:
        suite = unittest.TestLoader().loadTestsFromModule(__main__)

    runner = TapRunner(verbosity=opts.verbosity, jobs=opts.jobs, thorough=opts.thorough)
    ret = runner.run(suite)
    if not standalone:
        return ret
    sys.exit(ret)
class Error(Exception):
    """Generic test-harness error carrying a human-readable message."""

    def __init__(self, msg):
        # msg: the message returned by str(); also available in self.args.
        self.msg = msg

    def __str__(self):
        return self.msg
class RetryError(Error):
    """Raised by TestResult to signal that a test should be re-run.

    See Policy.check_retry() for the conditions considered retryable.
    """
    pass
def wait(func, msg=None, delay=1, tries=60):
    """
    Wait for FUNC to return something truthy, and return that.

    FUNC is called repeatedly until it returns a true value or until a
    timeout occurs.  In the latter case, an exception is raised that
    describes the situation.  The exception is either the last one
    thrown by FUNC, or includes MSG, or a default message.

    Arguments:
        func: The function to call.
        msg: An error message to use when the timeout occurs.  Defaults
             to a generic message.
        delay: How long to wait between calls to FUNC, in seconds.
               Defaults to 1.
        tries: How often to call FUNC.  Defaults to 60.

    Raises:
        Error: When a timeout occurs.
    """
    t = 0
    while t < tries:
        try:
            val = func()
            if val:
                return val
        # FIX: was a bare "except:", which also swallowed (and retried on)
        # KeyboardInterrupt/SystemExit; only retry genuine errors.
        except Exception:
            # On the last attempt, surface FUNC's own exception.
            if t == tries - 1:
                raise
        t = t + 1
        sleep(delay)
    raise Error(msg or "Condition did not become true.")
def sit(machines=None):
    """
    Wait until the user confirms to continue.

    The current test case is suspended so that the user can inspect
    the browser.

    Arguments:
        machines: Mapping of name -> machine whose diagnostics are printed
                  before waiting.  Defaults to no machines.
    """
    # FIX: the default used to be a mutable "{ }"; use the None sentinel to
    # avoid the shared-mutable-default pitfall (behavior is unchanged).
    if machines is None:
        machines = { }
    for (name, machine) in machines.items():
        sys.stderr.write(machine.diagnose())
    raw_input ("Press RET to continue... ")
|
ingokegel/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/baseClass/after/src/a.py
|
83
|
from b import B
class C(B):
    """Subclass of B that simply delegates construction to the base class."""

    def __init__(self):
        super(C, self).__init__()
|
hgl888/chromium-crosswalk-efl
|
refs/heads/efl/crosswalk-10/39.0.2171.19
|
native_client_sdk/src/tools/tests/chrome_mock.py
|
107
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import sys
import time
import urllib2
def PrintAndFlush(s):
    """Print *s* and flush stdout immediately.

    The parent process reads this script's output through a pipe, so
    buffering must not delay the message.
    """
    print s
    sys.stdout.flush()
def main(args):
    """Mock chrome process: optionally hit a URL, then sleep until killed.

    Arguments:
        args: Command line arguments (without the program name); exactly
              one positional argument, the URL to load, is required.
    """
    parser = optparse.OptionParser(usage='%prog [options] <URL to load>')
    parser.add_option('--post', help='POST to URL.', dest='post',
                      action='store_true')
    parser.add_option('--get', help='GET to URL.', dest='get',
                      action='store_true')
    parser.add_option('--sleep',
                      help='Number of seconds to sleep after reading URL',
                      dest='sleep', default=0)
    parser.add_option('--expect-to-be-killed', help='If set, the script will warn'
                      ' if it isn\'t killed before it finishes sleeping.',
                      dest='expect_to_be_killed', action='store_true')
    options, args = parser.parse_args(args)
    if len(args) != 1:
        parser.error('Expected URL to load.')

    PrintAndFlush('Starting %s.' % sys.argv[0])

    if options.post:
        # An empty data argument forces a POST request.
        urllib2.urlopen(args[0], data='').read()
    elif options.get:
        urllib2.urlopen(args[0]).read()
    else:
        # Do nothing but wait to be killed.
        pass

    time.sleep(float(options.sleep))

    # Reaching this line at all is suspicious when the caller intended to
    # kill us mid-sleep; warn so the test can detect it.
    if options.expect_to_be_killed:
        PrintAndFlush('Done sleeping. Expected to be killed.')
    sys.exit(0)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
teamtuga4/teamtuga4ever.repository
|
refs/heads/master
|
plugin.video.pancas/genesisresolvers.py
|
266
|
# -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib,urllib2,urlparse,re,os,sys,xbmc,xbmcgui,xbmcaddon,xbmcvfs
try:
import CommonFunctions as common
except:
import commonfunctionsdummy as common
try:
import json
except:
import simplejson as json
class get(object):
    """Resolve a hoster page URL into a directly playable media URL.

    The resolved URL (or the input URL on failure) ends up in self.result.
    """

    def __init__(self, url):
        self.result = self.worker(url)

    def worker(self, url):
        try:
            # Premium debrid services take priority over per-host resolvers.
            pz = premiumize().resolve(url)
            if not pz == None: return pz
            rd = realdebrid().resolve(url)
            if not rd == None: return rd

            # rtmp streams only need a timeout parameter appended.
            if url.startswith('rtmp'):
                if len(re.compile('\s*timeout=(\d*)').findall(url)) == 0: url += ' timeout=10'
                return url

            # Normalize the host name for matching against resolver netlocs.
            u = urlparse.urlparse(url).netloc
            u = u.replace('www.', '').replace('embed.', '')
            u = u.lower()

            # Find the resolver class in this module whose info()['netloc']
            # contains the host, then invoke it.
            # NOTE(review): eval() on class names collected via inspect —
            # fragile but confined to names defined in this module.
            import sys, inspect
            r = inspect.getmembers(sys.modules[__name__], inspect.isclass)
            r = [i for i in r if hasattr(i[1], 'info') and u in eval(i[0])().info()['netloc']][0][0]
            r = eval(r)().resolve(url)

            if r == None: return r
            elif type(r) == list: return r
            elif not r.startswith('http'): return r

            # Merge any existing "|header" suffix with standard headers.
            # NOTE(review): dict('') raises, so a missing suffix falls into
            # the except and yields an empty dict — works, but by accident.
            try: h = dict(urlparse.parse_qsl(r.rsplit('|', 1)[1]))
            except: h = dict('')
            h.update({'Referer': url, 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20100101 Firefox/34.0'})
            r = '%s|%s' % (r.split('|')[0], urllib.urlencode(h))

            return r
        except:
            # Best effort: hand back the original URL on any failure.
            return url
class getUrl(object):
    """Fetch a URL via urllib2 with optional proxy, cookies and headers.

    The response body (or cookie string / final URL, depending on
    *output*) is stored in self.result.
    """

    def __init__(self, url, close=True, proxy=None, post=None, headers=None, mobile=False, referer=None, cookie=None, output='', timeout='10'):
        handlers = []

        if not proxy == None:
            handlers += [urllib2.ProxyHandler({'http':'%s' % (proxy)}), urllib2.HTTPHandler]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        # Cookie jar is needed when the caller wants cookies back or wants
        # to keep the connection (close=False) for a session.
        if output == 'cookie' or not close == True:
            import cookielib
            cookies = cookielib.LWPCookieJar()
            handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        # Disable certificate verification on Python >= 2.7.9 (older
        # versions never verified); scraping targets often have bad certs.
        try:
            if sys.version_info < (2, 7, 9): raise Exception()
            import ssl; ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            handlers += [urllib2.HTTPSHandler(context=ssl_context)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)
        except:
            pass

        # NOTE(review): headers.update(headers) is a no-op for a dict and
        # raises for None, so this keeps a passed-in dict and replaces
        # None with {} — works, but presumably meant to copy; confirm.
        try: headers.update(headers)
        except: headers = {}

        if 'User-Agent' in headers:
            pass
        elif not mobile == True:
            # headers['User-Agent'] = common.randomagent()
            headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20100101 Firefox/34.0'
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'

        if 'referer' in headers:
            pass
        elif referer == None:
            headers['referer'] = url
        else:
            headers['referer'] = referer

        if not 'Accept-Language' in headers:
            headers['Accept-Language'] = 'en-US'

        if 'cookie' in headers:
            pass
        elif not cookie == None:
            headers['cookie'] = cookie

        request = urllib2.Request(url, data=post, headers=headers)
        response = urllib2.urlopen(request, timeout=int(timeout))

        if output == 'cookie':
            # Serialize the jar into a single "name=value; ..." string.
            result = []
            for c in cookies: result.append('%s=%s' % (c.name, c.value))
            result = "; ".join(result)
        elif output == 'geturl':
            # Final URL after redirects.
            result = response.geturl()
        else:
            result = response.read()

        if close == True:
            response.close()

        self.result = result
class captcha:
    """Detect and solve the captcha embedded in a hoster page.

    worker() tries each known captcha type in turn; the first one whose
    markers are found sets self.type and fills self.captcha with the form
    fields to submit.  Image captchas are shown to the user for manual entry.
    """

    def worker(self, data):
        self.captcha = {}

        self.solvemedia(data)
        if not self.type == None: return self.captcha
        self.recaptcha(data)
        if not self.type == None: return self.captcha
        self.capimage(data)
        if not self.type == None: return self.captcha
        self.numeric(data)
        if not self.type == None: return self.captcha

    def solvemedia(self, data):
        """Handle SolveMedia iframe captchas."""
        try:
            url = common.parseDOM(data, "iframe", ret="src")
            url = [i for i in url if 'api.solvemedia.com' in i]

            if len(url) > 0: self.type = 'solvemedia'
            else: self.type = None ; return

            result = getUrl(url[0], referer='').result

            response = common.parseDOM(result, "iframe", ret="src")
            response += common.parseDOM(result, "img", ret="src")
            response = [i for i in response if '/papi/media' in i][0]
            response = 'http://api.solvemedia.com' + response
            # Show the challenge image and let the user type the answer.
            response = self.keyboard(response)

            post = {}
            f = common.parseDOM(result, "form", attrs = { "action": "verify.noscript" })[0]
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post.update({'adcopy_response': response})

            getUrl('http://api.solvemedia.com/papi/verify.noscript', post=urllib.urlencode(post)).result

            self.captcha.update({'adcopy_challenge': post['adcopy_challenge'], 'adcopy_response': 'manual_challenge'})
        except:
            pass

    def recaptcha(self, data):
        """Handle (old-style, pre-NoCaptcha) Google reCAPTCHA."""
        try:
            url = []
            if data.startswith('http://www.google.com'): url += [data]
            url += common.parseDOM(data, "script", ret="src", attrs = { "type": "text/javascript" })
            url = [i for i in url if 'http://www.google.com' in i]

            if len(url) > 0: self.type = 'recaptcha'
            else: self.type = None ; return

            result = getUrl(url[0]).result
            challenge = re.compile("challenge\s+:\s+'(.+?)'").findall(result)[0]
            response = 'http://www.google.com/recaptcha/api/image?c=' + challenge
            response = self.keyboard(response)

            self.captcha.update({'recaptcha_challenge_field': challenge, 'recaptcha_challenge': challenge, 'recaptcha_response_field': response, 'recaptcha_response': response})
        except:
            pass

    def capimage(self, data):
        """Handle a plain <img src="...captcha..."> image captcha."""
        try:
            url = common.parseDOM(data, "img", ret="src")
            url = [i for i in url if 'captcha' in i]

            if len(url) > 0: self.type = 'capimage'
            else: self.type = None ; return

            response = self.keyboard(url[0])

            self.captcha.update({'code': response})
        except:
            pass

    def numeric(self, data):
        """Handle CSS-positioned numeric captchas (digits encoded as entities)."""
        try:
            url = re.compile("left:(\d+)px;padding-top:\d+px;'>&#(.+?);<").findall(data)

            if len(url) > 0: self.type = 'numeric'
            else: self.type = None ; return

            # Sort digits by their CSS left offset, then decode the HTML
            # entities (codepoint - 48 yields the digit).
            result = sorted(url[0], key=lambda ltr: int(ltr[0]))
            response = ''.join(str(int(num[1])-48) for num in result)

            self.captcha.update({'code': response})
        except:
            pass

    def keyboard(self, response):
        """Display the captcha image in Kodi and ask the user to type it."""
        try:
            dataPath = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo("profile"))
            i = os.path.join(dataPath.decode("utf-8"),'img')
            f = xbmcvfs.File(i, 'w')
            f.write(getUrl(response).result)
            f.close()
            f = xbmcgui.ControlImage(450,5,375,115, i)
            d = xbmcgui.WindowDialog()
            d.addControl(f)
            xbmcvfs.delete(i)
            d.show()
            xbmc.sleep(3000)
            t = 'Type the letters in the image'
            c = common.getUserInput(t, '')
            d.close()
            return c
        except:
            return
class regex:
    """Evaluate "<regex>...</regex>" templates embedded in a URL string.

    Each template fetches a page, applies an expression and substitutes
    the result for the matching $doregex[name] placeholder.
    """

    def worker(self, data):
        try:
            data = str(data).replace('\r','').replace('\n','').replace('\t','')
            # Everything before the first <regex> tag is the URL template.
            url = re.compile('(.+?)<regex>').findall(data)[0]
            regex = re.compile('<regex>(.+?)</regex>').findall(data)
        except:
            return

        for x in regex:
            try:
                name = re.compile('<name>(.+?)</name>').findall(x)[0]
                expres = re.compile('<expres>(.+?)</expres>').findall(x)[0]

                referer = re.compile('<referer>(.+?)</referer>').findall(x)[0]
                referer = urllib.unquote_plus(referer)
                referer = common.replaceHTMLCodes(referer)
                referer = referer.encode('utf-8')

                page = re.compile('<page>(.+?)</page>').findall(x)[0]
                page = urllib.unquote_plus(page)
                page = common.replaceHTMLCodes(page)
                page = page.encode('utf-8')

                result = getUrl(page, referer=referer).result
                result = str(result).replace('\r','').replace('\n','').replace('\t','')
                result = str(result).replace('\/','/')

                # First match of the expression replaces the placeholder.
                r = re.compile(expres).findall(result)[0]
                url = url.replace('$doregex[%s]' % name, r)
            except:
                # A failing template leaves its placeholder untouched.
                pass

        url = common.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
class unwise:
    """Decode the "wise" JavaScript string obfuscation used by some hosters.

    worker() takes the argument list of an eval(function(w,i,s,e){...})
    call as a string and returns the decoded text; nested encodings are
    unwrapped recursively.  The variable names below mirror the original
    obfuscated JavaScript on purpose — do not "clean them up" without
    verifying against the JS implementation.
    """

    def worker(self, str_eval):
        page_value=""
        try:
            # Bind the four comma-separated source strings to w, i, s, e.
            # NOTE(review): exec on scraped data — confined to this parsing
            # context, but inherently trusting the page content.
            ss="w,i,s,e=("+str_eval+')'
            exec (ss)
            page_value=self.__unwise(w,i,s,e)
        except: return
        return page_value

    def __unwise(self, w, i, s, e):
        # Phase 1: interleave w/i/s into a "key" part (first 5 chars of
        # each) and a "data" part (the remainder), as the JS decoder does.
        lIll = 0;
        ll1I = 0;
        Il1l = 0;
        ll1l = [];
        l1lI = [];
        while True:
            if (lIll < 5):
                l1lI.append(w[lIll])
            elif (lIll < len(w)):
                ll1l.append(w[lIll]);
            lIll+=1;
            if (ll1I < 5):
                l1lI.append(i[ll1I])
            elif (ll1I < len(i)):
                ll1l.append(i[ll1I])
            ll1I+=1;
            if (Il1l < 5):
                l1lI.append(s[Il1l])
            elif (Il1l < len(s)):
                ll1l.append(s[Il1l]);
            Il1l+=1;
            if (len(w) + len(i) + len(s) + len(e) == len(ll1l) + len(l1lI) + len(e)):
                break;
        lI1l = ''.join(ll1l)
        I1lI = ''.join(l1lI)
        ll1I = 0;
        l1ll = [];
        # Phase 2: each output char is a base-36 pair from the data part,
        # offset by +/-1 depending on the parity of the cycled key char.
        for lIll in range(0,len(ll1l),2):
            ll11 = -1;
            if ( ord(I1lI[ll1I]) % 2):
                ll11 = 1;
            l1ll.append(chr( int(lI1l[lIll: lIll+2], 36) - ll11));
            ll1I+=1;
            if (ll1I >= len(l1lI)):
                ll1I = 0;
        ret=''.join(l1ll)
        # Nested encoding: recurse until no eval(function(w,i,s,e) remains.
        if 'eval(function(w,i,s,e)' in ret:
            ret=re.compile('eval\(function\(w,i,s,e\).*}\((.*?)\)').findall(ret)[0]
            return self.worker(ret)
        else:
            return ret
class js:
    """Unpack Dean Edwards "p,a,c,k,e,d" JavaScript and extract a media URL."""

    def worker(self, script):
        # Split the packed payload (p) from the numeric parameters and the
        # keyword dictionary (a, c, k).
        aSplit = script.split(";',")
        p = str(aSplit[0])
        aSplit = aSplit[1].split(",")
        a = int(aSplit[0])
        c = int(aSplit[1])
        k = aSplit[2].split(".")[0].replace("'", '').split('|')
        e = ''
        d = ''

        sUnpacked = str(self.__unpack(p, a, c, k, e, d))
        sUnpacked = sUnpacked.replace('\\', '')

        url = self.__parse(sUnpacked)
        return url

    def __unpack(self, p, a, c, k, e, d):
        # Substitute each base-a token back with its dictionary word.
        while (c > 1):
            c = c -1
            if (k[c]):
                p = re.sub('\\b' + str(self.__itoa(c, a)) +'\\b', k[c], p)
        return p

    def __itoa(self, num, radix):
        """Convert *num* to the packer's base-*radix* token string."""
        result = ""
        while num > 0:
            result = "0123456789abcdefghijklmnopqrstuvwxyz"[num % radix] + result
            # NOTE(review): relies on Python 2 integer division ("/=");
            # under Python 3 this would produce floats and break.
            num /= radix
        return result

    def __parse(self, sUnpacked):
        # Collect candidate media URLs from the common jwplayer patterns,
        # drop subtitle files, and force an http:// scheme on the last one.
        url = re.compile("'file' *, *'(.+?)'").findall(sUnpacked)
        url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(sUnpacked)
        url += re.compile("playlist=(.+?)&").findall(sUnpacked)
        url += common.parseDOM(sUnpacked, "embed", ret="src")
        url = [i for i in url if not i.endswith('.srt')]
        url = 'http://' + url[-1].split('://', 1)[-1]
        return url
class premiumize:
    """Resolver backed by the premiumize.me direct-download API."""

    def __init__(self):
        # Credentials come from the addon settings; empty means disabled.
        self.user = xbmcaddon.Addon().getSetting("premiumize_user")
        self.password = xbmcaddon.Addon().getSetting("premiumize_password")

    def info(self):
        return {
        'netloc': ['bitshare.com', 'filefactory.com', 'k2s.cc', 'oboom.com', 'rapidgator.net', 'uploaded.net'],
        'host': ['Bitshare', 'Filefactory', 'K2S', 'Oboom', 'Rapidgator', 'Uploaded'],
        'quality': 'High',
        'captcha': False,
        'a/c': True
        }

    def status(self):
        """Return True when credentials are configured."""
        if (self.user == '' or self.password == ''): return False
        else: return True

    def hosts(self):
        """Return the lower-cased host names supported by the account, or None."""
        try:
            if self.status() == False: raise Exception()

            url = 'http://api.premiumize.me/pm-api/v1.php?method=hosterlist&params[login]=%s&params[pass]=%s' % (self.user, self.password)

            result = getUrl(url).result

            pz = json.loads(result)['result']['hosterlist']
            pz = [i.rsplit('.' ,1)[0].lower() for i in pz]

            return pz
        except:
            return

    def resolve(self, url):
        """Return a direct download link for *url*, or None on any failure."""
        try:
            if self.status() == False: raise Exception()

            url = 'http://api.premiumize.me/pm-api/v1.php?method=directdownloadlink&params[login]=%s&params[pass]=%s&params[link]=%s' % (self.user, self.password, urllib.quote_plus(url))

            result = getUrl(url, close=False).result

            url = json.loads(result)['result']['location']

            return url
        except:
            return
class realdebrid:
    """real-debrid.com debrid-service wrapper (account required)."""
    def __init__(self):
        # NOTE(review): the setting ids are spelled "realdedrid_*"; they must
        # match the add-on's settings.xml, so the typo cannot be fixed here alone.
        self.user = xbmcaddon.Addon().getSetting("realdedrid_user")
        self.password = xbmcaddon.Addon().getSetting("realdedrid_password")
    def info(self):
        # Static metadata: hosters usable through this account service.
        return {
            'netloc': ['bitshare.com', 'filefactory.com', 'k2s.cc', 'oboom.com', 'rapidgator.net', 'uploaded.net'],
            'host': ['Bitshare', 'Filefactory', 'K2S', 'Oboom', 'Rapidgator', 'Uploaded'],
            'quality': 'High',
            'captcha': False,
            'a/c': True
        }
    def status(self):
        # Usable only when both credentials are configured.
        if (self.user == '' or self.password == ''): return False
        else: return True
    def hosts(self):
        """Return lower-cased hoster names supported by real-debrid, or None."""
        try:
            if self.status() == False: raise Exception()
            url = 'http://real-debrid.com/api/hosters.php'
            result = getUrl(url).result
            # Endpoint returns a bare comma-separated list; wrap in [] to parse as JSON.
            rd = json.loads('[%s]' % result)
            rd = [i.rsplit('.' ,1)[0].lower() for i in rd]
            return rd
        except:
            return
    def resolve(self, url):
        """Log in, then unrestrict *url* to a direct download link; None on failure."""
        try:
            if self.status() == False: raise Exception()
            login_data = urllib.urlencode({'user' : self.user, 'pass' : self.password})
            login_link = 'http://real-debrid.com/ajax/login.php?%s' % login_data
            result = getUrl(login_link, close=False).result
            result = json.loads(result)
            error = result['error']
            # error == 0 signals a successful login.
            if not error == 0: raise Exception()
            url = 'http://real-debrid.com/ajax/unrestrict.php?link=%s' % url
            # Filefactory stream pages must be unrestricted as file pages.
            url = url.replace('filefactory.com/stream/', 'filefactory.com/file/')
            result = getUrl(url).result
            result = json.loads(result)
            url = result['generated_links'][0][-1]
            return url
        except:
            return
class _180upload:
    """Resolver for 180upload.com embeds."""
    def info(self):
        return {
            'netloc': ['180upload.com'],
            'host': ['180upload'],
            'quality': 'High',
            'captcha': False,
            'a/c': False
        }
    def resolve(self, url):
        """Re-post the embed page's hidden form, then unpack the player JS; None on failure."""
        try:
            url = re.compile('//.+?/([\w]+)').findall(url)[0]
            url = 'http://180upload.com/embed-%s.html' % url
            result = getUrl(url).result
            post = {}
            # Re-submit every hidden field of the captcha form unchanged.
            f = common.parseDOM(result, "form", attrs = { "id": "captchaForm" })[0]
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
            # Stream URL is hidden inside packed (p,a,c,k,e,d) JavaScript.
            result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
            url = js().worker(result)
            return url
        except:
            return
class allmyvideos:
    """Resolver for allmyvideos.net embeds."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['allmyvideos.net'], 'host': ['Allmyvideos'],
                'quality': 'Medium', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return a direct media URL, or None on any failure."""
        try:
            media_id = re.compile('//.+?/([\w]+)').findall(url.replace('/embed-', '/'))[0]
            embed = 'http://allmyvideos.net/embed-%s.html' % media_id
            page = getUrl(embed, mobile=True).result
            return re.compile('"file" *: *"(http.+?)"').findall(page)[-1]
        except:
            return
class bestreams:
    """Resolver for bestreams.net embeds."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['bestreams.net'], 'host': ['Bestreams'],
                'quality': 'Low', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return a direct media URL, or None on any failure."""
        try:
            media_id = re.compile('//.+?/([\w]+)').findall(url.replace('/embed-', '/'))[0]
            embed = 'http://bestreams.net/embed-%s.html' % media_id
            page = getUrl(embed, mobile=True).result
            return re.compile('file *: *"(http.+?)"').findall(page)[-1]
        except:
            return
class clicknupload:
    """Resolver for clicknupload.com (two-step free-download form + captcha)."""
    def info(self):
        return {
            'netloc': ['clicknupload.com'],
            'host': ['Clicknupload'],
            'quality': 'High',
            'captcha': True,
            'a/c': False
        }
    def resolve(self, url):
        """Submit both free-download forms (second with captcha) and pull the
        final link out of the download button's onClick handler; None on failure."""
        try:
            result = getUrl(url).result
            post = {}
            # First form: request the free-download flow.
            f = common.parseDOM(result, "Form", attrs = { "action": "" })
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post.update({'method_free': 'Free Download'})
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
            post = {}
            # Second form: same hidden fields plus the solved captcha.
            f = common.parseDOM(result, "Form", attrs = { "action": "" })
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post.update({'method_free': 'Free Download'})
            post.update(captcha().worker(result))
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
            # The direct link lives inside a window.open(...) onClick attribute.
            url = common.parseDOM(result, "a", ret="onClick")
            url = [i for i in url if i.startswith('window.open')][0]
            url = re.compile('[\'|\"](.+?)[\'|\"]').findall(url)[0]
            return url
        except:
            return
class cloudzilla:
    """Resolver for cloudzilla.to share links."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['cloudzilla.to'], 'host': ['Cloudzilla'],
                'quality': 'Medium', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return the direct video URL from the embed page, or None on failure."""
        try:
            embed = url.replace('/share/file/', '/embed/')
            page = getUrl(embed).result
            return re.compile('var\s+vurl *= *"(http.+?)"').findall(page)[0]
        except:
            return
class coolcdn:
    """Shared resolver for the 'cool CDN' family of hosts (movshare/novamov/
    nowvideo/videoweed), which all expose the same player API."""
    def info(self):
        return {
            'netloc': ['movshare.net', 'novamov.com', 'nowvideo.sx', 'videoweed.es'],
            'host': ['Movshare', 'Novamov', 'Nowvideo', 'Videoweed'],
            'quality': 'Low',
            'captcha': False,
            'a/c': False
        }
    def resolve(self, url):
        """Fetch the embed page for its file key, then ask the player API for
        the direct stream URL; None on failure."""
        try:
            # Normalise to the bare domain so embed/API URLs can be rebuilt.
            netloc = urlparse.urlparse(url).netloc
            netloc = netloc.replace('www.', '').replace('embed.', '')
            netloc = netloc.lower()
            # The video id appears either as a path segment or a v= parameter.
            id = re.compile('//.+?/.+?/([\w]+)').findall(url)
            id += re.compile('//.+?/.+?v=([\w]+)').findall(url)
            id = id[0]
            url = 'http://embed.%s/embed.php?v=%s' % (netloc, id)
            result = getUrl(url).result
            key = re.compile('flashvars.filekey=(.+?);').findall(result)[-1]
            # filekey may be an indirection to a JS variable holding the value.
            try: key = re.compile('\s+%s="(.+?)"' % key).findall(result)[-1]
            except: pass
            url = 'http://www.%s/api/player.api.php?key=%s&file=%s' % (netloc, key, id)
            result = getUrl(url).result
            url = re.compile('url=(.+?)&').findall(result)[0]
            return url
        except:
            return
class daclips:
    """Resolver for daclips.in."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['daclips.in'], 'host': ['Daclips'],
                'quality': 'Low', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return the last direct http stream found on the mobile page, or None."""
        try:
            page = getUrl(url, mobile=True).result
            return re.compile('file *: *"(http.+?)"').findall(page)[-1]
        except:
            return
class datemule:
    """Resolver for datemule.com (matched by netloc only, not listed as a host)."""
    def info(self):
        # Only 'netloc' is advertised for this resolver.
        return {'netloc': ['datemule.com']}
    def resolve(self, url):
        """Return the first direct http stream found on the mobile page, or None."""
        try:
            page = getUrl(url, mobile=True).result
            return re.compile('file *: *"(http.+?)"').findall(page)[0]
        except:
            return
class fastvideo:
    """Resolver for fastvideo.in / faststream.in embeds."""
    def info(self):
        # Static metadata describing these hosts.
        return {'netloc': ['fastvideo.in', 'faststream.in'],
                'host': ['Fastvideo', 'Faststream'],
                'quality': 'Low', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return a direct media URL, or None on any failure."""
        try:
            media_id = re.compile('//.+?/([\w]+)').findall(url.replace('/embed-', '/'))[0]
            embed = 'http://fastvideo.in/embed-%s.html' % media_id
            page = getUrl(embed, mobile=True).result
            return re.compile('file *: *"(http.+?)"').findall(page)[-1]
        except:
            return
class filehoot:
    """Resolver for filehoot.com embeds."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['filehoot.com'], 'host': ['Filehoot'],
                'quality': 'Low', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return a direct media URL, or None on any failure."""
        try:
            media_id = re.compile('//.+?/([\w]+)').findall(url.replace('/embed-', '/'))[0]
            embed = 'http://filehoot.com/embed-%s.html' % media_id
            page = getUrl(embed, mobile=True).result
            return re.compile('file *: *"(http.+?)"').findall(page)[0]
        except:
            return
class filenuke:
    """Resolver for filenuke.com / sharesix.com."""
    def info(self):
        return {
            'netloc': ['filenuke.com', 'sharesix.com'],
            'host': ['Filenuke', 'Sharesix'],
            'quality': 'Low',
            'captcha': False,
            'a/c': False
        }
    def resolve(self, url):
        """Re-post the page's form (if any), then read the stream from the
        'lnk' JS variable; None on failure."""
        try:
            result = getUrl(url).result
            post = {}
            # Some pages have no form; post an empty body in that case.
            try: f = common.parseDOM(result, "form", attrs = { "method": "POST" })[0]
            except: f = ''
            k = common.parseDOM(f, "input", ret="name")
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
            url = re.compile("var\s+lnk\d* *= *'(http.+?)'").findall(result)[0]
            return url
        except:
            return
class googledocs:
    """Resolver for Google Docs/Drive hosted videos."""
    def info(self):
        return {
            'netloc': ['docs.google.com', 'drive.google.com']
        }
    def resolve(self, url):
        """Parse the player's fmt_stream_map and return a quality-ordered list
        of {'quality', 'url'} dicts (1080p, HD, SD); None on failure."""
        try:
            url = url.split('/preview', 1)[0]
            url = url.replace('drive.google.com', 'docs.google.com')
            result = getUrl(url).result
            result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
            # fmt_stream_map is a JSON string of 'itag|url' pairs joined by commas.
            u = json.loads(result)
            u = [i.split('|')[-1] for i in u.split(',')]
            u = sum([self.tag(i) for i in u], [])
            url = []
            # Collect at most one entry per quality tier, best first.
            try: url += [[i for i in u if i['quality'] == '1080p'][0]]
            except: pass
            try: url += [[i for i in u if i['quality'] == 'HD'][0]]
            except: pass
            try: url += [[i for i in u if i['quality'] == 'SD'][0]]
            except: pass
            if url == []: return
            return url
        except:
            return
    def tag(self, url):
        """Map a stream URL's itag (or =m suffix) to a quality label;
        returns [] when the itag is missing or unknown."""
        quality = re.compile('itag=(\d*)').findall(url)
        quality += re.compile('=m(\d*)$').findall(url)
        try: quality = quality[0]
        except: return []
        # itag groups below follow YouTube/Google video format codes.
        if quality in ['37', '137', '299', '96', '248', '303', '46']:
            return [{'quality': '1080p', 'url': url}]
        elif quality in ['22', '84', '136', '298', '120', '95', '247', '302', '45', '102']:
            return [{'quality': 'HD', 'url': url}]
        elif quality in ['35', '44', '135', '244', '94']:
            return [{'quality': 'SD', 'url': url}]
        elif quality in ['18', '34', '43', '82', '100', '101', '134', '243', '93']:
            return [{'quality': 'SD', 'url': url}]
        elif quality in ['5', '6', '36', '83', '133', '242', '92', '132']:
            return [{'quality': 'SD', 'url': url}]
        else:
            return []
class googleplus:
    """Resolver for Google+/Picasa hosted videos."""
    def info(self):
        return {
            'netloc': ['plus.google.com', 'picasaweb.google.com']
        }
    def resolve(self, url):
        """Resolve a Picasa album URL to a quality-ordered list of
        {'quality', 'url'} dicts via the Google+ photo viewer; None on failure."""
        try:
            if 'picasaweb' in url.lower():
                # Rebuild the canonical plus.google.com photo URL from the
                # album (aid), photo (pid), owner (oid) and auth key.
                result = getUrl(url).result
                aid = re.compile('aid=(\d*)').findall(result)[0]
                pid = urlparse.urlparse(url).fragment
                oid = re.compile('/(\d*)/').findall(urlparse.urlparse(url).path)[0]
                key = urlparse.parse_qs(urlparse.urlparse(url).query)['authkey'][0]
                url = 'http://plus.google.com/photos/%s/albums/%s/%s?authkey=%s' % (oid, aid, pid, key)
            result = getUrl(url, mobile=True).result
            u = re.compile('"(http[s]*://.+?videoplayback[?].+?)"').findall(result)[::-1]
            # Unescape the JS-encoded '=' and '&' characters.
            u = [i.replace('\\u003d','=').replace('\\u0026','&') for i in u]
            u = sum([self.tag(i) for i in u], [])
            url = []
            # Collect at most one entry per quality tier, best first.
            try: url += [[i for i in u if i['quality'] == '1080p'][0]]
            except: pass
            try: url += [[i for i in u if i['quality'] == 'HD'][0]]
            except: pass
            try: url += [[i for i in u if i['quality'] == 'SD'][0]]
            except: pass
            if url == []: return
            return url
        except:
            return
    def tag(self, url):
        """Map a stream URL's itag (or =m suffix) to a quality label;
        returns [] when the itag is missing or unknown."""
        quality = re.compile('itag=(\d*)').findall(url)
        quality += re.compile('=m(\d*)$').findall(url)
        try: quality = quality[0]
        except: return []
        # itag groups below follow YouTube/Google video format codes.
        if quality in ['37', '137', '299', '96', '248', '303', '46']:
            return [{'quality': '1080p', 'url': url}]
        elif quality in ['22', '84', '136', '298', '120', '95', '247', '302', '45', '102']:
            return [{'quality': 'HD', 'url': url}]
        elif quality in ['35', '44', '135', '244', '94']:
            return [{'quality': 'SD', 'url': url}]
        elif quality in ['18', '34', '43', '82', '100', '101', '134', '243', '93']:
            return [{'quality': 'SD', 'url': url}]
        elif quality in ['5', '6', '36', '83', '133', '242', '92', '132']:
            return [{'quality': 'SD', 'url': url}]
        else:
            return []
class gorillavid:
    """Resolver for gorillavid embeds."""
    def info(self):
        return {
            'netloc': ['gorillavid.com', 'gorillavid.in'],
            'host': ['Gorillavid'],
            'quality': 'Low',
            'captcha': False,
            'a/c': False
        }
    def resolve(self, url):
        """Resolve an embed URL to a direct file URL; None on failure.

        Dead links serve an HTML error page instead of the media file, so the
        candidate URL's Content-Type is probed before it is returned.
        """
        try:
            url = url.replace('/embed-', '/')
            url = re.compile('//.+?/([\w]+)').findall(url)[0]
            url = 'http://gorillavid.in/embed-%s.html' % url
            result = getUrl(url, mobile=True).result
            url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
            request = urllib2.Request(url)
            response = urllib2.urlopen(request, timeout=30)
            try:
                content_type = str(response.info()["Content-Type"])
            finally:
                # Always release the connection, even if the header is missing.
                response.close()
            # Servers commonly send 'text/html; charset=...', which the old
            # equality test failed to match, letting error pages through.
            if content_type.startswith('text/html'): raise Exception()
            return url
        except:
            return
class grifthost:
    """Resolver for grifthost.com embeds."""
    def info(self):
        return {
            'netloc': ['grifthost.com'],
            'host': ['Grifthost'],
            'quality': 'High',
            'captcha': False,
            'a/c': False
        }
    def resolve(self, url):
        """Optionally re-post the page's form, then unpack the player JS; None on failure."""
        try:
            url = url.replace('/embed-', '/')
            url = re.compile('//.+?/([\w]+)').findall(url)[0]
            url = 'http://grifthost.com/embed-%s.html' % url
            result = getUrl(url).result
            try:
                # Some pages interpose a confirmation form; re-post all fields,
                # converting the submit button into a hidden field first.
                post = {}
                f = common.parseDOM(result, "Form", attrs = { "method": "POST" })[0]
                f = f.replace('"submit"', '"hidden"')
                k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
                for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
                post = urllib.urlencode(post)
                result = getUrl(url, post=post).result
            except:
                pass
            # Stream URL is hidden inside packed (p,a,c,k,e,d) JavaScript.
            result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
            url = js().worker(result)
            return url
        except:
            return
class hugefiles:
    """Resolver for hugefiles.net (free-download form + captcha)."""
    def info(self):
        return {
            'netloc': ['hugefiles.net'],
            'host': ['Hugefiles'],
            'quality': 'High',
            'captcha': True,
            'a/c': False
        }
    def resolve(self, url):
        """Submit the free-download form with a solved captcha, then read the
        stream from the 'fileUrl' JS variable; None on failure."""
        try:
            result = getUrl(url).result
            post = {}
            # The form tag's case varies; try both spellings.
            f = common.parseDOM(result, "Form", attrs = { "action": "" })
            f += common.parseDOM(result, "form", attrs = { "action": "" })
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post.update({'method_free': 'Free Download'})
            post.update(captcha().worker(result))
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
            url = re.compile('fileUrl\s*=\s*[\'|\"](.+?)[\'|\"]').findall(result)[0]
            return url
        except:
            return
class ipithos:
    """Resolver for ipithos.to embeds."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['ipithos.to'], 'host': ['Ipithos'],
                'quality': 'High', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Unpack the embed page's packed player JS into a media URL, or None."""
        try:
            media_id = re.compile('//.+?/([\w]+)').findall(url.replace('/embed-', '/'))[0]
            embed = 'http://ipithos.to/embed-%s.html' % media_id
            page = getUrl(embed, mobile=True).result
            packed = re.compile('(eval.*?\)\)\))').findall(page)[-1]
            return js().worker(packed)
        except:
            return
class ishared:
    """Resolver for ishared.eu."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['ishared.eu'], 'host': ['iShared'],
                'quality': 'High', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return the last 'path' http URL found on the page, or None."""
        try:
            page = getUrl(url).result
            return re.compile('path *: *"(http.+?)"').findall(page)[-1]
        except:
            return
class kingfiles:
    """Resolver for kingfiles.net (two-step form + captcha)."""
    def info(self):
        return {
            'netloc': ['kingfiles.net'],
            'host': ['Kingfiles'],
            'quality': 'High',
            'captcha': True,
            'a/c': False
        }
    def resolve(self, url):
        """Submit both download forms (second with captcha), then read the
        stream from the 'download_url' JS variable; None on failure."""
        try:
            result = getUrl(url).result
            post = {}
            # First form: request the free-download flow (method_free is a space here).
            f = common.parseDOM(result, "Form", attrs = { "action": "" })[0]
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post.update({'method_free': ' '})
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
            post = {}
            # Second form: same hidden fields plus the solved captcha.
            f = common.parseDOM(result, "Form", attrs = { "action": "" })[0]
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post.update({'method_free': ' '})
            post.update(captcha().worker(result))
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
            url = re.compile("var\s+download_url *= *'(.+?)'").findall(result)[0]
            return url
        except:
            return
class mailru:
    """Resolver for mail.ru hosted videos."""
    def info(self):
        return {
            'netloc': ['mail.ru', 'my.mail.ru', 'videoapi.my.mail.ru']
        }
    def resolve(self, url):
        """Query the mail.ru video API and return a quality-ordered list of
        {'quality', 'url'} dicts; stream URLs carry the session cookie as a
        '|Cookie=' header suffix. None on failure."""
        try:
            usr = re.compile('/mail/(.+?)/').findall(url)[0]
            vid = re.compile('(\d*)[.]html').findall(url)[0]
            url = 'http://videoapi.my.mail.ru/videos/mail/%s/_myvideo/%s.json?ver=0.2.60' % (usr, vid)
            import requests
            # One request only: the stream URLs are bound to the session cookie,
            # so the previous pair of GETs could pair a playlist with a cookie
            # from a *different* session (and doubled the latency).
            response = requests.get(url)
            result = response.content
            cookie = response.headers['Set-Cookie']
            u = json.loads(result)['videos']
            h = "|Cookie=%s" % urllib.quote(cookie)
            url = []
            # Collect at most one entry per quality tier, best first.
            try: url += [[{'quality': '1080p', 'url': i['url'] + h} for i in u if i['key'] == '1080p'][0]]
            except: pass
            try: url += [[{'quality': 'HD', 'url': i['url'] + h} for i in u if i['key'] == '720p'][0]]
            except: pass
            try: url += [[{'quality': 'SD', 'url': i['url'] + h} for i in u if not (i['key'] == '1080p' or i['key'] == '720p')][0]]
            except: pass
            if url == []: return
            return url
        except:
            return
class mightyupload:
    """Resolver for mightyupload.com embeds."""
    def info(self):
        return {
            'netloc': ['mightyupload.com'],
            'host': ['Mightyupload'],
            'quality': 'High',
            'captcha': False,
            'a/c': False
        }
    def resolve(self, url):
        """Return the plain file URL if present, otherwise unpack the player JS;
        None on failure."""
        try:
            url = url.replace('/embed-', '/')
            url = re.compile('//.+?/([\w]+)').findall(url)[0]
            url = 'http://www.mightyupload.com/embed-%s.html' % url
            result = getUrl(url, mobile=True).result
            # Fast path: the URL is sometimes exposed directly.
            url = re.compile("file *: *'(.+?)'").findall(result)
            if len(url) > 0: return url[0]
            # Fallback: unpack the (p,a,c,k,e,d) JavaScript.
            result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
            url = js().worker(result)
            return url
        except:
            return
class mooshare:
    """Resolver for mooshare.biz embeds."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['mooshare.biz'], 'host': ['Mooshare'],
                'quality': 'Low', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return a direct media URL via the auto-play embed page, or None."""
        try:
            media_id = re.compile('//.+?/([\w]+)').findall(url.replace('/embed-', '/'))[0]
            embed = 'http://mooshare.biz/embed-%s.html?play=1&confirm=Close+Ad+and+Watch+as+Free+User' % media_id
            page = getUrl(embed).result
            return re.compile('file *: *"(http.+?)"').findall(page)[-1]
        except:
            return
class movdivx:
    """Resolver for movdivx.com."""
    def info(self):
        return {
            'netloc': ['movdivx.com'],
            'host': ['Movdivx'],
            'quality': 'Low',
            'captcha': False,
            'a/c': False
        }
    def resolve(self, url):
        """Submit the free-download form, then unpack the player JS; None on failure."""
        try:
            url = re.compile('//.+?/([\w]+)').findall(url)[0]
            url = 'http://www.movdivx.com/%s' % url
            result = getUrl(url).result
            post = {}
            # Re-submit every hidden field of the download form unchanged.
            f = common.parseDOM(result, "Form", attrs = { "action": "" })[0]
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post.update({'method_free': 'Free Download'})
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
            # Stream URL is hidden inside packed (p,a,c,k,e,d) JavaScript.
            result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
            url = js().worker(result)
            return url
        except:
            return
class movpod:
    """Resolver for movpod embeds."""
    def info(self):
        return {
            'netloc': ['movpod.net', 'movpod.in'],
            'host': ['Movpod'],
            'quality': 'Low',
            'captcha': False,
            'a/c': False
        }
    def resolve(self, url):
        """Resolve an embed URL to a direct file URL; None on failure.

        Dead links serve an HTML error page instead of the media file, so the
        candidate URL's Content-Type is probed before it is returned.
        """
        try:
            url = url.replace('/embed-', '/')
            url = url.replace('/vid/', '/')
            url = re.compile('//.+?/([\w]+)').findall(url)[0]
            url = 'http://movpod.in/embed-%s.html' % url
            result = getUrl(url).result
            url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
            request = urllib2.Request(url)
            response = urllib2.urlopen(request, timeout=30)
            try:
                content_type = str(response.info()["Content-Type"])
            finally:
                # Always release the connection, even if the header is missing.
                response.close()
            # Servers commonly send 'text/html; charset=...', which the old
            # equality test failed to match, letting error pages through.
            if content_type.startswith('text/html'): raise Exception()
            return url
        except:
            return
class movreel:
    """Resolver for movreel.com (optional account login, retried download)."""
    def info(self):
        return {
            'netloc': ['movreel.com'],
            'host': ['Movreel'],
            'quality': 'High',
            'captcha': False,
            'a/c': False
        }
    def resolve(self, url):
        """Log in (using add-on settings), re-post the download form, and retry
        up to three times for the generated 'Download Link'; None on failure."""
        try:
            user = xbmcaddon.Addon().getSetting("movreel_user")
            password = xbmcaddon.Addon().getSetting("movreel_password")
            login = 'http://movreel.com/login.html'
            post = {'op': 'login', 'login': user, 'password': password, 'redirect': url}
            post = urllib.urlencode(post)
            # Fetch the page, then log in with a redirect back to it; both
            # responses are concatenated so the form can be found in either.
            result = getUrl(url, close=False).result
            result += getUrl(login, post=post, close=False).result
            post = {}
            f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[-1]
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post.update({'method_free': '', 'method_premium': ''})
            post = urllib.urlencode(post)
            import time
            request = urllib2.Request(url, post)
            # The link is generated asynchronously; poll up to three times.
            for i in range(0, 3):
                try:
                    response = urllib2.urlopen(request, timeout=10)
                    result = response.read()
                    response.close()
                    url = re.compile('(<a .+?</a>)').findall(result)
                    url = [i for i in url if 'Download Link' in i][-1]
                    url = common.parseDOM(url, "a", ret="href")[0]
                    return url
                except:
                    time.sleep(1)
        except:
            return
class mrfile:
    """Resolver for mrfile.me."""
    def info(self):
        return {
            'netloc': ['mrfile.me'],
            'host': ['Mrfile'],
            'quality': 'High',
            'captcha': False,
            'a/c': False
        }
    def resolve(self, url):
        """Re-post the F1 download form, then return the last 'Download' link;
        None on failure."""
        try:
            result = getUrl(url).result
            post = {}
            f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[-1]
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post.update({'method_free': '', 'method_premium': ''})
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
            url = re.compile('(<a\s+href=.+?>Download\s+.+?</a>)').findall(result)[-1]
            url = common.parseDOM(url, "a", ret="href")[0]
            return url
        except:
            return
class mybeststream:
    """Resolver for mybeststream.xyz live rtmp streams."""
    def info(self):
        return {
            'netloc': ['mybeststream.xyz']
        }
    def resolve(self, url):
        """Decode the page's obfuscated player args and return an rtmp URL with
        pageUrl/live/timeout librtmp options appended; None on failure."""
        try:
            # The incoming url embeds the referer as a query parameter.
            referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
            page = url.replace(referer, '').replace('&referer=', '').replace('referer=', '')
            result = getUrl(url, referer=referer).result
            # The stream parameters are wrapped in a 4-argument 'unwise' call.
            result = re.compile("}[(]('.+?' *, *'.+?' *, *'.+?' *, *'.+?')[)]").findall(result)[-1]
            result = unwise().worker(result)
            strm = re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
            strm = [i for i in strm if i.startswith('rtmp')][0]
            url = '%s pageUrl=%s live=1 timeout=10' % (strm, page)
            return url
        except:
            return
class nosvideo:
    """Resolver for nosvideo.com."""
    def info(self):
        return {
            'netloc': ['nosvideo.com'],
            'host': ['Nosvideo'],
            'quality': 'Low',
            'captcha': False,
            'a/c': False
        }
    def resolve(self, url):
        """Submit the free-download form, unpack the player JS to a playlist
        URL, then read the stream from its <file> element; None on failure."""
        try:
            result = getUrl(url).result
            post = {}
            f = common.parseDOM(result, "Form", attrs = { "method": "POST" })[0]
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post.update({'method_free': 'Free Download'})
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
            # Unpacking yields a playlist XML URL, not the stream itself.
            result = re.compile('(eval.*?\)\)\))').findall(result)[0]
            url = js().worker(result)
            result = getUrl(url).result
            url = common.parseDOM(result, "file")[0]
            return url
        except:
            return
class openload:
    """Resolver for openload.io."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['openload.io'], 'host': ['Openload'],
                'quality': 'High', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return the href inside the 'realdownload' span, or None on failure."""
        try:
            page = getUrl(url).result
            span = common.parseDOM(page, "span", attrs = { "id": "realdownload" })[0]
            return common.parseDOM(span, "a", ret="href")[0]
        except:
            return
class played:
    """Resolver for played.to embeds."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['played.to'], 'host': ['Played'],
                'quality': 'Low', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return a direct media URL, or None on any failure."""
        try:
            cleaned = url.replace('/embed-', '/').replace('//', '/')
            media_id = re.compile('/.+?/([\w]+)').findall(cleaned)[0]
            embed = 'http://played.to/embed-%s.html' % media_id
            page = getUrl(embed, mobile=True).result
            return re.compile('file *: *"(http.+?)"').findall(page)[-1]
        except:
            return
class primeshare:
    """Resolver for primeshare.tv."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['primeshare.tv'], 'host': ['Primeshare'],
                'quality': 'High', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return the <video><source src> URL from the mobile page, or None."""
        try:
            page = getUrl(url, mobile=True).result
            video = common.parseDOM(page, "video")[0]
            return common.parseDOM(video, "source", ret="src", attrs = { "type": ".+?" })[0]
        except:
            return
class sharerepo:
    """Resolver for sharerepo.com."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['sharerepo.com'], 'host': ['Sharerepo'],
                'quality': 'Low', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return the last single-quoted file URL on the page, or None."""
        try:
            page = getUrl(url).result
            return re.compile("file *: *'(http.+?)'").findall(page)[-1]
        except:
            return
class stagevu:
    """Resolver for stagevu.com."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['stagevu.com'], 'host': ['StageVu'],
                'quality': 'Low', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return the video <embed src> URL from the page, or None."""
        try:
            page = getUrl(url).result
            return common.parseDOM(page, "embed", ret="src", attrs = { "type": "video.+?" })[0]
        except:
            return
class streamcloud:
    """Resolver for streamcloud.eu."""
    def info(self):
        return {
            'netloc': ['streamcloud.eu'],
            'host': ['Streamcloud'],
            'quality': 'Medium',
            'captcha': False,
            'a/c': False
        }
    def resolve(self, url):
        """Re-post the page's 'proform' (switched to the download2 step) and
        return the resulting stream URL; None on failure."""
        try:
            url = re.compile('//.+?/([\w]+)').findall(url)[0]
            url = 'http://streamcloud.eu/%s' % url
            result = getUrl(url).result
            post = {}
            f = common.parseDOM(result, "form", attrs = { "class": "proform" })[0]
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post = urllib.urlencode(post)
            # Skip the countdown step by posting the second-stage op directly.
            post = post.replace('op=download1', 'op=download2')
            result = getUrl(url, post=post).result
            url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
            return url
        except:
            return
class streamin:
    """Resolver for streamin.to embeds."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['streamin.to'], 'host': ['Streamin'],
                'quality': 'Medium', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return a direct media URL, or None on any failure."""
        try:
            media_id = re.compile('//.+?/([\w]+)').findall(url.replace('/embed-', '/'))[0]
            embed = 'http://streamin.to/embed-%s.html' % media_id
            page = getUrl(embed, mobile=True).result
            return re.compile("file *: *[\'|\"](http.+?)[\'|\"]").findall(page)[-1]
        except:
            return
class thefile:
    """Resolver for thefile.me embeds."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['thefile.me'], 'host': ['Thefile'],
                'quality': 'Low', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Unpack the embed page's packed player JS into a media URL, or None."""
        try:
            media_id = re.compile('//.+?/([\w]+)').findall(url.replace('/embed-', '/'))[0]
            embed = 'http://thefile.me/embed-%s.html' % media_id
            page = getUrl(embed, mobile=True).result
            packed = re.compile('(eval.*?\)\)\))').findall(page)[-1]
            return js().worker(packed)
        except:
            return
class thevideo:
    """Resolver for thevideo.me embeds."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['thevideo.me'], 'host': ['Thevideo'],
                'quality': 'Low', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return the last source's 'file' URL from the embed page, or None."""
        try:
            media_id = re.compile('//.+?/([\w]+)').findall(url.replace('/embed-', '/'))[0]
            embed = 'http://thevideo.me/embed-%s.html' % media_id
            page = getUrl(embed).result.replace('\n', '')
            import ast
            # 'sources' is a JS literal list of dicts; parse it safely.
            raw = re.compile("'sources' *: *(\[.+?\])").findall(page)[-1]
            sources = ast.literal_eval(raw)
            return sources[-1]['file']
        except:
            return
class tusfiles:
    """Resolver for tusfiles.net."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['tusfiles.net'], 'host': ['Tusfiles'],
                'quality': 'High', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Unpack the page's packed player JS into a media URL, or None."""
        try:
            page = getUrl(url).result
            packed = re.compile('(eval.*?\)\)\))').findall(page)[-1]
            return js().worker(packed)
        except:
            return
class uploadc:
    """Resolver for uploadc.com / zalaa.com embeds."""
    def info(self):
        # Static metadata describing these hosts.
        return {'netloc': ['uploadc.com', 'zalaa.com'],
                'host': ['Uploadc', 'Zalaa'],
                'quality': 'High', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return the plain file URL if present, otherwise unpack the player JS;
        None on failure."""
        try:
            media_id = re.compile('//.+?/([\w]+)').findall(url.replace('/embed-', '/'))[0]
            embed = 'http://uploadc.com/embed-%s.html' % media_id
            page = getUrl(embed, mobile=True).result
            direct = re.compile("'file' *, *'(.+?)'").findall(page)
            if len(direct) > 0: return direct[0]
            packed = re.compile('(eval.*?\)\)\))').findall(page)[-1]
            return js().worker(packed)
        except:
            return
class uploadrocket:
    """Resolver for uploadrocket.net (two-step form + captcha)."""
    def info(self):
        return {
            'netloc': ['uploadrocket.net'],
            'host': ['Uploadrocket'],
            'quality': 'High',
            'captcha': True,
            'a/c': False
        }
    def resolve(self, url):
        """Submit the free/premium chooser and the F1 form (with captcha), then
        return the href of the DL onclick link; None on failure."""
        try:
            result = getUrl(url).result
            # Pages are served as latin-1; re-encode to utf-8 before parsing.
            result = result.decode('iso-8859-1').encode('utf-8')
            post = {}
            f = common.parseDOM(result, "Form", attrs = { "name": "freeorpremium" })[0]
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post.update({'method_isfree': 'Click for Free Download'})
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
            result = result.decode('iso-8859-1').encode('utf-8')
            post = {}
            f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[0]
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post.update(captcha().worker(result))
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
            result = result.decode('iso-8859-1').encode('utf-8')
            url = common.parseDOM(result, "a", ret="href", attrs = { "onclick": "DL.+?" })[0]
            return url
        except:
            return
class uptobox:
    """Resolver for uptobox.com."""
    def info(self):
        return {
            'netloc': ['uptobox.com'],
            'host': ['Uptobox'],
            'quality': 'High',
            'captcha': False,
            'a/c': False
        }
    def resolve(self, url):
        """Re-post the F1 form, then extract the uptobox link from the
        'button_upload' block; None on failure."""
        try:
            result = getUrl(url).result
            post = {}
            f = common.parseDOM(result, "form", attrs = { "name": "F1" })[0]
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
            url = common.parseDOM(result, "div", attrs = { "align": ".+?" })
            url = [i for i in url if 'button_upload' in i][0]
            url = common.parseDOM(url, "a", ret="href")[0]
            # The href may concatenate several URLs; keep the uptobox one.
            url = ['http' + i for i in url.split('http') if 'uptobox.com' in i][0]
            return url
        except:
            return
class v_vids:
    """Resolver for v-vids.com."""
    def info(self):
        return {
            'netloc': ['v-vids.com'],
            'host': ['V-vids'],
            'quality': 'High',
            'captcha': False,
            'a/c': False
        }
    def resolve(self, url):
        """Re-post the F1 form and return the 'downloadbutton' link's href;
        None on failure."""
        try:
            result = getUrl(url).result
            post = {}
            f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[0]
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post.update({'method_free': '', 'method_premium': ''})
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
            url = common.parseDOM(result, "a", ret="href", attrs = { "id": "downloadbutton" })[0]
            return url
        except:
            return
class veehd:
    """Resolver for veehd.com (multi-request session dance)."""
    def info(self):
        return {
            'netloc': ['veehd.com'],
        }
    def resolve(self, url):
        """Follow the load_download frame (priming the session via its iframe
        if present), then return the first media link found; None on failure."""
        try:
            result = getUrl(url, close=False).result
            result = result.replace('\n','')
            url = re.compile('function\s*load_download.+?src\s*:\s*"(.+?)"').findall(result)[0]
            url = urlparse.urljoin('http://veehd.com', url)
            result = getUrl(url, close=False).result
            i = common.parseDOM(result, "iframe", ret="src")
            if len(i) > 0:
                # Visiting the iframe first primes the session cookies.
                i = urlparse.urljoin('http://veehd.com', i[0])
                getUrl(i, close=False).result
                result = getUrl(url).result
            # Try direct video links, then divx/avi embeds, then a JSON 'url'.
            url = re.compile('href *= *"([^"]+(?:mkv|mp4|avi))"').findall(result)
            url += re.compile('src *= *"([^"]+(?:divx|avi))"').findall(result)
            url += re.compile('"url" *: *"(.+?)"').findall(result)
            url = urllib.unquote(url[0])
            return url
        except:
            return
class vidbull:
    """Resolver for vidbull.com."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['vidbull.com'], 'host': ['Vidbull'],
                'quality': 'Low', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return the <source src> video URL from the mobile page, or None."""
        try:
            page = getUrl(url, mobile=True).result
            return common.parseDOM(page, "source", ret="src", attrs = { "type": "video.+?" })[0]
        except:
            return
class videomega:
    """Resolver for videomega.tv."""
    def info(self):
        # Matched by netloc only; not listed as a selectable host.
        return {'netloc': ['videomega.tv']}
    def resolve(self, url):
        """Fetch the cdn player page for the ref id and return its <source src>,
        or None on failure."""
        try:
            ref = urlparse.parse_qsl(urlparse.urlparse(url).query)[0][1]
            cdn = 'http://videomega.tv/cdn.php?ref=%s' % ref
            page = getUrl(cdn, mobile=True).result
            return common.parseDOM(page, "source", ret="src", attrs = { "type": "video.+?" })[0]
        except:
            return
class vidplay:
    """Resolver for vidplay.net embeds."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['vidplay.net'], 'host': ['Vidplay'],
                'quality': 'High', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Follow the vidembed redirect to the real file; None when the
        request does not redirect (dead file) or on any error."""
        try:
            media_id = re.compile('//.+?/([\w]+)').findall(url.replace('/embed-', '/'))[0]
            embed = 'http://vidplay.net/vidembed-%s' % media_id
            final = getUrl(embed, output='geturl').result
            if embed == final: raise Exception()
            return final
        except:
            return
class vidspot:
    """Resolver for vidspot.net embeds."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['vidspot.net'], 'host': ['Vidspot'],
                'quality': 'Medium', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Return the stream URL with 'direct=false' appended to its query, or None."""
        try:
            media_id = re.compile('//.+?/([\w]+)').findall(url.replace('/embed-', '/'))[0]
            embed = 'http://vidspot.net/embed-%s.html' % media_id
            page = getUrl(embed, mobile=True).result
            media = re.compile('"file" *: *"(http.+?)"').findall(page)[-1]
            query = urlparse.urlparse(media).query
            base = media[:media.find('?')]
            return '%s?%s&direct=false' % (base, query)
        except:
            return
class vidto:
    """Resolver for vidto.me embeds."""
    def info(self):
        return {
            'netloc': ['vidto.me'],
            'host': ['Vidto'],
            'quality': 'Medium',
            'captcha': False,
            'a/c': False
        }
    def resolve(self, url):
        """Repair the page's malformed packed JS, unpack it, and return the
        media URL; None on failure."""
        try:
            url = url.replace('/embed-', '/')
            url = re.compile('//.+?/([\w]+)').findall(url)[0]
            url = 'http://vidto.me/embed-%s.html' % url
            result = getUrl(url).result
            result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
            # vidto's packed JS is missing a statement separator before the
            # packer argument list; inject a ';' so the unpacker can parse it.
            result = re.sub(r'(\',\d*,\d*,)', r';\1', result)
            url = js().worker(result)
            return url
        except:
            return
class vidzi:
    """Resolver for vidzi.tv."""
    def info(self):
        # Static metadata describing this host.
        return {'netloc': ['vidzi.tv'], 'host': ['Vidzi'],
                'quality': 'Low', 'captcha': False, 'a/c': False}
    def resolve(self, url):
        """Prefer an m3u8 stream from the player's sources; otherwise the first
        direct file. Returns None when nothing matches or on any error."""
        try:
            page = getUrl(url, mobile=True).result.replace('\n', '')
            sources = re.compile('sources *: *\[.+?\]').findall(page)[-1]
            files = re.compile('file *: *"(http.+?)"').findall(sources)
            hls = [f for f in files if '.m3u8' in f]
            if len(hls) > 0: return hls[0]
            direct = [f for f in files if not '.m3u8' in f]
            if len(direct) > 0: return direct[0]
        except:
            return
class vimeo:
    """Resolver for vimeo.com videos via the player config API."""
    def info(self):
        return {
            'netloc': ['vimeo.com']
        }
    def resolve(self, url):
        """Return an h264 stream URL from the player config; None on failure."""
        try:
            # The numeric path segment is the video id.
            url = [i for i in url.split('/') if i.isdigit()][-1]
            url = 'http://player.vimeo.com/video/%s/config' % url
            result = getUrl(url).result
            result = json.loads(result)
            u = result['request']['files']['h264']
            url = None
            try: url = u['hd']['url']
            except: pass
            # NOTE(review): this overwrites the HD link whenever an SD link
            # exists, so SD is effectively preferred — confirm the ordering
            # is intended and not reversed.
            try: url = u['sd']['url']
            except: pass
            return url
        except:
            return
class vk:
    def info(self):
        """Only the netloc is advertised for vk.com."""
        return {
            'netloc': ['vk.com']
            }
    def resolve(self, url):
        """Resolve a vk.com video page to a list of quality-tagged URLs.

        Scrapes ``url<res>=...`` parameters from the page and returns a
        list of ``{'quality': 'HD'|'SD', 'url': ...}`` dicts: 720p (HD)
        plus 540/480 (SD) when available, falling back to 360 and then
        240. Returns None when nothing was found or on any failure.
        """
        try:
            url = url.replace('https://', 'http://')
            result = getUrl(url).result
            u = re.compile('url(720|540|480|360|240)=(.+?)&').findall(result)
            url = []
            # 720p is tagged HD; every lower resolution is tagged SD.
            try: url += [[{'quality': 'HD', 'url': i[1]} for i in u if i[0] == '720'][0]]
            except: pass
            try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '540'][0]]
            except: pass
            try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '480'][0]]
            except: pass
            if not url == []: return url
            # Only consulted when none of 720/540/480 matched.
            try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '360'][0]]
            except: pass
            if not url == []: return url
            try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '240'][0]]
            except: pass
            if url == []: return
            return url
        except:
            return
class vodlocker:
    def info(self):
        """Hoster metadata consumed by the resolver framework."""
        return {
            'netloc': ['vodlocker.com'],
            'host': ['Vodlocker'],
            'quality': 'Low',
            'captcha': False,
            'a/c': False
            }
    def resolve(self, url):
        """Resolve a Vodlocker embed page to a direct media URL.

        Takes the last ``file : "..."`` entry of the mobile page.
        Returns None on any failure.
        """
        try:
            url = url.replace('/embed-', '/')
            url = re.compile('//.+?/([\w]+)').findall(url)[0]
            url = 'http://vodlocker.com/embed-%s.html' % url
            result = getUrl(url, mobile=True).result
            url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
            return url
        except:
            return
class xfileload:
    def info(self):
        """Hoster metadata; this host requires solving a captcha."""
        return {
            'netloc': ['xfileload.com'],
            'host': ['Xfileload'],
            'quality': 'High',
            'captcha': True,
            'a/c': False
            }
    def resolve(self, url):
        """Resolve an xfileload.com page, solving its captcha form.

        Collects the hidden form fields plus the captcha answer, posts
        them back, then polls up to 5 times (1 second apart) for the
        download link to appear. Returns None on any failure.
        """
        try:
            result = getUrl(url, close=False).result
            post = {}
            f = common.parseDOM(result, "Form", attrs = { "action": "" })
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post.update(captcha().worker(result))
            post = urllib.urlencode(post)
            import time
            request = urllib2.Request(url, post)
            for i in range(0, 5):
                try:
                    response = urllib2.urlopen(request, timeout=10)
                    result = response.read()
                    response.close()
                    # 'download2' means the wait page is still showing;
                    # raise to fall into the retry sleep below.
                    if 'download2' in result: raise Exception()
                    url = common.parseDOM(result, "a", ret="href", attrs = { "target": "" })[0]
                    return url
                except:
                    time.sleep(1)
        except:
            return
class xvidstage:
    def info(self):
        """Hoster metadata consumed by the resolver framework."""
        return {
            'netloc': ['xvidstage.com'],
            'host': ['Xvidstage'],
            'quality': 'Medium',
            'captcha': False,
            'a/c': False
            }
    def resolve(self, url):
        """Resolve an Xvidstage embed page to a direct media URL.

        Unpacks the last ``eval(...)`` blob via the project's js()
        unpacker. Returns None on any failure.
        """
        try:
            url = url.replace('/embed-', '/')
            url = re.compile('//.+?/([\w]+)').findall(url)[0]
            url = 'http://xvidstage.com/embed-%s.html' % url
            result = getUrl(url, mobile=True).result
            result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
            url = js().worker(result)
            return url
        except:
            return
class youtube:
    def info(self):
        """Hoster metadata consumed by the resolver framework."""
        return {
            'netloc': ['youtube.com'],
            'host': ['Youtube'],
            'quality': 'Medium',
            'captcha': False,
            'a/c': False
            }
    def resolve(self, url):
        """Resolve a YouTube URL to a Kodi youtube-plugin URL.

        Extracts the video id, verifies the video is available (no
        unavailable-submessage text and no notification banner), and
        hands playback to plugin.video.youtube. Returns None on any
        failure or when the video is unavailable.
        """
        try:
            id = url.split("?v=")[-1].split("/")[-1].split("?")[0].split("&")[0]
            result = getUrl('http://www.youtube.com/watch?v=%s' % id).result
            message = common.parseDOM(result, "div", attrs = { "id": "unavailable-submessage" })
            message = ''.join(message)
            alert = common.parseDOM(result, "div", attrs = { "id": "watch7-notification-area" })
            # Any notification banner or non-empty unavailable message
            # means the video cannot be played.
            if len(alert) > 0: raise Exception()
            if re.search('[a-zA-Z]', message): raise Exception()
            url = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % id
            return url
        except:
            return
class zettahost:
    def info(self):
        """Hoster metadata consumed by the resolver framework."""
        return {
            'netloc': ['zettahost.tv'],
            'host': ['Zettahost'],
            'quality': 'High',
            'captcha': False,
            'a/c': False
            }
    def resolve(self, url):
        """Resolve a Zettahost embed page to a direct media URL.

        Unpacks the last ``eval(...)`` blob via the project's js()
        unpacker. Returns None on any failure.
        """
        try:
            url = url.replace('/embed-', '/')
            url = re.compile('//.+?/([\w]+)').findall(url)[0]
            url = 'http://zettahost.tv/embed-%s.html' % url
            result = getUrl(url, mobile=True).result
            result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
            url = js().worker(result)
            return url
        except:
            return
|
vinayan3/clpricehistory
|
refs/heads/master
|
django/contrib/localflavor/ro/forms.py
|
273
|
# -*- coding: utf-8 -*-
"""
Romanian specific form helpers.
"""
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError, Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
class ROCIFField(RegexField):
    """
    A Romanian fiscal identity code (CIF) field

    For CIF validation algorithm see http://www.validari.ro/cui.html
    """
    default_error_messages = {
        'invalid': _("Enter a valid CIF."),
    }

    def __init__(self, *args, **kwargs):
        # 2-10 digits, optionally prefixed with the country code 'RO'.
        super(ROCIFField, self).__init__(r'^(RO)?[0-9]{2,10}', max_length=10,
            min_length=2, *args, **kwargs)

    def clean(self, value):
        """
        CIF validation

        Verifies the weighted checksum of the digits; the last digit is
        the control digit. Returns the cleaned value (with any leading
        'RO' removed) or raises ValidationError.
        """
        value = super(ROCIFField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # strip RO part
        if value[0:2] == 'RO':
            value = value[2:]
        # Weights are applied right-to-left, so both key and value are
        # reversed before pairing digit with weight.
        key = '753217532'[::-1]
        value = value[::-1]
        key_iter = iter(key)
        checksum = 0
        for digit in value[1:]:
            # NOTE: .next() is the Python 2 iterator protocol.
            checksum += int(digit) * int(key_iter.next())
        checksum = checksum * 10 % 11
        if checksum == 10:
            checksum = 0
        # value[0] is the control digit (original last digit).
        if checksum != int(value[0]):
            raise ValidationError(self.error_messages['invalid'])
        return value[::-1]
class ROCNPField(RegexField):
    """
    A Romanian personal identity code (CNP) field

    For CNP validation algorithm see http://www.validari.ro/cnp.html
    """
    default_error_messages = {
        'invalid': _("Enter a valid CNP."),
    }

    def __init__(self, *args, **kwargs):
        # Exactly 13 digits, first digit non-zero.
        super(ROCNPField, self).__init__(r'^[1-9][0-9]{12}', max_length=13,
            min_length=13, *args, **kwargs)

    def clean(self, value):
        """
        CNP validations

        Checks that digits 2-7 form a plausible YYMMDD birthdate and
        that the weighted checksum matches the final control digit.
        """
        value = super(ROCNPField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # check birthdate digits
        import datetime
        try:
            # datetime.date validates the month/day ranges; the
            # two-digit year is passed through as a literal year.
            datetime.date(int(value[1:3]),int(value[3:5]),int(value[5:7]))
        except:
            raise ValidationError(self.error_messages['invalid'])
        # checksum: each of the first 12 digits is weighted by 'key'.
        key = '279146358279'
        checksum = 0
        value_iter = iter(value)
        for digit in key:
            # NOTE: .next() is the Python 2 iterator protocol.
            checksum += int(digit) * int(value_iter.next())
        checksum %= 11
        if checksum == 10:
            checksum = 1
        if checksum != int(value[12]):
            raise ValidationError(self.error_messages['invalid'])
        return value
class ROCountyField(Field):
    """
    A form field that validates its input is a Romanian county name or
    abbreviation. It normalizes the input to the standard vehicle registration
    abbreviation for the given county

    WARNING: This field will only accept names written with diacritics; consider
    using ROCountySelect if this behavior is unnaceptable for you
    Example:
        Argeş => valid
        Arges => invalid
    """
    default_error_messages = {
        'invalid': u'Enter a Romanian county code or name.',
    }

    def clean(self, value):
        """Return the county abbreviation for a code or full name.

        Accepts either the two-letter code or the full (diacritic)
        county name, case-insensitively; raises ValidationError for
        anything else.
        """
        from ro_counties import COUNTIES_CHOICES
        super(ROCountyField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        try:
            value = value.strip().upper()
        except AttributeError:
            pass
        # search for county code
        for entry in COUNTIES_CHOICES:
            if value in entry:
                return value
        # search for county name: compare against upper-cased names and
        # return the matching code.
        normalized_CC = []
        for entry in COUNTIES_CHOICES:
            normalized_CC.append((entry[0],entry[1].upper()))
        for entry in normalized_CC:
            if entry[1] == value:
                return entry[0]
        raise ValidationError(self.error_messages['invalid'])
class ROCountySelect(Select):
    """
    A Select widget that uses a list of Romanian counties (judete) as its
    choices.
    """
    def __init__(self, attrs=None):
        # Choices are loaded lazily at construction time.
        from ro_counties import COUNTIES_CHOICES
        super(ROCountySelect, self).__init__(attrs, choices=COUNTIES_CHOICES)
class ROIBANField(RegexField):
    """
    Romanian International Bank Account Number (IBAN) field

    For Romanian IBAN validation algorithm see http://validari.ro/iban.html
    """
    default_error_messages = {
        'invalid': _('Enter a valid IBAN in ROXX-XXXX-XXXX-XXXX-XXXX-XXXX format'),
    }

    def __init__(self, *args, **kwargs):
        # Allow dashes/spaces as separators; they are stripped in clean().
        super(ROIBANField, self).__init__(r'^[0-9A-Za-z\-\s]{24,40}$',
            max_length=40, min_length=24, *args, **kwargs)

    def clean(self, value):
        """
        Strips - and spaces, performs country code and checksum validation

        Uses the standard IBAN mod-97 check: the first four characters
        are moved to the end, letters become numbers (A=10 ... Z=35),
        and the resulting integer must be congruent to 1 mod 97.
        """
        value = super(ROIBANField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        value = value.replace('-','')
        value = value.replace(' ','')
        value = value.upper()
        if value[0:2] != 'RO':
            raise ValidationError(self.error_messages['invalid'])
        numeric_format = ''
        for char in value[4:] + value[0:4]:
            if char.isalpha():
                # ord('A') - 55 == 10, so letters map to 10..35.
                numeric_format += str(ord(char) - 55)
            else:
                numeric_format += char
        if int(numeric_format) % 97 != 1:
            raise ValidationError(self.error_messages['invalid'])
        return value
class ROPhoneNumberField(RegexField):
    """Romanian phone number field"""
    default_error_messages = {
        'invalid': _('Phone numbers must be in XXXX-XXXXXX format.'),
    }

    def __init__(self, *args, **kwargs):
        # The regex is permissive (10-20 chars) to allow separators; the
        # real length check happens in clean() after stripping them.
        super(ROPhoneNumberField, self).__init__(r'^[0-9\-\(\)\s]{10,20}$',
            max_length=20, min_length=10, *args, **kwargs)

    def clean(self, value):
        """
        Strips -, (, ) and spaces. Checks the final length.
        """
        value = super(ROPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        value = value.replace('-','')
        value = value.replace('(','')
        value = value.replace(')','')
        value = value.replace(' ','')
        # After removing separators exactly 10 digits must remain.
        if len(value) != 10:
            raise ValidationError(self.error_messages['invalid'])
        return value
class ROPostalCodeField(RegexField):
    """Romanian postal code field."""
    default_error_messages = {
        'invalid': _('Enter a valid postal code in the format XXXXXX'),
    }

    def __init__(self, *args, **kwargs):
        # Six digits; the second digit is restricted to 0-8 by the regex.
        super(ROPostalCodeField, self).__init__(r'^[0-9][0-8][0-9]{4}$',
            max_length=6, min_length=6, *args, **kwargs)
|
jborean93/ansible
|
refs/heads/devel
|
hacking/build_library/build_ansible/__init__.py
|
12133432
| |
ryanjmccall/nupic
|
refs/heads/master
|
tests/integration/nupic/algorithms/monitor_mixin/__init__.py
|
12133432
| |
vishwaprakashmishra/xmatrix
|
refs/heads/master
|
vumi/transports/httprpc/tests/__init__.py
|
12133432
| |
ltilve/ChromiumGStreamerBackend
|
refs/heads/master
|
tools/chrome_proxy/integration_tests/__init__.py
|
12133432
| |
ingenieroariel/geonode
|
refs/heads/master
|
geonode/contrib/api_basemaps/tests.py
|
12133432
| |
40223144/2015cdafinal
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/xml/sax/saxutils.py
|
730
|
"""\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
import io
from . import handler
from . import xmlreader
def __dict_replace(s, d):
    """Return *s* with every key of *d* replaced by its mapped value."""
    result = s
    for pattern in d:
        result = result.replace(pattern, d[pattern])
    return result
def escape(data, entities={}):
    """Escape &, <, and > in a string of data.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # Ampersand first, so the '&' introduced by '&gt;'/'&lt;' is not
    # escaped a second time.
    escaped = data.replace("&", "&amp;").replace(">", "&gt;").replace("<", "&lt;")
    if entities:
        escaped = __dict_replace(escaped, entities)
    return escaped
def unescape(data, entities={}):
    """Unescape &amp;, &lt;, and &gt; in a string of data.

    You can unescape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    text = data.replace("&lt;", "<").replace("&gt;", ">")
    if entities:
        text = __dict_replace(text, entities)
    # Ampersand last, so '&amp;lt;' does not collapse into '<'.
    return text.replace("&amp;", "&")
def quoteattr(data, entities={}):
    """Escape and quote an attribute value.

    Escape &, <, and > in a string of data, then quote it for use as
    an attribute value.  The \" character will be escaped as well, if
    necessary.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # Whitespace characters are escaped numerically so they survive
    # attribute-value normalization.
    extra = entities.copy()
    extra.update({'\n': '&#10;', '\r': '&#13;', '\t': '&#9;'})
    data = escape(data, extra)
    if '"' not in data:
        return '"%s"' % data
    if "'" not in data:
        return "'%s'" % data
    # Both quote kinds present: escape the double quotes.
    return '"%s"' % data.replace('"', "&quot;")
def _gettextwriter(out, encoding):
    """Return a text writer for *out*, wrapping binary streams as needed.

    None -> sys.stdout; an existing text stream is returned unchanged;
    raw/binary streams (and bare objects with a write method) are
    wrapped in a TextIOWrapper using *encoding*.
    """
    if out is None:
        import sys
        return sys.stdout

    if isinstance(out, io.TextIOBase):
        # use a text writer as is
        return out

    # wrap a binary writer with TextIOWrapper
    if isinstance(out, io.RawIOBase):
        # Keep the original file open when the TextIOWrapper is
        # destroyed
        class _wrapper:
            # Impersonate the wrapped stream's class while delegating
            # every attribute access to it; close() is a no-op so the
            # caller's stream stays open.
            __class__ = out.__class__
            def __getattr__(self, name):
                return getattr(out, name)
        buffer = _wrapper()
        buffer.close = lambda: None
    else:
        # This is to handle passed objects that aren't in the
        # IOBase hierarchy, but just have a write method
        buffer = io.BufferedIOBase()
        buffer.writable = lambda: True
        buffer.write = out.write
        try:
            # TextIOWrapper uses this methods to determine
            # if BOM (for UTF-16, etc) should be added
            buffer.seekable = out.seekable
            buffer.tell = out.tell
        except AttributeError:
            pass
    return io.TextIOWrapper(buffer, encoding=encoding,
                            errors='xmlcharrefreplace',
                            newline='\n',
                            write_through=True)
class XMLGenerator(handler.ContentHandler):
    """ContentHandler that serializes SAX events back to XML text.

    With short_empty_elements=True, elements with no content are
    written as '<tag/>' instead of '<tag></tag>'.
    """

    def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False):
        handler.ContentHandler.__init__(self)
        out = _gettextwriter(out, encoding)
        self._write = out.write
        self._flush = out.flush
        self._ns_contexts = [{}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        self._undeclared_ns_maps = []
        self._encoding = encoding
        self._short_empty_elements = short_empty_elements
        # True while a start tag is open and still awaiting '>' or '/>'.
        self._pending_start_element = False

    def _qname(self, name):
        """Builds a qualified name from a (ns_url, localname) pair"""
        if name[0]:
            # Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
            # bound by definition to http://www.w3.org/XML/1998/namespace.  It
            # does not need to be declared and will not usually be found in
            # self._current_context.
            if 'http://www.w3.org/XML/1998/namespace' == name[0]:
                return 'xml:' + name[1]
            # The name is in a non-empty namespace
            prefix = self._current_context[name[0]]
            if prefix:
                # If it is not the default namespace, prepend the prefix
                return prefix + ":" + name[1]
        # Return the unqualified name
        return name[1]

    def _finish_pending_start_element(self,endElement=False):
        """Close a pending start tag with '>' if one is open.

        NOTE(review): the endElement parameter is never used.
        """
        if self._pending_start_element:
            self._write('>')
            self._pending_start_element = False

    # ContentHandler methods

    def startDocument(self):
        """Write the XML declaration using the configured encoding."""
        self._write('<?xml version="1.0" encoding="%s"?>\n' %
                        self._encoding)

    def endDocument(self):
        """Flush the underlying writer."""
        self._flush()

    def startPrefixMapping(self, prefix, uri):
        # Push a copy of the current context; record the mapping so the
        # next start tag emits its xmlns declaration.
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix
        self._undeclared_ns_maps.append((prefix, uri))

    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts[-1]
        del self._ns_contexts[-1]

    def startElement(self, name, attrs):
        """Write a start tag (non-namespace mode) with quoted attributes."""
        self._finish_pending_start_element()
        self._write('<' + name)
        for (name, value) in attrs.items():
            self._write(' %s=%s' % (name, quoteattr(value)))
        if self._short_empty_elements:
            self._pending_start_element = True
        else:
            self._write(">")

    def endElement(self, name):
        """Write '</name>', or collapse an empty element to '<name/>'."""
        if self._pending_start_element:
            self._write('/>')
            self._pending_start_element = False
        else:
            self._write('</%s>' % name)

    def startElementNS(self, name, qname, attrs):
        """Write a start tag (namespace mode), emitting any pending xmlns."""
        self._finish_pending_start_element()
        self._write('<' + self._qname(name))

        for prefix, uri in self._undeclared_ns_maps:
            if prefix:
                self._write(' xmlns:%s="%s"' % (prefix, uri))
            else:
                self._write(' xmlns="%s"' % uri)
        self._undeclared_ns_maps = []

        for (name, value) in attrs.items():
            self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
        if self._short_empty_elements:
            self._pending_start_element = True
        else:
            self._write(">")

    def endElementNS(self, name, qname):
        if self._pending_start_element:
            self._write('/>')
            self._pending_start_element = False
        else:
            self._write('</%s>' % self._qname(name))

    def characters(self, content):
        # Escaped character data; empty content is skipped entirely.
        if content:
            self._finish_pending_start_element()
            self._write(escape(content))

    def ignorableWhitespace(self, content):
        # Whitespace is written verbatim (no escaping needed).
        if content:
            self._finish_pending_start_element()
            self._write(content)

    def processingInstruction(self, target, data):
        self._finish_pending_start_element()
        self._write('<?%s %s?>' % (target, data))
class XMLFilterBase(xmlreader.XMLReader):
    """This class is designed to sit between an XMLReader and the
    client application's event handlers.  By default, it does nothing
    but pass requests up to the reader and events on to the handlers
    unmodified, but subclasses can override specific methods to modify
    the event stream or the configuration requests as they pass
    through."""

    def __init__(self, parent = None):
        """Store the parent reader; it may also be set later via setParent."""
        xmlreader.XMLReader.__init__(self)
        self._parent = parent

    # ErrorHandler methods
    # All handler methods below simply forward to the registered
    # handlers inherited from XMLReader.

    def error(self, exception):
        self._err_handler.error(exception)

    def fatalError(self, exception):
        self._err_handler.fatalError(exception)

    def warning(self, exception):
        self._err_handler.warning(exception)

    # ContentHandler methods

    def setDocumentLocator(self, locator):
        self._cont_handler.setDocumentLocator(locator)

    def startDocument(self):
        self._cont_handler.startDocument()

    def endDocument(self):
        self._cont_handler.endDocument()

    def startPrefixMapping(self, prefix, uri):
        self._cont_handler.startPrefixMapping(prefix, uri)

    def endPrefixMapping(self, prefix):
        self._cont_handler.endPrefixMapping(prefix)

    def startElement(self, name, attrs):
        self._cont_handler.startElement(name, attrs)

    def endElement(self, name):
        self._cont_handler.endElement(name)

    def startElementNS(self, name, qname, attrs):
        self._cont_handler.startElementNS(name, qname, attrs)

    def endElementNS(self, name, qname):
        self._cont_handler.endElementNS(name, qname)

    def characters(self, content):
        self._cont_handler.characters(content)

    def ignorableWhitespace(self, chars):
        self._cont_handler.ignorableWhitespace(chars)

    def processingInstruction(self, target, data):
        self._cont_handler.processingInstruction(target, data)

    def skippedEntity(self, name):
        self._cont_handler.skippedEntity(name)

    # DTDHandler methods

    def notationDecl(self, name, publicId, systemId):
        self._dtd_handler.notationDecl(name, publicId, systemId)

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)

    # EntityResolver methods

    def resolveEntity(self, publicId, systemId):
        return self._ent_handler.resolveEntity(publicId, systemId)

    # XMLReader methods

    def parse(self, source):
        """Install self as the parent's handler set, then parse *source*."""
        self._parent.setContentHandler(self)
        self._parent.setErrorHandler(self)
        self._parent.setEntityResolver(self)
        self._parent.setDTDHandler(self)
        self._parent.parse(source)

    def setLocale(self, locale):
        self._parent.setLocale(locale)

    def getFeature(self, name):
        return self._parent.getFeature(name)

    def setFeature(self, name, state):
        self._parent.setFeature(name, state)

    def getProperty(self, name):
        return self._parent.getProperty(name)

    def setProperty(self, name, value):
        self._parent.setProperty(name, value)

    # XMLFilter methods

    def getParent(self):
        return self._parent

    def setParent(self, parent):
        self._parent = parent
# --- Utility functions
def prepare_input_source(source, base=""):
    """This function takes an InputSource and an optional base URL and
    returns a fully resolved InputSource object ready for reading.

    Strings become system ids; objects with a read() method become the
    byte stream.  If no byte stream is attached yet, the system id is
    resolved first as a file relative to *base*, then as a URL.
    """

    if isinstance(source, str):
        source = xmlreader.InputSource(source)
    elif hasattr(source, "read"):
        f = source
        source = xmlreader.InputSource()
        source.setByteStream(f)
        if hasattr(f, "name"):
            source.setSystemId(f.name)

    if source.getByteStream() is None:
        sysid = source.getSystemId()
        basehead = os.path.dirname(os.path.normpath(base))
        sysidfilename = os.path.join(basehead, sysid)
        if os.path.isfile(sysidfilename):
            # Local file relative to the base document.
            source.setSystemId(sysidfilename)
            f = open(sysidfilename, "rb")
        else:
            # Fall back to treating the system id as a URL.
            source.setSystemId(urllib.parse.urljoin(base, sysid))
            f = urllib.request.urlopen(source.getSystemId())

        source.setByteStream(f)

    return source
|
lthurlow/Network-Grapher
|
refs/heads/master
|
proj/external/networkx-1.7/networkx/algorithms/tests/test_distance_regular.py
|
87
|
#!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestDistanceRegular:
    """Tests for networkx distance-regular graph utilities."""

    def test_is_distance_regular(self):
        """Known distance-regular graphs are accepted; a path is not."""
        assert_true(nx.is_distance_regular(nx.icosahedral_graph()))
        assert_true(nx.is_distance_regular(nx.petersen_graph()))
        assert_true(nx.is_distance_regular(nx.cubical_graph()))
        assert_true(nx.is_distance_regular(nx.complete_bipartite_graph(3,3)))
        assert_true(nx.is_distance_regular(nx.tetrahedral_graph()))
        assert_true(nx.is_distance_regular(nx.dodecahedral_graph()))
        assert_true(nx.is_distance_regular(nx.pappus_graph()))
        assert_true(nx.is_distance_regular(nx.heawood_graph()))
        assert_true(nx.is_distance_regular(nx.cycle_graph(3)))
        # no distance regular
        assert_false(nx.is_distance_regular(nx.path_graph(4)))

    def test_not_connected(self):
        """A disconnected graph is never distance regular."""
        G=nx.cycle_graph(4)
        G.add_cycle([5,6,7])
        assert_false(nx.is_distance_regular(G))

    def test_global_parameters(self):
        """Global parameters derived from the intersection array of cycles."""
        b,c=nx.intersection_array(nx.cycle_graph(5))
        g=nx.global_parameters(b,c)
        assert_equal(list(g),[(0, 0, 2), (1, 0, 1), (1, 1, 0)])
        b,c=nx.intersection_array(nx.cycle_graph(3))
        g=nx.global_parameters(b,c)
        assert_equal(list(g),[(0, 0, 2), (1, 1, 0)])

    def test_intersection_array(self):
        """Intersection arrays of several classic distance-regular graphs."""
        b,c=nx.intersection_array(nx.cycle_graph(5))
        assert_equal(b,[2, 1])
        assert_equal(c,[1, 1])
        b,c=nx.intersection_array(nx.dodecahedral_graph())
        assert_equal(b,[3, 2, 1, 1, 1])
        assert_equal(c,[1, 1, 1, 2, 3])
        b,c=nx.intersection_array(nx.icosahedral_graph())
        assert_equal(b,[5, 2, 1])
        assert_equal(c,[1, 2, 5])
|
rversteegen/commandergenius
|
refs/heads/sdl_android
|
project/jni/python/src/Lib/bsddb/test/test_lock.py
|
33
|
"""
TestCases for testing the locking sub-system.
"""
import time
import unittest
from test_all import db, test_support, verbose, have_threads, \
get_new_environment_path, get_new_database_path
if have_threads :
from threading import Thread
import sys
if sys.version_info[0] < 3 :
from threading import currentThread
else :
from threading import current_thread as currentThread
#----------------------------------------------------------------------
class LockingTestCase(unittest.TestCase):
    """Tests for the Berkeley DB locking subsystem (Python 2 code).

    Each test runs against a fresh DBEnv created in setUp and removed
    in tearDown.
    """
    import sys
    if sys.version_info[:3] < (2, 4, 0):
        # Backfill assertTrue for pre-2.4 unittest.
        def assertTrue(self, expr, msg=None):
            self.failUnless(expr,msg=msg)

    def setUp(self):
        self.homeDir = get_new_environment_path()
        self.env = db.DBEnv()
        self.env.open(self.homeDir, db.DB_THREAD | db.DB_INIT_MPOOL |
                                    db.DB_INIT_LOCK | db.DB_CREATE)

    def tearDown(self):
        self.env.close()
        test_support.rmtree(self.homeDir)

    def test01_simple(self):
        """Acquire and release a single write lock."""
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test01_simple..." % self.__class__.__name__

        anID = self.env.lock_id()
        if verbose:
            print "locker ID: %s" % anID
        lock = self.env.lock_get(anID, "some locked thing", db.DB_LOCK_WRITE)
        if verbose:
            print "Aquired lock: %s" % lock
        self.env.lock_put(lock)
        if verbose:
            print "Released lock: %s" % lock
        self.env.lock_id_free(anID)

    def test02_threaded(self):
        """Hammer the lock system from a mix of reader/writer threads."""
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test02_threaded..." % self.__class__.__name__

        threads = []
        threads.append(Thread(target = self.theThread,
                              args=(db.DB_LOCK_WRITE,)))
        threads.append(Thread(target = self.theThread,
                              args=(db.DB_LOCK_READ,)))
        threads.append(Thread(target = self.theThread,
                              args=(db.DB_LOCK_READ,)))
        threads.append(Thread(target = self.theThread,
                              args=(db.DB_LOCK_WRITE,)))
        threads.append(Thread(target = self.theThread,
                              args=(db.DB_LOCK_READ,)))
        threads.append(Thread(target = self.theThread,
                              args=(db.DB_LOCK_READ,)))
        threads.append(Thread(target = self.theThread,
                              args=(db.DB_LOCK_WRITE,)))
        threads.append(Thread(target = self.theThread,
                              args=(db.DB_LOCK_WRITE,)))
        threads.append(Thread(target = self.theThread,
                              args=(db.DB_LOCK_WRITE,)))

        for t in threads:
            import sys
            # Daemon-thread API differs between Python 2 and 3.
            if sys.version_info[0] < 3 :
                t.setDaemon(True)
            else :
                t.daemon = True
            t.start()
        for t in threads:
            t.join()

    def test03_lock_timeout(self):
        """A conflicting lock_get must time out once a lock timeout is set."""
        self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
        self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
        self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
        self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT)

        # Background thread that runs the expired-lock detector until
        # told to stop; it records how many expirations it saw in
        # function attributes (poor-man's shared state).
        def deadlock_detection() :
            while not deadlock_detection.end :
                deadlock_detection.count = \
                    self.env.lock_detect(db.DB_LOCK_EXPIRE)
                if deadlock_detection.count :
                    while not deadlock_detection.end :
                        pass
                    break
                time.sleep(0.01)

        deadlock_detection.end=False
        deadlock_detection.count=0
        t=Thread(target=deadlock_detection)
        import sys
        if sys.version_info[0] < 3 :
            t.setDaemon(True)
        else :
            t.daemon = True
        t.start()
        self.env.set_timeout(100000, db.DB_SET_LOCK_TIMEOUT)
        anID = self.env.lock_id()
        anID2 = self.env.lock_id()
        self.assertNotEqual(anID, anID2)
        lock = self.env.lock_get(anID, "shared lock", db.DB_LOCK_WRITE)
        start_time=time.time()
        self.assertRaises(db.DBLockNotGrantedError,
                self.env.lock_get,anID2, "shared lock", db.DB_LOCK_READ)
        end_time=time.time()
        deadlock_detection.end=True
        # The 100000us timeout should make the failed acquire take ~0.1s.
        self.assertTrue((end_time-start_time) >= 0.0999)
        self.env.lock_put(lock)
        t.join()
        self.env.lock_id_free(anID)
        self.env.lock_id_free(anID2)
        if db.version() >= (4,6):
            self.assertTrue(deadlock_detection.count>0)

    def theThread(self, lockType):
        """Worker for test02: repeatedly acquire/release one lock type."""
        import sys
        if sys.version_info[0] < 3 :
            name = currentThread().getName()
        else :
            name = currentThread().name

        if lockType == db.DB_LOCK_WRITE:
            lt = "write"
        else:
            lt = "read"

        anID = self.env.lock_id()
        if verbose:
            print "%s: locker ID: %s" % (name, anID)

        for i in xrange(1000) :
            lock = self.env.lock_get(anID, "some locked thing", lockType)
            if verbose:
                print "%s: Aquired %s lock: %s" % (name, lt, lock)

            self.env.lock_put(lock)
            if verbose:
                print "%s: Released %s lock: %s" % (name, lt, lock)

        self.env.lock_id_free(anID)
#----------------------------------------------------------------------
def test_suite():
    """Build the suite; the threaded tests are only included when the
    interpreter was built with thread support."""
    suite = unittest.TestSuite()

    if have_threads:
        suite.addTest(unittest.makeSuite(LockingTestCase))
    else:
        # Without threads only the single-threaded test can run.
        suite.addTest(unittest.makeSuite(LockingTestCase, 'test01'))

    return suite

if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
|
Arulselvanmadhavan/Artist_Recognition_from_Audio_Features
|
refs/heads/master
|
MRTasks/parsingTasks/listS3Files.py
|
2
|
# List every key under the given prefix of the 'cs6240_msd' S3 bucket
# (Python 2 script using boto).
import sys

__author__ = 'arul'

from boto.s3.connection import S3Connection

if __name__ == '__main__':
    # Usage: listS3Files.py <aws_access_key> <aws_secret_key>
    access_key = sys.argv[1]
    access_secret = sys.argv[2]
    conn = S3Connection(access_key,access_secret)
    bucket = conn.get_bucket('cs6240_msd')
    # NOTE(review): the prefix repeats the bucket name -- confirm the keys
    # really live under a 'cs6240_msd/' folder inside the bucket itself.
    for key in bucket.list(prefix='cs6240_msd/'):
        print key
        # print key.name.encode('utf-8')
|
reunition/reunition
|
refs/heads/master
|
reunition/settings/development.py
|
1
|
from .base import *
# Development-only overrides: debug on, dummy cache, console email.
DEBUG = True

INTERNAL_IPS = ["127.0.0.1"]

# Hard-coded key is acceptable only because this settings module is
# never used in production.
SECRET_KEY = "secret"

CACHES = {
    "default": {
        "BACKEND": "django.core.cache.backends.dummy.DummyCache"
    }
}

# Print outgoing email to the console instead of sending it.
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"

## DJANGO DEBUG TOOLBAR SETTINGS
# https://django-debug-toolbar.readthedocs.org
def show_toolbar(request):
    """Show the debug toolbar only for non-AJAX requests by a superuser."""
    if request.is_ajax():
        return False
    return request.user and request.user.is_superuser
# Enable django-debug-toolbar in development only.
MIDDLEWARE_CLASSES += ["debug_toolbar.middleware.DebugToolbarMiddleware", ]
INSTALLED_APPS += ["debug_toolbar", ]

DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
    'HIDE_DJANGO_SQL': True,
    'TAG': 'body',
    'SHOW_TEMPLATE_CONTEXT': True,
    'ENABLE_STACKTRACES': True,
    # Dotted path to the visibility callback defined above.
    'SHOW_TOOLBAR_CALLBACK': 'reunition.settings.development.show_toolbar',
}
DEBUG_TOOLBAR_PANELS = (
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar.panels.staticfiles.StaticFilesPanel',
    'debug_toolbar.panels.templates.TemplatesPanel',
    'debug_toolbar.panels.cache.CachePanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.redirects.RedirectsPanel',
)

# Optional machine-specific overrides; absence is not an error.
try:
    from local_settings import *
except ImportError:
    pass
|
CloudWareChile/OpenChile
|
refs/heads/master
|
openerp/addons/mrp_repair/wizard/__init__.py
|
445
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import cancel_repair
import make_invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Celedhrim/persomov
|
refs/heads/master
|
couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/twitch.py
|
19
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
parse_iso8601,
)
class TwitchIE(InfoExtractor):
# TODO: One broadcast may be split into multiple videos. The key
# 'broadcast_id' is the same for all parts, and 'broadcast_part'
# starts at 1 and increases. Can we treat all parts as one video?
_VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?twitch\.tv/
(?:
(?P<channelid>[^/]+)|
(?:(?:[^/]+)/v/(?P<vodid>[^/]+))|
(?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
(?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
)
/?(?:\#.*)?$
"""
_PAGE_LIMIT = 100
_API_BASE = 'https://api.twitch.tv'
_LOGIN_URL = 'https://secure.twitch.tv/user/login'
_TESTS = [{
'url': 'http://www.twitch.tv/riotgames/b/577357806',
'info_dict': {
'id': 'a577357806',
'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
},
'playlist_mincount': 12,
}, {
'url': 'http://www.twitch.tv/acracingleague/c/5285812',
'info_dict': {
'id': 'c5285812',
'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
},
'playlist_mincount': 3,
}, {
'url': 'http://www.twitch.tv/vanillatv',
'info_dict': {
'id': 'vanillatv',
'title': 'VanillaTV',
},
'playlist_mincount': 412,
}]
def _handle_error(self, response):
if not isinstance(response, dict):
return
error = response.get('error')
if error:
raise ExtractorError(
'%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
expected=True)
def _download_json(self, url, video_id, note='Downloading JSON metadata'):
response = super(TwitchIE, self)._download_json(url, video_id, note)
self._handle_error(response)
return response
def _extract_media(self, item, item_id):
ITEMS = {
'a': 'video',
'v': 'vod',
'c': 'chapter',
}
info = self._extract_info(self._download_json(
'%s/kraken/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
'Downloading %s info JSON' % ITEMS[item]))
if item == 'v':
access_token = self._download_json(
'%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id,
'Downloading %s access token' % ITEMS[item])
formats = self._extract_m3u8_formats(
'http://usher.twitch.tv/vod/%s?nauth=%s&nauthsig=%s'
% (item_id, access_token['token'], access_token['sig']),
item_id, 'mp4')
info['formats'] = formats
return info
response = self._download_json(
'%s/api/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
'Downloading %s playlist JSON' % ITEMS[item])
entries = []
chunks = response['chunks']
qualities = list(chunks.keys())
for num, fragment in enumerate(zip(*chunks.values()), start=1):
formats = []
for fmt_num, fragment_fmt in enumerate(fragment):
format_id = qualities[fmt_num]
fmt = {
'url': fragment_fmt['url'],
'format_id': format_id,
'quality': 1 if format_id == 'live' else 0,
}
m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
if m:
fmt['height'] = int(m.group('height'))
formats.append(fmt)
self._sort_formats(formats)
entry = dict(info)
entry['id'] = '%s_%d' % (entry['id'], num)
entry['title'] = '%s part %d' % (entry['title'], num)
entry['formats'] = formats
entries.append(entry)
return self.playlist_result(entries, info['id'], info['title'])
def _extract_info(self, info):
return {
'id': info['_id'],
'title': info['title'],
'description': info['description'],
'duration': info['length'],
'thumbnail': info['preview'],
'uploader': info['channel']['display_name'],
'uploader_id': info['channel']['name'],
'timestamp': parse_iso8601(info['recorded_at']),
'view_count': info['views'],
}
    def _real_initialize(self):
        # Attempt login before any extraction (no-op without credentials).
        self._login()
    def _login(self):
        """Log in to Twitch with the configured username/password, if any.

        Does nothing when no credentials are configured; raises
        ExtractorError when Twitch renders a login error message.
        """
        (username, password) = self._get_login_info()
        if username is None:
            return
        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')
        # The CSRF token must be scraped from the form and echoed back.
        authenticity_token = self._search_regex(
            r'<input name="authenticity_token" type="hidden" value="([^"]+)"',
            login_page, 'authenticity token')
        login_form = {
            'utf8': '✓'.encode('utf-8'),
            'authenticity_token': authenticity_token,
            'redirect_on_login': '',
            'embed_form': 'false',
            'mp_source_action': '',
            'follow': '',
            'user[login]': username,
            'user[password]': password,
        }
        request = compat_urllib_request.Request(
            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
        request.add_header('Referer', self._LOGIN_URL)
        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)
        # A rendered login_error_message element means credentials were rejected.
        m = re.search(
            r"id=([\"'])login_error_message\1[^>]*>(?P<msg>[^<]+)", response)
        if m:
            raise ExtractorError(
                'Unable to login: %s' % m.group('msg').strip(), expected=True)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj.group('chapterid'):
return self._extract_media('c', mobj.group('chapterid'))
"""
webpage = self._download_webpage(url, chapter_id)
m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
if not m:
raise ExtractorError('Cannot find archive of a chapter')
archive_id = m.group(1)
api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
doc = self._download_xml(
api, chapter_id,
note='Downloading chapter information',
errnote='Chapter information download failed')
for a in doc.findall('.//archive'):
if archive_id == a.find('./id').text:
break
else:
raise ExtractorError('Could not find chapter in chapter information')
video_url = a.find('./video_file_url').text
video_ext = video_url.rpartition('.')[2] or 'flv'
chapter_api_url = 'https://api.twitch.tv/kraken/videos/c' + chapter_id
chapter_info = self._download_json(
chapter_api_url, 'c' + chapter_id,
note='Downloading chapter metadata',
errnote='Download of chapter metadata failed')
bracket_start = int(doc.find('.//bracket_start').text)
bracket_end = int(doc.find('.//bracket_end').text)
# TODO determine start (and probably fix up file)
# youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
#video_url += '?start=' + TODO:start_timestamp
# bracket_start is 13290, but we want 51670615
self._downloader.report_warning('Chapter detected, but we can just download the whole file. '
'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))
info = {
'id': 'c' + chapter_id,
'url': video_url,
'ext': video_ext,
'title': chapter_info['title'],
'thumbnail': chapter_info['preview'],
'description': chapter_info['description'],
'uploader': chapter_info['channel']['display_name'],
'uploader_id': chapter_info['channel']['name'],
}
return info
"""
elif mobj.group('videoid'):
return self._extract_media('a', mobj.group('videoid'))
elif mobj.group('vodid'):
return self._extract_media('v', mobj.group('vodid'))
elif mobj.group('channelid'):
channel_id = mobj.group('channelid')
info = self._download_json(
'%s/kraken/channels/%s' % (self._API_BASE, channel_id),
channel_id, 'Downloading channel info JSON')
channel_name = info.get('display_name') or info.get('name')
entries = []
offset = 0
limit = self._PAGE_LIMIT
for counter in itertools.count(1):
response = self._download_json(
'%s/kraken/channels/%s/videos/?offset=%d&limit=%d'
% (self._API_BASE, channel_id, offset, limit),
channel_id, 'Downloading channel videos JSON page %d' % counter)
videos = response['videos']
if not videos:
break
entries.extend([self.url_result(video['url'], 'Twitch') for video in videos])
offset += limit
return self.playlist_result(entries, channel_id, channel_name)
|
pyhmsa/pyhmsa
|
refs/heads/master
|
pyhmsa/type/test_numerical.py
|
1
|
""" """
# Standard library modules.
import unittest
import logging
import pickle
# Third party modules.
import numpy as np
# Local modules.
from pyhmsa.type.numerical import convert_value, validate_dtype, convert_unit
# Globals and constants variables.
class TestModule(unittest.TestCase):
    """Tests for the module-level helpers of pyhmsa.type.numerical."""

    def setUp(self):
        super().setUp()

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def testvalidate_dtype(self):
        # Renamed from "validate_dtype": without the "test" prefix, unittest
        # discovery never ran this method at all.
        self.assertTrue(validate_dtype(np.uint8))
        self.assertTrue(validate_dtype(np.dtype(np.uint8)))
        self.assertTrue(validate_dtype(np.uint8(9)))
        self.assertRaises(ValueError, validate_dtype, 'abc')
        # NOTE(review): np.float128 does not exist on all platforms (e.g.
        # Windows builds of numpy) — guard with hasattr if portability matters.
        self.assertRaises(ValueError, validate_dtype, np.float128)

    def testconvert_value(self):
        """convert_value should wrap scalars/sequences with a unit attribute."""
        # Numerical value
        x = convert_value(5.0, 's')
        self.assertAlmostEqual(5.0, x, 4)
        self.assertEqual('s', x.unit)

        x = convert_value(None, 'ms')
        self.assertIsNone(x)

        x = convert_value([5.0, 6.0, 7.0], 'A')
        self.assertEqual(3, len(x))
        self.assertAlmostEqual(5.0, x[0], 4)
        self.assertAlmostEqual(6.0, x[1], 4)
        self.assertAlmostEqual(7.0, x[2], 4)
        self.assertEqual('A', x.unit)

        # Re-converting without a unit keeps the existing unit.
        x = convert_value(x)
        self.assertEqual(3, len(x))
        self.assertAlmostEqual(5.0, x[0], 4)
        self.assertAlmostEqual(6.0, x[1], 4)
        self.assertAlmostEqual(7.0, x[2], 4)
        self.assertEqual('A', x.unit)

        # Re-converting with a different unit still keeps the existing unit.
        x = convert_value(x, 'nm')
        self.assertEqual(3, len(x))
        self.assertAlmostEqual(5.0, x[0], 4)
        self.assertAlmostEqual(6.0, x[1], 4)
        self.assertAlmostEqual(7.0, x[2], 4)
        self.assertEqual('A', x.unit)

        self.assertRaises(ValueError, convert_value, (5.0, 's', 'ms'), 'ks')

        # Numpy type
        x = convert_value(5.0)
        self.assertAlmostEqual(5.0, x, 4)
        self.assertTrue(hasattr(x, 'dtype'))

        x = convert_value(np.uint32(9))
        self.assertEqual(9, x)
        self.assertEqual(np.uint32, x.dtype.type)

        self.assertRaises(ValueError, convert_value, np.int8(5.0))

    def testconvert_unit(self):
        """convert_unit should rescale values between compatible units."""
        u = convert_value(5.0, 'km')
        v = convert_unit('m', u)
        self.assertAlmostEqual(5000.0, v, 4)
        self.assertEqual('m', v.unit)

        u = convert_value(5.0, 'km')
        v = convert_unit('km', u)
        self.assertAlmostEqual(5.0, v, 4)
        self.assertEqual('km', v.unit)

        v = convert_unit('m', 5.0, 'km')
        self.assertAlmostEqual(5000.0, v, 4)

        v = convert_unit('m', 5.0, u'\u00c5')
        self.assertAlmostEqual(5e-10, v, 14)

        v = convert_unit(u'\u00c5', 5.0, 'nm')
        self.assertAlmostEqual(50.0, v, 4)

        v = convert_unit('rad', 180.0, 'degrees')
        self.assertAlmostEqual(3.1416, v, 4)

        v = convert_unit('degrees', 3.1416, 'rad')
        self.assertAlmostEqual(180.0, v, 2)
class Testarrayunit(unittest.TestCase):
    """Behavioral tests for the unit-carrying numerical value type."""

    def setUp(self):
        super().setUp()
        self.v = convert_value(5.0, 's')

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def testpickle(self):
        # Round-trip through pickle must preserve both value and unit.
        restored = pickle.loads(pickle.dumps(self.v))
        self.assertAlmostEqual(5.0, restored, 4)
        self.assertEqual('s', restored.unit)

    def testformat(self):
        # Plain, string and float format specs all apply to the value itself.
        self.assertEqual('5.0', '{0}'.format(self.v))
        self.assertEqual('5.0', '{0:s}'.format(self.v))
        self.assertEqual('5.000', '{0:.3f}'.format(self.v))
        self.assertEqual('5.000 s', '{0:.3f} {0.unit:s}'.format(self.v))

    def test__eq__(self):
        # Equality compares numeric value and unit.
        self.assertTrue(5.0 == self.v)
        self.assertTrue(convert_value(5.0, 's') == self.v)
        self.assertFalse(convert_value(5.0, 'ms') == self.v)

    def test__ne__(self):
        # Inequality is the exact negation of equality.
        self.assertFalse(5.0 != self.v)
        self.assertFalse(convert_value(5.0, 's') != self.v)
        self.assertTrue(convert_value(5.0, 'ms') != self.v)
if __name__ == '__main__': #pragma: no cover
    # Run the suite directly with debug-level logging enabled.
    logging.getLogger().setLevel(logging.DEBUG)
    unittest.main()
|
ovnicraft/odoo
|
refs/heads/8.0
|
addons/crm_profiling/crm_profiling.py
|
333
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.osv import orm
from openerp.tools.translate import _
def _get_answers(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs """
query = """
select distinct(answer)
from profile_question_yes_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_yes = [x[0] for x in cr.fetchall()]
query = """
select distinct(answer)
from profile_question_no_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_no = [x[0] for x in cr.fetchall()]
return [ans_yes, ans_no]
def _get_parents(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs
@return: Get parents's Id """
ids_to_check = ids
cr.execute("""
select distinct(parent_id)
from crm_segmentation
where parent_id is not null
and id IN %s""",(tuple(ids),))
parent_ids = [x[0] for x in cr.fetchall()]
trigger = False
for x in parent_ids:
if x not in ids_to_check:
ids_to_check.append(x)
trigger = True
if trigger:
ids_to_check = _get_parents(cr, uid, ids_to_check)
return ids_to_check
def test_prof(cr, uid, seg_id, pid, answers_ids=None):
    """ return True if the partner pid fetch the segmentation rule seg_id
    @param cr: the current row, from the database cursor,
    @param uid: the current user's ID for security checks,
    @param seg_id: Segmentation's ID
    @param pid: partner's ID
    @param answers_ids: Answers's IDs (defaults to no answers)
    """
    # Guard the None default: the original iterated answers_ids directly,
    # raising TypeError whenever the argument was omitted.
    if answers_ids is None:
        answers_ids = []
    ids_to_check = _get_parents(cr, uid, [seg_id])
    [yes_answers, no_answers] = _get_answers(cr, uid, ids_to_check)
    # Match only when every required answer was given and no excluded
    # answer was given.
    return (all(ans in answers_ids for ans in yes_answers)
            and not any(ans in no_answers for ans in answers_ids))
def _recompute_categ(self, cr, uid, pid, answers_ids):
    """ Recompute the partner's category ids from its profiling answers.
    @param self: The object pointer
    @param cr: the current row, from the database cursor,
    @param uid: the current user's ID for security checks,
    @param pid: partner's ID
    @param answers_ids: Answers's IDs
    @return: list of category ids the partner should belong to
    """
    ok = []
    # Keep categories coming from non-exclusive (or unlinked) segmentations.
    cr.execute('''
        select r.category_id
        from res_partner_res_partner_category_rel r left join crm_segmentation s on (r.category_id = s.categ_id)
        where r.partner_id = %s and (s.exclusif = false or s.exclusif is null)
        ''', (pid,))
    for x in cr.fetchall():
        ok.append(x[0])
    query = '''
        select id, categ_id
        from crm_segmentation
        where profiling_active = true'''
    if ok != []:
        # The interpolated values are integer ids fetched from the database
        # above, so this string building is not exposed to user input.
        query = query +''' and categ_id not in(%s)'''% ','.join([str(i) for i in ok ])
    query = query + ''' order by id '''
    cr.execute(query)
    segm_cat_ids = cr.fetchall()
    for (segm_id, cat_id) in segm_cat_ids:
        # Add the category whenever the partner matches the profiling rule.
        if test_prof(cr, uid, segm_id, pid, answers_ids):
            ok.append(cat_id)
    return ok
class question(osv.osv):
    """ A profiling question with its set of possible answers. """
    _name="crm_profiling.question"
    _description= "Question"
    _columns={
        'name': fields.char("Question", required=True),
        # One-to-many: the answers a respondent may pick for this question.
        'answers_ids': fields.one2many("crm_profiling.answer", "question_id", "Available Answers", copy=True),
        }
class questionnaire(osv.osv):
    """ A named, described set of profiling questions. """
    _name="crm_profiling.questionnaire"
    _description= "Questionnaire"
    _columns = {
        'name': fields.char("Questionnaire", required=True),
        'description':fields.text("Description", required=True),
        # Many-to-many link to the questions making up this questionnaire.
        'questions_ids': fields.many2many('crm_profiling.question','profile_questionnaire_quest_rel',\
                            'questionnaire', 'question', "Questions"),
        }
class answer(osv.osv):
    """ A possible answer to a profiling question. """
    _name="crm_profiling.answer"
    _description="Answer"
    _columns={
        "name": fields.char("Answer", required=True),
        # The question this answer belongs to.
        "question_id": fields.many2one('crm_profiling.question',"Question"),
        }
class partner(osv.osv):
    """ res.partner extension storing profiling questionnaire answers. """
    _inherit="res.partner"
    _columns={
        # Answers the partner gave across profiling questionnaires.
        "answers_ids": fields.many2many("crm_profiling.answer","partner_question_rel",\
                                "partner","answer","Answers"),
        }

    def _questionnaire_compute(self, cr, uid, answers, context=None):
        """ Merge *answers* with the partner's stored answers and write back.
        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param answers: list of answer ids to add to the partner
        @param context: A standard dictionary for contextual values
            (the target partner is taken from context['active_id']) """
        partner_id = context.get('active_id')
        query = "select answer from partner_question_rel where partner=%s"
        cr.execute(query, (partner_id,))
        for x in cr.fetchall():
            answers.append(x[0])
        # (6, 0, ids) replaces the whole many2many relation in one write.
        self.write(cr, uid, [partner_id], {'answers_ids': [[6, 0, answers]]}, context=context)
        return {}

    def write(self, cr, uid, ids, vals, context=None):
        """ Recompute the partner's categories whenever answers change.
        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of partner IDs
        @param vals: values being written
        @param context: A standard dictionary for contextual values """
        if 'answers_ids' in vals:
            # NOTE(review): assumes answers_ids arrives as a single
            # (6, 0, ids) command and recomputes only for the first id —
            # verify against the callers before relying on multi-id writes.
            vals['category_id']=[[6, 0, _recompute_categ(self, cr, uid, ids[0], vals['answers_ids'][0][2])]]
        return super(partner, self).write(cr, uid, ids, vals, context=context)
class crm_segmentation(osv.osv):
    """ CRM Segmentation extended with profiling (questionnaire) rules. """
    _inherit="crm.segmentation"
    _columns={
        # Answers a partner must have given for the profile to match.
        "answer_yes": fields.many2many("crm_profiling.answer","profile_question_yes_rel",\
                            "profile","answer","Included Answers"),
        # Answers that disqualify a partner from this profile.
        "answer_no": fields.many2many("crm_profiling.answer","profile_question_no_rel",\
                            "profile","answer","Excluded Answers"),
        'parent_id': fields.many2one('crm.segmentation', 'Parent Profile'),
        'child_ids': fields.one2many('crm.segmentation', 'parent_id', 'Child Profiles'),
        'profiling_active': fields.boolean('Use The Profiling Rules', help='Check\
                             this box if you want to use this tab as part of the \
                             segmentation rule. If not checked, the criteria beneath will be ignored')
        }

    _constraints = [
        (osv.osv._check_recursion, 'Error ! You cannot create recursive profiles.', ['parent_id'])
    ]

    def process_continue(self, cr, uid, ids, start=False):
        """ Evaluate the segmentations and (re)assign partner categories.
        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of crm segmentation's IDs
        @param start: when True, exclusive segmentations first drop every
            existing link to their category before re-evaluating """
        partner_obj = self.pool.get('res.partner')
        categs = self.read(cr,uid,ids,['categ_id','exclusif','partner_id', \
                                'sales_purchase_active', 'profiling_active'])
        for categ in categs:
            if start:
                if categ['exclusif']:
                    # Exclusive profile: wipe current members of the category.
                    cr.execute('delete from res_partner_res_partner_category_rel where \
                            category_id=%s', (categ['categ_id'][0],))
                    partner_obj.invalidate_cache(cr, uid, ['category_id'])

            id = categ['id']

            # Start from every partner, then filter the list down below.
            cr.execute('select id from res_partner order by id ')
            partners = [x[0] for x in cr.fetchall()]

            if categ['sales_purchase_active']:
                # Drop partners failing the sales/purchase criteria lines.
                to_remove_list=[]
                cr.execute('select id from crm_segmentation_line where segmentation_id=%s', (id,))
                line_ids = [x[0] for x in cr.fetchall()]

                for pid in partners:
                    if (not self.pool.get('crm.segmentation.line').test(cr, uid, line_ids, pid)):
                        to_remove_list.append(pid)
                for pid in to_remove_list:
                    partners.remove(pid)

            if categ['profiling_active']:
                # Drop partners whose questionnaire answers do not match.
                to_remove_list = []

                for pid in partners:
                    cr.execute('select distinct(answer) from partner_question_rel where partner=%s',(pid,))
                    answers_ids = [x[0] for x in cr.fetchall()]

                    if (not test_prof(cr, uid, id, pid, answers_ids)):
                        to_remove_list.append(pid)
                for pid in to_remove_list:
                    partners.remove(pid)

            for partner in partner_obj.browse(cr, uid, partners):
                category_ids = [categ_id.id for categ_id in partner.category_id]
                if categ['categ_id'][0] not in category_ids:
                    # Link the partner to the category via direct SQL, then
                    # invalidate the ORM cache so browse records stay correct.
                    cr.execute('insert into res_partner_res_partner_category_rel (category_id,partner_id) values (%s,%s)', (categ['categ_id'][0],partner.id))
                    partner_obj.invalidate_cache(cr, uid, ['category_id'], [partner.id])

            self.write(cr, uid, [id], {'state':'not running', 'partner_id':0})
        return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mhvk/astropy
|
refs/heads/placeholder
|
astropy/wcs/tests/conftest.py
|
8
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy import wcs
from . helper import SimModelTAB
@pytest.fixture(scope='module')
def tab_wcs_2di():
    """Module-scoped WCS built from a simulated 2-D lookup-table model."""
    model = SimModelTAB(nx=150, ny=200)
    # generate FITS HDU list:
    hdulist = model.hdulist
    # create WCS object:
    w = wcs.WCS(hdulist[0].header, hdulist)
    return w
@pytest.fixture(scope='module')
def tab_wcsh_2di():
    """Like tab_wcs_2di, but also returns the HDU list alongside the WCS."""
    model = SimModelTAB(nx=150, ny=200)
    # generate FITS HDU list:
    hdulist = model.hdulist
    # create WCS object:
    w = wcs.WCS(hdulist[0].header, hdulist)
    return w, hdulist
@pytest.fixture(scope='function')
def tab_wcs_2di_f():
    """Function-scoped variant of tab_wcs_2di: a fresh WCS per test."""
    model = SimModelTAB(nx=150, ny=200)
    # generate FITS HDU list:
    hdulist = model.hdulist
    # create WCS object:
    w = wcs.WCS(hdulist[0].header, hdulist)
    return w
|
yamt/tempest
|
refs/heads/master
|
tempest/api/object_storage/test_container_services.py
|
11
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest.api.object_storage import base
from tempest import test
class ContainerTest(base.BaseObjectTest):
    """Tests for Swift container create/delete/list and metadata operations."""

    def setUp(self):
        super(ContainerTest, self).setUp()
        # Track containers created by each test so tearDown can remove them.
        self.containers = []

    def tearDown(self):
        self.delete_containers(self.containers)
        super(ContainerTest, self).tearDown()

    def _create_container(self):
        # setup container
        container_name = data_utils.rand_name(name='TestContainer')
        self.container_client.create_container(container_name)
        self.containers.append(container_name)

        return container_name

    def _create_object(self, container_name, object_name=None):
        # setup object
        if object_name is None:
            object_name = data_utils.rand_name(name='TestObject')
        data = data_utils.arbitrary_string()
        self.object_client.create_object(container_name,
                                         object_name,
                                         data)

        return object_name

    @test.attr(type='smoke')
    @test.idempotent_id('92139d73-7819-4db1-85f8-3f2f22a8d91f')
    def test_create_container(self):
        container_name = data_utils.rand_name(name='TestContainer')
        resp, body = self.container_client.create_container(container_name)
        self.containers.append(container_name)
        self.assertHeaders(resp, 'Container', 'PUT')

    @test.idempotent_id('49f866ed-d6af-4395-93e7-4187eb56d322')
    def test_create_container_overwrite(self):
        # overwrite container with the same name
        container_name = data_utils.rand_name(name='TestContainer')
        self.container_client.create_container(container_name)
        self.containers.append(container_name)

        resp, _ = self.container_client.create_container(container_name)
        self.assertHeaders(resp, 'Container', 'PUT')

    @test.idempotent_id('c2ac4d59-d0f5-40d5-ba19-0635056d48cd')
    def test_create_container_with_metadata_key(self):
        # create container with the blank value of metadata
        container_name = data_utils.rand_name(name='TestContainer')
        metadata = {'test-container-meta': ''}
        resp, _ = self.container_client.create_container(
            container_name,
            metadata=metadata)
        self.containers.append(container_name)
        self.assertHeaders(resp, 'Container', 'PUT')

        resp, _ = self.container_client.list_container_metadata(
            container_name)
        # if the value of metadata is blank, metadata is not registered
        # in the server
        self.assertNotIn('x-container-meta-test-container-meta', resp)

    @test.idempotent_id('e1e8df32-7b22-44e1-aa08-ccfd8d446b58')
    def test_create_container_with_metadata_value(self):
        # create container with metadata value
        container_name = data_utils.rand_name(name='TestContainer')
        metadata = {'test-container-meta': 'Meta1'}
        resp, _ = self.container_client.create_container(
            container_name,
            metadata=metadata)
        self.containers.append(container_name)
        self.assertHeaders(resp, 'Container', 'PUT')

        resp, _ = self.container_client.list_container_metadata(
            container_name)
        self.assertIn('x-container-meta-test-container-meta', resp)
        self.assertEqual(resp['x-container-meta-test-container-meta'],
                         metadata['test-container-meta'])

    @test.idempotent_id('24d16451-1c0c-4e4f-b59c-9840a3aba40e')
    def test_create_container_with_remove_metadata_key(self):
        # create container with the blank value of remove metadata
        container_name = data_utils.rand_name(name='TestContainer')
        metadata_1 = {'test-container-meta': 'Meta1'}
        self.container_client.create_container(
            container_name,
            metadata=metadata_1)
        self.containers.append(container_name)

        metadata_2 = {'test-container-meta': ''}
        resp, _ = self.container_client.create_container(
            container_name,
            remove_metadata=metadata_2)
        self.assertHeaders(resp, 'Container', 'PUT')

        resp, _ = self.container_client.list_container_metadata(
            container_name)
        self.assertNotIn('x-container-meta-test-container-meta', resp)

    @test.idempotent_id('8a21ebad-a5c7-4e29-b428-384edc8cd156')
    def test_create_container_with_remove_metadata_value(self):
        # create container with remove metadata
        container_name = data_utils.rand_name(name='TestContainer')
        metadata = {'test-container-meta': 'Meta1'}
        self.container_client.create_container(container_name,
                                               metadata=metadata)
        self.containers.append(container_name)

        resp, _ = self.container_client.create_container(
            container_name,
            remove_metadata=metadata)
        self.assertHeaders(resp, 'Container', 'PUT')

        resp, _ = self.container_client.list_container_metadata(
            container_name)
        self.assertNotIn('x-container-meta-test-container-meta', resp)

    @test.idempotent_id('95d3a249-b702-4082-a2c4-14bb860cf06a')
    def test_delete_container(self):
        # create a container
        container_name = self._create_container()
        # delete container, success asserted within
        resp, _ = self.container_client.delete_container(container_name)
        self.assertHeaders(resp, 'Container', 'DELETE')
        # removed from the cleanup list so tearDown does not double-delete
        self.containers.remove(container_name)

    @test.attr(type='smoke')
    @test.idempotent_id('312ff6bd-5290-497f-bda1-7c5fec6697ab')
    def test_list_container_contents(self):
        # get container contents list
        container_name = self._create_container()
        object_name = self._create_object(container_name)

        resp, object_list = self.container_client.list_container_contents(
            container_name)
        self.assertHeaders(resp, 'Container', 'GET')
        self.assertEqual(object_name, object_list.strip('\n'))

    @test.idempotent_id('4646ac2d-9bfb-4c7d-a3c5-0f527402b3df')
    def test_list_container_contents_with_no_object(self):
        # get empty container contents list
        container_name = self._create_container()

        resp, object_list = self.container_client.list_container_contents(
            container_name)
        self.assertHeaders(resp, 'Container', 'GET')
        self.assertEqual('', object_list.strip('\n'))

    @test.idempotent_id('fe323a32-57b9-4704-a996-2e68f83b09bc')
    def test_list_container_contents_with_delimiter(self):
        # get container contents list using delimiter param
        container_name = self._create_container()
        object_name = data_utils.rand_name(name='TestObject/')
        self._create_object(container_name, object_name)

        params = {'delimiter': '/'}
        resp, object_list = self.container_client.list_container_contents(
            container_name,
            params=params)
        self.assertHeaders(resp, 'Container', 'GET')
        self.assertEqual(object_name.split('/')[0], object_list.strip('/\n'))

    @test.idempotent_id('55b4fa5c-e12e-4ca9-8fcf-a79afe118522')
    def test_list_container_contents_with_end_marker(self):
        # get container contents list using end_marker param
        container_name = self._create_container()
        object_name = self._create_object(container_name)

        params = {'end_marker': 'ZzzzObject1234567890'}
        resp, object_list = self.container_client.list_container_contents(
            container_name,
            params=params)
        self.assertHeaders(resp, 'Container', 'GET')
        self.assertEqual(object_name, object_list.strip('\n'))

    @test.idempotent_id('196f5034-6ab0-4032-9da9-a937bbb9fba9')
    def test_list_container_contents_with_format_json(self):
        # get container contents list using format_json param
        container_name = self._create_container()
        self._create_object(container_name)

        params = {'format': 'json'}
        resp, object_list = self.container_client.list_container_contents(
            container_name,
            params=params)
        self.assertHeaders(resp, 'Container', 'GET')

        self.assertIsNotNone(object_list)
        self.assertTrue([c['name'] for c in object_list])
        self.assertTrue([c['hash'] for c in object_list])
        self.assertTrue([c['bytes'] for c in object_list])
        self.assertTrue([c['content_type'] for c in object_list])
        self.assertTrue([c['last_modified'] for c in object_list])

    @test.idempotent_id('655a53ca-4d15-408c-a377-f4c6dbd0a1fa')
    def test_list_container_contents_with_format_xml(self):
        # get container contents list using format_xml param
        container_name = self._create_container()
        self._create_object(container_name)

        params = {'format': 'xml'}
        resp, object_list = self.container_client.list_container_contents(
            container_name,
            params=params)
        self.assertHeaders(resp, 'Container', 'GET')

        self.assertIsNotNone(object_list)
        self.assertEqual(object_list.tag, 'container')
        self.assertTrue('name' in object_list.keys())
        self.assertEqual(object_list.find(".//object").tag, 'object')
        self.assertEqual(object_list.find(".//name").tag, 'name')
        self.assertEqual(object_list.find(".//hash").tag, 'hash')
        self.assertEqual(object_list.find(".//bytes").tag, 'bytes')
        self.assertEqual(object_list.find(".//content_type").tag,
                         'content_type')
        self.assertEqual(object_list.find(".//last_modified").tag,
                         'last_modified')

    @test.idempotent_id('297ec38b-2b61-4ff4-bcd1-7fa055e97b61')
    def test_list_container_contents_with_limit(self):
        # get container contents list using limit param
        container_name = self._create_container()
        object_name = self._create_object(container_name)

        params = {'limit': data_utils.rand_int_id(1, 10000)}
        resp, object_list = self.container_client.list_container_contents(
            container_name,
            params=params)
        self.assertHeaders(resp, 'Container', 'GET')
        self.assertEqual(object_name, object_list.strip('\n'))

    @test.idempotent_id('c31ddc63-2a58-4f6b-b25c-94d2937e6867')
    def test_list_container_contents_with_marker(self):
        # get container contents list using marker param
        container_name = self._create_container()
        object_name = self._create_object(container_name)

        params = {'marker': 'AaaaObject1234567890'}
        resp, object_list = self.container_client.list_container_contents(
            container_name,
            params=params)
        self.assertHeaders(resp, 'Container', 'GET')
        self.assertEqual(object_name, object_list.strip('\n'))

    @test.idempotent_id('58ca6cc9-6af0-408d-aaec-2a6a7b2f0df9')
    def test_list_container_contents_with_path(self):
        # get container contents list using path param
        container_name = self._create_container()
        object_name = data_utils.rand_name(name='Swift/TestObject')
        self._create_object(container_name, object_name)

        params = {'path': 'Swift'}
        resp, object_list = self.container_client.list_container_contents(
            container_name,
            params=params)
        self.assertHeaders(resp, 'Container', 'GET')
        self.assertEqual(object_name, object_list.strip('\n'))

    @test.idempotent_id('77e742c7-caf2-4ec9-8aa4-f7d509a3344c')
    def test_list_container_contents_with_prefix(self):
        # get container contents list using prefix param
        container_name = self._create_container()
        object_name = self._create_object(container_name)

        prefix_key = object_name[0:8]
        params = {'prefix': prefix_key}
        resp, object_list = self.container_client.list_container_contents(
            container_name,
            params=params)
        self.assertHeaders(resp, 'Container', 'GET')
        self.assertEqual(object_name, object_list.strip('\n'))

    @test.attr(type='smoke')
    @test.idempotent_id('96e68f0e-19ec-4aa2-86f3-adc6a45e14dd')
    def test_list_container_metadata(self):
        # List container metadata
        container_name = self._create_container()

        metadata = {'name': 'Pictures'}
        self.container_client.update_container_metadata(
            container_name,
            metadata=metadata)

        resp, _ = self.container_client.list_container_metadata(
            container_name)
        self.assertHeaders(resp, 'Container', 'HEAD')
        self.assertIn('x-container-meta-name', resp)
        self.assertEqual(resp['x-container-meta-name'], metadata['name'])

    @test.idempotent_id('a2faf936-6b13-4f8d-92a2-c2278355821e')
    def test_list_no_container_metadata(self):
        # HEAD container without metadata
        container_name = self._create_container()

        resp, _ = self.container_client.list_container_metadata(
            container_name)
        self.assertHeaders(resp, 'Container', 'HEAD')
        self.assertNotIn('x-container-meta-', str(resp))

    @test.idempotent_id('cf19bc0b-7e16-4a5a-aaed-cb0c2fe8deef')
    def test_update_container_metadata_with_create_and_delete_matadata(self):
        # NOTE(review): "matadata" typo kept in the method name — renaming
        # would change the published test ID.
        # Send one request of adding and deleting metadata
        container_name = data_utils.rand_name(name='TestContainer')
        metadata_1 = {'test-container-meta1': 'Meta1'}
        self.container_client.create_container(container_name,
                                               metadata=metadata_1)
        self.containers.append(container_name)

        metadata_2 = {'test-container-meta2': 'Meta2'}
        resp, _ = self.container_client.update_container_metadata(
            container_name,
            metadata=metadata_2,
            remove_metadata=metadata_1)
        self.assertHeaders(resp, 'Container', 'POST')

        resp, _ = self.container_client.list_container_metadata(
            container_name)
        self.assertNotIn('x-container-meta-test-container-meta1', resp)
        self.assertIn('x-container-meta-test-container-meta2', resp)
        self.assertEqual(resp['x-container-meta-test-container-meta2'],
                         metadata_2['test-container-meta2'])

    @test.idempotent_id('2ae5f295-4bf1-4e04-bfad-21e54b62cec5')
    def test_update_container_metadata_with_create_metadata(self):
        # update container metadata using add metadata
        container_name = self._create_container()

        metadata = {'test-container-meta1': 'Meta1'}
        resp, _ = self.container_client.update_container_metadata(
            container_name,
            metadata=metadata)
        self.assertHeaders(resp, 'Container', 'POST')

        resp, _ = self.container_client.list_container_metadata(
            container_name)
        self.assertIn('x-container-meta-test-container-meta1', resp)
        self.assertEqual(resp['x-container-meta-test-container-meta1'],
                         metadata['test-container-meta1'])

    @test.idempotent_id('3a5ce7d4-6e4b-47d0-9d87-7cd42c325094')
    def test_update_container_metadata_with_delete_metadata(self):
        # update container metadata using delete metadata
        container_name = data_utils.rand_name(name='TestContainer')
        metadata = {'test-container-meta1': 'Meta1'}
        self.container_client.create_container(container_name,
                                               metadata=metadata)
        self.containers.append(container_name)

        resp, _ = self.container_client.delete_container_metadata(
            container_name,
            metadata=metadata)
        self.assertHeaders(resp, 'Container', 'POST')

        resp, _ = self.container_client.list_container_metadata(
            container_name)
        self.assertNotIn('x-container-meta-test-container-meta1', resp)

    @test.idempotent_id('31f40a5f-6a52-4314-8794-cd89baed3040')
    def test_update_container_metadata_with_create_matadata_key(self):
        # NOTE(review): "matadata" typo kept in the method name — renaming
        # would change the published test ID.
        # update container metadata with a blank value of metadata
        container_name = self._create_container()

        metadata = {'test-container-meta1': ''}
        resp, _ = self.container_client.update_container_metadata(
            container_name,
            metadata=metadata)
        self.assertHeaders(resp, 'Container', 'POST')

        resp, _ = self.container_client.list_container_metadata(
            container_name)
        self.assertNotIn('x-container-meta-test-container-meta1', resp)

    @test.idempotent_id('a2e36378-6f1f-43f4-840a-ffd9cfd61914')
    def test_update_container_metadata_with_delete_metadata_key(self):
        # update container metadata with a blank value of metadata
        container_name = data_utils.rand_name(name='TestContainer')
        metadata = {'test-container-meta1': 'Meta1'}
        self.container_client.create_container(container_name,
                                               metadata=metadata)
        self.containers.append(container_name)

        metadata = {'test-container-meta1': ''}
        resp, _ = self.container_client.delete_container_metadata(
            container_name,
            metadata=metadata)
        self.assertHeaders(resp, 'Container', 'POST')

        resp, _ = self.container_client.list_container_metadata(container_name)
        self.assertNotIn('x-container-meta-test-container-meta1', resp)
|
sudheerchintala/LearnEraPlatForm
|
refs/heads/master
|
common/djangoapps/track/tracker.py
|
239
|
"""
Module that tracks analytics events by sending them to different
configurable backends.
The backends can be configured using Django settings as the example
below::
TRACKING_BACKENDS = {
'tracker_name': {
'ENGINE': 'class.name.for.backend',
'OPTIONS': {
'host': ... ,
'port': ... ,
...
}
}
}
"""
import inspect
from importlib import import_module
from dogapi import dog_stats_api
from django.conf import settings
from track.backends import BaseBackend
__all__ = ['send']
backends = {}
def _initialize_backends_from_django_settings():
    """
    Rebuild the module-level `backends` mapping from the
    TRACKING_BACKENDS dictionary in Django settings.

    Existing entries are discarded first, so calling this again after a
    settings change fully re-initializes the trackers.
    """
    backends.clear()
    tracking_config = getattr(settings, 'TRACKING_BACKENDS', {})
    for backend_name, backend_config in tracking_config.iteritems():
        # A falsy entry is how a deployment turns OFF a default backend,
        # so skip it instead of instantiating anything.
        if not backend_config:
            continue
        engine = backend_config['ENGINE']
        options = backend_config.get('OPTIONS', {})
        backends[backend_name] = _instantiate_backend_from_name(engine, options)
def _instantiate_backend_from_name(name, options):
"""
Instantiate an event tracker backend from the full module path to
the backend class. Useful when setting backends from configuration
files.
"""
# Parse backend name
try:
parts = name.split('.')
module_name = '.'.join(parts[:-1])
class_name = parts[-1]
except IndexError:
raise ValueError('Invalid event track backend %s' % name)
# Get and verify the backend class
try:
module = import_module(module_name)
cls = getattr(module, class_name)
if not inspect.isclass(cls) or not issubclass(cls, BaseBackend):
raise TypeError
except (ValueError, AttributeError, TypeError, ImportError):
raise ValueError('Cannot find event track backend %s' % name)
backend = cls(**options)
return backend
@dog_stats_api.timed('track.send')
def send(event):
    """
    Deliver `event` to every initialized tracking backend, timing each
    backend's delivery individually via dogapi.
    """
    dog_stats_api.increment('track.send.count')

    for backend_name, backend in backends.iteritems():
        timer_name = 'track.send.backend.{0}'.format(backend_name)
        with dog_stats_api.timer(timer_name):
            backend.send(event)
_initialize_backends_from_django_settings()
|
qgis/QGIS
|
refs/heads/master
|
python/core/additions/fromfunction.py
|
57
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
fromfunction.py
---------------------
Date : May 2018
Copyright : (C) 2018 by Denis Rouzaud
Email : denis@opengis.ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from .qgstaskwrapper import QgsTaskWrapper
from qgis._core import QgsTask
@staticmethod
def fromFunction(description, function, *args, on_finished=None, flags=QgsTask.AllFlags, **kwargs):
    """
    Creates a new QgsTask task from a python function.

    :param description: text description shown in the task manager
    :param function: callable run in the background; receives the task as
        its first argument, followed by ``*args`` and ``**kwargs``
    :param on_finished: optional callable invoked when the task completes;
        receives the exception (or None) and the function's return value
    :param flags: QgsTask.Flags controlling task behaviour

    Example:
    def calculate(task):
    # pretend this is some complex maths and stuff we want
    # to run in the background
    return 5*6
    def calculation_finished(exception, value=None):
    if not exception:
    iface.messageBar().pushMessage(
    'the magic number is {}'.format(value))
    else:
    iface.messageBar().pushMessage(
    str(exception))
    task = QgsTask.fromFunction('my task', calculate,
    on_finished=calculation_finished)
    QgsApplication.taskManager().addTask(task)
    """
    # NOTE(review): assert is stripped under `python -O`; a None function
    # would then only fail later inside QgsTaskWrapper — confirm acceptable.
    assert function
    return QgsTaskWrapper(description, flags, function, on_finished, *args, **kwargs)
|
aYukiSekiguchi/ACCESS-Chromium
|
refs/heads/master
|
tools/code_coverage/coverage_posix.py
|
9
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate and process code coverage.
TODO(jrg): rename this from coverage_posix.py to coverage_all.py!
Written for and tested on Mac, Linux, and Windows. To use this script
to generate coverage numbers, please run from within a gyp-generated
project.
All platforms, to set up coverage:
cd ...../chromium ; src/tools/gyp/gyp_dogfood -Dcoverage=1 src/build/all.gyp
Run coverage on...
Mac:
( cd src/chrome ; xcodebuild -configuration Debug -target coverage )
Linux:
( cd src/chrome ; hammer coverage )
# In particular, don't try and run 'coverage' from src/build
--directory=DIR: specify directory that contains gcda files, and where
a "coverage" directory will be created containing the output html.
Example name: ..../chromium/src/xcodebuild/Debug.
If not specified (e.g. buildbot) we will try and figure it out based on
other options (e.g. --target and --build-dir; see below).
--genhtml: generate html output. If not specified only lcov is generated.
--all_unittests: if present, run all files named *_unittests that we
can find.
--fast_test: make the tests run real fast (just for testing)
--strict: if a test fails, we continue happily. --strict will cause
us to die immediately.
--trim=False: by default we trim away tests known to be problematic on
specific platforms. If set to false we do NOT trim out tests.
--xvfb=True: By default we use Xvfb to make sure DISPLAY is valid
(Linux only). if set to False, do not use Xvfb. TODO(jrg): convert
this script from the compile stage of a builder to a
RunPythonCommandInBuildDir() command to avoid the need for this
step.
--timeout=SECS: if a subprocess doesn't have output within SECS,
assume it's a hang. Kill it and give up.
--bundles=BUNDLEFILE: a file containing a python list of coverage
bundles to be eval'd. Example contents of the bundlefile:
['../base/base.gyp:base_unittests']
This is used as part of the coverage bot.
If no other bundlefile-finding args are used (--target,
--build-dir), this is assumed to be an absolute path.
If those args are used, find BUNDLEFILE in a way consistent with
other scripts launched by buildbot. Example of another script
launched by buildbot:
http://src.chromium.org/viewvc/chrome/trunk/tools/buildbot/scripts/slave/runtest.py
--target=NAME: specify the build target (e.g. 'Debug' or 'Release').
This is used by buildbot scripts to help us find the output directory.
Must be used with --build-dir.
--build-dir=DIR: According to buildbot comments, this is the name of
the directory within the buildbot working directory in which the
solution, Debug, and Release directories are found.
It's usually "src/build", but on mac it's $DIR/../xcodebuild and on
Linux it's $DIR/out.
This is used by buildbot scripts to help us find the output directory.
Must be used with --target.
--no_exclusions: Do NOT use the exclusion list. This script keeps a
list of tests known to be problematic under coverage. For example,
ProcessUtilTest.SpawnChild will crash inside __gcov_fork() when
using the MacOS 10.6 SDK. Use of --no_exclusions prevents the use
of this exclusion list.
--dont-clear-coverage-data: Normally we clear coverage data from
previous runs. If this arg is used we do NOT clear the coverage
data.
Strings after all options are considered tests to run. Test names
have all text before a ':' stripped to help with gyp compatibility.
For example, ../base/base.gyp:base_unittests is interpreted as a test
named "base_unittests".
"""
import glob
import logging
import optparse
import os
import Queue
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback
"""Global list of child PIDs to kill when we die."""
gChildPIDs = []
"""Exclusion list. Format is
{ platform: { testname: (exclusion1, exclusion2, ... ), ... } }
Platform is a match for sys.platform and can be a list.
Matching code does an 'if sys.platform in (the key):'.
Similarly, matching does an 'if testname in thefulltestname:'
The Chromium convention has traditionally been to place the
exclusion list in a distinct file. Unlike valgrind (which has
frequent changes when things break and are fixed), the expectation
here is that exclusions remain relatively constant (e.g. OS bugs).
If that changes, revisit the decision to place inclusions in this
script.
Details:
ProcessUtilTest.SpawnChild: chokes in __gcov_fork on 10.6
IPCFuzzingTest.MsgBadPayloadArgs: ditto
"""
gTestExclusions = {
'darwin2': { 'base_unittests': ('ProcessUtilTest.SpawnChild',),
'ipc_tests': ('IPCFuzzingTest.MsgBadPayloadArgs',), }
}
def TerminateSignalHandler(sig, stack):
  """When killed, try and kill our child processes.

  Resets the handler to the default first so a second signal cannot
  re-enter this function, forwards the signal to every tracked child
  PID, then exits.
  """
  signal.signal(sig, signal.SIG_DFL)
  for pid in gChildPIDs:
    # os.kill only exists on POSIX in Python 2; fall back to taskkill
    # on Windows.
    if 'kill' in os.__all__: # POSIX
      os.kill(pid, sig)
    else:
      subprocess.call(['taskkill.exe', '/PID', str(pid)])
  sys.exit(0)
class RunTooLongException(Exception):
  """Thrown when a command runs too long without output.

  Raised by RunProgramThread.RunUntilCompletion after killing the child.
  """
  pass
class BadUserInput(Exception):
  """Thrown when arguments from the user are incorrectly formatted.

  Raised by Coverage.GtestFilter for malformed --gtest_filter specs.
  """
  pass
class RunProgramThread(threading.Thread):
  """A thread to run a subprocess.

  We want to print the output of our subprocess in real time, but also
  want a timeout if there has been no output for a certain amount of
  time. Normal techniques (e.g. loop in select()) aren't cross
  platform enough. The goal seems simple: "print output of child, kill it
  if there is no output by timeout". But it was tricky to get this right
  in a x-platform way (see warnings about deadlock on the python
  subprocess doc page).
  """
  # Constants in our queue: the child thread posts PROGRESS whenever new
  # output appears and DONE exactly once when the child process exits.
  PROGRESS = 0
  DONE = 1
  def __init__(self, cmd):
    super(RunProgramThread, self).__init__()
    self._cmd = cmd           # Command list for subprocess.Popen.
    self._process = None      # Popen object once started.
    self._queue = Queue.Queue()  # PROGRESS/DONE events for the parent.
    self._retcode = None      # Cached child return code.
  def run(self):
    # Thread entry point: dispatch to the platform-appropriate runner.
    if sys.platform in ('win32', 'cygwin'):
      return self._run_windows()
    else:
      self._run_posix()
  def _run_windows(self):
    # We need to save stdout to a temporary file because of a bug on the
    # windows implementation of python which can deadlock while waiting
    # for the IO to complete while writing to the PIPE and the pipe waiting
    # on us and us waiting on the child process.
    stdout_file = tempfile.TemporaryFile()
    try:
      self._process = subprocess.Popen(self._cmd,
                                       stdin=subprocess.PIPE,
                                       stdout=stdout_file,
                                       stderr=subprocess.STDOUT)
      gChildPIDs.append(self._process.pid)
      try:
        # To make sure that the buildbot don't kill us if we run too long
        # without any activity on the console output, we look for progress in
        # the length of the temporary file and we print what was accumulated so
        # far to the output console to make the buildbot know we are making some
        # progress.
        previous_tell = 0
        # We will poll the process until we get a non-None return code.
        self._retcode = None
        while self._retcode is None:
          self._retcode = self._process.poll()
          current_tell = stdout_file.tell()
          if current_tell > previous_tell:
            # Report progress to our main thread so we don't timeout.
            self._queue.put(RunProgramThread.PROGRESS)
            # And print what was accumulated to far.
            stdout_file.seek(previous_tell)
            print stdout_file.read(current_tell - previous_tell),
            previous_tell = current_tell
          # Don't be selfish, let other threads do stuff while we wait for
          # the process to complete.
          time.sleep(0.5)
        # OK, the child process has exited, let's print its output to our
        # console to create debugging logs in case they get to be needed.
        stdout_file.flush()
        stdout_file.seek(previous_tell)
        print stdout_file.read(stdout_file.tell() - previous_tell)
      except IOError, e:
        logging.exception('%s', e)
        pass
    finally:
      stdout_file.close()
    # If we get here the process is done.
    gChildPIDs.remove(self._process.pid)
    self._queue.put(RunProgramThread.DONE)
  def _run_posix(self):
    """No deadlock problem so use the simple answer. The windows solution
    appears to add extra buffering which we don't want on other platforms."""
    self._process = subprocess.Popen(self._cmd,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
    gChildPIDs.append(self._process.pid)
    try:
      while True:
        line = self._process.stdout.readline()
        if not line: # EOF
          break
        print line,
        # Each line of output counts as progress for the timeout watchdog.
        self._queue.put(RunProgramThread.PROGRESS, True)
    except IOError:
      pass
    # If we get here the process is done.
    gChildPIDs.remove(self._process.pid)
    self._queue.put(RunProgramThread.DONE)
  def stop(self):
    # Alias for kill(); kept for interface compatibility.
    self.kill()
  def kill(self):
    """Kill our running process if needed. Wait for kill to complete.

    Should be called in the PARENT thread; we do not self-kill.
    Returns the return code of the process.
    Safe to call even if the process is dead.
    """
    if not self._process:
      return self.retcode()
    if 'kill' in os.__all__: # POSIX
      os.kill(self._process.pid, signal.SIGKILL)
    else:
      subprocess.call(['taskkill.exe', '/PID', str(self._process.pid)])
    return self.retcode()
  def retcode(self):
    """Return the return value of the subprocess.

    Waits for process to die but does NOT kill it explicitly.
    """
    if self._retcode == None: # must be none, not 0/False
      self._retcode = self._process.wait()
    return self._retcode
  def RunUntilCompletion(self, timeout):
    """Run thread until completion or timeout (in seconds).

    Start the thread. Let it run until completion, or until we've
    spent TIMEOUT without seeing output. On timeout throw
    RunTooLongException.
    """
    self.start()
    while True:
      try:
        x = self._queue.get(True, timeout)
        if x == RunProgramThread.DONE:
          return self.retcode()
      except Queue.Empty, e: # timed out
        logging.info('TIMEOUT (%d seconds exceeded with no output): killing' %
                     timeout)
        self.kill()
        raise RunTooLongException()
class Coverage(object):
"""Doitall class for code coverage."""
  def __init__(self, options, args):
    """Set up directories, find helper programs, and validate the platform.

    Args:
      options: parsed optparse options (see CoverageOptionParser).
      args: remaining non-option command line args (test names).
    """
    super(Coverage, self).__init__()
    logging.basicConfig(level=logging.DEBUG)
    self.directory = options.directory
    self.options = options
    self.args = args
    # May rewrite self.directory from --target/--build-dir, or exit fatally.
    self.ConfirmDirectory()
    self.directory_parent = os.path.dirname(self.directory)
    self.output_directory = os.path.join(self.directory, 'coverage')
    if not os.path.exists(self.output_directory):
      os.mkdir(self.output_directory)
    # The "final" lcov-format file
    self.coverage_info_file = os.path.join(self.directory, 'coverage.info')
    # If needed, an intermediate VSTS-format file
    self.vsts_output = os.path.join(self.directory, 'coverage.vsts')
    # Needed for Windows.
    self.src_root = options.src_root
    self.FindPrograms()
    self.ConfirmPlatformAndPaths()
    self.tests = []
    self.xvfb_pid = 0
    self.test_files = [] # List of files with test specifications.
    self.test_filters = {} # Mapping from testname->--gtest_filter arg.
    logging.info('self.directory: ' + self.directory)
    logging.info('self.directory_parent: ' + self.directory_parent)
def FindInPath(self, program):
"""Find program in our path. Return abs path to it, or None."""
if not 'PATH' in os.environ:
logging.fatal('No PATH environment variable?')
sys.exit(1)
paths = os.environ['PATH'].split(os.pathsep)
for path in paths:
fullpath = os.path.join(path, program)
if os.path.exists(fullpath):
return fullpath
return None
  def FindPrograms(self):
    """Find programs we may want to run.

    POSIX: locate the checked-in lcov/mcov/genhtml tools.
    Windows: locate the VS performance/instrumentation tools on PATH,
    exiting fatally if any is missing. Sets self.programs either way.
    """
    if self.IsPosix():
      self.lcov_directory = os.path.join(sys.path[0],
                                         '../../third_party/lcov/bin')
      self.lcov = os.path.join(self.lcov_directory, 'lcov')
      self.mcov = os.path.join(self.lcov_directory, 'mcov')
      self.genhtml = os.path.join(self.lcov_directory, 'genhtml')
      self.programs = [self.lcov, self.mcov, self.genhtml]
    else:
      # Hack to get the buildbot working.
      os.environ['PATH'] += r';c:\coverage\coverage_analyzer'
      os.environ['PATH'] += r';c:\coverage\performance_tools'
      # (end hack)
      commands = ['vsperfcmd.exe', 'vsinstr.exe', 'coverage_analyzer.exe']
      self.perf = self.FindInPath('vsperfcmd.exe')
      self.instrument = self.FindInPath('vsinstr.exe')
      self.analyzer = self.FindInPath('coverage_analyzer.exe')
      if not self.perf or not self.instrument or not self.analyzer:
        logging.fatal('Could not find Win performance commands.')
        logging.fatal('Commands needed in PATH: ' + str(commands))
        sys.exit(1)
      self.programs = [self.perf, self.instrument, self.analyzer]
def PlatformBuildPrefix(self):
"""Return a platform specific build directory prefix.
This prefix is prepended to the build target (Debug, Release) to
identify output as relative to the build directory.
These values are specific to Chromium's use of gyp.
"""
if self.IsMac():
return '../xcodebuild'
if self.IsWindows():
return ''
else: # Linux
return '../out' # assumes make, unlike runtest.py
  def ConfirmDirectory(self):
    """Confirm correctness of self.directory.

    If it exists, happiness. If not, try and figure it out in a
    manner similar to FindBundlesFile(). The 'figure it out' case
    happens with buildbot where the directory isn't specified
    explicitly. Exits fatally when neither --directory nor the
    (--target, --build-dir) pair is usable.
    """
    if (not self.directory and
        not (self.options.target and self.options.build_dir)):
      logging.fatal('Must use --directory or (--target and --build-dir)')
      sys.exit(1)
    if not self.directory:
      # Derive the directory the way other buildbot scripts do.
      self.directory = os.path.join(self.options.build_dir,
                                    self.PlatformBuildPrefix(),
                                    self.options.target)
    if os.path.exists(self.directory):
      logging.info('Directory: ' + self.directory)
      return
    else:
      logging.fatal('Directory ' +
                    self.directory + ' doesn\'t exist')
      sys.exit(1)
  def FindBundlesFile(self):
    """Find the bundlesfile.

    The 'bundles' file can be either absolute path, or (if we are run
    from buildbot) we need to find it based on other hints (--target,
    --build-dir, etc). Rewrites self.options.bundles in the buildbot
    case; exits fatally if the final path does not exist.
    """
    # If no bundle file, no problem!
    if not self.options.bundles:
      return
    # If true, we're buildbot. Form a path.
    # Else assume absolute.
    if self.options.target and self.options.build_dir:
      fullpath = os.path.join(self.options.build_dir,
                              self.PlatformBuildPrefix(),
                              self.options.target,
                              self.options.bundles)
      self.options.bundles = fullpath
    if os.path.exists(self.options.bundles):
      logging.info('BundlesFile: ' + self.options.bundles)
      return
    else:
      logging.fatal('bundlefile ' +
                    self.options.bundles + ' doesn\'t exist')
      sys.exit(1)
def FindTests(self):
"""Find unit tests to run; set self.tests to this list.
Assume all non-option items in the arg list are tests to be run.
"""
# Before we begin, find the bundles file if not an absolute path.
self.FindBundlesFile()
# Small tests: can be run in the "chromium" directory.
# If asked, run all we can find.
if self.options.all_unittests:
self.tests += glob.glob(os.path.join(self.directory, '*_unittests'))
# Tests can come in as args directly, indirectly (through a file
# of test lists) or as a file of bundles.
all_testnames = self.args[:] # Copy since we might modify
for test_file in self.options.test_files:
f = open(test_file)
for line in f:
line = re.sub(r"#.*$", "", line)
line = re.sub(r"\s*", "", line)
if re.match("\s*$"):
continue
all_testnames.append(line)
f.close()
tests_from_bundles = None
if self.options.bundles:
try:
tests_from_bundles = eval(open(self.options.bundles).read())
except IOError:
logging.fatal('IO error in bundle file ' +
self.options.bundles + ' (doesn\'t exist?)')
except (NameError, SyntaxError):
logging.fatal('Parse or syntax error in bundle file ' +
self.options.bundles)
if hasattr(tests_from_bundles, '__iter__'):
all_testnames += tests_from_bundles
else:
logging.fatal('Fatal error with bundle file; could not get list from' +
self.options.bundles)
sys.exit(1)
# If told explicit tests, run those (after stripping the name as
# appropriate)
for testname in all_testnames:
mo = re.search(r"(.*)\[(.*)\]$", testname)
gtest_filter = None
if mo:
gtest_filter = mo.group(2)
testname = mo.group(1)
if ':' in testname:
testname = testname.split(':')[1]
self.tests += [os.path.join(self.directory, testname)]
if gtest_filter:
self.test_filters[testname] = gtest_filter
# Medium tests?
# Not sure all of these work yet (e.g. page_cycler_tests)
# self.tests += glob.glob(os.path.join(self.directory, '*_tests'))
# If needed, append .exe to tests since vsinstr.exe likes it that
# way.
if self.IsWindows():
for ind in range(len(self.tests)):
test = self.tests[ind]
test_exe = test + '.exe'
if not test.endswith('.exe') and os.path.exists(test_exe):
self.tests[ind] = test_exe
  def TrimTests(self):
    """Trim specific tests for each platform.

    Currently overridden by a temporary inclusion hack: only tests whose
    names contain 'unit_tests' are kept, and the method returns
    unconditionally afterwards, making the Linux/Mac exclusion logic
    below it unreachable dead code until the TODO is removed.
    """
    if self.IsWindows():
      return
    # TODO(jrg): remove when not needed
    inclusion = ['unit_tests']
    keep = []
    for test in self.tests:
      for i in inclusion:
        if i in test:
          keep.append(test)
    self.tests = keep
    logging.info('After trimming tests we have ' + ' '.join(self.tests))
    # NOTE(review): this return makes everything below unreachable while
    # the inclusion hack above is active.
    return
    if self.IsLinux():
      # self.tests = filter(lambda t: t.endswith('base_unittests'), self.tests)
      return
    if self.IsMac():
      exclusion = ['automated_ui_tests']
      punted = []
      for test in self.tests:
        for e in exclusion:
          if test.endswith(e):
            punted.append(test)
      self.tests = filter(lambda t: t not in punted, self.tests)
      if punted:
        logging.info('Tests trimmed out: ' + str(punted))
  def ConfirmPlatformAndPaths(self):
    """Confirm OS and paths (e.g. lcov).

    Exits fatally if any program found by FindPrograms() is missing.
    """
    for program in self.programs:
      if not os.path.exists(program):
        logging.fatal('Program missing: ' + program)
        sys.exit(1)
  def Run(self, cmdlist, ignore_error=False, ignore_retcode=None,
          explanation=None):
    """Run the command list; exit fatally on error.

    Args:
      cmdlist: a list of commands (e.g. to pass to subprocess.call)
      ignore_error: if True log an error; if False then exit.
      ignore_retcode: if retcode is non-zero, exit unless we ignore.
      explanation: optional text appended to the "errors ignored" log.

    Returns: process return code.

    Throws: RunTooLongException if the process does not produce output
    within TIMEOUT seconds; timeout is specified as a command line
    option to the Coverage class and is set on init.
    """
    logging.info('Running ' + str(cmdlist))
    t = RunProgramThread(cmdlist)
    retcode = t.RunUntilCompletion(self.options.timeout)
    if retcode:
      # NOTE(review): equality test means ignore_retcode=True only matches
      # retcode == 1 (True == 1 in Python) — confirm callers intend that.
      if ignore_error or retcode == ignore_retcode:
        logging.warning('COVERAGE: %s unhappy but errors ignored %s' %
                        (str(cmdlist), explanation or ''))
      else:
        logging.fatal('COVERAGE: %s failed; return code: %d' %
                      (str(cmdlist), retcode))
        sys.exit(retcode)
    return retcode
  def IsPosix(self):
    """Return True if we are POSIX (Mac or Linux; Windows is separate)."""
    return self.IsMac() or self.IsLinux()
  def IsMac(self):
    """Return True if we are Mac (sys.platform 'darwin')."""
    return sys.platform == 'darwin'
  def IsLinux(self):
    """Return True if we are Linux (sys.platform 'linux*')."""
    return sys.platform.startswith('linux')
  def IsWindows(self):
    """Return True if we are Windows (native or cygwin)."""
    return sys.platform in ('win32', 'cygwin')
  def ClearData(self):
    """Clear old gcda files and old coverage info files.

    No-op when --dont-clear-coverage-data was given.
    """
    if self.options.dont_clear_coverage_data:
      print 'Clearing of coverage data NOT performed.'
      return
    print 'Clearing coverage data from previous runs.'
    if os.path.exists(self.coverage_info_file):
      os.remove(self.coverage_info_file)
    if self.IsPosix():
      # Zero the gcov counters under the parent build directory.
      subprocess.call([self.lcov,
                       '--directory', self.directory_parent,
                       '--zerocounters'])
      # NOTE(review): rmtree raises if the 'coverage' dir is missing —
      # presumably __init__ always creates it first; confirm call order.
      shutil.rmtree(os.path.join(self.directory, 'coverage'))
  def BeforeRunOneTest(self, testname):
    """Do things before running each test.

    Windows only: shut down stale VSTS counters, instrument every test
    binary for coverage, then start fresh counters writing to
    self.vsts_output. No-op elsewhere.
    """
    if not self.IsWindows():
      return
    # Stop old counters if needed
    cmdlist = [self.perf, '-shutdown']
    self.Run(cmdlist, ignore_error=True)
    # Instrument binaries
    for fulltest in self.tests:
      if os.path.exists(fulltest):
        # See http://support.microsoft.com/kb/939818 for details on args
        cmdlist = [self.instrument, '/d:ignorecverr', '/COVERAGE', fulltest]
        # Return code 4 means "already instrumented" and is acceptable.
        self.Run(cmdlist, ignore_retcode=4,
                 explanation='OK with a multiple-instrument')
    # Start new counters
    cmdlist = [self.perf, '-start:coverage', '-output:' + self.vsts_output]
    self.Run(cmdlist)
  def BeforeRunAllTests(self):
    """Called right before we run all tests.

    Starts Xvfb on Linux (unless --xvfb=False) so tests have a DISPLAY.
    """
    if self.IsLinux() and self.options.xvfb:
      self.StartXvfb()
  def GtestFilter(self, fulltest, excl=None):
    """Return a --gtest_filter=BLAH for this test.

    Args:
      fulltest: full name of test executable
      excl: the exclusions list. Only set in a unit test;
        else uses gTestExclusions.
    Returns:
      String of the form '--gtest_filter=BLAH', or None.
    Raises:
      BadUserInput: if a recorded filter contains more than one '-'.
    """
    positive_gfilter_list = []
    negative_gfilter_list = []
    # Exclude all flaky and failing tests; they don't count for code coverage.
    negative_gfilter_list += ('*.FLAKY_*', '*.FAILS_*')
    if not self.options.no_exclusions:
      exclusions = excl or gTestExclusions
      excldict = exclusions.get(sys.platform)
      if excldict:
        for test in excldict.keys():
          # example: if base_unittests in ../blah/blah/base_unittests.exe
          if test in fulltest:
            negative_gfilter_list += excldict[test]
    fulltest_basename = os.path.basename(fulltest)
    if fulltest_basename in self.test_filters:
      # Filters look like "pos1:pos2:-neg1:neg2"; split on the single '-'.
      specific_test_filters = self.test_filters[fulltest_basename].split('-')
      if len(specific_test_filters) > 2:
        logging.error('Multiple "-" symbols in filter list: %s' %
                      self.test_filters[fulltest_basename])
        raise BadUserInput()
      if len(specific_test_filters) == 2:
        # Remove trailing ':'
        specific_test_filters[0] = specific_test_filters[0][:-1]
      if specific_test_filters[0]: # Test for no positive filters.
        positive_gfilter_list += specific_test_filters[0].split(':')
      if len(specific_test_filters) > 1:
        negative_gfilter_list += specific_test_filters[1].split(':')
    if not positive_gfilter_list and not negative_gfilter_list:
      return None
    result = '--gtest_filter='
    if positive_gfilter_list:
      result += ':'.join(positive_gfilter_list)
    if negative_gfilter_list:
      if positive_gfilter_list: result += ':'
      result += '-' + ':'.join(negative_gfilter_list)
    return result
def RunTests(self):
"""Run all unit tests and generate appropriate lcov files."""
self.BeforeRunAllTests()
for fulltest in self.tests:
if not os.path.exists(fulltest):
logging.info(fulltest + ' does not exist')
if self.options.strict:
sys.exit(2)
else:
logging.info('%s path exists' % fulltest)
cmdlist = [fulltest, '--gtest_print_time']
# If asked, make this REAL fast for testing.
if self.options.fast_test:
logging.info('Running as a FAST test for testing')
# cmdlist.append('--gtest_filter=RenderWidgetHost*')
# cmdlist.append('--gtest_filter=CommandLine*')
cmdlist.append('--gtest_filter=C*')
# Possibly add a test-specific --gtest_filter
filter = self.GtestFilter(fulltest)
if filter:
cmdlist.append(filter)
self.BeforeRunOneTest(fulltest)
logging.info('Running test ' + str(cmdlist))
try:
retcode = self.Run(cmdlist, ignore_retcode=True)
except SystemExit: # e.g. sys.exit() was called somewhere in here
raise
except: # can't "except WindowsError" since script runs on non-Windows
logging.info('EXCEPTION while running a unit test')
logging.info(traceback.format_exc())
retcode = 999
self.AfterRunOneTest(fulltest)
if retcode:
logging.info('COVERAGE: test %s failed; return code: %d.' %
(fulltest, retcode))
if self.options.strict:
logging.fatal('Test failure is fatal.')
sys.exit(retcode)
self.AfterRunAllTests()
  def AfterRunOneTest(self, testname):
    """Do things right after running each test.

    Windows only: stop the VSTS counters, move the produced .coverage
    file into place, and convert it to lcov. No-op elsewhere.
    """
    if not self.IsWindows():
      return
    # Stop counters
    cmdlist = [self.perf, '-shutdown']
    self.Run(cmdlist)
    full_output = self.vsts_output + '.coverage'
    shutil.move(full_output, self.vsts_output)
    # generate lcov!
    self.GenerateLcovWindows(testname)
  def AfterRunAllTests(self):
    """Do things right after running ALL tests."""
    # On POSIX we can do it all at once without running out of memory.
    # This contrasts with Windows where we must do it after each test.
    if self.IsPosix():
      self.GenerateLcovPosix()
    # Only on Linux do we have the Xvfb step.
    if self.IsLinux() and self.options.xvfb:
      self.StopXvfb()
  def StartXvfb(self):
    """Start Xvfb and set an appropriate DISPLAY environment. Linux only.

    Copied from http://src.chromium.org/viewvc/chrome/trunk/tools/buildbot/
    scripts/slave/slave_utils.py?view=markup
    with some simplifications (e.g. no need to use xdisplaycheck, save
    pid in var not file, etc). Records the server pid in self.xvfb_pid.
    """
    logging.info('Xvfb: starting')
    proc = subprocess.Popen(["Xvfb", ":9", "-screen", "0", "1024x768x24",
                             "-ac"],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    self.xvfb_pid = proc.pid
    if not self.xvfb_pid:
      logging.info('Could not start Xvfb')
      return
    os.environ['DISPLAY'] = ":9"
    # Now confirm, giving a chance for it to start if needed.
    logging.info('Xvfb: confirming')
    # Poll up to 10 times (0.5s apart) for xdpyinfo to succeed.
    for test in range(10):
      proc = subprocess.Popen('xdpyinfo >/dev/null', shell=True)
      pid, retcode = os.waitpid(proc.pid, 0)
      if retcode == 0:
        break
      time.sleep(0.5)
    if retcode != 0:
      logging.info('Warning: could not confirm Xvfb happiness')
    else:
      logging.info('Xvfb: OK')
def StopXvfb(self):
"""Stop Xvfb if needed. Linux only."""
if self.xvfb_pid:
logging.info('Xvfb: killing')
try:
os.kill(self.xvfb_pid, signal.SIGKILL)
except:
pass
del os.environ['DISPLAY']
self.xvfb_pid = 0
  def GenerateLcovPosix(self):
    """Convert profile data to lcov on Mac or Linux.

    Runs mcov over the parent build directory and writes
    self.coverage_info_file; exits fatally if the file is not produced.
    """
    start_dir = os.getcwd()
    logging.info('GenerateLcovPosix: start_dir=' + start_dir)
    if self.IsLinux():
      # With Linux/make (e.g. the coverage_run target), the current
      # directory for this command is .../build/src/chrome but we need
      # to be in .../build/src for the relative path of source files
      # to be correct. However, when run from buildbot, the current
      # directory is .../build. Accommodate.
      # On Mac source files are compiled with abs paths so this isn't
      # a problem.
      # This is a bit of a hack. The best answer is to require this
      # script be run in a specific directory for all cases (from
      # Makefile or from buildbot).
      if start_dir.endswith('chrome'):
        logging.info('coverage_posix.py: doing a "cd .." '
                     'to accomodate Linux/make PWD')
        os.chdir('..')
      elif start_dir.endswith('build'):
        logging.info('coverage_posix.py: doing a "cd src" '
                     'to accomodate buildbot PWD')
        os.chdir('src')
      else:
        logging.info('coverage_posix.py: NOT changing directory.')
    elif self.IsMac():
      pass
    command = [self.mcov,
               '--directory',
               os.path.join(start_dir, self.directory_parent),
               '--output',
               os.path.join(start_dir, self.coverage_info_file)]
    logging.info('Assembly command: ' + ' '.join(command))
    retcode = subprocess.call(command)
    if retcode:
      logging.fatal('COVERAGE: %s failed; return code: %d' %
                    (command[0], retcode))
      if self.options.strict:
        sys.exit(retcode)
    # Restore the working directory changed above.
    if self.IsLinux():
      os.chdir(start_dir)
    if not os.path.exists(self.coverage_info_file):
      logging.fatal('%s was not created.  Coverage run failed.' %
                    self.coverage_info_file)
      sys.exit(1)
def GenerateLcovWindows(self, testname=None):
"""Convert VSTS format to lcov. Appends coverage data to sum file."""
lcov_file = self.vsts_output + '.lcov'
if os.path.exists(lcov_file):
os.remove(lcov_file)
# generates the file (self.vsts_output + ".lcov")
cmdlist = [self.analyzer,
'-sym_path=' + self.directory,
'-src_root=' + self.src_root,
'-noxml',
self.vsts_output]
self.Run(cmdlist)
if not os.path.exists(lcov_file):
logging.fatal('Output file %s not created' % lcov_file)
sys.exit(1)
logging.info('Appending lcov for test %s to %s' %
(testname, self.coverage_info_file))
size_before = 0
if os.path.exists(self.coverage_info_file):
size_before = os.stat(self.coverage_info_file).st_size
src = open(lcov_file, 'r')
dst = open(self.coverage_info_file, 'a')
dst.write(src.read())
src.close()
dst.close()
size_after = os.stat(self.coverage_info_file).st_size
logging.info('Lcov file growth for %s: %d --> %d' %
(self.coverage_info_file, size_before, size_after))
  def GenerateHtml(self):
    """Convert lcov to html.

    Runs genhtml over self.coverage_info_file into
    self.output_directory; failure is fatal only under --strict.
    """
    # TODO(jrg): This isn't happy when run with unit_tests since V8 has a
    # different "base" so V8 includes can't be found in ".".  Fix.
    command = [self.genhtml,
               self.coverage_info_file,
               '--output-directory',
               self.output_directory]
    print >>sys.stderr, 'html generation command: ' + ' '.join(command)
    retcode = subprocess.call(command)
    if retcode:
      logging.fatal('COVERAGE: %s failed; return code: %d' %
                    (command[0], retcode))
      if self.options.strict:
        sys.exit(retcode)
def CoverageOptionParser():
  """Return an optparse.OptionParser() suitable for Coverage object creation."""
  parser = optparse.OptionParser()
  parser.add_option('-d',
                    '--directory',
                    dest='directory',
                    default=None,
                    help='Directory of unit test files')
  # NOTE(review): -a/-g/-f/-s default to False but lack
  # action='store_true', so on the command line each consumes a value
  # (e.g. "--genhtml 1"); only --dont-clear-coverage-data below uses
  # store_true.  Callers appear to rely on the value-taking form --
  # confirm before normalizing.
  parser.add_option('-a',
                    '--all_unittests',
                    dest='all_unittests',
                    default=False,
                    help='Run all tests we can find (*_unittests)')
  parser.add_option('-g',
                    '--genhtml',
                    dest='genhtml',
                    default=False,
                    help='Generate html from lcov output')
  parser.add_option('-f',
                    '--fast_test',
                    dest='fast_test',
                    default=False,
                    help='Make the tests run REAL fast by doing little.')
  parser.add_option('-s',
                    '--strict',
                    dest='strict',
                    default=False,
                    help='Be strict and die on test failure.')
  parser.add_option('-S',
                    '--src_root',
                    dest='src_root',
                    default='.',
                    help='Source root (only used on Windows)')
  parser.add_option('-t',
                    '--trim',
                    dest='trim',
                    default=True,
                    help='Trim out tests? Default True.')
  parser.add_option('-x',
                    '--xvfb',
                    dest='xvfb',
                    default=True,
                    help='Use Xvfb for tests? Default True.')
  # NOTE(review): type="int" converts command-line values only; when the
  # option is absent, options.timeout stays the float 300.0.
  parser.add_option('-T',
                    '--timeout',
                    dest='timeout',
                    default=5.0 * 60.0,
                    type="int",
                    help='Timeout before bailing if a subprocess has no output.'
                    ' Default is 5min  (Buildbot is 10min.)')
  parser.add_option('-B',
                    '--bundles',
                    dest='bundles',
                    default=None,
                    help='Filename of bundles for coverage.')
  parser.add_option('--build-dir',
                    dest='build_dir',
                    default=None,
                    help=('Working directory for buildbot build.'
                          'used for finding bundlefile.'))
  parser.add_option('--target',
                    dest='target',
                    default=None,
                    help=('Buildbot build target; '
                          'used for finding bundlefile (e.g. Debug)'))
  parser.add_option('--no_exclusions',
                    dest='no_exclusions',
                    default=None,
                    help=('Disable the exclusion list.'))
  parser.add_option('--dont-clear-coverage-data',
                    dest='dont_clear_coverage_data',
                    default=False,
                    action='store_true',
                    help=('Turn off clearing of cov data from a prev run'))
  parser.add_option('-F',
                    '--test-file',
                    dest="test_files",
                    default=[],
                    action='append',
                    help=('Specify a file from which tests to be run will ' +
                          'be extracted'))
  return parser
def main():
  """Entry point: parse options, then run and post-process coverage tests."""
  # Print out the args to help someone do it by hand if needed
  print >>sys.stderr, sys.argv

  # Try and clean up nice if we're killed by buildbot, Ctrl-C, ...
  signal.signal(signal.SIGINT, TerminateSignalHandler)
  signal.signal(signal.SIGTERM, TerminateSignalHandler)

  parser = CoverageOptionParser()
  (options, args) = parser.parse_args()
  coverage = Coverage(options, args)
  coverage.ClearData()
  coverage.FindTests()
  if options.trim:
    coverage.TrimTests()
  coverage.RunTests()
  if options.genhtml:
    coverage.GenerateHtml()
  return 0
if __name__ == '__main__':
sys.exit(main())
|
Maccimo/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyInvertIfConditionIntentionTest/commentsNoinspectionNoElseReturnFollowup.py
|
10
|
def func():
value = "not-none"
<caret>if value is None:
print("None")
return
# noinspection SomeInspection
print(value)
|
thomashaw/SecGen
|
refs/heads/master
|
modules/utilities/unix/labtainers/files/Labtainers-master/labs/vpnlab/server/MyHTTPServer.py
|
2
|
#!/usr/bin/env python
import SimpleHTTPServer
import SocketServer
import os
import sys
PORT = 80
class MyHTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Request handler that writes its access log to a file, not stderr."""
    #log_file = open('myhttplogfile.txt', 'w')
    # Class-level handle, opened once at import time and shared by all
    # requests for the life of the process.
    log_file = open('/var/log/myhttplogfile.txt', 'w')
    def log_message(self, format, *args):
        # Same "host - - [date] message" shape as the base class's
        # log_message; flush so entries survive an abrupt shutdown.
        self.log_file.write("%s - - [%s] %s\n" %
                            (self.client_address[0],
                             self.log_date_time_string(),
                             format%args))
        self.log_file.flush()
# Serve files rooted at the ubuntu user's home directory.
os.chdir('/home/ubuntu')
Handler = MyHTTPHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
# Python 2 print statement; the server then runs until killed.
print "serving at port", PORT
httpd.serve_forever()
|
keen99/SickRage
|
refs/heads/master
|
lib/sqlalchemy/dialects/postgresql/psycopg2.py
|
75
|
# postgresql/psycopg2.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+psycopg2
:name: psycopg2
:dbapi: psycopg2
:connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]
:url: http://pypi.python.org/pypi/psycopg2/
psycopg2 Connect Arguments
-----------------------------------
psycopg2-specific keyword arguments which are accepted by
:func:`.create_engine()` are:
* ``server_side_cursors``: Enable the usage of "server side cursors" for SQL
statements which support this feature. What this essentially means from a
psycopg2 point of view is that the cursor is created using a name, e.g.
``connection.cursor('some name')``, which has the effect that result rows are
not immediately pre-fetched and buffered after statement execution, but are
instead left on the server and only retrieved as needed. SQLAlchemy's
:class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering
behavior when this feature is enabled, such that groups of 100 rows at a
time are fetched over the wire to reduce conversational overhead.
Note that the ``stream_results=True`` execution option is a more targeted
way of enabling this mode on a per-execution basis.
* ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode
per connection. True by default.
* ``isolation_level``: This option, available for all Postgresql dialects,
includes the ``AUTOCOMMIT`` isolation level when using the psycopg2
dialect. See :ref:`psycopg2_isolation_level`.
Unix Domain Connections
------------------------
psycopg2 supports connecting via Unix domain connections. When the ``host``
portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
which specifies Unix-domain communication rather than TCP/IP communication::
create_engine("postgresql+psycopg2://user:password@/dbname")
By default, the socket file used is to connect to a Unix-domain socket
in ``/tmp``, or whatever socket directory was specified when PostgreSQL
was built. This value can be overridden by passing a pathname to psycopg2,
using ``host`` as an additional keyword argument::
create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")
See also:
`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
Per-Statement/Connection Execution Options
-------------------------------------------
The following DBAPI-specific options are respected when used with
:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`,
:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs:
* isolation_level - Set the transaction isolation level for the lifespan of a
:class:`.Connection` (can only be set on a connection, not a statement
or query). See :ref:`psycopg2_isolation_level`.
* stream_results - Enable or disable usage of psycopg2 server side cursors -
this feature makes use of "named" cursors in combination with special
result handling methods so that result rows are not fully buffered.
If ``None`` or not set, the ``server_side_cursors`` option of the
:class:`.Engine` is used.
Unicode
-------
By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE``
extension, such that the DBAPI receives and returns all strings as Python
Unicode objects directly - SQLAlchemy passes these values through without
change. Psycopg2 here will encode/decode string values based on the
current "client encoding" setting; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf-8``, as a more useful default::
#client_encoding = sql_ascii # actually, defaults to database
# encoding
client_encoding = utf8
A second way to affect the client encoding is to set it within Psycopg2
locally. SQLAlchemy will call psycopg2's ``set_client_encoding()``
method (see: http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding)
on all new connections based on the value passed to
:func:`.create_engine` using the ``client_encoding`` parameter::
engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8')
This overrides the encoding specified in the Postgresql client configuration.
.. versionadded:: 0.7.3
The psycopg2-specific ``client_encoding`` parameter to
:func:`.create_engine`.
SQLAlchemy can also be instructed to skip the usage of the psycopg2
``UNICODE`` extension and to instead utilize its own unicode encode/decode
services, which are normally reserved only for those DBAPIs that don't
fully support unicode directly. Passing ``use_native_unicode=False`` to
:func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``.
SQLAlchemy will instead encode data itself into Python bytestrings on the way
in and coerce from bytes on the way back,
using the value of the :func:`.create_engine` ``encoding`` parameter, which
defaults to ``utf-8``.
SQLAlchemy's own unicode encode/decode functionality is steadily becoming
obsolete as more DBAPIs support unicode fully along with the approach of
Python 3; in modern usage psycopg2 should be relied upon to handle unicode.
Transactions
------------
The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
.. _psycopg2_isolation_level:
Psycopg2 Transaction Isolation Level
-------------------------------------
As discussed in :ref:`postgresql_isolation_level`,
all Postgresql dialects support setting of transaction isolation level
both via the ``isolation_level`` parameter passed to :func:`.create_engine`,
as well as the ``isolation_level`` argument used by :meth:`.Connection.execution_options`.
When using the psycopg2 dialect, these options make use of
psycopg2's ``set_isolation_level()`` connection method, rather than
emitting a Postgresql directive; this is because psycopg2's API-level
setting is always emitted at the start of each transaction in any case.
The psycopg2 dialect supports these constants for isolation level:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
.. versionadded:: 0.8.2 support for AUTOCOMMIT isolation level when using
psycopg2.
NOTICE logging
---------------
The psycopg2 dialect will log Postgresql NOTICE messages via the
``sqlalchemy.dialects.postgresql`` logger::
import logging
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
.. _psycopg2_hstore:
HSTORE type
------------
The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of the
HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension
by default when it is detected that the target database has the HSTORE
type set up for use. In other words, when the dialect makes the first
connection, a sequence like the following is performed:
1. Request the available HSTORE oids using ``psycopg2.extras.HstoreAdapter.get_oids()``.
If this function returns a list of HSTORE identifiers, we then determine that
the ``HSTORE`` extension is present.
2. If the ``use_native_hstore`` flag is at its default of ``True``, and
we've detected that ``HSTORE`` oids are available, the
``psycopg2.extensions.register_hstore()`` extension is invoked for all
connections.
The ``register_hstore()`` extension has the effect of **all Python dictionaries
being accepted as parameters regardless of the type of target column in SQL**.
The dictionaries are converted by this extension into a textual HSTORE expression.
If this behavior is not desired, disable the
use of the hstore extension by setting ``use_native_hstore`` to ``False`` as follows::
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
use_native_hstore=False)
The ``HSTORE`` type is **still supported** when the ``psycopg2.extensions.register_hstore()``
extension is not used. It merely means that the coercion between Python dictionaries and the HSTORE
string format, on both the parameter side and the result side, will take
place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2`` which
may be more performant.
"""
from __future__ import absolute_import
import re
import logging
from ... import util, exc
import decimal
from ... import processors
from ...engine import result as _result
from ...sql import expression
from ... import types as sqltypes
from .base import PGDialect, PGCompiler, \
PGIdentifierPreparer, PGExecutionContext, \
ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\
_INT_TYPES
from .hstore import HSTORE
from .json import JSON
logger = logging.getLogger('sqlalchemy.dialects.postgresql')
class _PGNumeric(sqltypes.Numeric):
    """Numeric type that leans on psycopg2's native value handling.

    psycopg2 already returns ``Decimal`` for NUMERIC columns and
    ``float`` for float columns, so a conversion step is only required
    when the requested Python type differs from what the driver yields.
    """

    def bind_processor(self, dialect):
        # Outgoing values need no conversion.
        return None

    def result_processor(self, dialect, coltype):
        wants_decimal = self.asdecimal
        if coltype in _FLOAT_TYPES:
            if not wants_decimal:
                # Driver already returns float for float oids.
                return None
            return processors.to_decimal_processor_factory(
                decimal.Decimal,
                self._effective_decimal_return_scale)
        if coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
            if wants_decimal:
                # Driver returns Decimal natively for NUMERIC (oid 1700).
                return None
            return processors.to_float
        raise exc.InvalidRequestError(
            "Unknown PG numeric type: %d" % coltype)
class _PGEnum(ENUM):
    """ENUM variant forcing SQLAlchemy-side unicode coercion on Python 2.

    Registering PG's unicode extension per-OID isn't practical here
    because enum OIDs are created on the fly, so conversion happens in
    SQLAlchemy instead of in the driver.
    """

    def result_processor(self, dialect, coltype):
        if self.convert_unicode is True and util.py2k:
            self.convert_unicode = "force_nocheck"
        return super(_PGEnum, self).result_processor(dialect, coltype)
class _PGHStore(HSTORE):
    """HSTORE that steps aside when psycopg2's native hstore is active."""

    def bind_processor(self, dialect):
        # With register_hstore() in effect, psycopg2 adapts dicts itself.
        return (None if dialect._has_native_hstore
                else super(_PGHStore, self).bind_processor(dialect))

    def result_processor(self, dialect, coltype):
        return (None if dialect._has_native_hstore
                else super(_PGHStore, self).result_processor(dialect, coltype))
class _PGJSON(JSON):
    """JSON type that defers to psycopg2's native json handling if present."""

    def result_processor(self, dialect, coltype):
        if dialect._has_native_json:
            # psycopg2 >= 2.5 deserializes json columns itself.
            return None
        return super(_PGJSON, self).result_processor(dialect, coltype)
# When we're handed literal SQL, ensure it's a SELECT-query. Since
# 8.3, combining cursors and "FOR UPDATE" has been fine.
SERVER_SIDE_CURSOR_RE = re.compile(
    r'\s*SELECT',
    re.I | re.UNICODE)

# Process-wide counter used to build unique server-side cursor names.
_server_side_id = util.counter()
class PGExecutionContext_psycopg2(PGExecutionContext):
    """Execution context adding server-side ("named") cursor support
    and NOTICE logging on top of the generic PG context."""

    def create_cursor(self):
        # TODO: coverage for server side cursors + select.for_update()

        # Decide whether this statement runs on a server-side cursor.
        # With server_side_cursors enabled on the dialect, any
        # SELECT-like statement qualifies unless stream_results is
        # explicitly False; otherwise only statements that opt in via
        # stream_results=True qualify.
        if self.dialect.server_side_cursors:
            is_server_side = \
                self.execution_options.get('stream_results', True) and (
                    (self.compiled and isinstance(self.compiled.statement, expression.Selectable) \
                        or \
                        (
                            (not self.compiled or
                            isinstance(self.compiled.statement, expression.TextClause))
                            and self.statement and SERVER_SIDE_CURSOR_RE.match(self.statement))
                    )
                )
        else:
            is_server_side = \
                self.execution_options.get('stream_results', False)

        self.__is_server_side = is_server_side
        if is_server_side:
            # use server-side cursors:
            # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
            # The cursor name must be unique per connection.
            ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
            return self._dbapi_connection.cursor(ident)
        else:
            return self._dbapi_connection.cursor()

    def get_result_proxy(self):
        # TODO: ouch
        if logger.isEnabledFor(logging.INFO):
            self._log_notices(self.cursor)

        if self.__is_server_side:
            # Buffered proxy fetches rows in groups to cut round trips.
            return _result.BufferedRowResultProxy(self)
        else:
            return _result.ResultProxy(self)

    def _log_notices(self, cursor):
        # Forward PG NOTICE messages to the dialect logger, then clear
        # them so they aren't logged twice on a later statement.
        for notice in cursor.connection.notices:
            # NOTICE messages have a
            # newline character at the end
            logger.info(notice.rstrip())

        cursor.connection.notices[:] = []
class PGCompiler_psycopg2(PGCompiler):
    """Compiler tweaks for psycopg2's pyformat parameter style.

    Literal percent signs must be doubled so they are not mistaken for
    ``%(name)s`` bind-parameter markers by the DBAPI.
    """

    def visit_mod_binary(self, binary, operator, **kw):
        lhs = self.process(binary.left, **kw)
        rhs = self.process(binary.right, **kw)
        # Emit a doubled %% so the DBAPI sees a literal modulo operator.
        return "%s %%%% %s" % (lhs, rhs)

    def post_process_text(self, text):
        return text.replace('%', '%%')
class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
    """Identifier preparer that also doubles percent signs for pyformat."""

    def _escape_identifier(self, value):
        escaped = value.replace(self.escape_quote, self.escape_to_quote)
        return escaped.replace('%', '%%')
class PGDialect_psycopg2(PGDialect):
    """Postgresql dialect bound to the psycopg2 DBAPI.

    Wires in the psycopg2-specific execution context, compiler and
    identifier preparer, and negotiates optional native handling for
    unicode, hstore and json based on the detected psycopg2 version.
    """
    driver = 'psycopg2'
    if util.py2k:
        # Python 2 psycopg2 expects byte-string statements.
        supports_unicode_statements = False

    default_paramstyle = 'pyformat'
    supports_sane_multi_rowcount = False  # set to true based on psycopg2 version
    execution_ctx_cls = PGExecutionContext_psycopg2
    statement_compiler = PGCompiler_psycopg2
    preparer = PGIdentifierPreparer_psycopg2
    psycopg2_version = (0, 0)

    # Set for real in initialize(), once the server has been inspected.
    _has_native_hstore = False
    _has_native_json = False

    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.Numeric: _PGNumeric,
            ENUM: _PGEnum,  # needs force_unicode
            sqltypes.Enum: _PGEnum,  # needs force_unicode
            HSTORE: _PGHStore,
            JSON: _PGJSON
        }
    )

    def __init__(self, server_side_cursors=False, use_native_unicode=True,
                 client_encoding=None,
                 use_native_hstore=True,
                 **kwargs):
        """Construct the dialect.

        :param server_side_cursors: enable named (server-side) cursors
          for SELECT-like statements by default.
        :param use_native_unicode: register psycopg2's UNICODE extension
          per connection; also drives supports_unicode_binds.
        :param client_encoding: if set, applied to each new connection
          via set_client_encoding().
        :param use_native_hstore: allow psycopg2's register_hstore()
          when the server has the hstore extension.
        """
        PGDialect.__init__(self, **kwargs)
        self.server_side_cursors = server_side_cursors
        self.use_native_unicode = use_native_unicode
        self.use_native_hstore = use_native_hstore
        self.supports_unicode_binds = use_native_unicode
        self.client_encoding = client_encoding
        # Parse "X.Y[.Z]" out of the DBAPI version string, ignoring any
        # trailing tags (e.g. "2.5.1 (dt dec pq3 ext)").
        if self.dbapi and hasattr(self.dbapi, '__version__'):
            m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
                         self.dbapi.__version__)
            if m:
                self.psycopg2_version = tuple(
                    int(x)
                    for x in m.group(1, 2, 3)
                    if x is not None)

    def initialize(self, connection):
        super(PGDialect_psycopg2, self).initialize(connection)
        # Native hstore requires both the user opting in and the server
        # actually exposing hstore OIDs.
        self._has_native_hstore = self.use_native_hstore and \
            self._hstore_oids(connection.connection) \
            is not None
        self._has_native_json = self.psycopg2_version >= (2, 5)

        # http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9
        self.supports_sane_multi_rowcount = self.psycopg2_version >= (2, 0, 9)

    @classmethod
    def dbapi(cls):
        import psycopg2
        return psycopg2

    @util.memoized_property
    def _isolation_lookup(self):
        # Maps SQLAlchemy isolation level names to psycopg2 constants.
        from psycopg2 import extensions
        return {
            'AUTOCOMMIT': extensions.ISOLATION_LEVEL_AUTOCOMMIT,
            'READ COMMITTED': extensions.ISOLATION_LEVEL_READ_COMMITTED,
            'READ UNCOMMITTED': extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
            'REPEATABLE READ': extensions.ISOLATION_LEVEL_REPEATABLE_READ,
            'SERIALIZABLE': extensions.ISOLATION_LEVEL_SERIALIZABLE
        }

    def set_isolation_level(self, connection, level):
        """Apply an isolation level using psycopg2's own API.

        Raises ArgumentError for names not in _isolation_lookup.
        """
        try:
            level = self._isolation_lookup[level.replace('_', ' ')]
        except KeyError:
            raise exc.ArgumentError(
                "Invalid value '%s' for isolation_level. "
                "Valid isolation levels for %s are %s" %
                (level, self.name, ", ".join(self._isolation_lookup))
                )

        connection.set_isolation_level(level)

    def on_connect(self):
        """Build the composite per-connection setup callable.

        Each enabled feature (client encoding, isolation level, unicode
        extension, hstore extension, json deserializer) contributes one
        callback; they run in registration order on every new
        connection.  Returns None when nothing needs doing.
        """
        from psycopg2 import extras, extensions

        fns = []
        if self.client_encoding is not None:
            def on_connect(conn):
                conn.set_client_encoding(self.client_encoding)
            fns.append(on_connect)

        if self.isolation_level is not None:
            def on_connect(conn):
                self.set_isolation_level(conn, self.isolation_level)
            fns.append(on_connect)

        if self.dbapi and self.use_native_unicode:
            def on_connect(conn):
                extensions.register_type(extensions.UNICODE, conn)
                extensions.register_type(extensions.UNICODEARRAY, conn)
            fns.append(on_connect)

        if self.dbapi and self.use_native_hstore:
            def on_connect(conn):
                hstore_oids = self._hstore_oids(conn)
                if hstore_oids is not None:
                    oid, array_oid = hstore_oids
                    if util.py2k:
                        extras.register_hstore(conn, oid=oid,
                                               array_oid=array_oid,
                                               unicode=True)
                    else:
                        extras.register_hstore(conn, oid=oid,
                                               array_oid=array_oid)
            fns.append(on_connect)

        if self.dbapi and self._json_deserializer:
            def on_connect(conn):
                extras.register_default_json(conn, loads=self._json_deserializer)
            fns.append(on_connect)

        if fns:
            def on_connect(conn):
                for fn in fns:
                    fn(conn)
            return on_connect
        else:
            return None

    @util.memoized_instancemethod
    def _hstore_oids(self, conn):
        """Return (oid, array_oid) for hstore, or None if unavailable.

        Memoized per dialect instance; requires psycopg2 >= 2.4 for
        HstoreAdapter.get_oids().
        """
        if self.psycopg2_version >= (2, 4):
            from psycopg2 import extras
            oids = extras.HstoreAdapter.get_oids(conn)
            if oids is not None and oids[0]:
                return oids[0:2]
        return None

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user')
        if 'port' in opts:
            opts['port'] = int(opts['port'])
        opts.update(url.query)
        # psycopg2.connect() is driven entirely by keyword arguments.
        return ([], opts)

    def is_disconnect(self, e, connection, cursor):
        """Heuristically classify a DBAPI error as a dead connection.

        Matches known libpq / psycopg2 message fragments against the
        first line of the error text.
        """
        if isinstance(e, self.dbapi.Error):
            str_e = str(e).partition("\n")[0]
            for msg in [
                # these error messages from libpq: interfaces/libpq/fe-misc.c
                # and interfaces/libpq/fe-secure.c.
                # TODO: these are sent through gettext in libpq and we can't
                # check within other locales - consider using connection.closed
                'terminating connection',
                'closed the connection',
                'connection not open',
                'could not receive data from server',
                'could not send data to server',
                # psycopg2 client errors, psycopg2/connection.h, psycopg2/cursor.h
                'connection already closed',
                'cursor already closed',
                # not sure where this path is originally from, it may
                # be obsolete.   It really says "losed", not "closed".
                'losed the connection unexpectedly'
                ]:
                idx = str_e.find(msg)
                # Ignore matches that appear inside a quoted identifier.
                if idx >= 0 and '"' not in str_e[:idx]:
                    return True
        return False
# Entry point consulted by SQLAlchemy's dialect registry.
dialect = PGDialect_psycopg2
|
diegoguimaraes/django
|
refs/heads/master
|
django/contrib/gis/gdal/prototypes/generation.py
|
100
|
"""
This module contains functions that generate ctypes prototypes for the
GDAL routines.
"""
from ctypes import c_char_p, c_double, c_int, c_void_p
from django.contrib.gis.gdal.prototypes.errcheck import (
check_arg_errcode, check_errcode, check_geom, check_geom_offset,
check_pointer, check_srs, check_str_arg, check_string, check_const_string)
class gdal_char_p(c_char_p):
    # Distinct subclass so errcheck routines can recognize strings whose
    # memory was allocated by GDAL and must be freed after copying.
    pass
def double_output(func, argtypes, errcheck=False, strarg=False):
    """Generate a ctypes function that returns a C double.

    ``errcheck`` attaches the error-code validator; ``strarg`` attaches
    the string-argument validator.  When both flags are set the string
    check wins, matching the original assignment order.
    """
    func.argtypes, func.restype = argtypes, c_double
    if errcheck:
        func.errcheck = check_arg_errcode
    if strarg:
        func.errcheck = check_str_arg
    return func
def geom_output(func, argtypes, offset=None):
    """
    Generate a function that returns a Geometry either by reference
    or directly (if the return_geom keyword is set to True).
    """
    func.argtypes = argtypes
    if not offset:
        # The geometry pointer is the return value itself.
        func.restype = c_void_p
        func.errcheck = check_geom
        return func

    # Otherwise an error code is returned and the geometry comes back
    # by reference at the given argument offset.
    func.restype = c_int

    def geomerrcheck(result, func, cargs):
        return check_geom_offset(result, func, cargs, offset)
    func.errcheck = geomerrcheck

    return func
def int_output(func, argtypes):
    """Generate a ctypes function that returns an integer value."""
    func.argtypes, func.restype = argtypes, c_int
    return func
def srs_output(func, argtypes):
    """
    Generate a ctypes prototype returning a pointer to an OGR Spatial
    Reference System, validated through check_srs.
    """
    func.argtypes, func.restype = argtypes, c_void_p
    func.errcheck = check_srs
    return func
def const_string_output(func, argtypes, offset=None, decoding=None):
    """Generate a ctypes function returning a constant (non-owned) string.

    With ``offset`` the C routine reports an error code and the string
    is fetched by reference; otherwise the string is returned directly.
    ``decoding`` optionally converts the raw bytes to text.
    """
    func.argtypes = argtypes
    func.restype = c_int if offset else c_char_p

    def _check_const(result, func, cargs):
        res = check_const_string(result, func, cargs, offset=offset)
        return res.decode(decoding) if res and decoding else res
    func.errcheck = _check_const

    return func
def string_output(func, argtypes, offset=-1, str_result=False, decoding=None):
    """Generate a ctypes prototype that yields a string from a GDAL pointer.

    With ``str_result`` the routine returns the string itself (typed as
    gdal_char_p so the errcheck routine can free the GDAL-allocated
    memory); otherwise it returns a status code and the string is read
    by reference at ``offset``.  ``decoding`` optionally converts the
    raw bytes to text.
    """
    func.argtypes = argtypes
    func.restype = gdal_char_p if str_result else c_int

    # Error-checking closure bound to this prototype's offset/result mode.
    def _check_str(result, func, cargs):
        res = check_string(result, func, cargs,
                           offset=offset, str_result=str_result)
        return res.decode(decoding) if res and decoding else res
    func.errcheck = _check_str
    return func
def void_output(func, argtypes, errcheck=True):
    """Generate a ctypes function with no useful return value.

    When ``errcheck`` is true, the routine's integer status code is
    validated via check_errcode; otherwise the function is treated as
    genuinely returning void.
    """
    if argtypes:
        func.argtypes = argtypes
    if not errcheck:
        func.restype = None
        return func
    func.restype = c_int
    func.errcheck = check_errcode
    return func
def voidptr_output(func, argtypes):
    """Generate a ctypes function returning a raw c_void_p pointer."""
    func.argtypes, func.restype = argtypes, c_void_p
    func.errcheck = check_pointer
    return func
|
liushuaikobe/evermd
|
refs/heads/master
|
lib/evernote/api/__init__.py
|
12133432
| |
olegpshenichniy/Booktype
|
refs/heads/master
|
lib/booktypecontrol/migrations/__init__.py
|
12133432
| |
c-rhodes/hack2014
|
refs/heads/master
|
hack2014/category/__init__.py
|
12133432
| |
moreati/django
|
refs/heads/master
|
django/core/checks/security/__init__.py
|
12133432
| |
nlchap0/nlcpython
|
refs/heads/master
|
pywip/bin/example5.py
|
1
|
#!/usr/bin/env python
# A plot showing how to use different panels
from pywip import *
# Page layout: one shared viewport split into a 2x2 grid of panels.
viewport(0.2,0.9,0.3,0.8) # Set the viewport
default(size=1.1) # Set the default character size
panel(1,nx=2,ny=2,gapx=3)
# Note that this curve will not appear in the legend
plot([3],[0],color='w',limits=(-0.1,6.4,-1.3,1.3),text=None)
axis(verticaly=True,drawtickx=False,drawticky=False)
axis(box=(),number=(),drawtickx=False,drawticky=False,gridx=True,gridy=True,
style=':')
panel(2)
# Log-log scatter of plotdata.dat columns 1 vs 2, with y error bars.
plot(1,2,'plotdata.dat',style='s',fillcolor='k',limits=(0.1,10,0.11,5),
logx=True,logy=True,size=0.5,text='Data in panel 2')
errorbar(1,2,'plotdata.dat',yerr=4)
axis(gridx=True,gridy=True)
panel(3)
# Vector field read from columns 1-4 of vector.dat.
vector(1,2,3,4,'vector.dat',vent=0.35,size=0.25,start=0,limits=(-1.5,1.5,-1.5,1.5))
axis()
panel(4)
curve(text='Fake curve in panel 4',style='o')
axis()
legend(0.05,0.9,size=0.9)
# note the legend plots curves from all panels
savefig('plot-ex5.ps')
|
shashlik/android-skia
|
refs/heads/shashlik-kitkat
|
tools/test_pictures.py
|
67
|
'''
Compares the rendererings of serialized SkPictures to expected images.
Launch with --help to see more information.
Copyright 2012 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
# common Python modules
import os
import optparse
import sys
import shutil
import tempfile
# modules declared within this same directory
import test_rendering
USAGE_STRING = 'Usage: %s input... expectedDir'
HELP_STRING = '''
Takes input SkPicture files and renders them as PNG files, and then compares
those resulting PNG files against PNG files found in expectedDir.
Each instance of "input" can be either a file (name must end in .skp), or a
directory (in which case this script will process all .skp files within the
directory).
'''
def ModeParse(option, opt_str, value, parser):
    """Parses the --mode option of the commandline.

    The --mode option will either take in three parameters (if tile or
    pow2tile) or a single parameter (otherwise).  The extra parameters
    are consumed from parser.rargs, and the collected list is stored on
    parser.values under the option's dest.
    """
    result = [value]
    if value == "tile":
        if (len(parser.rargs) < 2):
            # Fixed typo in the original message ("mising").
            raise optparse.OptionValueError(("--mode tile missing width"
                                             " and/or height parameters"))
        result.extend(parser.rargs[:2])
        del parser.rargs[:2]
    elif value == "pow2tile":
        if (len(parser.rargs) < 2):
            raise optparse.OptionValueError(("--mode pow2tile missing minWidth"
                                             " and/or height parameters"))
        result.extend(parser.rargs[:2])
        del parser.rargs[:2]
    setattr(parser.values, option.dest, result)
def Main(args):
    """Render .skp inputs and compare the results against expectations.

    @param args The commandline argument list; args[1:-1] are the .skp
                inputs (files or directories) and args[-1] is the
                directory of expected PNGs.
    """
    parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING)
    parser.add_option('--render_dir', dest='render_dir',
                      help = ("specify the location to output the rendered "
                              "files. Default is a temp directory."))
    parser.add_option('--diff_dir', dest='diff_dir',
                      help = ("specify the location to output the diff files."
                              " Default is a temp directory."))
    parser.add_option('--mode', dest='mode', type='string',
                      action="callback", callback=ModeParse,
                      help = ("specify how rendering is to be done."))
    parser.add_option('--device', dest='device',
                      help = ("specify the device to render to."))
    options, arguments = parser.parse_args(args)

    # arguments[0] is the program name, so at least three entries are
    # needed for one input plus the expected directory.
    if (len(arguments) < 3):
        # Fixed typo in the original message ("ouput").
        print("Expected at least one input and one output folder.")
        parser.print_help()
        sys.exit(-1)

    inputs = arguments[1:-1]
    expected_dir = arguments[-1]

    # Forward the optional rendering mode/device to render_pictures.
    extra_args = ''
    if (options.mode is not None):
        extra_args += ' --mode %s' % ' '.join(options.mode)
    if (options.device is not None):
        extra_args += ' --device %s' % options.device

    test_rendering.TestRenderSkps(inputs, expected_dir, options.render_dir,
                                  options.diff_dir, 'render_pictures',
                                  extra_args)

if __name__ == '__main__':
    Main(sys.argv)
|
kmatzen/ansible
|
refs/heads/devel
|
lib/ansible/plugins/shell/__init__.py
|
1
|
# (c) 2016 RedHat
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import pipes
import ansible.constants as C
import time
import random
from ansible.compat.six import text_type
_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
class ShellBase(object):
def __init__(self):
self.env = dict(
LANG = C.DEFAULT_MODULE_LANG,
LC_ALL = C.DEFAULT_MODULE_LANG,
LC_MESSAGES = C.DEFAULT_MODULE_LANG,
)
def env_prefix(self, **kwargs):
env = self.env.copy()
env.update(kwargs)
return ' '.join(['%s=%s' % (k, pipes.quote(text_type(v))) for k,v in env.items()])
def join_path(self, *args):
return os.path.join(*args)
# some shells (eg, powershell) are snooty about filenames/extensions, this lets the shell plugin have a say
def get_remote_filename(self, base_name):
return base_name.strip()
def path_has_trailing_slash(self, path):
return path.endswith('/')
def chmod(self, mode, path):
path = pipes.quote(path)
return 'chmod %s %s' % (mode, path)
def remove(self, path, recurse=False):
path = pipes.quote(path)
cmd = 'rm -f '
if recurse:
cmd += '-r '
return cmd + "%s %s" % (path, self._SHELL_REDIRECT_ALLNULL)
def exists(self, path):
cmd = ['test', '-e', pipes.quote(path)]
return ' '.join(cmd)
def mkdtemp(self, basefile=None, system=False, mode=None):
if not basefile:
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile)
if system and (basetmp.startswith('$HOME') or basetmp.startswith('~/')):
basetmp = self.join_path('/tmp', basefile)
cmd = 'mkdir -p %s echo %s %s' % (self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
cmd += ' %s echo %s echo %s %s' % (self._SHELL_AND, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
# change the umask in a subshell to achieve the desired mode
# also for directories created with `mkdir -p`
if mode:
tmp_umask = 0o777 & ~mode
cmd = '%s umask %o %s %s %s' % (self._SHELL_GROUP_LEFT, tmp_umask, self._SHELL_AND, cmd, self._SHELL_GROUP_RIGHT)
return cmd
def expand_user(self, user_home_path):
''' Return a command to expand tildes in a path
It can be either "~" or "~username". We use the POSIX definition of
a username:
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276
'''
# Check that the user_path to expand is safe
if user_home_path != '~':
if not _USER_HOME_PATH_RE.match(user_home_path):
# pipes.quote will make the shell return the string verbatim
user_home_path = pipes.quote(user_home_path)
return 'echo %s' % user_home_path
def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None):
    """Assemble the one-line shell command that runs a module remotely:
    env assignments, interpreter (from the shebang), quoted module path,
    optional args file, and optional trailing tmp-dir cleanup."""
    # don't quote the cmd if it's an empty string, because this will break pipelining mode
    if cmd.strip() != '':
        cmd = pipes.quote(cmd)
    parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd]
    if arg_path is not None:
        parts.append(arg_path)
    new_cmd = " ".join(parts)
    if rm_tmp:
        new_cmd = '%s; rm -rf "%s" %s' % (new_cmd, rm_tmp, self._SHELL_REDIRECT_ALLNULL)
    return new_cmd
|
macs03/demo-cms
|
refs/heads/master
|
cms/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/response.py
|
328
|
# urllib3/response.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import zlib
import io
from ._collections import HTTPHeaderDict
from .exceptions import DecodeError
from .packages.six import string_types as basestring, binary_type
from .util import is_fp_closed
log = logging.getLogger(__name__)
class DeflateDecoder(object):
    """Decompressor tolerant of servers that send raw deflate streams.

    The first chunk is fed to a zlib-wrapped decompressor; if that raises,
    the decoder restarts in raw-deflate mode and replays the buffered bytes.
    """

    def __init__(self):
        self._first_try = True
        self._data = b''
        self._obj = zlib.decompressobj()

    def __getattr__(self, name):
        # Proxy everything else (flush, unused_data, ...) to the zlib object.
        return getattr(self._obj, name)

    def decompress(self, data):
        if not self._first_try:
            return self._obj.decompress(data)
        # Buffer input so it can be replayed if the zlib header turns out
        # to be absent.
        self._data += data
        try:
            return self._obj.decompress(data)
        except zlib.error:
            self._first_try = False
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._data)
            finally:
                self._data = None
def _get_decoder(mode):
if mode == 'gzip':
return zlib.decompressobj(16 + zlib.MAX_WBITS)
return DeflateDecoder()
class HTTPResponse(io.IOBase):
    """
    HTTP Response container.

    Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
    loaded and decoded on-demand when the ``data`` property is accessed.

    Extra parameters for behaviour not present in httplib.HTTPResponse:

    :param preload_content:
        If True, the response's body will be preloaded during construction.

    :param decode_content:
        If True, the body is decoded while reading, based on the
        'content-encoding' header (e.g. 'gzip', 'deflate'); if False the raw
        wire bytes are returned.

    :param original_response:
        When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
        object, it's convenient to include the original for debug purposes. It's
        otherwise unused.
    """

    # Encodings this class knows how to decode transparently.
    CONTENT_DECODERS = ['gzip', 'deflate']
    REDIRECT_STATUSES = [301, 302, 303, 307, 308]

    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                 strict=0, preload_content=True, decode_content=True,
                 original_response=None, pool=None, connection=None):
        self.headers = HTTPHeaderDict()
        if headers:
            self.headers.update(headers)
        self.status = status
        self.version = version
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content

        # Decoder is created lazily by read() once the encoding is known.
        self._decoder = None
        # Only string/bytes bodies are cached here; file-likes go to _fp.
        self._body = body if body and isinstance(body, basestring) else None
        self._fp = None
        self._original_response = original_response
        # Raw (wire, pre-decode) byte count consumed so far; see tell().
        self._fp_bytes_read = 0
        # Pool/connection pair used by release_conn() to return the socket.
        self._pool = pool
        self._connection = connection

        if hasattr(body, 'read'):
            self._fp = body

        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)

    def get_redirect_location(self):
        """
        Should we redirect and where to?

        :returns: Truthy redirect location string if we got a redirect status
            code and valid location. ``None`` if redirect status and no
            location. ``False`` if not a redirect status code.
        """
        if self.status in self.REDIRECT_STATUSES:
            return self.headers.get('location')
        return False

    def release_conn(self):
        # Return the underlying connection to its pool, if both are known.
        if not self._pool or not self._connection:
            return
        self._pool._put_conn(self._connection)
        self._connection = None

    @property
    def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
        # NOTE(review): an empty cached body is falsy, so access would fall
        # through and re-read; assumed not to occur in practice -- confirm.
        if self._body:
            return self._body
        if self._fp:
            return self.read(cache_content=True)

    def tell(self):
        """
        Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:``HTTPResponse.read`` if bytes
        are encoded on the wire (e.g, compressed).
        """
        return self._fp_bytes_read

    def read(self, amt=None, decode_content=None, cache_content=False):
        """
        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.

        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.

        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        # Note: content-encoding value should be case-insensitive, per RFC 2616
        # Section 3.5
        content_encoding = self.headers.get('content-encoding', '').lower()
        if self._decoder is None:
            if content_encoding in self.CONTENT_DECODERS:
                self._decoder = _get_decoder(content_encoding)
        if decode_content is None:
            # Fall back to the instance-wide default set at construction.
            decode_content = self.decode_content

        if self._fp is None:
            return

        flush_decoder = False

        try:
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read()
                flush_decoder = True
            else:
                cache_content = False
                data = self._fp.read(amt)
                if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do. However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do not
                    # properly close the connection in all cases. There is no harm
                    # in redundantly calling close.
                    self._fp.close()
                    flush_decoder = True

            # tell() reports raw wire bytes, so count before decoding.
            self._fp_bytes_read += len(data)

            try:
                if decode_content and self._decoder:
                    data = self._decoder.decompress(data)
            except (IOError, zlib.error) as e:
                raise DecodeError(
                    "Received response with content-encoding: %s, but "
                    "failed to decode it." % content_encoding,
                    e)

            if flush_decoder and decode_content and self._decoder:
                # Stream exhausted: drain anything buffered in the decoder.
                buf = self._decoder.decompress(binary_type())
                data += buf + self._decoder.flush()

            if cache_content:
                self._body = data

            return data

        finally:
            # Release the connection back to the pool once the original
            # httplib response reports it is fully consumed.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()

    def stream(self, amt=2**16, decode_content=None):
        """
        A generator wrapper for the read() method. A call will block until
        ``amt`` bytes have been read from the connection or until the
        connection is closed.

        :param amt:
            How much of the content to read. The generator will return up to
            ``amt`` bytes of data per iteration, but may return less. This is
            particularly likely when using compressed data. However, the empty
            string will never be returned.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        while not is_fp_closed(self._fp):
            data = self.read(amt=amt, decode_content=decode_content)
            if data:
                yield data

    @classmethod
    def from_httplib(ResponseCls, r, **response_kw):
        """
        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.

        Remaining parameters are passed to the HTTPResponse constructor, along
        with ``original_response=r``.
        """
        headers = HTTPHeaderDict()
        for k, v in r.getheaders():
            headers.add(k, v)
        # HTTPResponse objects in Python 3 don't have a .strict attribute
        strict = getattr(r, 'strict', 0)
        return ResponseCls(body=r,
                           headers=headers,
                           status=r.status,
                           version=r.version,
                           reason=r.reason,
                           strict=strict,
                           original_response=r,
                           **response_kw)

    # Backwards-compatibility methods for httplib.HTTPResponse
    def getheaders(self):
        return self.headers

    def getheader(self, name, default=None):
        return self.headers.get(name, default)

    # Overrides from io.IOBase
    def close(self):
        if not self.closed:
            self._fp.close()

    @property
    def closed(self):
        # A missing fp, a closed file-like, or an exhausted httplib response
        # all count as closed.
        if self._fp is None:
            return True
        elif hasattr(self._fp, 'closed'):
            return self._fp.closed
        elif hasattr(self._fp, 'isclosed'):  # Python 2
            return self._fp.isclosed()
        else:
            return True

    def fileno(self):
        if self._fp is None:
            raise IOError("HTTPResponse has no file to get a fileno from")
        elif hasattr(self._fp, "fileno"):
            return self._fp.fileno()
        else:
            raise IOError("The file-like object this HTTPResponse is wrapped "
                          "around has no file descriptor")

    def flush(self):
        if self._fp is not None and hasattr(self._fp, 'flush'):
            return self._fp.flush()

    def readable(self):
        return True
|
Zerknechterer/pyload
|
refs/heads/stable
|
module/plugins/hoster/SpeedLoadOrg.py
|
1
|
# -*- coding: utf-8 -*-
from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo
class SpeedLoadOrg(DeadHoster):
    # Dead-hoster stub: speedload.org is defunct, so DeadHoster makes every
    # download fail immediately with an "offline" message.
    __name__ = "SpeedLoadOrg"
    __type__ = "hoster"
    __version__ = "1.02"

    # Matches speedload.org file links; the file id is captured as 'ID'.
    __pattern__ = r'http://(?:www\.)?speedload\.org/(?P<ID>\w+)'
    __config__ = [] #@TODO: Remove in 0.4.10

    __description__ = """Speedload.org hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("stickell", "l.stickell@yahoo.it")]


# Module-level info callback required by the pyLoad plugin API.
getInfo = create_getInfo(SpeedLoadOrg)
|
saurabh6790/omni-apps
|
refs/heads/master
|
stock/report/warehouse_wise_stock_balance/warehouse_wise_stock_balance.py
|
30
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import flt
def execute(filters=None):
    """Report entry point: build the warehouse-wise stock balance.

    Returns (columns, rows) where each row is
    [item, item_name, description, warehouse, opening, in, out, balance, company].
    """
    filters = filters or {}
    columns = get_columns(filters)
    item_map = get_item_details(filters)
    iwb_map = get_item_warehouse_map(filters)

    rows = []
    for company in sorted(iwb_map):
        company_items = iwb_map[company]
        for item in sorted(company_items):
            for wh in sorted(company_items[item]):
                qty = company_items[item][wh]
                rows.append([
                    item,
                    item_map[item]["item_name"],
                    item_map[item]["description"],
                    wh,
                    qty.opening_qty,
                    qty.in_qty,
                    qty.out_qty,
                    qty.bal_qty,
                    company,
                ])
    return columns, rows
def get_columns(filters):
    """return columns based on filters"""
    # Format is "<label>:<fieldtype>[/<options>]:<width>" per webnotes reports.
    return [
        "Item:Link/Item:100",
        "Item Name::150",
        "Description::150",
        "Warehouse:Link/Warehouse:100",
        "Opening Qty:Float:90",
        "In Qty:Float:80",
        "Out Qty:Float:80",
        "Balance Qty:Float:90",
        "Company:Link/Company:100",
    ]
def get_conditions(filters):
    # Build the SQL WHERE-clause fragment for the posting-date window.
    # Both from_date and to_date are mandatory; a missing value aborts with a
    # user-facing message (raise_exception=1 raises inside webnotes.msgprint).
    conditions = ""
    if not filters.get("from_date"):
        webnotes.msgprint("Please enter From Date", raise_exception=1)
    if filters.get("to_date"):
        # NOTE(review): the date is interpolated directly into SQL; this is
        # safe only if to_date is validated upstream -- consider switching to
        # a parameterized query.
        conditions += " and posting_date <= '%s'" % filters["to_date"]
    else:
        webnotes.msgprint("Please enter To Date", raise_exception=1)

    return conditions
# Fetch all stock ledger entries (item, warehouse, posting date, qty, company)
# up to the report window's end date.
def get_stock_ledger_entries(filters):
    conditions = get_conditions(filters)
    # docstatus < 2 excludes cancelled documents.
    return webnotes.conn.sql("""select item_code, warehouse,
        posting_date, actual_qty, company
        from `tabStock Ledger Entry`
        where docstatus < 2 %s order by item_code, warehouse""" %
        conditions, as_dict=1)
def get_item_warehouse_map(filters):
    # Aggregate ledger entries into a nested map:
    #   company -> item_code -> warehouse -> {opening_qty, in_qty, out_qty, bal_qty}
    # Entries dated before from_date accumulate into opening_qty; entries
    # within [from_date, to_date] are split into in/out movements.
    sle = get_stock_ledger_entries(filters)
    iwb_map = {}

    for d in sle:
        iwb_map.setdefault(d.company, {}).setdefault(d.item_code, {}).\
            setdefault(d.warehouse, webnotes._dict({\
                "opening_qty": 0.0, "in_qty": 0.0, "out_qty": 0.0, "bal_qty": 0.0
            }))
        qty_dict = iwb_map[d.company][d.item_code][d.warehouse]

        if d.posting_date < filters["from_date"]:
            qty_dict.opening_qty += flt(d.actual_qty)
        elif d.posting_date >= filters["from_date"] and d.posting_date <= filters["to_date"]:
            # Positive quantities are receipts, negative ones are issues.
            if flt(d.actual_qty) > 0:
                qty_dict.in_qty += flt(d.actual_qty)
            else:
                qty_dict.out_qty += abs(flt(d.actual_qty))

        # Running balance includes opening as well as in-window movement.
        qty_dict.bal_qty += flt(d.actual_qty)

    return iwb_map
def get_item_details(filters):
    # Map item name -> row with item_name and description for report display.
    # setdefault keeps the first row should the query ever return duplicates.
    item_map = {}
    for d in webnotes.conn.sql("select name, item_name, description from tabItem", as_dict=1):
        item_map.setdefault(d.name, d)

    return item_map
|
huor/incubator-hawq
|
refs/heads/master
|
src/test/unit/mock/mocker.py
|
7
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import optparse
import os
import re
import subprocess
import sys
import special
class CFile(object):
    # Parsed view of a single C source file, able to emit a mocked-up copy
    # in which every function body is replaced by a cmockery stub.

    # multi-line comment
    m_comment_pat = re.compile(r'/\*.*?\*/', re.DOTALL)
    # single-line comment
    s_comment_pat = re.compile(r'//.*$', re.MULTILINE)
    # __attribute__((XXX)): it gets difficult to match arguments.
    # Remove it as it's a noisy keyword for us.
    attribute_pat = re.compile(r'__attribute__\s*\(\((format\s*\([^\)]+\)\s*|format_arg\s*\(\d+\)\s*|.+?)\)\)')
    # function pattern
    func_pat = re.compile(
        # modifier
        r'(?:(static|inline|__inline__|__inline|__MAYBE_UNUSED)\s+)*' +
        # rettype
        r'((?:const\s+)?(?:struct\s+|unsigned\s+)?\w+(?:[\s\*]+|\s+))(?:inline\s+|static\s+|__MAYBE_UNUSED\s+)?' +
        # funcname
        r'(\w+)\s*'
        # arguments
        r'\(([^{}\)]*?)\)\s*{', re.DOTALL)
    # static variable pattern
    # Currently this requires static keyword at the beginning of line.
    ###staticvar_pat = re.compile(r'^static.+?;', re.MULTILINE | re.DOTALL)

    def __init__(self, path, options):
        # path: C source file to parse; options: parsed optparse options
        # (only .src_dir is used here, by the optional make_i() path).
        self.path = os.path.abspath(path)
        self.options = options
        #with open(self.make_i()) as f:
        with open(self.path) as f:
            self.content = self.strip(f.read())

    def make_i(self):
        """create .i file from .c by using preprocessor with existing make
        system. The CPPFLAGS may be different from time/env to time/env.
        make will be the best way to preprocess it so far. Note we need
        not only header file directory but also some definitions. For
        example some debug symbols may not be found in the existing object
        files if we didn't pass debug #define.

        XXX: Currently we don't need this, but leave it now for future use.
        """
        i_path = '{stem}.i'.format(stem=os.path.splitext(self.path)[0])
        subprocess.check_call(['make', '--quiet', '-C', self.options.src_dir, i_path])
        return i_path

    def strip(self, content):
        """strip comments in the content
        """
        content = CFile.m_comment_pat.sub('', content)
        # backend/libpq/be-secure.c contains private key with '//'
        if 'be-secure' not in self.path and 'guc.c' not in self.path and 'hd_work_mgr.c' not in self.path and 'fd.c' not in self.path:
            content = CFile.s_comment_pat.sub('', content)
        content = CFile.attribute_pat.sub('', content)
        return content

    def skip_func_body(self, content, index):
        """Skip function body by finding a line starting with a closing brace.
        We wanted to count the number of open/close braces, but some file has
        weird code block based on preprocessor directives.
        """
        pat = re.compile(r'^}\s*$', re.MULTILINE)
        if 'cdbfilerepconnserver' in self.path:
            # FIXIT!: some of the files have unpleasant format.
            pat = re.compile(r'^ ?}', re.MULTILINE)
        m = pat.search(content, index)
        if m:
            if 'cdbgroup' in self.path:
                # Skip a closing brace that is immediately followed by #endif;
                # the real end of the function comes later.
                if content[m.end()+1:].startswith('#endif'):
                    return self.skip_func_body(content, m.end())
            return m.end()
        # NOTE(review): StandardError exists only in Python 2; this module is
        # Python 2 code throughout.
        raise StandardError('unexpected syntax')

    def to_mock(self):
        """Mock up this file. The basic idea is to replace function body
        with mocked up source. Other parts are preserved. Otherwise,
        the source code messed up because of preprocessor directives.
        """
        content = self.content
        prev = 0
        result = ''
        for (func, m) in self.match_functions():
            spos = m.start()
            epos = m.end()
            # Copy everything between the previous function and this one
            # verbatim, then emit the mocked body.
            result += content[prev:spos]
            result += func.to_mock()
            prev = self.skip_func_body(content, epos)
        result += content[prev:]
        return result

    def match_functions(self):
        """Iterator of function pattern matching.
        """
        content = self.content
        for m in CFile.func_pat.finditer(content):
            (modifier, rettype, funcname, args) = m.groups('')
            # 'else if(...){}' looks like a function. Ignore it.
            if funcname in ['if', 'while', 'switch', 'for', 'foreach',
                            'yysyntax_error', 'defined']:
                continue
            if rettype.strip() in ['define']:
                continue
            func = FuncSignature(modifier, rettype, funcname, args)
            yield (func, m)
class MockFile(object):
    # Writes the mocked version of a CFile into the mock output tree.

    def __init__(self, cfile, options):
        self.cfile = cfile
        self.options = options
        self.outname = self.output_filename()

    def output_filename(self):
        """outname is src/test/unit/mock/backend/{path}/{stem}_mock.c
        """
        src_dir = self.options.src_dir
        relpath = os.path.relpath(self.cfile.path, src_dir)
        out_dir = self.options.out_dir
        out_dir = os.path.join(out_dir, os.path.dirname(relpath))
        (stem, ext) = os.path.splitext(os.path.basename(relpath))
        if not os.path.exists(out_dir):
            try:
                os.makedirs(out_dir)
            except OSError:
                # Tolerate a concurrent mkdir of the same directory.
                pass
        return os.path.join(out_dir, '{stem}_mock.c'.format(stem=stem))

    def mock(self):
        # Emit the fixed cmockery preamble followed by the mocked source.
        outname = self.outname
        with open(outname, 'w') as f:
            f.write("""/*
 *
 * Auto-generated Mocking Source
 *
 */
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include "cmockery.h"
""")
            f.write(self.cfile.to_mock())
        return
class FuncSignature(object):
# This pattern needs to be fixed; if the argname is not present,
# we need extra space at the end.
arg_pat = re.compile(
# argtype. i.e. 'const unsigned long', 'struct Foo *', 'const char * const'
r'((?:register\s+|const\s+|volatile\s+)*(?:enum\s+|struct\s+|unsigned\s+|long\s+)?' +
r'\w+(?:[\s\*]+)(?:const[\s\*]+)?|\s+)' +
r'(?:__restrict\s+)?' +
# argname. We accept 'arg[]'
r'([\w\[\]]+)?')
Variadic = object()
def __init__(self, modifier, rettype, funcname, args):
self.modifier = modifier.strip()
self.rettype = re.sub('inline', '', rettype).strip()
self.rettype = re.sub('__MAYBE_UNUSED', '', rettype).strip()
self.funcname = funcname.strip()
self.args = self.parse_args(args)
def is_local(self):
"""Am I a local function?
"""
return bool(self.modifier)
def is_pointer_type(self, argtype):
"""Is the type pointer?
"""
return argtype[-1] == '*'
def is_variadic(self, arg):
return arg == FuncSignature.Variadic
def parse_args(self, arg_string):
args = []
arg_string = re.sub(r'\s+', ' ', arg_string)
if arg_string == 'void' or arg_string == '':
return args
for (i, arg) in enumerate(arg_string.split(',')):
arg = arg.strip()
# TODO: needs work
if arg == '...':
args.append(FuncSignature.Variadic)
continue
elif arg == 'PG_FUNCTION_ARGS':
args.append(('FunctionCallInfo', 'fcinfo'))
continue
elif arg == 'SIGNAL_ARGS':
args.append(('int', 'signal_args'))
continue
# general case
m = FuncSignature.arg_pat.match(arg.strip())
if not m:
print '%s %s(%s)' % (self.rettype, self.funcname, arg_string)
argtype = m.group(1)
argname = m.group(2) if m.group(2) else 'arg' + str(i)
args.append((argtype.strip(), argname.strip()))
return args
def format_args(self):
buf = []
for arg in self.args:
if self.is_variadic(arg):
buf.append('...')
continue
argtype = arg[0]
argname = arg[1]
buf.append(argtype + ' ' + argname)
if not buf:
buf = ['void']
return ', '.join(buf)
def make_body(self):
body = special.SpecialFuncs.make_body(self)
if body:
return body
subscript = re.compile('\[\d*\]$')
# otherwise, general method
buf = []
# emit check_expected()
for arg in self.args:
if self.is_variadic(arg):
continue
argtype = arg[0]
argname = arg[1]
ref = '&' if special.ByValStructs.has(argtype) else ''
argname = subscript.sub('', argname)
buf.append('\tcheck_expected({ref}{arg});'.format(ref=ref, arg=argname))
# if the type is pointer, call optional_assignment()
for arg in self.args:
if self.is_variadic(arg):
continue
argtype = arg[0]
argname = arg[1]
if not self.is_pointer_type(argtype):
continue
ref = '&' if special.ByValStructs.has(argtype) else ''
argname = subscript.sub('', argname)
buf.append('\toptional_assignment({ref}{arg});'.format(ref=ref, arg=argname))
# Currently, local function doesn't check arguments.
if self.is_local():
buf = []
if special.ByValStructs.has(self.rettype):
ret = ('\t{rettype} *ret = ({rettype} *) mock();\n' +
'\treturn *ret;').format(rettype=self.rettype)
elif self.rettype != 'void':
ret = '\treturn ({cast}) mock();'.format(cast=self.rettype)
else:
ret = '\tmock();'
buf.append(ret)
return '\n'.join(buf)
def to_mock(self):
mod_ret = self.rettype
if self.modifier:
mod_ret = self.modifier + ' ' + mod_ret
return """
{mod_ret}
{name}({args})
{{
{body}
}}
""".format(mod_ret=mod_ret, name=self.funcname, args=self.format_args(),
body=self.make_body())
def main():
    """Command-line entry point: generate the mock for one C source file.

    Usage: mocker.py [--out-dir DIR] [--src-dir DIR] path/to/file.c
    """
    logging.basicConfig(level=logging.INFO)
    try:
        here = os.path.dirname(os.path.realpath(__file__))
        parser = optparse.OptionParser()
        parser.add_option('--out-dir',
                          dest='out_dir',
                          default=os.path.join(here, '.'))
        parser.add_option('--src-dir',
                          dest='src_dir',
                          default=os.path.join(here, '../../..'))
        (options, args) = parser.parse_args()
        if len(args) < 1:
            parser.error('insufficient arguments')
        MockFile(CFile(args[0], options), options).mock()
    except Exception as e:
        logging.error('Error has occurred during parsing %s: %s' % (args[0], str(e)))
        raise


if __name__ == '__main__':
    main()
|
podgorskiy/TinyFEM
|
refs/heads/master
|
libs/cegui/cegui/src/ScriptModules/Python/bindings/distutils/setup.py
|
14
|
import sys

# Hard-coded local build locations; adjust these for the build machine.
CEGUI_BASEDIR = "C:\\Users\\Admin\\Devel\\CEGUI\\cegui_mk2"
is_64bits = sys.maxsize > 2**32
# NOTE(review): a 64-bit Python selects the "(x86)" boost path here -- verify
# that this is the intended bitness for the extension being built.
BOOST_BASEDIR = "C:\\Program Files (x86)\\boost\\boost_1_46_1" if is_64bits else "C:\\Program Files\\boost\\boost_1_46_1"

from distutils.core import setup, Extension
from distutils.sysconfig import get_python_inc
import os
from glob import *

global_defines = []# [("CEGUI_STATIC", "1")]
# turn on exceptions for MSVC
global_extra_compile_args = ["/EHsc"]
global_include_dirs = [get_python_inc(plat_specific = True), "../", "../../../../../include", "../../../../../../build/cegui/include", BOOST_BASEDIR]
global_library_dirs = [CEGUI_BASEDIR + "/build/lib", BOOST_BASEDIR + "/lib"]
# Windows is special and picks the libraries magically!
global_libraries = []

# Generated binding sources, one directory per CEGUI module.
PyCEGUI_sources = glob(os.path.join("..", "output", "CEGUI", "*.cpp"))
PyCEGUIOpenGLRenderer_sources = glob(os.path.join("..", "output", "CEGUIOpenGLRenderer", "*.cpp"))
PyCEGUIOgreRenderer_sources = glob(os.path.join("..", "output", "CEGUIOgreRenderer", "*.cpp"))
PyCEGUINullRenderer_sources = glob(os.path.join("..", "output", "CEGUINullRenderer", "*.cpp"))
# Package definition for the PyCEGUI Windows distribution.
# BUGFIX below: a missing comma after "Programming Language :: C++" made
# Python concatenate it with the following string into one invalid
# classifier ("Programming Language :: C++Topic :: Games/Entertainment").
setup(
    name = "PyCEGUI",
    version = "0.8",
    description = "Python bindings for CEGUI library",
    long_description =
"""Crazy Eddie's GUI System is a free library providing windowing
and widgets for graphics APIs / engines where such functionality
is not natively available, or severely lacking. The library is
object orientated, written in C++, and targeted at games developers
who should be spending their time creating great games, not building GUI sub-systems.

note: For Linux and MacOSX packages, see http://www.cegui.org.uk, we provide them
in SDKs. Distutils package is only provided for Windows since it's hard to
install the binding there as it involves lots of wrappers and nasty tricks.
Shame on you Windows!""",
    author = "CEGUI team",
    author_email = "team@cegui.org.uk",
    #maintainer = "Martin Preisler", # authors get shadowed by this
    #maintainer_email = "preisler.m@gmail.com",
    url = "http://www.cegui.org.uk",
    license = "MIT",
    platforms = ["Windows", "Linux", "MacOSX"],
    classifiers = [
        "Development Status :: 4 - Beta",
        "Environment :: MacOS X",
        "Environment :: Win32 (MS Windows)",
        "Environment :: X11 Applications",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Programming Language :: Python",
        "Programming Language :: C++",  # comma was missing here
        "Topic :: Games/Entertainment",
        "Topic :: Multimedia :: Graphics",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Software Development :: User Interfaces",
        "Topic :: Software Development :: Widget Sets",
    ],

    packages = [
        "PyCEGUI" # nasty nasty wrapper for a nasty nasty OS
    ],

    # this doesn't work as I expected so I am doing data files further down
    #package_data = { "PyCEGUI/datafiles": [CEGUI_BASEDIR + "/datafiles/*"] },

    ext_modules = [
        Extension(
            "PyCEGUI.PyCEGUI",
            PyCEGUI_sources,
            define_macros = global_defines,
            extra_compile_args = global_extra_compile_args,
            include_dirs = global_include_dirs + ["../output/CEGUI"],
            library_dirs = global_library_dirs,
            libraries = ["CEGUIBase"] + global_libraries,
        ),
        Extension(
            "PyCEGUIOpenGLRenderer",
            PyCEGUIOpenGLRenderer_sources,
            define_macros = global_defines,
            extra_compile_args = global_extra_compile_args,
            include_dirs = global_include_dirs + ["../output/CEGUIOpenGLRenderer"],
            library_dirs = global_library_dirs,
            libraries = ["CEGUIBase", "CEGUIOpenGLRenderer"] + global_libraries,
        ),
        # no ogre for now, I will use SDK binaries later for this
        #Extension(
        #    "PyCEGUIOgreRenderer",
        #    PyCEGUIOgreRenderer_sources,
        #    define_macros = global_defines,
        #    extra_compile_args = global_extra_compile_args,
        #    include_dirs = global_include_dirs + ["../output/CEGUIOgreRenderer"],
        #    library_dirs = global_library_dirs,
        #    libraries = ["CEGUIBase", "CEGUIOgreRenderer"] + global_libraries,
        #),
        Extension(
            "PyCEGUINullRenderer",
            PyCEGUINullRenderer_sources,
            define_macros = global_defines,
            extra_compile_args = global_extra_compile_args,
            include_dirs = global_include_dirs + ["../output/CEGUINullRenderer"],
            library_dirs = global_library_dirs,
            libraries = ["CEGUIBase", "CEGUINullRenderer"] + global_libraries,
        ),
    ],

    # the first string is directory where the files should go
    # - leave empty for C:/Python26 for example
    data_files = [
        # we have to bundle CEGUIBase.dll, CEGUIOpenGLRenderer.dll, etc...
        ("Lib/site-packages/PyCEGUI",
            # this is obviously a workaround, I would be happy to hear what the clean
            # solution should look like
            [
                BOOST_BASEDIR + "/lib/boost_python-vc90-mt-1_46_1.dll",
                CEGUI_BASEDIR + "/build/bin/CEGUIBase.dll",
                CEGUI_BASEDIR + "/build/bin/CEGUIOpenGLRenderer.dll",
                CEGUI_BASEDIR + "/build/bin/CEGUINullRenderer.dll",
                CEGUI_BASEDIR + "/build/lib/CEGUICoreWindowRendererSet.dll",
                CEGUI_BASEDIR + "/build/lib/CEGUIFreeImageImageCodec.dll",
                CEGUI_BASEDIR + "/dependencies/bin/freeimage.dll",
                CEGUI_BASEDIR + "/build/lib/CEGUIExpatParser.dll",
            ]
        ),

        # distutils doesn't allow to bundle folders (or to be precise: I have no idea how to do that)
        # therefore I do this the ugly way!
        ("Lib/site-packages/PyCEGUI/datafiles/animations",
            glob(os.path.join(CEGUI_BASEDIR + "/datafiles/animations", "*")),
        ),
        ("Lib/site-packages/PyCEGUI/datafiles/configs",
            glob(os.path.join(CEGUI_BASEDIR + "/datafiles/configs", "*")),
        ),
        ("Lib/site-packages/PyCEGUI/datafiles/fonts",
            glob(os.path.join(CEGUI_BASEDIR + "/datafiles/fonts", "*")),
        ),
        ("Lib/site-packages/PyCEGUI/datafiles/imagesets",
            glob(os.path.join(CEGUI_BASEDIR + "/datafiles/imagesets", "*")),
        ),
        ("Lib/site-packages/PyCEGUI/datafiles/layouts",
            glob(os.path.join(CEGUI_BASEDIR + "/datafiles/layouts", "*")),
        ),
        ("Lib/site-packages/PyCEGUI/datafiles/looknfeel",
            glob(os.path.join(CEGUI_BASEDIR + "/datafiles/looknfeel", "*")),
        ),
        ("Lib/site-packages/PyCEGUI/datafiles/lua_scripts",
            glob(os.path.join(CEGUI_BASEDIR + "/datafiles/lua_scripts", "*")),
        ),
        ("Lib/site-packages/PyCEGUI/datafiles/schemes",
            glob(os.path.join(CEGUI_BASEDIR + "/datafiles/schemes", "*")),
        ),
        ("Lib/site-packages/PyCEGUI/datafiles/xml_schemas",
            glob(os.path.join(CEGUI_BASEDIR + "/datafiles/xml_schemas", "*")),
        )
    ]
)
|
y-usuzumi/survive-the-course
|
refs/heads/master
|
leetcode/40.Combination_Sum_II/test.py
|
1
|
import unittest
from main import Solution
class SolutionTest(unittest.TestCase):
    # Unit tests for LeetCode 40 (Combination Sum II).

    def setUp(self):
        # Fresh Solution instance for each test.
        self.sol = Solution()

    def test_1(self):
        # The order in which combinations are produced is implementation
        # defined, so compare the sorted lists instead.
        self.assertEqual(
            sorted(self.sol.combinationSum2([10, 1, 2, 7, 6, 1, 5], 8)),
            sorted([
                [1, 7],
                [1, 2, 5],
                [2, 6],
                [1, 1, 6]
            ])
        )
|
numenta-ci/nupic
|
refs/heads/master
|
src/nupic/datafiles/extra/secondOrder/makeDataset.py
|
34
|
#! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets
"""
import numpy
from nupic.data.file_record_stream import FileRecordStream
def _generateModel0(numCategories):
""" Generate the initial, first order, and second order transition
probabilities for 'model0'. For this model, we generate the following
set of sequences:
1-2-3 (4X)
1-2-4 (1X)
5-2-3 (1X)
5-2-4 (4X)
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table.
Here is an example of some return values:
initProb: [0.7, 0.2, 0.1]
firstOrder: {'[0]': [0.3, 0.3, 0.4],
'[1]': [0.3, 0.3, 0.4],
'[2]': [0.3, 0.3, 0.4]}
secondOrder: {'[0,0]': [0.3, 0.3, 0.4],
'[0,1]': [0.3, 0.3, 0.4],
'[0,2]': [0.3, 0.3, 0.4],
'[1,0]': [0.3, 0.3, 0.4],
'[1,1]': [0.3, 0.3, 0.4],
'[1,2]': [0.3, 0.3, 0.4],
'[2,0]': [0.3, 0.3, 0.4],
'[2,1]': [0.3, 0.3, 0.4],
'[2,2]': [0.3, 0.3, 0.4]}
"""
# ===============================================================
# Let's model the following:
# a-b-c (4X)
# a-b-d (1X)
# e-b-c (1X)
# e-b-d (4X)
# --------------------------------------------------------------------
# Initial probabilities, 'a' and 'e' equally likely
initProb = numpy.zeros(numCategories)
initProb[0] = 0.5
initProb[4] = 0.5
# --------------------------------------------------------------------
# 1st order transitions
# both 'a' and 'e' should lead to 'b'
firstOrder = dict()
for catIdx in range(numCategories):
key = str([catIdx])
probs = numpy.ones(numCategories) / numCategories
if catIdx == 0 or catIdx == 4:
probs.fill(0)
probs[1] = 1.0 # lead only to b
firstOrder[key] = probs
# --------------------------------------------------------------------
# 2nd order transitions
# a-b should lead to c 80% and d 20%
# e-b should lead to c 20% and d 80%
secondOrder = dict()
for firstIdx in range(numCategories):
for secondIdx in range(numCategories):
key = str([firstIdx, secondIdx])
probs = numpy.ones(numCategories) / numCategories
if key == str([0,1]):
probs.fill(0)
probs[2] = 0.80 # 'ab' leads to 'c' 80% of the time
probs[3] = 0.20 # 'ab' leads to 'd' 20% of the time
elif key == str([4,1]):
probs.fill(0)
probs[2] = 0.20 # 'eb' leads to 'c' 20% of the time
probs[3] = 0.80 # 'eb' leads to 'd' 80% of the time
secondOrder[key] = probs
return (initProb, firstOrder, secondOrder, 3)
def _generateModel1(numCategories):
""" Generate the initial, first order, and second order transition
probabilities for 'model1'. For this model, we generate the following
set of sequences:
0-10-15 (1X)
0-11-16 (1X)
0-12-17 (1X)
0-13-18 (1X)
0-14-19 (1X)
1-10-20 (1X)
1-11-21 (1X)
1-12-22 (1X)
1-13-23 (1X)
1-14-24 (1X)
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table.
Here is an example of some return values:
initProb: [0.7, 0.2, 0.1]
firstOrder: {'[0]': [0.3, 0.3, 0.4],
'[1]': [0.3, 0.3, 0.4],
'[2]': [0.3, 0.3, 0.4]}
secondOrder: {'[0,0]': [0.3, 0.3, 0.4],
'[0,1]': [0.3, 0.3, 0.4],
'[0,2]': [0.3, 0.3, 0.4],
'[1,0]': [0.3, 0.3, 0.4],
'[1,1]': [0.3, 0.3, 0.4],
'[1,2]': [0.3, 0.3, 0.4],
'[2,0]': [0.3, 0.3, 0.4],
'[2,1]': [0.3, 0.3, 0.4],
'[2,2]': [0.3, 0.3, 0.4]}
"""
# --------------------------------------------------------------------
# Initial probabilities, 0 and 1 equally likely
initProb = numpy.zeros(numCategories)
initProb[0] = 0.5
initProb[1] = 0.5
# --------------------------------------------------------------------
# 1st order transitions
# both 0 and 1 should lead to 10,11,12,13,14 with equal probability
firstOrder = dict()
for catIdx in range(numCategories):
key = str([catIdx])
probs = numpy.ones(numCategories) / numCategories
if catIdx == 0 or catIdx == 1:
indices = numpy.array([10,11,12,13,14])
probs.fill(0)
probs[indices] = 1.0 # lead only to b
probs /= probs.sum()
firstOrder[key] = probs
# --------------------------------------------------------------------
# 2nd order transitions
# 0-10 should lead to 15
# 0-11 to 16
# ...
# 1-10 should lead to 20
# 1-11 shold lean to 21
# ...
secondOrder = dict()
for firstIdx in range(numCategories):
for secondIdx in range(numCategories):
key = str([firstIdx, secondIdx])
probs = numpy.ones(numCategories) / numCategories
if key == str([0,10]):
probs.fill(0)
probs[15] = 1
elif key == str([0,11]):
probs.fill(0)
probs[16] = 1
elif key == str([0,12]):
probs.fill(0)
probs[17] = 1
elif key == str([0,13]):
probs.fill(0)
probs[18] = 1
elif key == str([0,14]):
probs.fill(0)
probs[19] = 1
elif key == str([1,10]):
probs.fill(0)
probs[20] = 1
elif key == str([1,11]):
probs.fill(0)
probs[21] = 1
elif key == str([1,12]):
probs.fill(0)
probs[22] = 1
elif key == str([1,13]):
probs.fill(0)
probs[23] = 1
elif key == str([1,14]):
probs.fill(0)
probs[24] = 1
secondOrder[key] = probs
return (initProb, firstOrder, secondOrder, 3)
def _generateModel2(numCategories, alpha=0.25):
""" Generate the initial, first order, and second order transition
probabilities for 'model2'. For this model, we generate peaked random
transitions using dirichlet distributions.
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
alpha: Determines the peakedness of the transitions. Low alpha
values (alpha=0.01) place the entire weight on a single
transition. Large alpha values (alpha=10) distribute the
evenly among all transitions. Intermediate values (alpha=0.5)
give a moderately peaked transitions.
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table. None means infinite
length.
Here is an example of some return values for an intermediate alpha value:
initProb: [0.33, 0.33, 0.33]
firstOrder: {'[0]': [0.2, 0.7, 0.1],
'[1]': [0.1, 0.1, 0.8],
'[2]': [0.1, 0.0, 0.9]}
secondOrder: {'[0,0]': [0.1, 0.0, 0.9],
'[0,1]': [0.0, 0.2, 0.8],
'[0,2]': [0.1, 0.8, 0.1],
...
'[2,2]': [0.8, 0.2, 0.0]}
"""
# --------------------------------------------------------------------
# All initial probabilities, are equally likely
initProb = numpy.ones(numCategories)/numCategories
def generatePeakedProbabilities(lastIdx,
numCategories=numCategories,
alpha=alpha):
probs = numpy.random.dirichlet(alpha=[alpha]*numCategories)
probs[lastIdx] = 0.0
probs /= probs.sum()
return probs
# --------------------------------------------------------------------
# 1st order transitions
firstOrder = dict()
for catIdx in range(numCategories):
key = str([catIdx])
probs = generatePeakedProbabilities(catIdx)
firstOrder[key] = probs
# --------------------------------------------------------------------
# 2nd order transitions
secondOrder = dict()
for firstIdx in range(numCategories):
for secondIdx in range(numCategories):
key = str([firstIdx, secondIdx])
probs = generatePeakedProbabilities(secondIdx)
secondOrder[key] = probs
return (initProb, firstOrder, secondOrder, None)
def _generateFile(filename, numRecords, categoryList, initProb,
                  firstOrderProb, secondOrderProb, seqLen, numNoise=0, resetsEvery=None):
    """ Generate a set of records reflecting a set of probabilities.
    Parameters:
    ----------------------------------------------------------------
    filename:       name of .csv file to generate
    numRecords:     number of records to generate
    categoryList:   list of category names
    initProb:       Initial probability for each category. This is a vector
                      of length len(categoryList).
    firstOrderProb: A dictionary of the 1st order probabilities. The key
                      is the 1st element of the sequence, the value is
                      the probability of each 2nd element given the first.
    secondOrderProb: A dictionary of the 2nd order probabilities. The key
                      is the first 2 elements of the sequence, the value is
                      the probability of each possible 3rd element given the
                      first two.
    seqLen:         Desired length of each sequence. The 1st element will
                      be generated using the initProb, the 2nd element by the
                      firstOrder table, and the 3rd and all successive
                      elements by the secondOrder table. None means infinite
                      length.
    numNoise:       Number of noise elements to place between each
                      sequence. The noise elements are evenly distributed from
                      all categories.
    resetsEvery:    If not None, generate a reset every N records
    Here is an example of some parameters:
    categoryList:   ['cat1', 'cat2', 'cat3']
    initProb:       [0.7, 0.2, 0.1]
    firstOrderProb: {'[0]': [0.3, 0.3, 0.4],
                     '[1]': [0.3, 0.3, 0.4],
                     '[2]': [0.3, 0.3, 0.4]}
    secondOrderProb: {'[0,0]': [0.3, 0.3, 0.4],
                      '[0,1]': [0.3, 0.3, 0.4],
                      '[0,2]': [0.3, 0.3, 0.4],
                      '[1,0]': [0.3, 0.3, 0.4],
                      '[1,1]': [0.3, 0.3, 0.4],
                      '[1,2]': [0.3, 0.3, 0.4],
                      '[2,0]': [0.3, 0.3, 0.4],
                      '[2,1]': [0.3, 0.3, 0.4],
                      '[2,2]': [0.3, 0.3, 0.4]}
    """
    # NOTE(review): Python 2 code (print statement, iteritems, xrange).
    # FileRecordStream is a project-local class — records are written as
    # [reset_flag, category_name] rows; verify its field spec against the
    # project's FileRecordStream docs if the schema changes.

    # Create the file
    print "Creating %s..." % (filename)
    fields = [('reset', 'int', 'R'), ('name', 'string', '')]
    outFile = FileRecordStream(filename, write=True, fields=fields)

    # --------------------------------------------------------------------
    # Convert the probabilitie tables into cumulative probabilities
    # (so a uniform random draw can be mapped to a category with
    # numpy.searchsorted below).
    initCumProb = initProb.cumsum()

    firstOrderCumProb = dict()
    for (key, value) in firstOrderProb.iteritems():
        firstOrderCumProb[key] = value.cumsum()

    secondOrderCumProb = dict()
    for (key, value) in secondOrderProb.iteritems():
        secondOrderCumProb[key] = value.cumsum()

    # --------------------------------------------------------------------
    # Write out the sequences
    elementsInSeq = []          # elements emitted so far in the current sequence
    numElementsSinceReset = 0   # counts records since the last reset flag
    maxCatIdx = len(categoryList) - 1
    for i in xrange(numRecords):
        # Generate a reset?  Only the first record after a reset gets flag 1.
        if numElementsSinceReset == 0:
            reset = 1
        else:
            reset = 0

        # Pick the next element, based on how are we are into the 2nd order
        # sequence: initProb for the 1st element, firstOrder for the 2nd,
        # secondOrder afterwards, and uniform noise past seqLen-numNoise.
        rand = numpy.random.rand()
        if len(elementsInSeq) == 0:
            catIdx = numpy.searchsorted(initCumProb, rand)
        elif len(elementsInSeq) == 1:
            catIdx = numpy.searchsorted(firstOrderCumProb[str(elementsInSeq)], rand)
        elif (len(elementsInSeq) >=2) and \
                  (seqLen is None or len(elementsInSeq) < seqLen-numNoise):
            catIdx = numpy.searchsorted(secondOrderCumProb[str(elementsInSeq[-2:])], rand)
        else:   # random "noise"
            catIdx = numpy.random.randint(len(categoryList))

        # Write out the record.  min() guards against searchsorted returning
        # len(categoryList) when rand lands past the last cumulative bin.
        catIdx = min(maxCatIdx, catIdx)
        outFile.appendRecord([reset, categoryList[catIdx]])
        #print categoryList[catIdx]

        # ------------------------------------------------------------
        # Increment counters
        elementsInSeq.append(catIdx)
        numElementsSinceReset += 1

        # Generate another reset?
        if resetsEvery is not None and numElementsSinceReset == resetsEvery:
            numElementsSinceReset = 0
            elementsInSeq = []

        # Start another 2nd order sequence?
        if seqLen is not None and (len(elementsInSeq) == seqLen+numNoise):
            elementsInSeq = []

    outFile.close()
def generate(model, filenameTrain, filenameTest, filenameCategory,
             numCategories=178, numTrainingRecords=1000,
             numTestingRecords=100, numNoise=5, resetsEvery=None):
    """ Generate the category file plus training and testing datasets for the
    requested model.

    Parameters:
    ----------------------------------------------------------------------
    model:              One of 'model0', 'model1' or 'model2'; selects which
                        _generateModelN() builds the transition tables.
    filenameTrain:      Path of the training .csv file to create.
    filenameTest:       Path of the testing .csv file to create.
    filenameCategory:   Path of the category-name file to create (one name
                        per line: 'cat1' .. 'catN').
    numCategories:      Number of distinct categories.
    numTrainingRecords: Number of records in the training file.
    numTestingRecords:  Number of records in the testing file.
    numNoise:           Number of random noise elements between sequences.
    resetsEvery:        If not None, emit a reset flag every N records.

    Raises RuntimeError for an unknown model name.
    """
    # Fixed seed so the generated datasets are reproducible across runs.
    numpy.random.seed(41)

    # =====================================================================
    # Create our categories and category file.
    print "Creating %s..." % (filenameCategory)
    categoryList = ['cat%d' % i for i in range(1, numCategories+1)]
    categoryFile = open(filenameCategory, 'w')
    for category in categoryList:
        categoryFile.write(category+'\n')
    categoryFile.close()

    # ====================================================================
    # Generate the model's probability tables.
    if model == 'model0':
        (initProb, firstOrderProb, secondOrderProb, seqLen) = \
                                    _generateModel0(numCategories)
    elif model == 'model1':
        (initProb, firstOrderProb, secondOrderProb, seqLen) = \
                                    _generateModel1(numCategories)
    elif model == 'model2':
        (initProb, firstOrderProb, secondOrderProb, seqLen) = \
                                    _generateModel2(numCategories)
    else:
        raise RuntimeError("Unsupported model")

    # ====================================================================
    # Generate the training and testing files from the same tables.
    _generateFile(filename=filenameTrain, numRecords=numTrainingRecords,
                  categoryList=categoryList, initProb=initProb,
                  firstOrderProb=firstOrderProb, secondOrderProb=secondOrderProb,
                  seqLen=seqLen, numNoise=numNoise, resetsEvery=resetsEvery)

    _generateFile(filename=filenameTest, numRecords=numTestingRecords,
                  categoryList=categoryList, initProb=initProb,
                  firstOrderProb=firstOrderProb, secondOrderProb=secondOrderProb,
                  seqLen=seqLen, numNoise=numNoise, resetsEvery=resetsEvery)
|
titilambert/alignak
|
refs/heads/develop
|
alignak/objects/macromodulation.py
|
2
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# aviau, alexandre.viau@savoirfairelinux.com
# Jean Gabes, naparuba@gmail.com
# Sebastien Coavoux, s.coavoux@free.fr
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This module provide MacroModulation and MacroModulations classes used to change critical and
warning level in some periods (like the night)
"""
import time
from alignak.objects.item import Item, Items
from alignak.property import StringProp
from alignak.util import to_name_if_possible
from alignak.log import logger
class MacroModulation(Item):
    """
    A MacroModulation changes macro values (typically warning / critical
    thresholds) while a given time period is active, e.g. at night.
    """
    _id = 1  # zero is always special in database, so we do not take risk here
    my_type = 'macromodulation'

    properties = Item.properties.copy()
    properties.update({
        'macromodulation_name':
            StringProp(fill_brok=['full_status']),
        'modulation_period':
            StringProp(brok_transformation=to_name_if_possible,
                       fill_brok=['full_status']),
    })

    running_properties = Item.running_properties.copy()

    # Properties excluded from the generic "required" check in is_correct().
    _special_properties = ('modulation_period',)

    macros = {}

    def get_name(self):
        """
        Accessor for the configured macromodulation name.

        :return: the macromodulation name string
        :rtype: str
        """
        return self.macromodulation_name

    def is_active(self):
        """
        Tell whether the modulation applies right now.

        :return: True if no period is set (24x7) or the current time falls
                 inside the configured period, otherwise False
        :rtype: bool
        """
        now = int(time.time())
        # An unset period means the modulation is always active.
        if not self.modulation_period:
            return True
        if self.modulation_period.is_time_valid(now):
            return True
        return False

    def is_correct(self):
        """
        Validate the object: replay parsing errors and check that every
        required, non-special property has been set.

        :return: True when valid, otherwise False
        :rtype: bool
        """
        state = True
        cls = self.__class__

        # Raised all previously saw errors like unknown commands or timeperiods
        if self.configuration_errors != []:
            state = False
            for err in self.configuration_errors:
                logger.error("[item::%s] %s", self.get_name(), err)

        for prop, entry in cls.properties.items():
            if prop in cls._special_properties:
                continue
            if not hasattr(self, prop) and entry.required:
                logger.error(
                    "[macromodulation::%s] %s property not set", self.get_name(), prop
                )
                state = False  # Bad boy...

        # Ok just put None as modulation_period, means 24x7
        if not hasattr(self, 'modulation_period'):
            self.modulation_period = None

        return state
class MacroModulations(Items):
    """
    Container managing every MacroModulation object of the configuration.
    """
    name_property = "macromodulation_name"
    inner_class = MacroModulation

    def linkify(self, timeperiods):
        """
        Replace each modulation's 'modulation_period' name with the actual
        Timeperiod object it refers to.

        :param timeperiods: Timeperiod object
        :type timeperiods: object
        :return: None
        """
        self.linkify_with_timeperiods(timeperiods, 'modulation_period')
|
nazo/ansible
|
refs/heads/devel
|
lib/ansible/modules/utilities/logic/async_wrapper.py
|
89
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
try:
import json
except ImportError:
import simplejson as json
import shlex
import shutil
import os
import subprocess
import sys
import traceback
import signal
import time
import syslog
PY3 = sys.version_info[0] == 3
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
def notice(msg):
    # Mirror progress information to syslog: once the wrapper daemonizes,
    # its stdio is redirected to /dev/null, so syslog is the only place
    # these messages remain observable.
    syslog.syslog(syslog.LOG_NOTICE, msg)
def daemonize_self():
    """
    Detach the current process from its controlling terminal and parent
    using the classic double-fork technique, then redirect stdio to
    /dev/null. Exits (sys.exit) in the intermediate parents; only the
    final daemon process returns from this function.
    """
    # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError:
        e = sys.exc_info()[1]
        sys.exit("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))

    # decouple from parent environment (does not chdir / to keep the directory context the same as for non async tasks)
    os.setsid()
    os.umask(int('022', 8))

    # do second fork: ensures the daemon cannot reacquire a controlling
    # terminal (it is no longer a session leader).
    try:
        pid = os.fork()
        if pid > 0:
            # print "Daemon PID %d" % pid
            sys.exit(0)
    except OSError:
        e = sys.exc_info()[1]
        sys.exit("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))

    # Point stdin/stdout/stderr at /dev/null so stray writes cannot block
    # or corrupt the module's JSON protocol.
    dev_null = open('/dev/null', 'w')
    os.dup2(dev_null.fileno(), sys.stdin.fileno())
    os.dup2(dev_null.fileno(), sys.stdout.fileno())
    os.dup2(dev_null.fileno(), sys.stderr.fileno())
# NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there.
# FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function.
def _filter_non_json_lines(data):
'''
Used to filter unrelated output around module JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
Filters leading lines before first line-starting occurrence of '{' or '[', and filter all
trailing lines after matching close character (working from the bottom of output).
'''
warnings = []
# Filter initial junk
lines = data.splitlines()
for start, line in enumerate(lines):
line = line.strip()
if line.startswith(u'{'):
endchar = u'}'
break
elif line.startswith(u'['):
endchar = u']'
break
else:
raise ValueError('No start of json char found')
# Filter trailing junk
lines = lines[start:]
for reverse_end_offset, line in enumerate(reversed(lines)):
if line.strip().endswith(endchar):
break
else:
raise ValueError('No end of json char found')
if reverse_end_offset > 0:
# Trailing junk is uncommon and can point to things the user might
# want to change. So print a warning if we find any
trailing_junk = lines[len(lines) - reverse_end_offset:]
warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
lines = lines[:(len(lines) - reverse_end_offset)]
return ('\n'.join(lines), warnings)
def _get_interpreter(module_path):
module_fd = open(module_path, 'rb')
try:
head = module_fd.read(1024)
if head[0:2] != '#!':
return None
return head[2:head.index('\n')].strip().split(' ')
finally:
module_fd.close()
def _run_module(wrapped_cmd, jid, job_path):
    """
    Execute the wrapped module command and record its result in job_path.

    Status files are written atomically: content goes to <job_path>.tmp and
    is then os.rename()d over job_path, so readers never observe a partial
    file. A 'started' record is published first, then the final result
    (module JSON, merged warnings, stderr) or a failure record on error.
    """
    tmp_job_path = job_path + ".tmp"
    jobfile = open(tmp_job_path, "w")
    # Publish the "started" status immediately so pollers can see the job.
    jobfile.write(json.dumps({ "started" : 1, "finished" : 0, "ansible_job_id" : jid }))
    jobfile.close()
    os.rename(tmp_job_path, job_path)
    # Re-open the tmp file: the final result is staged here and renamed
    # over job_path at the very end of this function.
    jobfile = open(tmp_job_path, "w")

    result = {}
    outdata = ''
    filtered_outdata = ''
    stderr = ''
    try:
        cmd = shlex.split(wrapped_cmd)

        # call the module interpreter directly (for non-binary modules)
        # this permits use of a script for an interpreter on non-Linux platforms
        interpreter = _get_interpreter(cmd[0])
        if interpreter:
            cmd = interpreter + cmd

        script = subprocess.Popen(cmd, shell=False, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (outdata, stderr) = script.communicate()
        if PY3:
            # surrogateescape keeps undecodable bytes round-trippable.
            outdata = outdata.decode('utf-8', 'surrogateescape')
            stderr = stderr.decode('utf-8', 'surrogateescape')

        # Strip any non-JSON noise around the module's output, then parse.
        (filtered_outdata, json_warnings) = _filter_non_json_lines(outdata)

        result = json.loads(filtered_outdata)

        if json_warnings:
            # merge JSON junk warnings with any existing module warnings
            module_warnings = result.get('warnings', [])
            if not isinstance(module_warnings, list):
                module_warnings = [module_warnings]
            module_warnings.extend(json_warnings)
            result['warnings'] = module_warnings

        if stderr:
            result['stderr'] = stderr
        jobfile.write(json.dumps(result))

    except (OSError, IOError):
        # Failure launching/reading the subprocess (e.g. missing binary).
        e = sys.exc_info()[1]
        result = {
            "failed": 1,
            "cmd" : wrapped_cmd,
            "msg": str(e),
            "outdata": outdata, # temporary notice only
            "stderr": stderr
        }
        result['ansible_job_id'] = jid
        jobfile.write(json.dumps(result))

    except (ValueError, Exception):
        # NOTE(review): ValueError is redundant here — Exception already
        # covers it. This branch catches JSON/parse errors and anything
        # else, and embeds the traceback in the result.
        result = {
            "failed" : 1,
            "cmd" : wrapped_cmd,
            "data" : outdata, # temporary notice only
            "stderr": stderr,
            "msg" : traceback.format_exc()
        }
        result['ansible_job_id'] = jid
        jobfile.write(json.dumps(result))

    jobfile.close()
    # Atomically publish the final result over the "started" record.
    os.rename(tmp_job_path, job_path)
####################
##      main      ##
####################
if __name__ == '__main__':
    # CLI contract: async_wrapper <jid> <time_limit> <modulescript> <argsfile>
    # [-preserve_tmp]. Invoked by Ansible itself, not by humans.
    if len(sys.argv) < 5:
        print(json.dumps({
            "failed" : True,
            "msg"    : "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile> [-preserve_tmp]  "
                       "Humans, do not call directly!"
        }))
        sys.exit(1)

    # Append our PID to the jid so concurrent invocations get unique ids.
    jid = "%s.%d" % (sys.argv[1], os.getpid())
    time_limit = sys.argv[2]
    wrapped_module = sys.argv[3]
    argsfile = sys.argv[4]
    if '-tmp-' not in os.path.dirname(wrapped_module):
        # The module does not live in an ansible temp dir; never delete its
        # directory (the rmtree calls below remove dirname(wrapped_module)).
        preserve_tmp = True
    elif len(sys.argv) > 5:
        preserve_tmp = sys.argv[5] == '-preserve_tmp'
    else:
        preserve_tmp = False
    # consider underscore as no argsfile so we can support passing of additional positional parameters
    if argsfile != '_':
        cmd = "%s %s" % (wrapped_module, argsfile)
    else:
        cmd = wrapped_module
    step = 5  # polling interval (seconds) for the watcher loop below

    # setup job output directory
    jobdir = os.path.expanduser("~/.ansible_async")
    job_path = os.path.join(jobdir, jid)

    if not os.path.exists(jobdir):
        try:
            os.makedirs(jobdir)
        except:
            # NOTE(review): bare except, and execution continues after
            # reporting — the fork below will then fail on job_path writes.
            print(json.dumps({
                "failed" : 1,
                "msg" : "could not create: %s" % jobdir
            }))
    # immediately exit this process, leaving an orphaned process
    # running which immediately forks a supervisory timing process

    try:
        pid = os.fork()
        if pid:
            # Notify the overlord that the async process started

            # we need to not return immediately such that the launched command has an attempt
            # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
            # this probably could be done with some IPC later.  Modules should always read
            # the argsfile at the very first start of their execution anyway
            notice("Return async_wrapper task started.")
            print(json.dumps({ "started" : 1, "finished" : 0, "ansible_job_id" : jid, "results_file" : job_path,
                               "_ansible_suppress_tmpdir_delete": not preserve_tmp}))
            sys.stdout.flush()
            time.sleep(1)
            sys.exit(0)
        else:
            # The actual wrapper process

            # Daemonize, so we keep on running
            daemonize_self()

            # we are now daemonized, create a supervisory process
            notice("Starting module and watcher")

            sub_pid = os.fork()
            if sub_pid:
                # the parent stops the process after the time limit
                remaining = int(time_limit)

                # set the child process group id to kill all children
                os.setpgid(sub_pid, sub_pid)

                notice("Start watching %s (%s)"%(sub_pid, remaining))
                time.sleep(step)
                # Poll until the child exits or the time limit is reached.
                while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
                    notice("%s still running (%s)"%(sub_pid, remaining))
                    time.sleep(step)
                    remaining = remaining - step
                    if remaining <= 0:
                        notice("Now killing %s"%(sub_pid))
                        # Kill the whole process group so the module's own
                        # children die with it.
                        os.killpg(sub_pid, signal.SIGKILL)
                        notice("Sent kill to group %s"%sub_pid)
                        time.sleep(1)
                        if not preserve_tmp:
                            shutil.rmtree(os.path.dirname(wrapped_module), True)
                        sys.exit(0)
                notice("Done in kid B.")
                if not preserve_tmp:
                    shutil.rmtree(os.path.dirname(wrapped_module), True)
                sys.exit(0)
            else:
                # the child process runs the actual module
                notice("Start module (%s)"%os.getpid())
                _run_module(cmd, jid, job_path)
                notice("Module complete (%s)"%os.getpid())
                sys.exit(0)

    except SystemExit:
        # On python2.4, SystemExit is a subclass of Exception.
        # This block makes python2.4 behave the same as python2.5+
        raise

    except Exception:
        e = sys.exc_info()[1]
        notice("error: %s"%(e))
        print(json.dumps({
            "failed" : True,
            "msg"    : "FATAL ERROR: %s" % str(e)
        }))
        sys.exit(1)
|
cpennington/edx-platform
|
refs/heads/master
|
lms/djangoapps/certificates/tests/test_queue.py
|
4
|
# -*- coding: utf-8 -*-
"""Tests for the XQueue certificates interface. """
import json
from contextlib import contextmanager
from datetime import datetime, timedelta
import ddt
import freezegun
import pytz
import six
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from mock import Mock, patch
from opaque_keys.edx.locator import CourseLocator
from testfixtures import LogCapture
# It is really unfortunate that we are using the XQueue client
# code from the capa library. In the future, we should move this
# into a shared library. We import it here so we can mock it
# and verify that items are being correctly added to the queue
# in our `XQueueCertInterface` implementation.
from capa.xqueue_interface import XQueueInterface
from course_modes.models import CourseMode
from lms.djangoapps.certificates.models import (
CertificateStatuses,
ExampleCertificate,
ExampleCertificateSet,
GeneratedCertificate
)
from lms.djangoapps.certificates.queue import LOGGER, XQueueCertInterface
from lms.djangoapps.certificates.tests.factories import CertificateWhitelistFactory, GeneratedCertificateFactory
from lms.djangoapps.grades.tests.utils import mock_passing_grade
from lms.djangoapps.verify_student.tests.factories import SoftwareSecurePhotoVerificationFactory
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
@override_settings(CERT_QUEUE='certificates')
class XQueueCertInterfaceAddCertificateTest(ModuleStoreTestCase):
"""Test the "add to queue" operation of the XQueue interface. """
def setUp(self):
super(XQueueCertInterfaceAddCertificateTest, self).setUp()
self.user = UserFactory.create()
self.course = CourseFactory.create()
self.enrollment = CourseEnrollmentFactory(
user=self.user,
course_id=self.course.id,
is_active=True,
mode="honor",
)
self.xqueue = XQueueCertInterface()
self.user_2 = UserFactory.create()
SoftwareSecurePhotoVerificationFactory.create(user=self.user_2, status='approved')
def test_add_cert_callback_url(self):
with mock_passing_grade():
with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
mock_send.return_value = (0, None)
self.xqueue.add_cert(self.user, self.course.id)
# Verify that the task was sent to the queue with the correct callback URL
self.assertTrue(mock_send.called)
__, kwargs = mock_send.call_args_list[0]
actual_header = json.loads(kwargs['header'])
self.assertIn('https://edx.org/update_certificate?key=', actual_header['lms_callback_url'])
def test_no_create_action_in_queue_for_html_view_certs(self):
"""
Tests there is no certificate create message in the queue if generate_pdf is False
"""
with mock_passing_grade():
with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
self.xqueue.add_cert(self.user, self.course.id, generate_pdf=False)
# Verify that add_cert method does not add message to queue
self.assertFalse(mock_send.called)
certificate = GeneratedCertificate.eligible_certificates.get(user=self.user, course_id=self.course.id)
self.assertEqual(certificate.status, CertificateStatuses.downloadable)
self.assertIsNotNone(certificate.verify_uuid)
@ddt.data('honor', 'audit')
@override_settings(AUDIT_CERT_CUTOFF_DATE=datetime.now(pytz.UTC) - timedelta(days=1))
def test_add_cert_with_honor_certificates(self, mode):
"""Test certificates generations for honor and audit modes."""
template_name = 'certificate-template-{id.org}-{id.course}.pdf'.format(
id=self.course.id
)
mock_send = self.add_cert_to_queue(mode)
if CourseMode.is_eligible_for_certificate(mode):
self.assert_certificate_generated(mock_send, mode, template_name)
else:
self.assert_ineligible_certificate_generated(mock_send, mode)
@ddt.data('credit', 'verified')
def test_add_cert_with_verified_certificates(self, mode):
"""Test if enrollment mode is verified or credit along with valid
software-secure verification than verified certificate should be generated.
"""
template_name = 'certificate-template-{id.org}-{id.course}-verified.pdf'.format(
id=self.course.id
)
mock_send = self.add_cert_to_queue(mode)
self.assert_certificate_generated(mock_send, 'verified', template_name)
@ddt.data((True, CertificateStatuses.audit_passing), (False, CertificateStatuses.generating))
@ddt.unpack
@override_settings(AUDIT_CERT_CUTOFF_DATE=datetime.now(pytz.UTC) - timedelta(days=1))
def test_ineligible_cert_whitelisted(self, disable_audit_cert, status):
"""
Test that audit mode students receive a certificate if DISABLE_AUDIT_CERTIFICATES
feature is set to false
"""
# Enroll as audit
CourseEnrollmentFactory(
user=self.user_2,
course_id=self.course.id,
is_active=True,
mode='audit'
)
# Whitelist student
CertificateWhitelistFactory(course_id=self.course.id, user=self.user_2)
features = settings.FEATURES
features['DISABLE_AUDIT_CERTIFICATES'] = disable_audit_cert
with override_settings(FEATURES=features) and mock_passing_grade():
with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
mock_send.return_value = (0, None)
self.xqueue.add_cert(self.user_2, self.course.id)
certificate = GeneratedCertificate.certificate_for_student(self.user_2, self.course.id)
self.assertIsNotNone(certificate)
self.assertEqual(certificate.mode, 'audit')
self.assertEqual(certificate.status, status)
def add_cert_to_queue(self, mode):
"""
Dry method for course enrollment and adding request to
queue. Returns a mock object containing information about the
`XQueueInterface.send_to_queue` method, which can be used in other
assertions.
"""
CourseEnrollmentFactory(
user=self.user_2,
course_id=self.course.id,
is_active=True,
mode=mode,
)
with mock_passing_grade():
with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
mock_send.return_value = (0, None)
self.xqueue.add_cert(self.user_2, self.course.id)
return mock_send
def assert_certificate_generated(self, mock_send, expected_mode, expected_template_name):
"""
Assert that a certificate was generated with the correct mode and
template type.
"""
# Verify that the task was sent to the queue with the correct callback URL
self.assertTrue(mock_send.called)
__, kwargs = mock_send.call_args_list[0]
actual_header = json.loads(kwargs['header'])
self.assertIn('https://edx.org/update_certificate?key=', actual_header['lms_callback_url'])
body = json.loads(kwargs['body'])
self.assertIn(expected_template_name, body['template_pdf'])
certificate = GeneratedCertificate.eligible_certificates.get(user=self.user_2, course_id=self.course.id)
self.assertEqual(certificate.mode, expected_mode)
def assert_ineligible_certificate_generated(self, mock_send, expected_mode):
"""
Assert that an ineligible certificate was generated with the
correct mode.
"""
# Ensure the certificate was not generated
self.assertFalse(mock_send.called)
certificate = GeneratedCertificate.objects.get(
user=self.user_2,
course_id=self.course.id
)
self.assertIn(certificate.status, (CertificateStatuses.audit_passing, CertificateStatuses.audit_notpassing))
self.assertEqual(certificate.mode, expected_mode)
@ddt.data(
(CertificateStatuses.restricted, False),
(CertificateStatuses.deleting, False),
(CertificateStatuses.generating, True),
(CertificateStatuses.unavailable, True),
(CertificateStatuses.deleted, True),
(CertificateStatuses.error, True),
(CertificateStatuses.notpassing, True),
(CertificateStatuses.downloadable, True),
(CertificateStatuses.auditing, True),
)
@ddt.unpack
def test_add_cert_statuses(self, status, should_generate):
"""
Test that certificates can or cannot be generated with the given
certificate status.
"""
with patch(
'lms.djangoapps.certificates.queue.certificate_status_for_student',
Mock(return_value={'status': status})
):
mock_send = self.add_cert_to_queue('verified')
if should_generate:
self.assertTrue(mock_send.called)
else:
self.assertFalse(mock_send.called)
    @ddt.data(
        # (initial status, cert creation offset from now, letter grade, expected final status)
        # Eligible and should stay that way
        (
            CertificateStatuses.downloadable,
            timedelta(days=-2),
            'Pass',
            CertificateStatuses.generating
        ),
        # Ensure that certs in the wrong state can be fixed by regeneration
        (
            CertificateStatuses.downloadable,
            timedelta(hours=-1),
            'Pass',
            CertificateStatuses.audit_passing
        ),
        # Ineligible and should stay that way
        (
            CertificateStatuses.audit_passing,
            timedelta(hours=-1),
            'Pass',
            CertificateStatuses.audit_passing
        ),
        # As above
        (
            CertificateStatuses.audit_notpassing,
            timedelta(hours=-1),
            'Pass',
            CertificateStatuses.audit_passing
        ),
        # As above
        (
            CertificateStatuses.audit_notpassing,
            timedelta(hours=-1),
            None,
            CertificateStatuses.audit_notpassing
        ),
    )
    @ddt.unpack
    @override_settings(AUDIT_CERT_CUTOFF_DATE=datetime.now(pytz.UTC) - timedelta(days=1))
    def test_regen_audit_certs_eligibility(self, status, created_delta, grade, expected_status):
        """
        Test that existing audit certificates remain eligible even if cert
        generation is re-run.
        """
        # Create an existing audit enrollment and certificate
        CourseEnrollmentFactory(
            user=self.user_2,
            course_id=self.course.id,
            is_active=True,
            mode=CourseMode.AUDIT,
        )
        created_date = datetime.now(pytz.UTC) + created_delta
        # Freeze time so the certificate's created date lands before or after
        # AUDIT_CERT_CUTOFF_DATE as dictated by created_delta.
        with freezegun.freeze_time(created_date):
            GeneratedCertificateFactory(
                user=self.user_2,
                course_id=self.course.id,
                grade='1.0',
                status=status,
                mode=GeneratedCertificate.MODES.audit,
            )

        # Run grading/cert generation again
        with mock_passing_grade(letter_grade=grade):
            with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
                mock_send.return_value = (0, None)
                self.xqueue.add_cert(self.user_2, self.course.id)

        self.assertEqual(
            GeneratedCertificate.objects.get(user=self.user_2, course_id=self.course.id).status,
            expected_status
        )
    def test_regen_cert_with_pdf_certificate(self):
        """
        Test that regenerating a PDF certificate logs a warning message and
        the certificate status remains unchanged.
        """
        download_url = 'http://www.example.com/certificate.pdf'
        # Create an existing verified enrollment and a legacy downloadable
        # PDF certificate for the student.
        CourseEnrollmentFactory(
            user=self.user_2,
            course_id=self.course.id,
            is_active=True,
            mode=CourseMode.VERIFIED,
        )
        GeneratedCertificateFactory(
            user=self.user_2,
            course_id=self.course.id,
            grade='1.0',
            status=CertificateStatuses.downloadable,
            mode=GeneratedCertificate.MODES.verified,
            download_url=download_url
        )

        self._assert_pdf_cert_generation_dicontinued_logs(download_url)
def test_add_cert_with_existing_pdf_certificate(self):
"""
Test that add certifcate for existing PDF certificate log warning
message and certificate status remains unchanged.
"""
download_url = 'http://www.example.com/certificate.pdf'
# Create an existing verifed enrollment and certificate
CourseEnrollmentFactory(
user=self.user_2,
course_id=self.course.id,
is_active=True,
mode=CourseMode.VERIFIED,
)
GeneratedCertificateFactory(
user=self.user_2,
course_id=self.course.id,
grade='1.0',
status=CertificateStatuses.downloadable,
mode=GeneratedCertificate.MODES.verified,
download_url=download_url
)
self._assert_pdf_cert_generation_dicontinued_logs(download_url, add_cert=True)
    def _assert_pdf_cert_generation_dicontinued_logs(self, download_url, add_cert=False):
        """Assert PDF certificate generation discontinued logs.

        NOTE(review): the method name misspells 'discontinued'; kept as-is
        because the tests in this class call it under the current spelling.
        """
        with LogCapture(LOGGER.name) as log:
            if add_cert:
                self.xqueue.add_cert(self.user_2, self.course.id)
            else:
                self.xqueue.regen_cert(self.user_2, self.course.id)

            # The warning must identify the student, course, status and URL.
            log.check_present(
                (
                    LOGGER.name,
                    'WARNING',
                    (
                        u"PDF certificate generation discontinued, canceling "
                        u"PDF certificate generation for student {student_id} "
                        u"in course '{course_id}' "
                        u"with status '{status}' "
                        u"and download_url '{download_url}'."
                    ).format(
                        student_id=self.user_2.id,
                        course_id=six.text_type(self.course.id),
                        status=CertificateStatuses.downloadable,
                        download_url=download_url
                    )
                )
            )
@override_settings(CERT_QUEUE='certificates')
class XQueueCertInterfaceExampleCertificateTest(TestCase):
    """Tests for the XQueue interface for certificate generation. """

    COURSE_KEY = CourseLocator(org='test', course='test', run='test')

    TEMPLATE = 'test.pdf'
    DESCRIPTION = 'test'
    ERROR_MSG = 'Kaboom!'

    def setUp(self):
        super(XQueueCertInterfaceExampleCertificateTest, self).setUp()
        self.xqueue = XQueueCertInterface()

    def test_add_example_cert(self):
        cert = self._create_example_cert()
        with self._mock_xqueue() as mock_send:
            self.xqueue.add_example_cert(cert)

        # Verify that the correct payload was sent to the XQueue
        self._assert_queue_task(mock_send, cert)

        # Verify the certificate status
        self.assertEqual(cert.status, ExampleCertificate.STATUS_STARTED)

    def test_add_example_cert_error(self):
        cert = self._create_example_cert()
        with self._mock_xqueue(success=False):
            self.xqueue.add_example_cert(cert)

        # Verify the error status of the certificate
        self.assertEqual(cert.status, ExampleCertificate.STATUS_ERROR)
        self.assertIn(self.ERROR_MSG, cert.error_reason)

    def _create_example_cert(self):
        """Create an example certificate. """
        cert_set = ExampleCertificateSet.objects.create(course_key=self.COURSE_KEY)
        return ExampleCertificate.objects.create(
            example_cert_set=cert_set,
            description=self.DESCRIPTION,
            template=self.TEMPLATE
        )

    @contextmanager
    def _mock_xqueue(self, success=True):
        """Mock the XQueue method for sending a task to the queue. """
        with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
            # send_to_queue returns (error_code, error_msg); 0/None is success.
            mock_send.return_value = (0, None) if success else (1, self.ERROR_MSG)
            yield mock_send

    def _assert_queue_task(self, mock_send, cert):
        """Check that the task was added to the queue. """
        expected_header = {
            'lms_key': cert.access_key,
            'lms_callback_url': 'https://edx.org/update_example_certificate?key={key}'.format(key=cert.uuid),
            'queue_name': 'certificates'
        }
        expected_body = {
            'action': 'create',
            'username': cert.uuid,
            'name': u'John Doë',
            'course_id': six.text_type(self.COURSE_KEY),
            'template_pdf': 'test.pdf',
            'example_certificate': True
        }
        self.assertTrue(mock_send.called)

        # Header and body are JSON-serialized before being queued.
        __, kwargs = mock_send.call_args_list[0]
        actual_header = json.loads(kwargs['header'])
        actual_body = json.loads(kwargs['body'])

        self.assertEqual(expected_header, actual_header)
        self.assertEqual(expected_body, actual_body)
|
mewtaylor/django
|
refs/heads/master
|
tests/m2o_recursive/models.py
|
282
|
"""
Relating an object to itself, many-to-one
To define a many-to-one relationship between a model and itself, use
``ForeignKey('self', ...)``.
In this example, a ``Category`` is related to itself. That is, each
``Category`` has a parent ``Category``.
Set ``related_name`` to designate what the reverse relationship is called.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Category(models.Model):
    # A self-referential category tree: each category may have a parent.
    name = models.CharField(max_length=20)
    # SET_NULL on delete keeps children when the parent disappears;
    # related_name names the reverse accessor (category.child_set).
    parent = models.ForeignKey('self', models.SET_NULL, blank=True, null=True, related_name='child_set')

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Person(models.Model):
    full_name = models.CharField(max_length=20)
    # Two independent self-referential FKs; distinct related_names keep the
    # reverse accessors from clashing.
    mother = models.ForeignKey('self', models.SET_NULL, null=True, related_name='mothers_child_set')
    father = models.ForeignKey('self', models.SET_NULL, null=True, related_name='fathers_child_set')

    def __str__(self):
        return self.full_name
|
kalahbrown/HueBigSQL
|
refs/heads/master
|
desktop/core/ext-py/pyformance-0.3.2/setup.py
|
31
|
import os
import functools
import platform

from setuptools import setup, find_packages

# Helper to build paths inside the package directory.
_IN_PACKAGE_DIR = functools.partial(os.path.join, "pyformance")

# Execute __version__.py to pick up ``__version__`` without importing the
# package (which may have unmet runtime dependencies at install time).
with open(_IN_PACKAGE_DIR("__version__.py")) as version_file:
    exec(version_file.read())

install_requires = []  # optional: ["blinker==1.2"]
# BUGFIX: compare numeric version tuples, not strings -- lexicographic string
# comparison misorders versions (e.g. "2.10" < "2.7" is True).
if tuple(int(part) for part in platform.python_version_tuple()[:2]) < (2, 7):
    install_requires.append('unittest2')

setup(name="pyformance",
      classifiers=[
          "Development Status :: 4 - Beta",
          "Intended Audience :: Developers",
          "Programming Language :: Python :: 2.7",
      ],
      description="Performance metrics, based on Coda Hale's Yammer metrics",
      license="Apache 2.0",
      author="Omer Getrel",
      author_email="omer.gertel@gmail.com",
      version=__version__,
      packages=find_packages(exclude=["tests"]),
      data_files=[],
      install_requires=install_requires,
      scripts=[],
      )
|
Thortoise/Super-Snake
|
refs/heads/master
|
Blender/animation_nodes-master/nodes/matrix/matrix_math.py
|
1
|
import bpy
from bpy.props import *
from ... events import executionCodeChanged
from ... base_types.node import AnimationNode
# (identifier, UI name, description) triples for the operation EnumProperty.
operationItems = [("MULTIPLY", "Multiply", "")]

class MatrixMathNode(bpy.types.Node, AnimationNode):
    bl_idname = "an_MatrixMathNode"
    bl_label = "Matrix Math"

    # Selected operation; changing it regenerates the node's execution code.
    operation = EnumProperty(name = "Operation", items = operationItems,
        update = executionCodeChanged)

    def create(self):
        """Declare the node's input/output matrix sockets."""
        self.newInput("Matrix", "A", "a")
        self.newInput("Matrix", "B", "b")
        self.newOutput("Matrix", "Result", "result")

    def draw(self, layout):
        """Draw the operation selector in the node UI."""
        layout.prop(self, "operation", text = "")

    def getExecutionCode(self):
        """Return the Python statement implementing the chosen operation."""
        if self.operation == "MULTIPLY":
            return "result = a * b"
|
darina/omim
|
refs/heads/master
|
tools/python/maps_generator/maps_generator.py
|
4
|
import logging
from typing import AnyStr
from typing import Iterable
from typing import Optional
from maps_generator.generator import stages_declaration as sd
from maps_generator.generator.env import Env
from maps_generator.generator.generation import Generation
from .generator.stages import Stage
logger = logging.getLogger("maps_generator")
def run_generation(
    env: Env,
    stages: Iterable[Stage],
    from_stage: Optional[AnyStr] = None,
    build_lock: bool = True,
):
    """Build a Generation from *stages* and run it.

    :param env: generation environment (paths, settings, ...).
    :param stages: stages to execute, in order.
    :param from_stage: optional stage name to resume from.
    :param build_lock: passed through to Generation.
    """
    generation = Generation(env, build_lock)
    for s in stages:
        generation.add_stage(s)

    generation.run(from_stage)
def generate_maps(env: Env, from_stage: Optional[AnyStr] = None):
    """Runs maps generation."""
    # Full pipeline: planet download/update, coastlines, preprocessing,
    # feature extraction, mwm building, metadata and cleanup.
    stages = (
        sd.StageDownloadAndConvertPlanet(),
        sd.StageUpdatePlanet(),
        sd.StageCoastline(),
        sd.StagePreprocess(),
        sd.StageFeatures(),
        sd.StageDownloadDescriptions(),
        sd.StageMwm(),
        sd.StageCountriesTxt(),
        sd.StageExternalResources(),
        sd.StageLocalAds(),
        sd.StageStatistics(),
        sd.StageCleanup(),
    )
    run_generation(env, stages, from_stage)
def generate_coasts(env: Env, from_stage: Optional[AnyStr] = None):
    """Runs coasts generation."""
    # Coastlines only: fetch/update the planet, build coastline, clean up.
    run_generation(
        env,
        (
            sd.StageDownloadAndConvertPlanet(),
            sd.StageUpdatePlanet(),
            sd.StageCoastline(use_old_if_fail=False),
            sd.StageCleanup(),
        ),
        from_stage,
    )
|
Scille/umongo
|
refs/heads/master
|
umongo/template.py
|
1
|
from .abstract import BaseMarshmallowSchema
class MetaTemplate(type):
    """Metaclass for templates.

    Allows implementation classes to be used as base classes by swapping
    them back for their original templates at class-creation time.
    """

    def __new__(cls, name, bases, nmspc):
        # If user has passed parent documents as implementation, we need
        # to retrieve the original templates
        cooked_bases = []
        for base in bases:
            if issubclass(base, Implementation):
                base = base.opts.template
            cooked_bases.append(base)
        return type.__new__(cls, name, tuple(cooked_bases), nmspc)

    def __repr__(cls):
        return "<Template class '%s.%s'>" % (cls.__module__, cls.__name__)
class Template(metaclass=MetaTemplate):
    """
    Base class to represent a template.
    """

    # Default marshmallow schema base class used when building implementations.
    MA_BASE_SCHEMA_CLS = BaseMarshmallowSchema

    def __init__(self, *args, **kwargs):
        # Templates are never instantiated directly; only the implementation
        # returned by instance.register is usable.
        raise NotImplementedError('Cannot instantiate a template, '
                                  'use instance.register result instead.')
class MetaImplementation(MetaTemplate):
    """Metaclass for implementations.

    Subclassing an implementation defines a *new template*, so class
    creation falls back to MetaTemplate unless the builder has already
    attached an ``opts`` attribute.
    """

    def __new__(cls, name, bases, nmspc):
        # `opts` is only defined by the builder to implement a template.
        # If this field is missing, the user is subclassing an implementation
        # to define a new type of document, thus we should construct a template class.
        if 'opts' not in nmspc:
            # Inheritance to avoid metaclass conflicts
            return super().__new__(cls, name, bases, nmspc)
        return type.__new__(cls, name, bases, nmspc)

    def __repr__(cls):
        return "<Implementation class '%s.%s'>" % (cls.__module__, cls.__name__)
class Implementation(metaclass=MetaImplementation):
    """
    Base class to represent an implementation.
    """

    @property
    def opts(self):
        "An implementation must provide its configuration through this attribute."
        raise NotImplementedError()
def get_template(template_or_implementation):
    """Return the template class behind *template_or_implementation*."""
    cls = template_or_implementation
    if issubclass(cls, Implementation):
        # An implementation keeps a reference to its original template.
        return cls.opts.template
    assert issubclass(cls, Template)
    return cls
|
mahak/nova
|
refs/heads/master
|
nova/tests/unit/api/openstack/compute/test_console_output.py
|
3
|
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import string
import mock
import webob
from nova.api.openstack.compute import console_output \
as console_output_v21
from nova.compute import api as compute_api
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
def fake_get_console_output(self, _context, _instance, tail_length):
    """Fake console output: five numbered lines, optionally tail-truncated.

    tail_length of None returns everything, 0 returns nothing, otherwise
    the last int(tail_length) lines are returned.
    """
    lines = [str(i) for i in range(5)]
    if tail_length == 0:
        lines = []
    elif tail_length is not None:
        lines = lines[-int(tail_length):]
    return '\n'.join(lines)
def fake_get_console_output_not_ready(self, _context, _instance, tail_length):
    # Simulates an instance that is not yet ready to serve console output.
    raise exception.InstanceNotReady(instance_id=_instance["uuid"])
def fake_get_console_output_all_characters(self, _ctx, _instance, _tail_len):
    """Fake console output containing every printable character."""
    return string.printable
def fake_get(self, context, instance_uuid, expected_attrs=None,
             cell_down_support=False):
    """Stub for compute API ``get``: return a fake instance with that uuid."""
    return fake_instance.fake_instance_obj(context, **{'uuid': instance_uuid})
def fake_get_not_found(*args, **kwargs):
    # Simulates looking up an instance that does not exist.
    raise exception.InstanceNotFound(instance_id='fake')
class ConsoleOutputExtensionTestV21(test.NoDBTestCase):
    """Tests for the v2.1 os-getConsoleOutput server action."""

    controller_class = console_output_v21
    validation_error = exception.ValidationError

    def setUp(self):
        super(ConsoleOutputExtensionTestV21, self).setUp()
        # Stub out compute API calls so no real instances are involved.
        self.stub_out('nova.compute.api.API.get_console_output',
                      fake_get_console_output)
        self.stub_out('nova.compute.api.API.get', fake_get)
        self.controller = self.controller_class.ConsoleOutputController()
        self.req = fakes.HTTPRequest.blank('')

    def _get_console_output(self, length_dict=None):
        """Invoke the controller action with an optional length body."""
        length_dict = length_dict or {}
        body = {'os-getConsoleOutput': length_dict}
        return self.controller.get_console_output(self.req, fakes.FAKE_UUID,
                                                  body=body)

    def _check_console_output_failure(self, exception, body):
        """Assert the action raises ``exception`` for the given body."""
        self.assertRaises(exception,
                          self.controller.get_console_output,
                          self.req, fakes.FAKE_UUID, body=body)

    def test_get_text_console_instance_action(self):
        output = self._get_console_output()
        self.assertEqual({'output': '0\n1\n2\n3\n4'}, output)

    def test_get_console_output_with_tail(self):
        output = self._get_console_output(length_dict={'length': 3})
        self.assertEqual({'output': '2\n3\n4'}, output)

    def test_get_console_output_with_none_length(self):
        output = self._get_console_output(length_dict={'length': None})
        self.assertEqual({'output': '0\n1\n2\n3\n4'}, output)

    def test_get_console_output_with_length_as_str(self):
        output = self._get_console_output(length_dict={'length': '3'})
        self.assertEqual({'output': '2\n3\n4'}, output)

    def test_get_console_output_filtered_characters(self):
        # The API strips non-printable characters from the console output.
        self.stub_out('nova.compute.api.API.get_console_output',
                      fake_get_console_output_all_characters)
        output = self._get_console_output()
        expect = (string.digits + string.ascii_letters +
                  string.punctuation + ' \t\n')
        self.assertEqual({'output': expect}, output)

    def test_get_text_console_no_instance(self):
        self.stub_out('nova.compute.api.API.get', fake_get_not_found)
        body = {'os-getConsoleOutput': {}}
        self._check_console_output_failure(webob.exc.HTTPNotFound, body)

    def test_get_text_console_no_instance_on_get_output(self):
        self.stub_out('nova.compute.api.API.get_console_output',
                      fake_get_not_found)
        body = {'os-getConsoleOutput': {}}
        self._check_console_output_failure(webob.exc.HTTPNotFound, body)

    def test_get_console_output_with_non_integer_length(self):
        body = {'os-getConsoleOutput': {'length': 'NaN'}}
        self._check_console_output_failure(self.validation_error, body)

    def test_get_text_console_bad_body(self):
        body = {}
        self._check_console_output_failure(self.validation_error, body)

    def test_get_console_output_with_length_as_float(self):
        body = {'os-getConsoleOutput': {'length': 2.5}}
        self._check_console_output_failure(self.validation_error, body)

    def test_get_console_output_not_ready(self):
        self.stub_out('nova.compute.api.API.get_console_output',
                      fake_get_console_output_not_ready)
        body = {'os-getConsoleOutput': {}}
        self._check_console_output_failure(webob.exc.HTTPConflict, body)

    def test_not_implemented(self):
        self.stub_out('nova.compute.api.API.get_console_output',
                      fakes.fake_not_implemented)
        body = {'os-getConsoleOutput': {}}
        self._check_console_output_failure(webob.exc.HTTPNotImplemented, body)

    def test_get_console_output_with_boolean_length(self):
        body = {'os-getConsoleOutput': {'length': True}}
        self._check_console_output_failure(self.validation_error, body)

    @mock.patch.object(compute_api.API, 'get_console_output',
                       side_effect=exception.ConsoleNotAvailable(
                           instance_uuid='fake_uuid'))
    def test_get_console_output_not_available(self, mock_get_console_output):
        body = {'os-getConsoleOutput': {}}
        self._check_console_output_failure(webob.exc.HTTPNotFound, body)
|
google/neural-logic-machines
|
refs/heads/master
|
scripts/graph/learn_graph_tasks.py
|
1
|
#! /usr/bin/env python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The script for family tree or general graphs experiments."""
import copy
import collections
import functools
import os
import json
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import jacinle.random as random
import jacinle.io as io
import jactorch.nn as jacnn
from difflogic.cli import format_args
from difflogic.dataset.graph import GraphOutDegreeDataset, \
GraphConnectivityDataset, GraphAdjacentDataset, FamilyTreeDataset
from difflogic.nn.baselines import MemoryNet
from difflogic.nn.neural_logic import LogicMachine, LogicInference, LogitsInference
from difflogic.nn.neural_logic.modules._utils import meshgrid_exclude_self
from difflogic.nn.rl.reinforce import REINFORCELoss
from difflogic.thutils import binary_accuracy
from difflogic.train import TrainerBase
from jacinle.cli.argument import JacArgumentParser
from jacinle.logging import get_logger, set_output_file
from jacinle.utils.container import GView
from jacinle.utils.meter import GroupMeters
from jactorch.data.dataloader import JacDataLoader
from jactorch.optim.accum_grad import AccumGrad
from jactorch.optim.quickaccess import get_optimizer
from jactorch.train.env import TrainerEnv
from jactorch.utils.meta import as_cuda, as_numpy, as_tensor
# All supported task names: graph tasks plus family-tree relation tasks.
TASKS = [
    'outdegree', 'connectivity', 'adjacent', 'adjacent-mnist', 'has-father',
    'has-sister', 'grandparents', 'uncle', 'maternal-great-uncle'
]

parser = JacArgumentParser()

parser.add_argument(
    '--model',
    default='nlm',
    choices=['nlm', 'memnet'],
    help='model choices, nlm: Neural Logic Machine, memnet: Memory Networks')

# NLM parameters, works when model is 'nlm'
nlm_group = parser.add_argument_group('Neural Logic Machines')
LogicMachine.make_nlm_parser(
    nlm_group, {
        'depth': 4,
        'breadth': 3,
        'exclude_self': True,
        'logic_hidden_dim': []
    },
    prefix='nlm')
nlm_group.add_argument(
    '--nlm-attributes',
    type=int,
    default=8,
    metavar='N',
    help='number of output attributes in each group of each layer of the LogicMachine'
)

# MemNN parameters, works when model is 'memnet'
memnet_group = parser.add_argument_group('Memory Networks')
MemoryNet.make_memnet_parser(memnet_group, {}, prefix='memnet')

# task related
task_group = parser.add_argument_group('Task')
task_group.add_argument(
    '--task', required=True, choices=TASKS, help='tasks choices')
task_group.add_argument(
    '--train-number',
    type=int,
    default=10,
    metavar='N',
    help='size of training instances')
task_group.add_argument(
    '--adjacent-pred-colors', type=int, default=4, metavar='N')
task_group.add_argument('--outdegree-n', type=int, default=2, metavar='N')
task_group.add_argument(
    '--connectivity-dist-limit', type=int, default=4, metavar='N')

# random graph generation parameters
data_gen_group = parser.add_argument_group('Data Generation')
data_gen_group.add_argument(
    '--gen-graph-method',
    default='edge',
    choices=['dnc', 'edge'],
    help='method use to generate random graph')
data_gen_group.add_argument(
    '--gen-graph-pmin',
    type=float,
    default=0.0,
    metavar='F',
    help='control parameter p reflecting the graph sparsity')
data_gen_group.add_argument(
    '--gen-graph-pmax',
    type=float,
    default=0.3,
    metavar='F',
    help='control parameter p reflecting the graph sparsity')
data_gen_group.add_argument(
    '--gen-graph-colors',
    type=int,
    default=4,
    metavar='N',
    help='number of colors in adjacent task')
data_gen_group.add_argument(
    '--gen-directed', action='store_true', help='directed graph')

train_group = parser.add_argument_group('Train')
train_group.add_argument(
    '--seed',
    type=int,
    default=None,
    metavar='SEED',
    help='seed of jacinle.random')
train_group.add_argument(
    '--use-gpu', action='store_true', help='use GPU or not')
train_group.add_argument(
    '--optimizer',
    default='AdamW',
    choices=['SGD', 'Adam', 'AdamW'],
    help='optimizer choices')
train_group.add_argument(
    '--lr',
    type=float,
    default=0.005,
    metavar='F',
    help='initial learning rate')
train_group.add_argument(
    '--lr-decay',
    type=float,
    default=1.0,
    metavar='F',
    help='exponential decay of learning rate per lesson')
train_group.add_argument(
    '--accum-grad',
    type=int,
    default=1,
    metavar='N',
    help='accumulated gradient for batches (default: 1)')
train_group.add_argument(
    '--ohem-size',
    type=int,
    default=0,
    metavar='N',
    help='size of online hard negative mining')
train_group.add_argument(
    '--batch-size',
    type=int,
    default=4,
    metavar='N',
    help='batch size for training')
train_group.add_argument(
    '--test-batch-size',
    type=int,
    default=4,
    metavar='N',
    help='batch size for testing')
train_group.add_argument(
    '--early-stop-loss-thresh',
    type=float,
    default=1e-5,
    metavar='F',
    help='threshold of loss for early stop')

# Note that nr_examples_per_epoch = epoch_size * batch_size
TrainerBase.make_trainer_parser(
    parser, {
        'epochs': 50,
        'epoch_size': 250,
        'test_epoch_size': 250,
        'test_number_begin': 10,
        'test_number_step': 10,
        'test_number_end': 50,
    })

io_group = parser.add_argument_group('Input/Output')
io_group.add_argument(
    '--dump-dir', type=str, default=None, metavar='DIR', help='dump dir')
io_group.add_argument(
    '--load-checkpoint',
    type=str,
    default=None,
    metavar='FILE',
    help='load parameters from checkpoint')

schedule_group = parser.add_argument_group('Schedule')
schedule_group.add_argument(
    '--runs', type=int, default=1, metavar='N', help='number of runs')
schedule_group.add_argument(
    '--save-interval',
    type=int,
    default=10,
    metavar='N',
    help='the interval(number of epochs) to save checkpoint')
schedule_group.add_argument(
    '--test-interval',
    type=int,
    default=None,
    metavar='N',
    help='the interval(number of epochs) to do test')
schedule_group.add_argument(
    '--test-only', action='store_true', help='test-only mode')

logger = get_logger(__file__)

args = parser.parse_args()

args.use_gpu = args.use_gpu and torch.cuda.is_available()

if args.dump_dir is not None:
    io.mkdir(args.dump_dir)
    args.log_file = os.path.join(args.dump_dir, 'log.log')
    set_output_file(args.log_file)
else:
    args.checkpoints_dir = None
    args.summary_file = None

if args.seed is not None:
    # NOTE(review): jacinle.random is already imported at module top; this
    # re-import is redundant but harmless.
    import jacinle.random as random
    random.reset_global_seed(args.seed)

# Derived task-category flags used throughout the script.
args.task_is_outdegree = args.task in ['outdegree']
args.task_is_connectivity = args.task in ['connectivity']
args.task_is_adjacent = args.task in ['adjacent', 'adjacent-mnist']
args.task_is_family_tree = args.task in [
    'has-father', 'has-sister', 'grandparents', 'uncle', 'maternal-great-uncle'
]
args.task_is_mnist_input = args.task in ['adjacent-mnist']
args.task_is_1d_output = args.task in [
    'outdegree', 'adjacent', 'adjacent-mnist', 'has-father', 'has-sister'
]
class LeNet(nn.Module):
    """Small LeNet-style CNN producing 10 logits per image.

    NOTE(review): the 320-unit flatten implies a 28x28 single-channel
    input (two conv5 + pool2 stages -> 20x4x4) -- confirm against callers.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = jacnn.Conv2dLayer(
            1, 10, kernel_size=5, batch_norm=True, activation='relu')
        self.conv2 = jacnn.Conv2dLayer(
            10,
            20,
            kernel_size=5,
            batch_norm=True,
            dropout=False,
            activation='relu')
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.max_pool2d(self.conv1(x), 2)
        x = F.max_pool2d(self.conv2(x), 2)
        # Flatten to (batch, 320) before the fully-connected head.
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
class Model(nn.Module):
    """The model for family tree or general graphs path tasks."""

    def __init__(self):
        super().__init__()

        # inputs: family-tree tasks feed 4 relation predicates, graphs feed 1.
        input_dim = 4 if args.task_is_family_tree else 1
        # Axis of the output feature tensor: 1 for per-node predictions,
        # 2 for per-pair predictions.
        self.feature_axis = 1 if args.task_is_1d_output else 2

        # features
        if args.model == 'nlm':
            input_dims = [0 for _ in range(args.nlm_breadth + 1)]
            if args.task_is_adjacent:
                input_dims[1] = args.gen_graph_colors
                if args.task_is_mnist_input:
                    # Node color predicates come from LeNet digit features.
                    self.lenet = LeNet()
            input_dims[2] = input_dim
            self.features = LogicMachine.from_args(
                input_dims, args.nlm_attributes, args, prefix='nlm')
            output_dim = self.features.output_dims[self.feature_axis]
        elif args.model == 'memnet':
            if args.task_is_adjacent:
                input_dim += args.gen_graph_colors
            self.feature = MemoryNet.from_args(
                input_dim, self.feature_axis, args, prefix='memnet')
            output_dim = self.feature.get_output_dim()

        # target
        target_dim = args.adjacent_pred_colors if args.task_is_adjacent else 1
        self.pred = LogicInference(output_dim, target_dim, [])

        # losses
        if args.ohem_size > 0:
            # Online hard example mining needs per-element losses.
            from jactorch.nn.losses import BinaryCrossEntropyLossWithProbs as BCELoss
            self.loss = BCELoss(average='none')
        else:
            self.loss = nn.BCELoss()

    def forward(self, feed_dict):
        feed_dict = GView(feed_dict)

        # properties (per-node states; only the adjacent tasks have them)
        if args.task_is_adjacent:
            states = feed_dict.states.float()
        else:
            states = None

        # relations (per-pair inputs)
        relations = feed_dict.relations.float()
        batch_size, nr = relations.size()[:2]

        if args.model == 'nlm':
            if args.task_is_adjacent and args.task_is_mnist_input:
                # Encode each MNIST image into per-node features.
                states_shape = states.size()
                states = states.view((-1,) + states_shape[2:])
                states = self.lenet(states)
                states = states.view(states_shape[:2] + (-1,))
                # NOTE(review): F.sigmoid is deprecated in recent torch;
                # torch.sigmoid is the modern equivalent.
                states = F.sigmoid(states)
            inp = [None for _ in range(args.nlm_breadth + 1)]
            inp[1] = states
            inp[2] = relations
            depth = None
            if args.nlm_recursion:
                # Recursion depth grows logarithmically with graph size.
                depth = 1
                while 2**depth + 1 < nr:
                    depth += 1
                depth = depth * 2 + 1
            feature = self.features(inp, depth=depth)[self.feature_axis]
        elif args.model == 'memnet':
            feature = self.feature(relations, states)
            if args.task_is_adjacent and args.task_is_mnist_input:
                raise NotImplementedError()

        pred = self.pred(feature)
        if not args.task_is_adjacent:
            pred = pred.squeeze(-1)
        if args.task_is_connectivity:
            pred = meshgrid_exclude_self(pred)  # exclude self-cycle

        if self.training:
            monitors = dict()
            target = feed_dict.target.float()
            if args.task_is_adjacent:
                target = target[:, :, :args.adjacent_pred_colors]

            monitors.update(binary_accuracy(target, pred, return_float=False))

            loss = self.loss(pred, target)
            # ohem loss is unused.
            if args.ohem_size > 0:
                loss = loss.view(-1).topk(args.ohem_size)[0].mean()
            return loss, monitors, dict(pred=pred)
        else:
            return dict(pred=pred)
def make_dataset(n, epoch_size, is_train):
    """Build the dataset for the configured task.

    :param n: number of graph nodes / family members per instance.
    :param epoch_size: number of instances generated for one epoch.
    :param is_train: whether this dataset is used for training.
    """
    pmin, pmax = args.gen_graph_pmin, args.gen_graph_pmax
    if args.task_is_outdegree:
        return GraphOutDegreeDataset(
            args.outdegree_n,
            epoch_size,
            n,
            pmin=pmin,
            pmax=pmax,
            directed=args.gen_directed,
            gen_method=args.gen_graph_method)
    elif args.task_is_connectivity:
        nmin, nmax = n, n
        if is_train and args.nlm_recursion:
            # Recursive NLMs are trained on a range of graph sizes.
            nmin = 2
        # NOTE(review): positional order (epoch_size, nmin, pmin, nmax, pmax)
        # must match GraphConnectivityDataset's signature -- verify.
        return GraphConnectivityDataset(
            args.connectivity_dist_limit,
            epoch_size,
            nmin,
            pmin,
            nmax,
            pmax,
            directed=args.gen_directed,
            gen_method=args.gen_graph_method)
    elif args.task_is_adjacent:
        return GraphAdjacentDataset(
            args.gen_graph_colors,
            epoch_size,
            n,
            pmin=pmin,
            pmax=pmax,
            directed=args.gen_directed,
            gen_method=args.gen_graph_method,
            is_train=is_train,
            is_mnist_colors=args.task_is_mnist_input)
    else:
        return FamilyTreeDataset(args.task, epoch_size, n, p_marriage=1.0)
class MyTrainer(TrainerBase):
    """Trainer for the graph/family-tree tasks.

    Adds checkpoint saving, JSON-lines summary dumping and per-mode
    dataset preparation on top of ``TrainerBase``. Configuration is read
    from the module-level ``args``.
    """

    def save_checkpoint(self, name):
        """Save a checkpoint named ``checkpoint_<name>.pth`` (if enabled)."""
        if args.checkpoints_dir is not None:
            checkpoint_file = os.path.join(args.checkpoints_dir,
                                           'checkpoint_{}.pth'.format(name))
            super().save_checkpoint(checkpoint_file)

    def _dump_meters(self, meters, mode):
        """Append averaged meter values as one JSON line to the summary file."""
        if args.summary_file is not None:
            meters_kv = meters._canonize_values('avg')
            meters_kv['mode'] = mode
            meters_kv['epoch'] = self.current_epoch
            with open(args.summary_file, 'a') as f:
                f.write(io.dumps_json(meters_kv))
                f.write('\n')

    # Per-mode ('train'/'test') dataloader iterators.
    # NOTE(review): class-level mutable attribute is shared across instances;
    # appears harmless here since one trainer exists per run -- confirm.
    data_iterator = {}

    def _prepare_dataset(self, epoch_size, mode):
        """Build the dataset/dataloader iterator for ``mode``."""
        assert mode in ['train', 'test']

        if mode == 'train':
            batch_size = args.batch_size
            number = args.train_number
        else:
            batch_size = args.test_batch_size
            number = self.test_number

        # The actual number of instances in an epoch is epoch_size * batch_size.
        dataset = make_dataset(number, epoch_size * batch_size, mode == 'train')
        dataloader = JacDataLoader(
            dataset,
            shuffle=True,
            batch_size=batch_size,
            num_workers=min(epoch_size, 4))
        self.data_iterator[mode] = dataloader.__iter__()

    def _get_data(self, index, meters, mode):
        """Fetch the next batch, record graph size, and move to GPU if used."""
        # BUGFIX: use the builtin next() -- the Python 2 style
        # ``iterator.next()`` method does not exist on Python 3 dataloader
        # iterators; next() works on any iterator.
        feed_dict = next(self.data_iterator[mode])
        meters.update(number=feed_dict['n'].data.numpy().mean())
        if args.use_gpu:
            feed_dict = as_cuda(feed_dict)
        return feed_dict

    def _get_result(self, index, meters, mode):
        """Run the model on one batch and compute accuracy metrics."""
        feed_dict = self._get_data(index, meters, mode)
        output_dict = self.model(feed_dict)

        target = feed_dict['target']
        if args.task_is_adjacent:
            target = target[:, :, :args.adjacent_pred_colors]
        result = binary_accuracy(target, output_dict['pred'])
        succ = result['accuracy'] == 1.0

        meters.update(succ=succ)
        meters.update(result, n=target.size(0))
        message = '> {} iter={iter}, accuracy={accuracy:.4f}, \
balance_acc={balanced_accuracy:.4f}'.format(
            mode, iter=index, **meters.val)
        return message, dict(succ=succ, feed_dict=feed_dict)

    def _get_train_data(self, index, meters):
        """TrainerBase hook: training batches come from ``_get_data``."""
        return self._get_data(index, meters, mode='train')

    def _train_epoch(self, epoch_size):
        """Train one epoch, then save/test on the configured intervals."""
        meters = super()._train_epoch(epoch_size)

        i = self.current_epoch
        if args.save_interval is not None and i % args.save_interval == 0:
            self.save_checkpoint(str(i))
        if args.test_interval is not None and i % args.test_interval == 0:
            self.test()

        return meters

    def _early_stop(self, meters):
        """Stop once the average loss drops below the configured threshold."""
        return meters.avg['loss'] < args.early_stop_loss_thresh
def main(run_id):
    """Execute one full train/test cycle.

    :param run_id: zero-based index of this run (used for per-run dump dirs)
    :return: (early_stopped, test_meters); early_stopped is None in test-only mode
    """
    if args.dump_dir is not None:
        run_dir = args.dump_dir
        if args.runs > 1:
            # Give every run its own subdirectory so outputs don't collide.
            run_dir = os.path.join(args.dump_dir, 'run_{}'.format(run_id))
            io.mkdir(run_dir)
        args.current_dump_dir = run_dir
        # NOTE(review): summary_file/checkpoints_dir are only set when dump_dir
        # is given — presumably the arg parser provides None defaults; confirm.
        args.summary_file = os.path.join(args.current_dump_dir, 'summary.json')
        args.checkpoints_dir = os.path.join(args.current_dump_dir, 'checkpoints')
        io.mkdir(args.checkpoints_dir)

    logger.info(format_args(args))

    model = Model()
    if args.use_gpu:
        model.cuda()
    optimizer = get_optimizer(args.optimizer, model, args.lr)
    if args.accum_grad > 1:
        # Emulate larger batches by accumulating gradients across steps.
        optimizer = AccumGrad(optimizer, args.accum_grad)
    trainer = MyTrainer.from_args(model, optimizer, args)

    if args.load_checkpoint is not None:
        trainer.load_checkpoint(args.load_checkpoint)
    if args.test_only:
        return None, trainer.test()

    trainer.train()
    trainer.save_checkpoint('last')
    return trainer.early_stopped, trainer.test()
if __name__ == '__main__':
    # stats[j] aggregates the metrics of the j-th test dataset across all runs.
    stats = []
    # Number of runs whose training met the early-stop ("graduation") criterion.
    nr_graduated = 0
    for i in range(args.runs):
        graduated, test_meters = main(i)
        logger.info('run {}'.format(i + 1))
        if test_meters is not None:
            for j, meters in enumerate(test_meters):
                # Lazily grow the per-dataset accumulator list.
                if len(stats) <= j:
                    stats.append(GroupMeters())
                stats[j].update(
                    number=meters.avg['number'], test_acc=meters.avg['accuracy'])
        for meters in stats:
            logger.info('number {}, test_acc {}'.format(meters.avg['number'],
                                                        meters.avg['test_acc']))
        if not args.test_only:
            # ``graduated`` is only meaningful when training actually ran
            # (main() returns None for it in test-only mode).
            nr_graduated += int(graduated)
            logger.info('graduate_ratio {}'.format(nr_graduated / (i + 1)))
            if graduated:
                # Also track test accuracy restricted to graduated runs.
                for j, meters in enumerate(test_meters):
                    stats[j].update(grad_test_acc=meters.avg['accuracy'])
    if nr_graduated > 0:
        for meters in stats:
            logger.info('number {}, grad_test_acc {}'.format(
                meters.avg['number'], meters.avg['grad_test_acc']))
|
open-pli/enigma2
|
refs/heads/master
|
lib/python/Tools/ISO639.py
|
79
|
# -*- coding: iso-8859-2 -*-
# ISO 639 language-code lookup table.
# Maps ISO 639-2 three-letter codes — and, where one exists, the corresponding
# ISO 639-1 two-letter code — to a ``(language name, language family)`` tuple.
# The family string is empty when no classification is given.
LanguageCodes = { }
LanguageCodes["aar"] = LanguageCodes["aa"] = ("Afar", "Hamitic")
LanguageCodes["abk"] = LanguageCodes["ab"] = ("Abkhazian", "Ibero-caucasian")
LanguageCodes["ace"] = ("Achinese", "")
LanguageCodes["ach"] = ("Acoli", "")
LanguageCodes["ada"] = ("Adangme", "")
LanguageCodes["afa"] = ("Afro-Asiatic (Other)", "")
LanguageCodes["afh"] = ("Afrihili", "")
LanguageCodes["afr"] = LanguageCodes["af"] = ("Afrikaans", "Germanic")
LanguageCodes["aka"] = ("Akan", "")
LanguageCodes["akk"] = ("Akkadian", "")
LanguageCodes["ale"] = ("Aleut", "")
LanguageCodes["alg"] = ("Algonquian languages", "")
LanguageCodes["amh"] = LanguageCodes["am"] = ("Amharic", "Semitic")
LanguageCodes["ang"] = ("English, Old (ca. 450-1100)", "")
LanguageCodes["apa"] = ("Apache languages", "")
LanguageCodes["ara"] = LanguageCodes["ar"] = ("Arabic", "Semitic")
LanguageCodes["arc"] = ("Aramaic", "")
LanguageCodes["arn"] = ("Araucanian", "")
LanguageCodes["arp"] = ("Arapaho", "")
LanguageCodes["art"] = ("Artificial (Other)", "")
LanguageCodes["arw"] = ("Arawak", "")
LanguageCodes["asm"] = LanguageCodes["as"] = ("Assamese", "Indian")
LanguageCodes["ath"] = ("Athapascan languages", "")
LanguageCodes["aus"] = ("Australian languages", "")
LanguageCodes["ava"] = ("Avaric", "")
LanguageCodes["ave"] = LanguageCodes["ae"] = ("Avestan", "")
LanguageCodes["awa"] = ("Awadhi", "")
LanguageCodes["aym"] = LanguageCodes["ay"] = ("Aymara", "Amerindian")
LanguageCodes["aze"] = LanguageCodes["az"] = ("Azerbaijani", "Turkic/altaic")
LanguageCodes["bad"] = ("Banda", "")
LanguageCodes["bai"] = ("Bamileke languages", "")
LanguageCodes["bak"] = LanguageCodes["ba"] = ("Bashkir", "Turkic/altaic")
LanguageCodes["bal"] = ("Baluchi", "")
LanguageCodes["bam"] = ("Bambara", "")
LanguageCodes["ban"] = ("Balinese", "")
LanguageCodes["bas"] = ("Basa", "")
LanguageCodes["bat"] = ("Baltic (Other)", "")
LanguageCodes["bej"] = ("Beja", "")
LanguageCodes["bel"] = LanguageCodes["be"] = ("Belarusian", "Slavic")
LanguageCodes["bem"] = ("Bemba", "")
LanguageCodes["ben"] = LanguageCodes["bn"] = ("Bengali", "Indian")
LanguageCodes["ber"] = ("Berber (Other)", "")
LanguageCodes["bho"] = ("Bhojpuri", "")
LanguageCodes["bih"] = LanguageCodes["bh"] = ("Bihari", "Indian")
LanguageCodes["bik"] = ("Bikol", "")
LanguageCodes["bin"] = ("Bini", "")
LanguageCodes["bis"] = LanguageCodes["bi"] = ("Bislama", "")
LanguageCodes["bla"] = ("Siksika", "")
LanguageCodes["bnt"] = ("Bantu (Other)", "")
LanguageCodes["bod"] = LanguageCodes["tib"] = LanguageCodes["bo"] = ("Tibetan", "Asian")
LanguageCodes["bos"] = LanguageCodes["bs"] = ("Bosnian", "")
LanguageCodes["bra"] = ("Braj", "")
LanguageCodes["bre"] = LanguageCodes["br"] = ("Breton", "Celtic")
LanguageCodes["btk"] = ("Batak (Indonesia)", "")
LanguageCodes["bua"] = ("Buriat", "")
LanguageCodes["bug"] = ("Buginese", "")
LanguageCodes["bul"] = LanguageCodes["bg"] = ("Bulgarian", "Slavic")
LanguageCodes["cad"] = ("Caddo", "")
LanguageCodes["cai"] = ("Central American Indian (Other)", "")
LanguageCodes["car"] = ("Carib", "")
LanguageCodes["cat"] = LanguageCodes["ca"] = ("Catalan", "Romance")
LanguageCodes["cau"] = ("Caucasian (Other)", "")
LanguageCodes["ceb"] = ("Cebuano", "")
LanguageCodes["cel"] = ("Celtic (Other)", "")
LanguageCodes["ces"] = LanguageCodes["cze"] = LanguageCodes["cs"] = ("Czech", "Slavic")
LanguageCodes["cha"] = LanguageCodes["ch"] = ("Chamorro", "")
LanguageCodes["chb"] = ("Chibcha", "")
LanguageCodes["che"] = LanguageCodes["ce"] = ("Chechen", "")
LanguageCodes["chg"] = ("Chagatai", "")
LanguageCodes["chk"] = ("Chuukese", "")
LanguageCodes["chm"] = ("Mari", "")
LanguageCodes["chn"] = ("Chinook jargon", "")
LanguageCodes["cho"] = ("Choctaw", "")
LanguageCodes["chp"] = ("Chipewyan", "")
LanguageCodes["chr"] = ("Cherokee", "")
LanguageCodes["chu"] = LanguageCodes["cu"] = ("Church Slavic", "")
LanguageCodes["chv"] = LanguageCodes["cv"] = ("Chuvash", "")
LanguageCodes["chy"] = ("Cheyenne", "")
LanguageCodes["cmc"] = ("Chamic languages", "")
LanguageCodes["cop"] = ("Coptic", "")
LanguageCodes["cor"] = LanguageCodes["kw"] = ("Cornish", "")
LanguageCodes["cos"] = LanguageCodes["co"] = ("Corsican", "Romance")
LanguageCodes["cpe"] = ("Creoles and pidgins, English based (Other)", "")
LanguageCodes["cpf"] = ("Creoles and pidgins, French-based (Other)", "")
LanguageCodes["cpp"] = ("Creoles and pidgins, Portuguese-based (Other)", "")
LanguageCodes["cre"] = ("Cree", "")
LanguageCodes["crp"] = ("Creoles and pidgins (Other)", "")
LanguageCodes["cus"] = ("Cushitic (Other)", "")
LanguageCodes["cym"] = LanguageCodes["wel"] = LanguageCodes["cy"] = ("Welsh", "Celtic")
LanguageCodes["dak"] = ("Dakota", "")
LanguageCodes["dan"] = LanguageCodes["da"] = ("Danish", "Germanic")
LanguageCodes["day"] = ("Dayak", "")
LanguageCodes["del"] = ("Delaware", "")
LanguageCodes["den"] = ("Slave (Athapascan)", "")
LanguageCodes["deu"] = LanguageCodes["ger"] = LanguageCodes["de"] = ("German", "Germanic")
LanguageCodes["dgr"] = ("Dogrib", "")
LanguageCodes["din"] = ("Dinka", "")
LanguageCodes["div"] = ("Divehi", "")
LanguageCodes["doi"] = ("Dogri", "")
LanguageCodes["dra"] = ("Dravidian (Other)", "")
LanguageCodes["dua"] = ("Duala", "")
LanguageCodes["dum"] = ("Dutch, Middle (ca. 1050-1350)", "")
LanguageCodes["dyu"] = ("Dyula", "")
LanguageCodes["dzo"] = LanguageCodes["dz"] = ("Dzongkha", "Asian")
LanguageCodes["efi"] = ("Efik", "")
LanguageCodes["egy"] = ("Egyptian (Ancient)", "")
LanguageCodes["eka"] = ("Ekajuk", "")
LanguageCodes["ell"] = LanguageCodes["gre"] = LanguageCodes["el"] = ("Greek, Modern (1453-)", "Latin/greek")
LanguageCodes["elx"] = ("Elamite", "")
LanguageCodes["eng"] = LanguageCodes["en"] = ("English", "Germanic")
LanguageCodes["enm"] = ("English, Middle (1100-1500)", "")
LanguageCodes["epo"] = LanguageCodes["eo"] = ("Esperanto", "International aux.")
LanguageCodes["est"] = LanguageCodes["et"] = ("Estonian", "Finno-ugric")
LanguageCodes["eus"] = LanguageCodes["baq"] = LanguageCodes["eu"] = ("Basque", "Basque")
LanguageCodes["ewe"] = ("Ewe", "")
LanguageCodes["ewo"] = ("Ewondo", "")
LanguageCodes["fan"] = ("Fang", "")
LanguageCodes["fao"] = LanguageCodes["fo"] = ("Faroese", "Germanic")
LanguageCodes["fas"] = LanguageCodes["per"] = LanguageCodes["fa"] = ("Persian", "")
LanguageCodes["fat"] = ("Fanti", "")
LanguageCodes["fij"] = LanguageCodes["fj"] = ("Fijian", "Oceanic/indonesian")
LanguageCodes["fin"] = LanguageCodes["fi"] = ("Finnish", "Finno-ugric")
LanguageCodes["fiu"] = ("Finno-Ugrian (Other)", "")
LanguageCodes["fon"] = ("Fon", "")
LanguageCodes["fra"] = LanguageCodes["fre"] = LanguageCodes["fr"] = ("French", "Romance")
LanguageCodes["frm"] = ("French, Middle (ca. 1400-1600)", "")
LanguageCodes["fro"] = ("French, Old (842-ca. 1400)", "")
LanguageCodes["fry"] = LanguageCodes["fy"] = ("Frisian", "Germanic")
LanguageCodes["ful"] = ("Fulah", "")
LanguageCodes["fur"] = ("Friulian", "")
LanguageCodes["gaa"] = ("Ga", "")
LanguageCodes["gay"] = ("Gayo", "")
LanguageCodes["gba"] = ("Gbaya", "")
LanguageCodes["gem"] = ("Germanic (Other)", "")
LanguageCodes["gez"] = ("Geez", "")
LanguageCodes["gil"] = ("Gilbertese", "")
LanguageCodes["gla"] = LanguageCodes["gd"] = ("Gaelic (Scots)", "Celtic")
LanguageCodes["gle"] = LanguageCodes["ga"] = ("Irish", "Celtic")
LanguageCodes["glg"] = LanguageCodes["gl"] = ("Gallegan", "Romance")
LanguageCodes["glv"] = LanguageCodes["gv"] = ("Manx", "")
LanguageCodes["gmh"] = ("German, Middle High (ca. 1050-1500)", "")
LanguageCodes["goh"] = ("German, Old High (ca. 750-1050)", "")
LanguageCodes["gon"] = ("Gondi", "")
LanguageCodes["gor"] = ("Gorontalo", "")
LanguageCodes["got"] = ("Gothic", "")
LanguageCodes["grb"] = ("Grebo", "")
LanguageCodes["grc"] = ("Greek, Ancient (to 1453)", "")
LanguageCodes["grn"] = LanguageCodes["gn"] = ("Guarani", "Amerindian")
LanguageCodes["guj"] = LanguageCodes["gu"] = ("Gujarati", "Indian")
LanguageCodes["gwi"] = ("Gwich´in", "")
LanguageCodes["hai"] = ("Haida", "")
LanguageCodes["hau"] = LanguageCodes["ha"] = ("Hausa", "Negro-african")
LanguageCodes["haw"] = ("Hawaiian", "")
LanguageCodes["heb"] = LanguageCodes["he"] = ("Hebrew", "")
LanguageCodes["her"] = LanguageCodes["hz"] = ("Herero", "")
LanguageCodes["hil"] = ("Hiligaynon", "")
LanguageCodes["him"] = ("Himachali", "")
LanguageCodes["hin"] = LanguageCodes["hi"] = ("Hindi", "Indian")
LanguageCodes["hit"] = ("Hittite", "")
LanguageCodes["hmn"] = ("Hmong", "")
LanguageCodes["hmo"] = LanguageCodes["ho"] = ("Hiri Motu", "")
LanguageCodes["hrv"] = LanguageCodes["scr"] = LanguageCodes["hr"] = ("Croatian", "Slavic")
LanguageCodes["hun"] = LanguageCodes["hu"] = ("Hungarian", "Finno-ugric")
LanguageCodes["hup"] = ("Hupa", "")
LanguageCodes["hye"] = LanguageCodes["arm"] = LanguageCodes["hy"] = ("Armenian", "Indo-european (other)")
LanguageCodes["iba"] = ("Iban", "")
LanguageCodes["ibo"] = ("Igbo", "")
LanguageCodes["ijo"] = ("Ijo", "")
LanguageCodes["iku"] = LanguageCodes["iu"] = ("Inuktitut", "")
LanguageCodes["ile"] = LanguageCodes["ie"] = ("Interlingue", "International aux.")
LanguageCodes["ilo"] = ("Iloko", "")
LanguageCodes["ina"] = LanguageCodes["ia"] = ("Interlingua (International Auxiliary Language Association)", "International aux.")
LanguageCodes["inc"] = ("Indic (Other)", "")
LanguageCodes["ind"] = LanguageCodes["id"] = ("Indonesian", "")
LanguageCodes["ine"] = ("Indo-European (Other)", "")
LanguageCodes["ipk"] = LanguageCodes["ik"] = ("Inupiaq", "Eskimo")
LanguageCodes["ira"] = ("Iranian (Other)", "")
LanguageCodes["iro"] = ("Iroquoian languages", "")
LanguageCodes["isl"] = LanguageCodes["ice"] = LanguageCodes["is"] = ("Icelandic", "Germanic")
LanguageCodes["ita"] = LanguageCodes["it"] = ("Italian", "Romance")
LanguageCodes["jaw"] = LanguageCodes["jav"] = LanguageCodes["jw"] = ("Javanese", "")
LanguageCodes["jpn"] = LanguageCodes["ja"] = ("Japanese", "Asian")
LanguageCodes["jpr"] = ("Judeo-Persian", "")
LanguageCodes["kaa"] = ("Kara-Kalpak", "")
LanguageCodes["kab"] = ("Kabyle", "")
LanguageCodes["kac"] = ("Kachin", "")
LanguageCodes["kal"] = LanguageCodes["kl"] = ("Kalaallisut", "Eskimo")
LanguageCodes["kam"] = ("Kamba", "")
LanguageCodes["kan"] = LanguageCodes["kn"] = ("Kannada", "Dravidian")
LanguageCodes["kar"] = ("Karen", "")
LanguageCodes["kas"] = LanguageCodes["ks"] = ("Kashmiri", "Indian")
LanguageCodes["kat"] = LanguageCodes["geo"] = LanguageCodes["ka"] = ("Georgian", "Ibero-caucasian")
LanguageCodes["kau"] = ("Kanuri", "")
LanguageCodes["kaw"] = ("Kawi", "")
LanguageCodes["kaz"] = LanguageCodes["kk"] = ("Kazakh", "Turkic/altaic")
LanguageCodes["kha"] = ("Khasi", "")
LanguageCodes["khi"] = ("Khoisan (Other)", "")
LanguageCodes["khm"] = LanguageCodes["km"] = ("Khmer", "Asian")
LanguageCodes["kho"] = ("Khotanese", "")
LanguageCodes["kik"] = LanguageCodes["ki"] = ("Kikuyu", "")
LanguageCodes["kin"] = LanguageCodes["rw"] = ("Kinyarwanda", "Negro-african")
LanguageCodes["kir"] = LanguageCodes["ky"] = ("Kirghiz", "Turkic/altaic")
LanguageCodes["kmb"] = ("Kimbundu", "")
LanguageCodes["kok"] = ("Konkani", "")
LanguageCodes["kom"] = LanguageCodes["kv"] = ("Komi", "")
LanguageCodes["kon"] = ("Kongo", "")
LanguageCodes["kor"] = LanguageCodes["ko"] = ("Korean", "Asian")
LanguageCodes["kos"] = ("Kosraean", "")
LanguageCodes["kpe"] = ("Kpelle", "")
LanguageCodes["kro"] = ("Kru", "")
LanguageCodes["kru"] = ("Kurukh", "")
LanguageCodes["kum"] = ("Kumyk", "")
LanguageCodes["kur"] = LanguageCodes["ku"] = ("Kurdish", "Iranian")
LanguageCodes["kut"] = ("Kutenai", "")
LanguageCodes["lad"] = ("Ladino", "")
LanguageCodes["lah"] = ("Lahnda", "")
LanguageCodes["lam"] = ("Lamba", "")
LanguageCodes["lao"] = LanguageCodes["lo"] = ("Lao", "Asian")
LanguageCodes["lat"] = LanguageCodes["la"] = ("Latin", "Latin/greek")
LanguageCodes["lav"] = LanguageCodes["lv"] = ("Latvian", "Baltic")
LanguageCodes["lez"] = ("Lezghian", "")
LanguageCodes["lin"] = LanguageCodes["ln"] = ("Lingala", "Negro-african")
LanguageCodes["lit"] = LanguageCodes["lt"] = ("Lithuanian", "Baltic")
LanguageCodes["lol"] = ("Mongo", "")
LanguageCodes["loz"] = ("Lozi", "")
LanguageCodes["ltz"] = LanguageCodes["lb"] = ("Letzeburgesch", "")
LanguageCodes["lua"] = ("Luba-Lulua", "")
LanguageCodes["lub"] = ("Luba-Katanga", "")
LanguageCodes["lug"] = ("Ganda", "")
LanguageCodes["lui"] = ("Luiseno", "")
LanguageCodes["lun"] = ("Lunda", "")
LanguageCodes["luo"] = ("Luo (Kenya and Tanzania)", "")
LanguageCodes["lus"] = ("lushai", "")
LanguageCodes["mad"] = ("Madurese", "")
LanguageCodes["mag"] = ("Magahi", "")
LanguageCodes["mah"] = LanguageCodes["mh"] = ("Marshall", "")
LanguageCodes["mai"] = ("Maithili", "")
LanguageCodes["mak"] = ("Makasar", "")
LanguageCodes["mal"] = LanguageCodes["ml"] = ("Malayalam", "Dravidian")
LanguageCodes["man"] = ("Mandingo", "")
LanguageCodes["map"] = ("Austronesian (Other)", "")
LanguageCodes["mar"] = LanguageCodes["mr"] = ("Marathi", "Indian")
LanguageCodes["mas"] = ("Masai", "")
LanguageCodes["mdr"] = ("Mandar", "")
LanguageCodes["men"] = ("Mende", "")
LanguageCodes["mga"] = ("Irish, Middle (900-1200)", "")
LanguageCodes["mic"] = ("Micmac", "")
LanguageCodes["min"] = ("Minangkabau", "")
LanguageCodes["mis"] = ("Miscellaneous languages", "")
LanguageCodes["mkd"] = LanguageCodes["mac"] = LanguageCodes["mk"] = ("Macedonian", "Slavic")
LanguageCodes["mkh"] = ("Mon-Khmer (Other)", "")
LanguageCodes["mlg"] = LanguageCodes["mg"] = ("Malagasy", "Oceanic/indonesian")
LanguageCodes["mlt"] = LanguageCodes["mt"] = ("Maltese", "Semitic")
LanguageCodes["mnc"] = ("Manchu", "")
LanguageCodes["mni"] = ("Manipuri", "")
LanguageCodes["mno"] = ("Manobo languages", "")
LanguageCodes["moh"] = ("Mohawk", "")
LanguageCodes["mol"] = LanguageCodes["mo"] = ("Moldavian", "Romance")
LanguageCodes["mon"] = LanguageCodes["mn"] = ("Mongolian", "")
LanguageCodes["mos"] = ("Mossi", "")
LanguageCodes["mri"] = LanguageCodes["mao"] = LanguageCodes["mi"] = ("Maori", "Oceanic/indonesian")
LanguageCodes["msa"] = LanguageCodes["may"] = LanguageCodes["ms"] = ("Malay", "Oceanic/indonesian")
LanguageCodes["mul"] = ("Multiple languages", "")
LanguageCodes["mun"] = ("Munda languages", "")
LanguageCodes["mus"] = ("Creek", "")
LanguageCodes["mwr"] = ("Marwari", "")
LanguageCodes["mya"] = LanguageCodes["bur"] = LanguageCodes["my"] = ("Burmese", "Asian")
LanguageCodes["myn"] = ("Mayan languages", "")
LanguageCodes["nah"] = ("Nahuatl", "")
LanguageCodes["nai"] = ("North American Indian", "")
LanguageCodes["nau"] = LanguageCodes["na"] = ("Nauru", "")
LanguageCodes["nav"] = LanguageCodes["nv"] = ("Navajo", "")
LanguageCodes["nbl"] = LanguageCodes["nr"] = ("Ndebele, South", "")
LanguageCodes["nde"] = LanguageCodes["nd"] = ("Ndebele, North", "")
LanguageCodes["ndo"] = LanguageCodes["ng"] = ("Ndonga", "")
LanguageCodes["nds"] = ("Low German; Low Saxon; German, Low; Saxon, Low", "")
LanguageCodes["nep"] = LanguageCodes["ne"] = ("Nepali", "Indian")
LanguageCodes["new"] = ("Newari", "")
LanguageCodes["nia"] = ("Nias", "")
LanguageCodes["nic"] = ("Niger-Kordofanian (Other)", "")
LanguageCodes["niu"] = ("Niuean", "")
LanguageCodes["nld"] = LanguageCodes["dut"] = LanguageCodes["nl"] = ("Dutch", "Germanic")
LanguageCodes["nno"] = LanguageCodes["nn"] = ("Norwegian Nynorsk", "")
LanguageCodes["nob"] = LanguageCodes["nb"] = ("Norwegian Bokmål", "")
LanguageCodes["non"] = ("Norse, Old", "")
LanguageCodes["nor"] = LanguageCodes["no"] = ("Norwegian", "Germanic")
LanguageCodes["nso"] = ("Sotho, Northern", "")
LanguageCodes["nub"] = ("Nubian languages", "")
LanguageCodes["nya"] = LanguageCodes["ny"] = ("Chichewa; Nyanja", "")
LanguageCodes["nym"] = ("Nyamwezi", "")
LanguageCodes["nyn"] = ("Nyankole", "")
LanguageCodes["nyo"] = ("Nyoro", "")
LanguageCodes["nzi"] = ("Nzima", "")
LanguageCodes["oci"] = LanguageCodes["oc"] = ("Occitan (post 1500); Provençal", "Romance")
LanguageCodes["oji"] = ("Ojibwa", "")
LanguageCodes["ori"] = LanguageCodes["or"] = ("Oriya", "Indian")
LanguageCodes["orm"] = LanguageCodes["om"] = ("Oromo", "Hamitic")
LanguageCodes["osa"] = ("Osage", "")
LanguageCodes["oss"] = LanguageCodes["os"] = ("Ossetian; Ossetic", "")
LanguageCodes["ota"] = ("Turkish, Ottoman (1500-1928)", "")
LanguageCodes["oto"] = ("Otomian languages", "")
LanguageCodes["paa"] = ("Papuan (Other)", "")
LanguageCodes["pag"] = ("Pangasinan", "")
LanguageCodes["pal"] = ("Pahlavi", "")
LanguageCodes["pam"] = ("Pampanga", "")
LanguageCodes["pan"] = LanguageCodes["pa"] = ("Panjabi", "Indian")
LanguageCodes["pap"] = ("Papiamento", "")
LanguageCodes["pau"] = ("Palauan", "")
LanguageCodes["peo"] = ("Persian, Old (ca. 600-400 b.c.)", "")
LanguageCodes["phi"] = ("Philippine (Other)", "")
LanguageCodes["pli"] = LanguageCodes["pi"] = ("Pali", "")
LanguageCodes["pol"] = LanguageCodes["pl"] = ("Polish", "Slavic")
LanguageCodes["pon"] = ("Pohnpeian", "")
LanguageCodes["por"] = LanguageCodes["pt"] = ("Portuguese", "Romance")
LanguageCodes["pra"] = ("Prakrit languages", "")
LanguageCodes["pro"] = ("Provençal, Old (to 1500)", "")
LanguageCodes["pus"] = LanguageCodes["ps"] = ("Pushto", "Iranian")
LanguageCodes["que"] = LanguageCodes["qu"] = ("Quechua", "Amerindian")
LanguageCodes["raj"] = ("Rajasthani", "")
LanguageCodes["rap"] = ("Rapanui", "")
LanguageCodes["rar"] = ("Rarotongan", "")
LanguageCodes["roa"] = ("Romance (Other)", "")
LanguageCodes["rom"] = ("Romany", "")
LanguageCodes["ron"] = LanguageCodes["rum"] = LanguageCodes["ro"] = ("Romanian", "Romance")
LanguageCodes["run"] = LanguageCodes["rn"] = ("Rundi", "Negro-african")
LanguageCodes["rus"] = LanguageCodes["ru"] = ("Russian", "Slavic")
LanguageCodes["sad"] = ("Sandawe", "")
LanguageCodes["sag"] = LanguageCodes["sg"] = ("Sango", "Negro-african")
LanguageCodes["sah"] = ("Yakut", "")
LanguageCodes["sai"] = ("South American Indian (Other)", "")
LanguageCodes["sal"] = ("Salishan languages", "")
LanguageCodes["sam"] = ("Samaritan Aramaic", "")
LanguageCodes["san"] = LanguageCodes["sa"] = ("Sanskrit", "Indian")
LanguageCodes["sas"] = ("Sasak", "")
LanguageCodes["sat"] = ("Santali", "")
LanguageCodes["sco"] = ("Scots", "")
LanguageCodes["sel"] = ("Selkup", "")
LanguageCodes["sem"] = ("Semitic (Other)", "")
LanguageCodes["sga"] = ("Irish, Old (to 900)", "")
LanguageCodes["sgn"] = ("Sign Languages", "")
LanguageCodes["shn"] = ("Shan", "")
LanguageCodes["sid"] = ("Sidamo", "")
LanguageCodes["sin"] = LanguageCodes["si"] = ("Sinhalese", "Indian")
LanguageCodes["sio"] = ("Siouan languages", "")
LanguageCodes["sit"] = ("Sino-Tibetan (Other)", "")
LanguageCodes["sla"] = ("Slavic (Other)", "")
LanguageCodes["slk"] = LanguageCodes["slo"] = LanguageCodes["sk"] = ("Slovak", "Slavic")
LanguageCodes["slv"] = LanguageCodes["sl"] = ("Slovenian", "Slavic")
LanguageCodes["sme"] = LanguageCodes["se"] = ("Northern Sami", "")
LanguageCodes["smi"] = ("Sami languages (Other)", "")
LanguageCodes["smo"] = LanguageCodes["sm"] = ("Samoan", "Oceanic/indonesian")
LanguageCodes["sna"] = LanguageCodes["sn"] = ("Shona", "Negro-african")
LanguageCodes["snd"] = LanguageCodes["sd"] = ("Sindhi", "Indian")
LanguageCodes["snk"] = ("Soninke", "")
LanguageCodes["sog"] = ("Sogdian", "")
LanguageCodes["som"] = LanguageCodes["so"] = ("Somali", "Hamitic")
LanguageCodes["son"] = ("Songhai", "")
LanguageCodes["sot"] = LanguageCodes["st"] = ("Sotho, Southern", "Negro-african")
LanguageCodes["esl"] = LanguageCodes["spa"] = LanguageCodes["es"] = ("Spanish", "Romance")
LanguageCodes["sqi"] = LanguageCodes["alb"] = LanguageCodes["sq"] = ("Albanian", "Indo-european (other)")
LanguageCodes["srd"] = LanguageCodes["sc"] = ("Sardinian", "")
LanguageCodes["srp"] = LanguageCodes["scc"] = LanguageCodes["sr"] = ("Serbian", "Slavic")
LanguageCodes["srr"] = ("Serer", "")
LanguageCodes["ssa"] = ("Nilo-Saharan (Other)", "")
LanguageCodes["ssw"] = LanguageCodes["ss"] = ("Swati", "Negro-african")
LanguageCodes["suk"] = ("Sukuma", "")
LanguageCodes["sun"] = LanguageCodes["su"] = ("Sundanese", "Oceanic/indonesian")
LanguageCodes["sus"] = ("Susu", "")
LanguageCodes["sux"] = ("Sumerian", "")
LanguageCodes["swa"] = LanguageCodes["sw"] = ("Swahili", "Negro-african")
LanguageCodes["swe"] = LanguageCodes["sv"] = ("Swedish", "Germanic")
LanguageCodes["syr"] = ("Syriac", "")
LanguageCodes["tah"] = LanguageCodes["ty"] = ("Tahitian", "")
LanguageCodes["tai"] = ("Tai (Other)", "")
LanguageCodes["tam"] = LanguageCodes["ta"] = ("Tamil", "Dravidian")
LanguageCodes["tat"] = LanguageCodes["tt"] = ("Tatar", "Turkic/altaic")
LanguageCodes["tel"] = LanguageCodes["te"] = ("Telugu", "Dravidian")
LanguageCodes["tem"] = ("Timne", "")
LanguageCodes["ter"] = ("Tereno", "")
LanguageCodes["tet"] = ("Tetum", "")
LanguageCodes["tgk"] = LanguageCodes["tg"] = ("Tajik", "Iranian")
LanguageCodes["tgl"] = LanguageCodes["tl"] = ("Tagalog", "Oceanic/indonesian")
LanguageCodes["tha"] = LanguageCodes["th"] = ("Thai", "Asian")
LanguageCodes["tig"] = ("Tigre", "")
LanguageCodes["tir"] = LanguageCodes["ti"] = ("Tigrinya", "Semitic")
LanguageCodes["tiv"] = ("Tiv", "")
LanguageCodes["tkl"] = ("Tokelau", "")
LanguageCodes["tli"] = ("Tlingit", "")
LanguageCodes["tmh"] = ("Tamashek", "")
LanguageCodes["tog"] = ("Tonga (Nyasa)", "")
LanguageCodes["ton"] = LanguageCodes["to"] = ("Tonga (Tonga Islands)", "Oceanic/indonesian")
LanguageCodes["tpi"] = ("Tok Pisin", "")
LanguageCodes["tsi"] = ("Tsimshian", "")
LanguageCodes["tsn"] = LanguageCodes["tn"] = ("Tswana", "Negro-african")
LanguageCodes["tso"] = LanguageCodes["ts"] = ("Tsonga", "Negro-african")
LanguageCodes["tuk"] = LanguageCodes["tk"] = ("Turkmen", "Turkic/altaic")
LanguageCodes["tum"] = ("Tumbuka", "")
LanguageCodes["tur"] = LanguageCodes["tr"] = ("Turkish", "Turkic/altaic")
LanguageCodes["tut"] = ("Altaic (Other)", "")
LanguageCodes["tvl"] = ("Tuvalu", "")
LanguageCodes["twi"] = LanguageCodes["tw"] = ("Twi", "Negro-african")
LanguageCodes["tyv"] = ("Tuvinian", "")
LanguageCodes["uga"] = ("Ugaritic", "")
LanguageCodes["uig"] = LanguageCodes["ug"] = ("Uighur", "")
LanguageCodes["ukr"] = LanguageCodes["uk"] = ("Ukrainian", "Slavic")
LanguageCodes["umb"] = ("Umbundu", "")
LanguageCodes["und"] = ("Undetermined", "")
LanguageCodes["urd"] = LanguageCodes["ur"] = ("Urdu", "Indian")
LanguageCodes["uzb"] = LanguageCodes["uz"] = ("Uzbek", "Turkic/altaic")
LanguageCodes["vai"] = ("Vai", "")
LanguageCodes["ven"] = ("Venda", "")
LanguageCodes["vie"] = LanguageCodes["vi"] = ("Vietnamese", "Asian")
LanguageCodes["vol"] = LanguageCodes["vo"] = ("Volapük", "International aux.")
LanguageCodes["vot"] = ("Votic", "")
LanguageCodes["wak"] = ("Wakashan languages", "")
LanguageCodes["wal"] = ("Walamo", "")
LanguageCodes["war"] = ("Waray", "")
LanguageCodes["was"] = ("Washo", "")
LanguageCodes["wen"] = ("Sorbian languages", "")
LanguageCodes["wol"] = LanguageCodes["wo"] = ("Wolof", "Negro-african")
LanguageCodes["xho"] = LanguageCodes["xh"] = ("Xhosa", "Negro-african")
LanguageCodes["yao"] = ("Yao", "")
LanguageCodes["yap"] = ("Yapese", "")
LanguageCodes["yid"] = LanguageCodes["yi"] = ("Yiddish", "")
LanguageCodes["yor"] = LanguageCodes["yo"] = ("Yoruba", "Negro-african")
LanguageCodes["ypk"] = ("Yupik languages", "")
LanguageCodes["zap"] = ("Zapotec", "")
LanguageCodes["zen"] = ("Zenaga", "")
LanguageCodes["zha"] = LanguageCodes["za"] = ("Zhuang", "")
LanguageCodes["zho"] = LanguageCodes["chi"] = LanguageCodes["zh"] = ("Chinese", "Asian")
LanguageCodes["znd"] = ("Zande", "")
LanguageCodes["zul"] = LanguageCodes["zu"] = ("Zulu", "Negro-african")
LanguageCodes["zun"] = ("Zuni", "")
|
mwalli/spark-cloudant
|
refs/heads/master
|
examples/python/CloudantDF.py
|
3
|
#*******************************************************************************
# Copyright (c) 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#******************************************************************************/
import pprint
from pyspark.sql import SparkSession
# NOTE(review): this example uses Python 2 print statements below — run it
# with Python 2 (Spark 1.x/2.x era example).
# define cloudant related configuration
# set protocol to http if needed, default value=https
# config("cloudant.protocol","http")
spark = SparkSession\
    .builder\
    .appName("Cloudant Spark SQL Example in Python using dataframes")\
    .config("cloudant.host","ACCOUNT.cloudant.com")\
    .config("cloudant.username", "USERNAME")\
    .config("cloudant.password","PASSWORD")\
    .config("jsonstore.rdd.partitions", 8)\
    .getOrCreate()
# ***1. Loading dataframe from Cloudant db
df = spark.read.load("n_airportcodemapping", "com.cloudant.spark")
# In case of doing multiple operations on a dataframe (select, filter etc.)
# you should persist the dataframe.
# Otherwise, every operation on the dataframe will load the same data from Cloudant again.
# Persisting will also speed up computation.
df.cache() # persisting in memory
# alternatively for large dbs to persist in memory & disk:
# from pyspark import StorageLevel
# df.persist(storageLevel = StorageLevel(True, True, False, True, 1))
df.printSchema()
df.filter(df.airportName >= 'Moscow').select("_id",'airportName').show()
df.filter(df._id >= 'CAA').select("_id",'airportName').show()
# ***2. Saving a dataframe to Cloudant db
df = spark.read.load(format="com.cloudant.spark", database="n_flight")
df.printSchema()
df2 = df.filter(df.flightSegmentId=='AA106')\
    .select("flightSegmentId", "economyClassBaseCost")
df2.write.save("n_flight2", "com.cloudant.spark",
        bulkSize = "100", createDBOnSave="true")
total = df.filter(df.flightSegmentId >'AA9').select("flightSegmentId",
    "scheduledDepartureTime").orderBy(df.flightSegmentId).count()
print "Total", total, "flights from table"
# ***3. Loading dataframe from a Cloudant search index
df = spark.read.load(format="com.cloudant.spark", database="n_flight",
    index="_design/view/_search/n_flights")
df.printSchema()
total = df.filter(df.flightSegmentId >'AA9').select("flightSegmentId",
    "scheduledDepartureTime").orderBy(df.flightSegmentId).count()
print "Total", total, "flights from index"
# ***4. Loading dataframe from a Cloudant view
# NOTE(review): this call uses path= where the earlier loads use database= —
# presumably both are accepted option names for the connector; confirm.
df = spark.read.load(format="com.cloudant.spark", path="n_flight",
    view="_design/view/_view/AA0", schemaSampleSize="20")
# schema for view will always be: _id, key, value
# where value can be a complex field
df.printSchema()
df.show()
|
jakobabesser/pymus
|
refs/heads/master
|
pymus/sisa/f0_tracking/f0_tracking_peak_tracking.py
|
1
|
from scipy.signal import decimate
import numpy as np
from ...transform.transformer import Transformer
__author__ = 'Jakob Abesser'
class F0TrackerPeakTrackingAbesserDAFX2014:
    def __init__(self,
                 blocksize=2048,
                 hopsize=128,
                 zero_padding_factor=8,
                 bins_per_octave=25*12,
                 down_sampling_factor=.5,
                 lower_mag_th_db=50.,
                 pitch_margin=2,
                 verbose=True,
                 visualize=False):
        """ Initialize the score-informed f0 tracker.
        :param blocksize: (int) Analysis block size in samples
        :param hopsize: (int) Hop size in samples
        :param zero_padding_factor: (int) Zero-padding factor for the spectral analysis
        :param bins_per_octave: (int) Resolution of the log-frequency axis in bins per octave
        :param down_sampling_factor: (float) Sample-rate scaling factor (e.g. .5 halves the rate)
        :param lower_mag_th_db: (float) Lower magnitude threshold in dB (dynamic-range limit)
        :param pitch_margin: (int) Search margin around the annotated MIDI pitch in semitones
        :param verbose: (bool) Print progress messages during processing
        :param visualize: (bool) Visualization flag (stored only; not used in the visible code)
        """
        self.blocksize = float(blocksize)
        self.hopsize = float(hopsize)
        # number of hops that fit into one block, minus one
        self.hopsize_correction = int(self.blocksize/self.hopsize)-1
        self.zero_padding_factor = zero_padding_factor
        self.bins_per_octave = bins_per_octave
        self.down_sample_fac = down_sampling_factor
        self.lower_mag_th_db = lower_mag_th_db
        self.pitch_margin = pitch_margin
        self.verbose = verbose
        self.visualize = visualize
        # maximum absolute deviation between neighbored frames
        # (.2 semitones, expressed in log-frequency bins)
        self.delta_bin_max = np.round(.2*self.bins_per_octave/12.)
        # initialize per-signal state; filled in by process()
        self.samples = None
        self.sample_rate = None
        self.tuning_frequency = None
        self.num_frames = 0
        self.num_samples = 0
        self.num_notes = 0
        self.global_f0_hz = None
def _down_sampling(self):
""" Signal downsampling
"""
if self.down_sample_fac != 1.:
self.samples = decimate(self.samples, int(1/self.down_sample_fac))
self.sample_rate *= self.down_sample_fac
def _define_time_axis(self):
""" Define global time axis, use frame centers for time stamps
"""
self.time_axis_samples = np.arange(self.num_frames) * self.hopsize + .5*self.blocksize
self.time_axis_sec = self.time_axis_samples / float(self.sample_rate)
def process(self,
samples,
sample_rate,
pitch,
onset,
duration,
tuning_frequency=440.):
""" Perform fundamental frequency tracking based on given score information (note parameters)
:param samples: (ndarray) Monaural sample vector
:param sample_rate: (int / float) Sample rate in Hz
:param pitch: (list / ndarray) Note MIDI pitch values
:param onset: (list / ndarray) Note onset times in secnods
:param duration: (list / ndarray) Note durations in secnods
:param tuning_frequency: (float) Tuning frequency (e.g. previously estimated using the pymus package)
:return: global_freq_hz: (ndarray) Frame-wise f0 values in Hz (0 where no contour is present)
:return: time_axis_sec: (ndarray) Corresponding time stamps in seconds
:return: contours (dict) Dict with contours parameters (size: num_contours) with following keys:
'f0_hz': (ndarray) frame-wise f0 values in Hz
'f0_cent_rel': (ndarray) frame-wise f0 deviations from annotated pitch in cent
't_sec': (ndarray) Time-stamps in sec
"""
# enforce numpy array format
if type(pitch) is not np.ndarray:
pitch = np.array(pitch)
if type(onset) is not np.ndarray:
onset = np.array(onset)
if type(pitch) is not np.ndarray:
duration = np.array(duration)
self.num_notes = len(pitch)
self.samples = samples
self.sample_rate = sample_rate
self.tuning_frequency = tuning_frequency
# note offsets
offset = np.array([onset[_] + duration[_] for _ in range(self.num_notes)])
# downsampling
self._down_sampling()
self.num_samples = len(self.samples)
self.num_frames = self.num_samples / self.hopsize
# set global time axis
self._define_time_axis()
self.global_f0_hz = np.zeros(len(self.time_axis_sec))
# iterate over all notes
contours = []
for n in range(self.num_notes):
if self.verbose:
if n % 20 == 1:
print('Process note {}/{} ...'.format(n, self.num_notes))
# track f0 contour for current note
f0_hz, f0_cent_rel, time_frame_idx = self._track_f0_contour(onset[n],
offset[n],
pitch[n])
# save f0 contour
curr_contour = dict()
curr_contour['f0_hz'] = f0_hz
curr_contour['f0_cent_rel'] = f0_cent_rel
curr_contour['t_sec'] = self.time_axis_sec[time_frame_idx]
contours.append(curr_contour)
# save contour values to global contour
self.global_f0_hz[time_frame_idx] = f0_hz
return self.global_f0_hz, self.time_axis_sec, contours
def _note_boundaries_in_global_time_axis(self, onset, offset):
""" Find note boundaries based on onset and offset time w.r.t. global time axis
:param onset: (float) Onset time in secnods
:param offset: (float) Offset time in seconds
:return: onset_frame_idx: (int) Note start frame index
:return: offset_frame_idx: (int) Note stop frame index
"""
start_sample = np.max([0, np.round(onset*self.sample_rate) - .5*self.blocksize])
end_sample = np.min([self.num_samples-1, np.round(offset*self.sample_rate) + .5*self.blocksize])
return int(round(start_sample/self.hopsize)), \
int(round(end_sample/self.hopsize))
def _scale_and_limit_spectrogram(self, spec):
""" Scale and limit magnitude spectrogram
:param spec: (2d ndarray) Magnitude spectrogram
:return: spec_scaled: (2d ndarray) Scaled magnitude spectrogram
"""
# scale spectrogram to [0,1]
spec -= np.min(spec)
spec /= np.max(spec)
# scale to dB magnitude
spec = 20*np.log10(spec + np.spacing(1))
# limit to lower threshold
spec[spec < -self.lower_mag_th_db] = -self.lower_mag_th_db
return spec
def _track_f0_contour(self,
                      onset,
                      offset,
                      pitch):
    """
    Score-informed note-wise f0 tracking using the reassigned spectrogram

    Reference:
        J. Abesser et al., Note-wise Fundamental Frequency Contours in Trumpet and Saxophone
        Jazz Improvisations, Proceedings of the DAFX conference, Erlangen, Germany, 2014

    Args:
        onset (float): Note onset
        offset (float): Note offset
        pitch (int): Note MIDI pitch

    Returns:
        f0_hz (ndarray): Frame-wise f0 values
        f0_cent_rel (ndarray): Frame-wise deviations of f0 from target pitch (given
                               by note MIDI pitch and tuning frequency) in cent
        frames (ndarray): Frame indices (of global time axis, see self.time_axis_samples)
    """
    start_frame, end_frame = self._note_boundaries_in_global_time_axis(onset, offset)

    # define logarithmically spaced frequency axis around the annotated pitch
    log_f_axis_midi = np.arange(pitch-self.pitch_margin,
                                pitch+self.pitch_margin+12./self.bins_per_octave,
                                12./self.bins_per_octave)
    log_f_axis_hz = self.tuning_frequency*2**((log_f_axis_midi-69.)/12.)

    # compute reassigned spectrogram over the note segment only
    start_sample = self.time_axis_samples[start_frame]
    end_sample = self.time_axis_samples[end_frame]
    spec, _, time_frame_sec, inst_freq_hz = Transformer.reassigned_spec(self.samples[start_sample:end_sample],
                                                                        self.blocksize,
                                                                        self.hopsize,
                                                                        sample_rate=self.sample_rate,
                                                                        freq_bin_hz=log_f_axis_hz,
                                                                        n_fft=self.zero_padding_factor*self.blocksize)
    # scale & limit spectrogram
    spec = self._scale_and_limit_spectrogram(spec)

    # fundamental frequency according to pitch & tuning frequency
    f0_target_hz = self.tuning_frequency*2**((pitch-69)/12.)

    # get absolute distance to f0 for all frame-wise magnitude maxima
    bin_max = np.argmax(spec, axis=0)
    abs_dist_to_f0_bin = np.abs(f0_target_hz - log_f_axis_hz[bin_max])

    # find optimal starting frame for forwards-backwards tracking (look
    # for frame with maximum peak closest to the annotated f0 value)
    # (don't use argmin here since there could be multiple bins of interest)
    start_frame_tracking = (abs_dist_to_f0_bin == np.min(abs_dist_to_f0_bin)).nonzero()[0]

    # if we have multiple potential start positions -> take peak with highest magnitude
    if len(start_frame_tracking) > 1:
        mag = spec[bin_max[start_frame_tracking], start_frame_tracking]
        start_frame_tracking = start_frame_tracking[np.argmax(mag)]

    # perform forwards-backwards tracking based on continuity assumption
    f0_bin = self._forwards_backwards_tracking(spec, start_frame_tracking, bin_max[start_frame_tracking], self.delta_bin_max)

    # frame-wise fundamental frequency values
    f0_hz = log_f_axis_hz[f0_bin]

    # frame-wise f0 deviation from annotated pitch in cent
    f0_cent_rel = (f0_bin - self.pitch_margin*self.bins_per_octave/12.)*1200./self.bins_per_octave

    # plot tracking results over spectrogram
    if self.visualize:
        import matplotlib.pyplot as plt
        plt.matshow(spec)
        # plt.hold() was removed in matplotlib 3.0; since matplotlib 2.0 the
        # hold behavior is the default, so plotting on top of matshow just works.
        plt.plot(bin_max, 'r-')
        plt.show(block=False)

    return f0_hz, f0_cent_rel, np.arange(start_frame, start_frame + len(f0_hz))
def _forwards_backwards_tracking(self, spec, idx_start, bin_start, delta_bin_max):
""" Forwards-backwards tracking of peak contour in a given magnitude spectrogram using a start position and a
proximity criterion
:param spec: (2d ndarray) Magnitude spectrogram (num_bins x num_frames)
:param idx_start: (int) Start frame index
:param bin_start: (int) Start frequency bin
:param delta_bin_max: (int) (onse-sided) search margin from one to the next frame
:return: peak_bin: (ndarray) Peak bin positions (num_frames)
"""
num_bins, num_frames = spec.shape
peak_bin = np.zeros(num_frames)
peak_bin[idx_start] = bin_start
# backwards tracking
if idx_start > 0:
for f in range(idx_start-1, -1, -1):
peak_bin[f] = self._find_proximate_peak(spec[:, f],
peak_bin[f+1],
delta_bin_max)
# forwards tracking
if idx_start < num_frames - 1:
for f in range(idx_start+1, num_frames):
peak_bin[f] = self._find_proximate_peak(spec[:, f],
peak_bin[f-1],
delta_bin_max)
return peak_bin.astype(int)
def _find_proximate_peak(self, spec, peak_bin_ref, delta_bin_max):
""" Find peak in spectogram frame based on proximity towards peak bin from previous frame (peak tracking using
proximity criterion)
:param spec: (ndarray) Magnitude spectrogram frame
:param peak_bin_ref: (int) Reference peak bin from previous frame
:param delta_bin_max: (int) (one-sided) search margin around peak_bin_ref
:return: peak_bin (int) Peak bin position in current frame
"""
min_bin, max_bin = self._get_margin_bin_boundaries(peak_bin_ref,
delta_bin_max,
len(spec))
return min_bin + np.argmax(spec[min_bin:max_bin+1])
@staticmethod
def _get_margin_bin_boundaries(mid_bin, bin_margin, num_bins):
""" Find lower and upper bin boundaries based on given middle bin (mid_bin), a bin margin (bin_margin) and the
axis boundaries (0, max_bin)
:param mid_bin: (int) Middle bin
:param bin_margin: (int) (one-sided) bin margin
:param num_bins: (int) Number of bins in axis
:return: min_bin: (int) Lower boundary bin index
:return: max_bin: (int) Upper boundary bin index
"""
return int(np.max((0, mid_bin-bin_margin))), int(np.min((num_bins-1, mid_bin+bin_margin)))
|
bonitadecker77/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/test/process_reader.py
|
166
|
"""Script used by test_process.TestTwoProcesses"""
# run until stdin is closed, then quit
import sys
while 1:
d = sys.stdin.read()
if len(d) == 0:
sys.exit(0)
|
ivan-fedorov/intellij-community
|
refs/heads/master
|
python/testData/intentions/beforeParamTypeInGoogleDocStringOnlySummary.py
|
106
|
def f(<caret>x, y):
"""
Summary.
"""
|
dietrichc/streamline-ppc-reports
|
refs/heads/master
|
examples/dfp/v201411/inventory_service/create_ad_units.py
|
4
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new ad units.
To determine which ad units exist, run get_all_ad_units.py
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: InventoryService.createAdUnits
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
import uuid
# Import appropriate modules from the client library.
from googleads import dfp
PARENT_AD_UNIT_ID = 'INSERT_AD_UNIT_ID_HERE'
def main(client, parent_id):
    """Creates a single 300x250 web banner ad unit under the given parent.

    Args:
      client: an initialized DfpClient instance.
      parent_id: ID of the parent ad unit to create the new unit under.
    """
    # Initialize appropriate service.
    inventory_service = client.GetService('InventoryService', version='v201411')

    # A single 300x250 browser (web) size for the new ad unit.
    size = {
        'width': '300',
        'height': '250'
    }
    ad_unit_size = {'size': size, 'environmentType': 'BROWSER'}

    # Describe the ad unit to create; the uuid suffix keeps names unique.
    new_ad_unit = {
        'name': 'Ad_unit_%s' % uuid.uuid4(),
        'parentId': parent_id,
        'description': 'Ad unit description.',
        'targetWindow': 'BLANK',
        'targetPlatform': 'WEB',
        'adUnitSizes': [ad_unit_size]
    }

    # Add ad units.
    created_units = inventory_service.createAdUnits([new_ad_unit])

    # Display results.
    for created in created_units:
        print ('Ad unit with ID \'%s\' and name \'%s\' was created.'
               % (created['id'], created['name']))
if __name__ == '__main__':
  # Initialize client object.
  # LoadFromStorage reads credentials from "googleads.yaml" (home dir by default).
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, PARENT_AD_UNIT_ID)
|
repotvsupertuga/tvsupertuga.repository
|
refs/heads/master
|
script.module.cryptolib/lib/Crypto/Hash/SHA.py
|
16
|
# -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
# This file exists for backward compatibility with old code that refers to
# Crypto.Hash.SHA
from Crypto.Hash.SHA1 import __doc__, new, block_size, digest_size
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
crash/342_test_sys_settrace.py
|
99
|
# Testing the line trace facility.
from test import support
import unittest
import sys
import difflib
import gc
# A very basic example. If this fails, we're in deep trouble.
def basic():
    return 1

# Each fixture's .events lists expected (line offset from co_firstlineno, event)
# pairs; see TraceTestCase.compare_events.  NOTE: inserting or removing lines
# inside any fixture function body would change the expected offsets.
basic.events = [(0, 'call'),
                (1, 'line'),
                (1, 'return')]

# Many of the tests below are tricky because they involve pass statements.
# If there is implicit control flow around a pass statement (in an except
# clause or else clause) under what conditions do you set a line number
# following that clause?


# The entire "while 0:" statement is optimized away.  No code
# exists for it, so the line numbers skip directly from "del x"
# to "x = 1".
def arigo_example():
    x = 1
    del x
    while 0:
        pass
    x = 1

arigo_example.events = [(0, 'call'),
                        (1, 'line'),
                        (2, 'line'),
                        (5, 'line'),
                        (5, 'return')]

# check that lines consisting of just one instruction get traced:
def one_instr_line():
    x = 1
    del x
    x = 1

one_instr_line.events = [(0, 'call'),
                         (1, 'line'),
                         (2, 'line'),
                         (3, 'line'),
                         (3, 'return')]
# Inline "# N" comments mark each line's offset from the def line.
def no_pop_tops():      # 0
    x = 1               # 1
    for a in range(2):  # 2
        if a:           # 3
            x = 1       # 4
        else:           # 5
            x = 1       # 6

no_pop_tops.events = [(0, 'call'),
                      (1, 'line'),
                      (2, 'line'),
                      (3, 'line'),
                      (6, 'line'),
                      (2, 'line'),
                      (3, 'line'),
                      (4, 'line'),
                      (2, 'line'),
                      (2, 'return')]

def no_pop_blocks():
    y = 1
    while not y:
        bla
    x = 1

no_pop_blocks.events = [(0, 'call'),
                        (1, 'line'),
                        (2, 'line'),
                        (4, 'line'),
                        (4, 'return')]

# Negative offsets below are relative to call()'s first line, since called()
# is defined three lines earlier in the file.
def called(): # line -3
    x = 1

def call():   # line 0
    called()

call.events = [(0, 'call'),
               (1, 'line'),
               (-3, 'call'),
               (-2, 'line'),
               (-2, 'return'),
               (1, 'return')]
def raises():
    raise Exception

def test_raise():
    try:
        raises()
    except Exception as exc:
        x = 1

# Offsets are relative to test_raise(); negative ones land inside raises().
test_raise.events = [(0, 'call'),
                     (1, 'line'),
                     (2, 'line'),
                     (-3, 'call'),
                     (-2, 'line'),
                     (-2, 'exception'),
                     (-2, 'return'),
                     (2, 'exception'),
                     (3, 'line'),
                     (4, 'line'),
                     (4, 'return')]

# Installs the trace function from inside the traced call, so only the tail
# of the call is recorded (driven via TraceTestCase.run_test2).
def _settrace_and_return(tracefunc):
    sys.settrace(tracefunc)
    sys._getframe().f_back.f_trace = tracefunc

def settrace_and_return(tracefunc):
    _settrace_and_return(tracefunc)

settrace_and_return.events = [(1, 'return')]

def _settrace_and_raise(tracefunc):
    sys.settrace(tracefunc)
    sys._getframe().f_back.f_trace = tracefunc
    raise RuntimeError

def settrace_and_raise(tracefunc):
    try:
        _settrace_and_raise(tracefunc)
    except RuntimeError as exc:
        pass

settrace_and_raise.events = [(2, 'exception'),
                             (3, 'line'),
                             (4, 'line'),
                             (4, 'return')]
# implicit return example
# This test is interesting because of the else: pass
# part of the code.  The code generate for the true
# part of the if contains a jump past the else branch.
# The compiler then generates an implicit "return None"
# Internally, the compiler visits the pass statement
# and stores its line number for use on the next instruction.
# The next instruction is the implicit return None.
def ireturn_example():
    a = 5
    b = 5
    if a == b:
        b = a+1
    else:
        pass

ireturn_example.events = [(0, 'call'),
                          (1, 'line'),
                          (2, 'line'),
                          (3, 'line'),
                          (4, 'line'),
                          (6, 'line'),
                          (6, 'return')]

# Tight loop with while(1) example (SF #765624)
def tightloop_example():
    items = range(0, 3)
    try:
        i = 0
        while 1:
            b = items[i]; i+=1
    except IndexError:
        pass

tightloop_example.events = [(0, 'call'),
                            (1, 'line'),
                            (2, 'line'),
                            (3, 'line'),
                            (4, 'line'),
                            (5, 'line'),
                            (5, 'line'),
                            (5, 'line'),
                            (5, 'line'),
                            (5, 'exception'),
                            (6, 'line'),
                            (7, 'line'),
                            (7, 'return')]

def tighterloop_example():
    items = range(1, 4)
    try:
        i = 0
        while 1: i = items[i]
    except IndexError:
        pass

tighterloop_example.events = [(0, 'call'),
                              (1, 'line'),
                              (2, 'line'),
                              (3, 'line'),
                              (4, 'line'),
                              (4, 'line'),
                              (4, 'line'),
                              (4, 'line'),
                              (4, 'exception'),
                              (5, 'line'),
                              (6, 'line'),
                              (6, 'return')]

def generator_function():
    try:
        yield True
        "continued"
    finally:
        "finally"

def generator_example():
    # any() will leave the generator before its end
    x = any(generator_function())

    # the following lines were not traced
    for x in range(10):
        y = x

# Negative offsets land inside generator_function(), defined six lines above.
generator_example.events = ([(0, 'call'),
                             (2, 'line'),
                             (-6, 'call'),
                             (-5, 'line'),
                             (-4, 'line'),
                             (-4, 'return'),
                             (-4, 'call'),
                             (-4, 'exception'),
                             (-1, 'line'),
                             (-1, 'return')] +
                            [(5, 'line'), (6, 'line')] * 10 +
                            [(5, 'line'), (5, 'return')])
class Tracer:
    """Trace function object that records (lineno, event) pairs for comparison."""

    def __init__(self):
        self.events = []

    def trace(self, frame, event, arg):
        """Standard local trace callback: log the event, stay installed."""
        self.events.append((frame.f_lineno, event))
        return self.trace

    def traceWithGenexp(self, frame, event, arg):
        """Like trace(), but first runs a generator expression (issue #1265)."""
        (o for o in [1])
        self.events.append((frame.f_lineno, event))
        return self.trace
class TraceTestCase(unittest.TestCase):
    """Checks that sys.settrace() reports the expected (line, event) stream."""

    # Disable gc collection when tracing, otherwise the
    # deallocators may be traced as well.
    def setUp(self):
        self.using_gc = gc.isenabled()
        gc.disable()
        self.addCleanup(sys.settrace, sys.gettrace())

    def tearDown(self):
        if self.using_gc:
            gc.enable()

    def compare_events(self, line_offset, events, expected_events):
        """Fail with a readable ndiff if recorded events differ from expected."""
        events = [(l - line_offset, e) for (l, e) in events]
        if events != expected_events:
            self.fail(
                "events did not match expectation:\n" +
                "\n".join(difflib.ndiff([str(x) for x in expected_events],
                                        [str(x) for x in events])))

    def run_and_compare(self, func, events):
        """Trace func() and compare the recorded events against `events`."""
        tracer = Tracer()
        sys.settrace(tracer.trace)
        func()
        sys.settrace(None)
        self.compare_events(func.__code__.co_firstlineno,
                            tracer.events, events)

    def run_test(self, func):
        """Trace func() and compare against its `events` attribute."""
        self.run_and_compare(func, func.events)

    def run_test2(self, func):
        """Like run_test, but func installs the trace function itself."""
        tracer = Tracer()
        func(tracer.trace)
        sys.settrace(None)
        self.compare_events(func.__code__.co_firstlineno,
                            tracer.events, func.events)

    def test_set_and_retrieve_none(self):
        sys.settrace(None)
        assert sys.gettrace() is None

    def test_set_and_retrieve_func(self):
        def fn(*args):
            pass

        sys.settrace(fn)
        try:
            assert sys.gettrace() is fn
        finally:
            sys.settrace(None)

    def test_01_basic(self):
        self.run_test(basic)
    def test_02_arigo(self):
        self.run_test(arigo_example)
    def test_03_one_instr(self):
        self.run_test(one_instr_line)
    def test_04_no_pop_blocks(self):
        self.run_test(no_pop_blocks)
    def test_05_no_pop_tops(self):
        self.run_test(no_pop_tops)
    def test_06_call(self):
        self.run_test(call)
    def test_07_raise(self):
        self.run_test(test_raise)
    def test_08_settrace_and_return(self):
        self.run_test2(settrace_and_return)
    def test_09_settrace_and_raise(self):
        self.run_test2(settrace_and_raise)
    def test_10_ireturn(self):
        self.run_test(ireturn_example)
    def test_11_tightloop(self):
        self.run_test(tightloop_example)
    def test_12_tighterloop(self):
        self.run_test(tighterloop_example)

    def test_13_genexp(self):
        self.run_test(generator_example)
        # issue1265: if the trace function contains a generator,
        # and if the traced function contains another generator
        # that is not completely exhausted, the trace stopped.
        # Worse: the 'finally' clause was not invoked.
        tracer = Tracer()
        sys.settrace(tracer.traceWithGenexp)
        generator_example()
        sys.settrace(None)
        self.compare_events(generator_example.__code__.co_firstlineno,
                            tracer.events, generator_example.events)

    def test_14_onliner_if(self):
        # NOTE: the expected offsets below depend on the exact line layout of
        # the inner fixture functions -- do not insert lines in their bodies.
        def onliners():
            if True: False
            else: True
            return 0
        self.run_and_compare(
            onliners,
            [(0, 'call'),
             (1, 'line'),
             (3, 'line'),
             (3, 'return')])

    def test_15_loops(self):
        # issue1750076: "while" expression is skipped by debugger
        def for_example():
            for x in range(2):
                pass
        self.run_and_compare(
            for_example,
            [(0, 'call'),
             (1, 'line'),
             (2, 'line'),
             (1, 'line'),
             (2, 'line'),
             (1, 'line'),
             (1, 'return')])

        def while_example():
            # While expression should be traced on every loop
            x = 2
            while x > 0:
                x -= 1
        self.run_and_compare(
            while_example,
            [(0, 'call'),
             (2, 'line'),
             (3, 'line'),
             (4, 'line'),
             (3, 'line'),
             (4, 'line'),
             (3, 'line'),
             (3, 'return')])

    def test_16_blank_lines(self):
        namespace = {}
        exec("def f():\n" + "\n" * 256 + " pass", namespace)
        self.run_and_compare(
            namespace["f"],
            [(0, 'call'),
             (257, 'line'),
             (257, 'return')])
class RaisingTraceFuncTestCase(unittest.TestCase):
    """Checks that exceptions raised by the trace function itself are handled."""

    def setUp(self):
        self.addCleanup(sys.settrace, sys.gettrace())

    def trace(self, frame, event, arg):
        """A trace function that raises an exception in response to a
        specific trace event."""
        if event == self.raiseOnEvent:
            raise ValueError # just something that isn't RuntimeError
        else:
            return self.trace

    def f(self):
        """The function to trace; raises an exception if that's the case
        we're testing, so that the 'exception' trace event fires."""
        if self.raiseOnEvent == 'exception':
            x = 0
            y = 1/x
        else:
            return 1

    def run_test_for_event(self, event):
        """Tests that an exception raised in response to the given event is
        handled OK."""
        self.raiseOnEvent = event
        try:
            # one more iteration than the recursion limit: proves the
            # interpreter's recursion counter is reset after each failure
            for i in range(sys.getrecursionlimit() + 1):
                sys.settrace(self.trace)
                try:
                    self.f()
                except ValueError:
                    pass
                else:
                    self.fail("exception not raised!")
        except RuntimeError:
            self.fail("recursion counter not reset")

    # Test the handling of exceptions raised by each kind of trace event.
    def test_call(self):
        self.run_test_for_event('call')
    def test_line(self):
        self.run_test_for_event('line')
    def test_return(self):
        self.run_test_for_event('return')
    def test_exception(self):
        self.run_test_for_event('exception')

    def test_trash_stack(self):
        def f():
            for i in range(5):
                print(i)  # line tracing will raise an exception at this line

        def g(frame, why, extra):
            if (why == 'line' and
                frame.f_lineno == f.__code__.co_firstlineno + 2):
                raise RuntimeError("i am crashing")
            return g

        sys.settrace(g)
        try:
            f()
        except RuntimeError:
            # the test is really that this doesn't segfault:
            import gc
            gc.collect()
        else:
            self.fail("exception not propagated")

    def test_exception_arguments(self):
        def f():
            x = 0
            # this should raise an error
            x.no_such_attr
        def g(frame, event, arg):
            # arg for an 'exception' event is the (type, value, traceback) triple
            if (event == 'exception'):
                type, exception, trace = arg
                self.assertIsInstance(exception, Exception)
            return g

        existing = sys.gettrace()
        try:
            sys.settrace(g)
            try:
                f()
            except AttributeError:
                # this is expected
                pass
        finally:
            sys.settrace(existing)
# 'Jump' tests: assigning to frame.f_lineno within a trace function
# moves the execution position - it's how debuggers implement a Jump
# command (aka. "Set next statement").
class JumpTracer:
    """Defines a trace function that jumps from one place to another,
    with the source and destination lines of the jump being defined by
    the 'jump' property of the function under test."""

    def __init__(self, function):
        self.function = function
        # jump = (source offset, destination offset), relative to co_firstlineno
        self.jumpFrom = function.jump[0]
        self.jumpTo = function.jump[1]
        self.done = False

    def trace(self, frame, event, arg):
        # only act once, and only inside the function under test
        if not self.done and frame.f_code == self.function.__code__:
            firstLine = frame.f_code.co_firstlineno
            if event == 'line' and frame.f_lineno == firstLine + self.jumpFrom:
                # Cope with non-integer self.jumpTo (because of
                # no_jump_to_non_integers below).
                try:
                    frame.f_lineno = firstLine + self.jumpTo
                except TypeError:
                    frame.f_lineno = self.jumpTo
                self.done = True
        return self.trace
# The first set of 'jump' tests are for things that are allowed:
# each fixture carries .jump = (from, to) line offsets and .output = the list
# contents expected after the jump.  NOTE: inserting lines inside any fixture
# body would change the offsets.

def jump_simple_forwards(output):
    output.append(1)
    output.append(2)
    output.append(3)

jump_simple_forwards.jump = (1, 3)
jump_simple_forwards.output = [3]

def jump_simple_backwards(output):
    output.append(1)
    output.append(2)

jump_simple_backwards.jump = (2, 1)
jump_simple_backwards.output = [1, 1, 2]

def jump_out_of_block_forwards(output):
    for i in 1, 2:
        output.append(2)
        for j in [3]:  # Also tests jumping over a block
            output.append(4)
    output.append(5)

jump_out_of_block_forwards.jump = (3, 5)
jump_out_of_block_forwards.output = [2, 5]

def jump_out_of_block_backwards(output):
    output.append(1)
    for i in [1]:
        output.append(3)
        for j in [2]:  # Also tests jumping over a block
            output.append(5)
        output.append(6)
    output.append(7)

jump_out_of_block_backwards.jump = (6, 1)
jump_out_of_block_backwards.output = [1, 3, 5, 1, 3, 5, 6, 7]

def jump_to_codeless_line(output):
    output.append(1)
    # Jumping to this line should skip to the next one.
    output.append(3)

jump_to_codeless_line.jump = (1, 2)
jump_to_codeless_line.output = [3]

def jump_to_same_line(output):
    output.append(1)
    output.append(2)
    output.append(3)

jump_to_same_line.jump = (2, 2)
jump_to_same_line.output = [1, 2, 3]

# Tests jumping within a finally block, and over one.
def jump_in_nested_finally(output):
    try:
        output.append(2)
    finally:
        output.append(4)
        try:
            output.append(6)
        finally:
            output.append(8)
        output.append(9)

jump_in_nested_finally.jump = (4, 9)
jump_in_nested_finally.output = [2, 9]
# The second set of 'jump' tests are for things that are not allowed:
# the jump raises ValueError, and the fixture asserts (via .output) that the
# error message mentions the expected keyword.

def no_jump_too_far_forwards(output):
    try:
        output.append(2)
        output.append(3)
    except ValueError as e:
        output.append('after' in str(e))

no_jump_too_far_forwards.jump = (3, 6)
no_jump_too_far_forwards.output = [2, True]

def no_jump_too_far_backwards(output):
    try:
        output.append(2)
        output.append(3)
    except ValueError as e:
        output.append('before' in str(e))

no_jump_too_far_backwards.jump = (3, -1)
no_jump_too_far_backwards.output = [2, True]

# Test each kind of 'except' line.
def no_jump_to_except_1(output):
    try:
        output.append(2)
    except:
        e = sys.exc_info()[1]
        output.append('except' in str(e))

no_jump_to_except_1.jump = (2, 3)
no_jump_to_except_1.output = [True]

def no_jump_to_except_2(output):
    try:
        output.append(2)
    except ValueError:
        e = sys.exc_info()[1]
        output.append('except' in str(e))

no_jump_to_except_2.jump = (2, 3)
no_jump_to_except_2.output = [True]

def no_jump_to_except_3(output):
    try:
        output.append(2)
    except ValueError as e:
        output.append('except' in str(e))

no_jump_to_except_3.jump = (2, 3)
no_jump_to_except_3.output = [True]

def no_jump_to_except_4(output):
    try:
        output.append(2)
    except (ValueError, RuntimeError) as e:
        output.append('except' in str(e))

no_jump_to_except_4.jump = (2, 3)
no_jump_to_except_4.output = [True]

def no_jump_forwards_into_block(output):
    try:
        output.append(2)
        for i in 1, 2:
            output.append(4)
    except ValueError as e:
        output.append('into' in str(e))

no_jump_forwards_into_block.jump = (2, 4)
no_jump_forwards_into_block.output = [True]

def no_jump_backwards_into_block(output):
    try:
        for i in 1, 2:
            output.append(3)
        output.append(4)
    except ValueError as e:
        output.append('into' in str(e))

no_jump_backwards_into_block.jump = (4, 3)
no_jump_backwards_into_block.output = [3, 3, True]

def no_jump_into_finally_block(output):
    try:
        try:
            output.append(3)
            x = 1
        finally:
            output.append(6)
    except ValueError as e:
        output.append('finally' in str(e))

no_jump_into_finally_block.jump = (4, 6)
no_jump_into_finally_block.output = [3, 6, True]  # The 'finally' still runs

def no_jump_out_of_finally_block(output):
    try:
        try:
            output.append(3)
        finally:
            output.append(5)
            output.append(6)
    except ValueError as e:
        output.append('finally' in str(e))

no_jump_out_of_finally_block.jump = (5, 1)
no_jump_out_of_finally_block.output = [3, True]

# This verifies the line-numbers-must-be-integers rule.
def no_jump_to_non_integers(output):
    try:
        output.append(2)
    except ValueError as e:
        output.append('integer' in str(e))

no_jump_to_non_integers.jump = (2, "Spam")
no_jump_to_non_integers.output = [True]

def jump_across_with(output):
    with open(support.TESTFN, "wb") as fp:
        pass
    with open(support.TESTFN, "wb") as fp:
        pass
jump_across_with.jump = (1, 3)
jump_across_with.output = []

# This verifies that you can't set f_lineno via _getframe or similar
# trickery.
def no_jump_without_trace_function():
    try:
        previous_frame = sys._getframe().f_back
        previous_frame.f_lineno = previous_frame.f_lineno
    except ValueError as e:
        # This is the exception we wanted; make sure the error message
        # talks about trace functions.
        if 'trace' not in str(e):
            raise
    else:
        # Something's wrong - the expected exception wasn't raised.
        raise RuntimeError("Trace-function-less jump failed to fail")
class JumpTestCase(unittest.TestCase):
    """Runs each jump fixture under JumpTracer and checks the produced output."""

    def setUp(self):
        self.addCleanup(sys.settrace, sys.gettrace())
        sys.settrace(None)

    def compare_jump_output(self, expected, received):
        if received != expected:
            self.fail( "Outputs don't match:\n" +
                       "Expected: " + repr(expected) + "\n" +
                       "Received: " + repr(received))

    def run_test(self, func):
        """Trace func(output) with a JumpTracer and compare against func.output."""
        tracer = JumpTracer(func)
        sys.settrace(tracer.trace)
        output = []
        func(output)
        sys.settrace(None)
        self.compare_jump_output(func.output, output)

    def test_01_jump_simple_forwards(self):
        self.run_test(jump_simple_forwards)
    def test_02_jump_simple_backwards(self):
        self.run_test(jump_simple_backwards)
    def test_03_jump_out_of_block_forwards(self):
        self.run_test(jump_out_of_block_forwards)
    def test_04_jump_out_of_block_backwards(self):
        self.run_test(jump_out_of_block_backwards)
    def test_05_jump_to_codeless_line(self):
        self.run_test(jump_to_codeless_line)
    def test_06_jump_to_same_line(self):
        self.run_test(jump_to_same_line)
    def test_07_jump_in_nested_finally(self):
        self.run_test(jump_in_nested_finally)
    def test_08_no_jump_too_far_forwards(self):
        self.run_test(no_jump_too_far_forwards)
    def test_09_no_jump_too_far_backwards(self):
        self.run_test(no_jump_too_far_backwards)
    def test_10_no_jump_to_except_1(self):
        self.run_test(no_jump_to_except_1)
    def test_11_no_jump_to_except_2(self):
        self.run_test(no_jump_to_except_2)
    def test_12_no_jump_to_except_3(self):
        self.run_test(no_jump_to_except_3)
    def test_13_no_jump_to_except_4(self):
        self.run_test(no_jump_to_except_4)
    def test_14_no_jump_forwards_into_block(self):
        self.run_test(no_jump_forwards_into_block)
    def test_15_no_jump_backwards_into_block(self):
        self.run_test(no_jump_backwards_into_block)
    def test_16_no_jump_into_finally_block(self):
        self.run_test(no_jump_into_finally_block)
    def test_17_no_jump_out_of_finally_block(self):
        self.run_test(no_jump_out_of_finally_block)
    def test_18_no_jump_to_non_integers(self):
        self.run_test(no_jump_to_non_integers)
    def test_19_no_jump_without_trace_function(self):
        # Must set sys.settrace(None) in setUp(), else condition is not
        # triggered.
        no_jump_without_trace_function()
    def test_jump_across_with(self):
        self.addCleanup(support.unlink, support.TESTFN)
        self.run_test(jump_across_with)

    def test_20_large_function(self):
        # the exec'd source encodes its own line offsets -- do not reflow it
        d = {}
        exec("""def f(output): # line 0
    x = 0 # line 1
    y = 1 # line 2
    ''' # line 3
    %s # lines 4-1004
    ''' # line 1005
    x += 1 # line 1006
    output.append(x) # line 1007
    return""" % ('\n' * 1000,), d)
        f = d['f']
        f.jump = (2, 1007)
        f.output = [0]
        self.run_test(f)

    def test_jump_to_firstlineno(self):
        # This tests that PDB can jump back to the first line in a
        # file. See issue #1689458. It can only be triggered in a
        # function call if the function is defined on a single line.
        code = compile("""
# Comments don't count.
output.append(2) # firstlineno is here.
output.append(3)
output.append(4)
""", "<fake module>", "exec")
        class fake_function:
            __code__ = code
            jump = (2, 0)
        tracer = JumpTracer(fake_function)
        sys.settrace(tracer.trace)
        namespace = {"output": []}
        exec(code, namespace)
        sys.settrace(None)
        self.compare_jump_output([2, 3, 2, 3, 4], namespace["output"])
def test_main():
    """Entry point used by regrtest: run all three test case classes."""
    support.run_unittest(
        TraceTestCase,
        RaisingTraceFuncTestCase,
        JumpTestCase
    )

if __name__ == "__main__":
    test_main()
|
andrewsosa/hackfsu_com
|
refs/heads/master
|
api/webapp/views/links/index.py
|
2
|
"""
Quick links
"""
from hackfsu_com.views.generic import PageView
class LinksPage(PageView):
    """Static quick-links page; the PageView base class handles rendering."""
    # template rendered by the generic PageView on GET
    template_name = 'links/index.html'
|
WillisXChen/django-oscar
|
refs/heads/master
|
oscar/lib/python2.7/site-packages/PIL/WalImageFile.py
|
52
|
# The Python Imaging Library.
# $Id$
#
# WAL file handling
#
# History:
# 2003-04-23 fl created
#
# Copyright (c) 2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
# NOTE: This format cannot be automatically recognized, so the reader
# is not registered for use with Image.open(). To open a WAL file, use
# the WalImageFile.open() function instead.
# This reader is based on the specification available from:
# http://www.flipcode.com/archives/Quake_2_BSP_File_Format.shtml
# and has been tested with a few sample files found using google.
from __future__ import print_function
from PIL import Image, _binary
try:
import builtins
except ImportError:
import __builtin__
builtins = __builtin__
i32 = _binary.i32le
##
# Load texture from a Quake2 WAL texture file.
# <p>
# By default, a Quake2 standard palette is attached to the texture.
# To override the palette, use the <b>putpalette</b> method.
#
# @param filename WAL file name, or an opened file handle.
# @return An image instance.
def open(filename):
    """Load texture from a Quake2 WAL texture file.

    By default, a Quake2 standard palette is attached to the texture.
    To override the palette, use the <b>putpalette</b> method.

    @param filename WAL file name, or an opened file handle.
    @return An image instance.
    """
    # FIXME: modify to return a WalImageFile instance instead of
    # plain Image object ?

    if hasattr(filename, "read"):
        fp = filename
        close_fp = False  # caller owns the handle; leave it open
    else:
        fp = builtins.open(filename, "rb")
        close_fp = True  # we opened it, so we must close it (was leaked before)

    try:
        # read header fields
        header = fp.read(32+24+32+12)
        size = i32(header, 32), i32(header, 36)
        offset = i32(header, 40)

        # load pixel data
        fp.seek(offset)
        im = Image.frombytes("P", size, fp.read(size[0] * size[1]))
    finally:
        if close_fp:
            fp.close()

    im.putpalette(quake2palette)

    im.format = "WAL"
    im.format_description = "Quake2 Texture"

    # strings are null-terminated
    im.info["name"] = header[:32].split(b"\0", 1)[0]

    next_name = header[56:56+32].split(b"\0", 1)[0]
    if next_name:
        im.info["next_name"] = next_name

    return im
quake2palette = (
# default palette taken from piffo 0.93 by Hans Häggström
b"\x01\x01\x01\x0b\x0b\x0b\x12\x12\x12\x17\x17\x17\x1b\x1b\x1b\x1e"
b"\x1e\x1e\x22\x22\x22\x26\x26\x26\x29\x29\x29\x2c\x2c\x2c\x2f\x2f"
b"\x2f\x32\x32\x32\x35\x35\x35\x37\x37\x37\x3a\x3a\x3a\x3c\x3c\x3c"
b"\x24\x1e\x13\x22\x1c\x12\x20\x1b\x12\x1f\x1a\x10\x1d\x19\x10\x1b"
b"\x17\x0f\x1a\x16\x0f\x18\x14\x0d\x17\x13\x0d\x16\x12\x0d\x14\x10"
b"\x0b\x13\x0f\x0b\x10\x0d\x0a\x0f\x0b\x0a\x0d\x0b\x07\x0b\x0a\x07"
b"\x23\x23\x26\x22\x22\x25\x22\x20\x23\x21\x1f\x22\x20\x1e\x20\x1f"
b"\x1d\x1e\x1d\x1b\x1c\x1b\x1a\x1a\x1a\x19\x19\x18\x17\x17\x17\x16"
b"\x16\x14\x14\x14\x13\x13\x13\x10\x10\x10\x0f\x0f\x0f\x0d\x0d\x0d"
b"\x2d\x28\x20\x29\x24\x1c\x27\x22\x1a\x25\x1f\x17\x38\x2e\x1e\x31"
b"\x29\x1a\x2c\x25\x17\x26\x20\x14\x3c\x30\x14\x37\x2c\x13\x33\x28"
b"\x12\x2d\x24\x10\x28\x1f\x0f\x22\x1a\x0b\x1b\x14\x0a\x13\x0f\x07"
b"\x31\x1a\x16\x30\x17\x13\x2e\x16\x10\x2c\x14\x0d\x2a\x12\x0b\x27"
b"\x0f\x0a\x25\x0f\x07\x21\x0d\x01\x1e\x0b\x01\x1c\x0b\x01\x1a\x0b"
b"\x01\x18\x0a\x01\x16\x0a\x01\x13\x0a\x01\x10\x07\x01\x0d\x07\x01"
b"\x29\x23\x1e\x27\x21\x1c\x26\x20\x1b\x25\x1f\x1a\x23\x1d\x19\x21"
b"\x1c\x18\x20\x1b\x17\x1e\x19\x16\x1c\x18\x14\x1b\x17\x13\x19\x14"
b"\x10\x17\x13\x0f\x14\x10\x0d\x12\x0f\x0b\x0f\x0b\x0a\x0b\x0a\x07"
b"\x26\x1a\x0f\x23\x19\x0f\x20\x17\x0f\x1c\x16\x0f\x19\x13\x0d\x14"
b"\x10\x0b\x10\x0d\x0a\x0b\x0a\x07\x33\x22\x1f\x35\x29\x26\x37\x2f"
b"\x2d\x39\x35\x34\x37\x39\x3a\x33\x37\x39\x30\x34\x36\x2b\x31\x34"
b"\x27\x2e\x31\x22\x2b\x2f\x1d\x28\x2c\x17\x25\x2a\x0f\x20\x26\x0d"
b"\x1e\x25\x0b\x1c\x22\x0a\x1b\x20\x07\x19\x1e\x07\x17\x1b\x07\x14"
b"\x18\x01\x12\x16\x01\x0f\x12\x01\x0b\x0d\x01\x07\x0a\x01\x01\x01"
b"\x2c\x21\x21\x2a\x1f\x1f\x29\x1d\x1d\x27\x1c\x1c\x26\x1a\x1a\x24"
b"\x18\x18\x22\x17\x17\x21\x16\x16\x1e\x13\x13\x1b\x12\x12\x18\x10"
b"\x10\x16\x0d\x0d\x12\x0b\x0b\x0d\x0a\x0a\x0a\x07\x07\x01\x01\x01"
b"\x2e\x30\x29\x2d\x2e\x27\x2b\x2c\x26\x2a\x2a\x24\x28\x29\x23\x27"
b"\x27\x21\x26\x26\x1f\x24\x24\x1d\x22\x22\x1c\x1f\x1f\x1a\x1c\x1c"
b"\x18\x19\x19\x16\x17\x17\x13\x13\x13\x10\x0f\x0f\x0d\x0b\x0b\x0a"
b"\x30\x1e\x1b\x2d\x1c\x19\x2c\x1a\x17\x2a\x19\x14\x28\x17\x13\x26"
b"\x16\x10\x24\x13\x0f\x21\x12\x0d\x1f\x10\x0b\x1c\x0f\x0a\x19\x0d"
b"\x0a\x16\x0b\x07\x12\x0a\x07\x0f\x07\x01\x0a\x01\x01\x01\x01\x01"
b"\x28\x29\x38\x26\x27\x36\x25\x26\x34\x24\x24\x31\x22\x22\x2f\x20"
b"\x21\x2d\x1e\x1f\x2a\x1d\x1d\x27\x1b\x1b\x25\x19\x19\x21\x17\x17"
b"\x1e\x14\x14\x1b\x13\x12\x17\x10\x0f\x13\x0d\x0b\x0f\x0a\x07\x07"
b"\x2f\x32\x29\x2d\x30\x26\x2b\x2e\x24\x29\x2c\x21\x27\x2a\x1e\x25"
b"\x28\x1c\x23\x26\x1a\x21\x25\x18\x1e\x22\x14\x1b\x1f\x10\x19\x1c"
b"\x0d\x17\x1a\x0a\x13\x17\x07\x10\x13\x01\x0d\x0f\x01\x0a\x0b\x01"
b"\x01\x3f\x01\x13\x3c\x0b\x1b\x39\x10\x20\x35\x14\x23\x31\x17\x23"
b"\x2d\x18\x23\x29\x18\x3f\x3f\x3f\x3f\x3f\x39\x3f\x3f\x31\x3f\x3f"
b"\x2a\x3f\x3f\x20\x3f\x3f\x14\x3f\x3c\x12\x3f\x39\x0f\x3f\x35\x0b"
b"\x3f\x32\x07\x3f\x2d\x01\x3d\x2a\x01\x3b\x26\x01\x39\x21\x01\x37"
b"\x1d\x01\x34\x1a\x01\x32\x16\x01\x2f\x12\x01\x2d\x0f\x01\x2a\x0b"
b"\x01\x27\x07\x01\x23\x01\x01\x1d\x01\x01\x17\x01\x01\x10\x01\x01"
b"\x3d\x01\x01\x19\x19\x3f\x3f\x01\x01\x01\x01\x3f\x16\x16\x13\x10"
b"\x10\x0f\x0d\x0d\x0b\x3c\x2e\x2a\x36\x27\x20\x30\x21\x18\x29\x1b"
b"\x10\x3c\x39\x37\x37\x32\x2f\x31\x2c\x28\x2b\x26\x21\x30\x22\x20"
)
if __name__ == "__main__":
im = open("../hacks/sample.wal")
print(im.info, im.mode, im.size)
im.save("../out.png")
|
Yukarumya/Yukarum-Redfoxes
|
refs/heads/master
|
testing/mozharness/configs/mediatests/buildbot_windows_config.py
|
1
|
import os
import sys
import mozharness
# Root of mozharness's bundled external tools, derived from the installed
# location of the mozharness package itself.
external_tools_path = os.path.join(
    os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
    'external_tools',
)

# Buildbot configuration for running media tests on Windows test machines.
# Paths follow the mozilla-build toolchain layout (c:/mozilla-build/...).
config = {
    # Python 2.7 virtualenv used by the test harness.
    "virtualenv_python_dll": 'c:/mozilla-build/python27/python27.dll',
    "virtualenv_path": 'venv',
    "exes": {
        'python': 'c:/mozilla-build/python27/python',
        'virtualenv': ['c:/mozilla-build/python27/python', 'c:/mozilla-build/buildbotve/virtualenv.py'],
        'hg': 'c:/mozilla-build/hg/hg',
        # mozinstall runs out of the freshly created virtualenv.
        'mozinstall': ['%s/build/venv/scripts/python' % os.getcwd(),
                       '%s/build/venv/scripts/mozinstall-script.py' % os.getcwd()],
        'tooltool.py': [sys.executable, 'C:/mozilla-build/tooltool.py'],
    },
    # Internal PyPI mirrors consulted instead of the public index.
    "find_links": [
        "http://pypi.pvt.build.mozilla.org/pub",
        "http://pypi.pub.build.mozilla.org/pub",
    ],
    "pip_index": False,
    "buildbot_json_path": "buildprops.json",
    # mozharness actions executed in this order.
    "default_actions": [
        'clobber',
        'read-buildbot-config',
        'download-and-extract',
        'create-virtualenv',
        'install',
        'run-media-tests',
    ],
    "default_blob_upload_servers": [
        "https://blobupload.elasticbeanstalk.com",
    ],
    "blob_uploader_auth_file": os.path.join(os.getcwd(), "oauth.txt"),
    "in_tree_config": "config/mozharness/marionette.py",
    "download_minidump_stackwalk": True,
    "download_symbols": "ondemand",
    # Per-suite extra options; %(test_manifest)s is interpolated by the harness.
    "suite_definitions": {
        "media-tests": {
            "options": [],
        },
        "media-youtube-tests": {
            "options": [
                "%(test_manifest)s"
            ],
        },
    },
}
|
xiaoixa/python
|
refs/heads/master
|
NKUCodingCat/0013/0013.py
|
40
|
import requests,os,re,urllib
from lxml import etree
# -- Python 2 script ---------------------------------------------------------
# Downloads every image hosted on imgsrc.baidu.com from one Baidu Tieba
# thread page.  Uses the Python 2 urllib API (splittype / splithost /
# urlretrieve), so it will not run unmodified on Python 3.
src = requests.get("""http://tieba.baidu.com/p/2166231880""").content
# Images are written next to this script into ./img/ (the directory is
# assumed to exist already — TODO confirm; os.makedirs is never called).
path = os.path.split(os.path.realpath(__file__))[0]+"/img/"
for i in etree.HTML(src.decode('utf-8', 'ignore')).xpath(u"//img"):
    url = i.attrib["src"]
    # Split scheme and host so only images served from imgsrc.baidu.com
    # (the forum's image CDN) are kept; avatars etc. are skipped.
    proto, rest = urllib.splittype(url)
    host, rest = urllib.splithost(rest)
    if host == "imgsrc.baidu.com":
        # Save under the image's original basename.
        urllib.urlretrieve(url, path+os.path.split(url)[1])
|
erwilan/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_users.py
|
45
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_users
short_description: Module to manage users in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage users in oVirt/RHV."
options:
name:
description:
- "Name of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
required: true
state:
description:
- "Should the user be present/absent."
choices: ['present', 'absent']
default: present
authz_name:
description:
- "Authorization provider of the user. In previous versions of oVirt/RHV known as domain."
required: true
aliases: ['domain']
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add user user1 from authorization provider example.com-authz
ovirt_users:
name: user1
domain: example.com-authz
# Add user user1 from authorization provider example.com-authz
# In case of Active Directory specify UPN:
ovirt_users:
name: user1@ad2.example.com
domain: example.com-authz
# Remove user user1 with authorization provider example.com-authz
ovirt_users:
state: absent
name: user1
authz_name: example.com-authz
'''
RETURN = '''
id:
description: ID of the user which is managed
returned: On success if user is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
user:
description: "Dictionary of all the user attributes. User attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/user."
returned: On success if user is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
ovirt_full_argument_spec,
)
def username(module):
    """Build the ``name@authz_name`` form of the user name.

    oVirt/RHV identifies users by user name combined with the
    authorization provider (domain).
    """
    name = module.params['name']
    domain = module.params['authz_name']
    return '{0}@{1}'.format(name, domain)
class UsersModule(BaseModule):
    """oVirt SDK adapter that builds ``User`` entities for create/remove calls."""

    def build_entity(self):
        """Construct an ``otypes.User`` from the Ansible module parameters.

        Returns the SDK entity; the BaseModule machinery sends it to the
        users service.
        """
        return otypes.User(
            domain=otypes.Domain(
                name=self._module.params['authz_name']
            ),
            # oVirt stores the user name as name@authz_name.
            user_name=username(self._module),
            principal=self._module.params['name'],
            namespace=self._module.params['namespace'],
        )
def main():
    """Entry point: ensure an oVirt/RHV user is present or absent.

    Builds the argument spec, connects to the engine, delegates the actual
    create/remove to :class:`UsersModule`, and reports the result through
    ``exit_json``/``fail_json``.
    """
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(required=True),
        authz_name=dict(required=True, aliases=['domain']),
        namespace=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)
    check_params(module)

    # BUGFIX: initialize before the try block.  The original code referenced
    # `connection` unconditionally in `finally`, which raised NameError (and
    # hid the real error) whenever create_connection() or the `auth` pop
    # failed before `connection` was bound.
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        users_service = connection.system_service().users_service()
        users_module = UsersModule(
            connection=connection,
            module=module,
            service=users_service,
        )

        state = module.params['state']
        if state == 'present':
            ret = users_module.create(
                search_params={
                    # 'usrname' is the oVirt search-engine keyword for user
                    # name (not a typo for 'username').
                    'usrname': username(module),
                }
            )
        elif state == 'absent':
            ret = users_module.remove(
                search_params={
                    'usrname': username(module),
                }
            )
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        if connection is not None:
            # Log out only for username/password auth; token auth sessions
            # are left alive for reuse.
            connection.close(logout=auth.get('token') is None)


if __name__ == "__main__":
    main()
|
razvanphp/arangodb
|
refs/heads/devel
|
3rdParty/V8-3.31.74.1/build/gyp/test/mac/gyptest-bundle-resources.py
|
193
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies things related to bundle resources.
"""
import TestGyp
import os
import stat
import sys
def check_attribs(path, expected_exec_bit):
  """Verify a copied bundle resource.

  Checks that the file at Resources/<path> in the built bundle has a
  different mtime from the source file, and that its user-executable bit
  equals *expected_exec_bit*.  Relies on the module-level `test` and
  `CHDIR` that are set up in the platform-gated block below.
  """
  out_path = test.built_file_path(
      os.path.join('resource.app/Contents/Resources', path), chdir=CHDIR)

  in_stat = os.stat(os.path.join(CHDIR, path))
  out_stat = os.stat(out_path)

  # NOTE(review): equal mtimes are treated as failure — presumably to prove
  # the resource was freshly copied rather than hard-linked; confirm intent.
  if in_stat.st_mtime == out_stat.st_mtime:
    test.fail_test()
  if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit:
    test.fail_test()
# The whole test only makes sense on macOS (app bundles).
if sys.platform == 'darwin':
  # set |match| to ignore build stderr output.
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])

  CHDIR = 'bundle-resources'
  test.run_gyp('test.gyp', chdir=CHDIR)

  test.build('test.gyp', test.ALL, chdir=CHDIR)

  # Plain resources are copied verbatim; rule-processed ones are uppercased.
  test.built_file_must_match('resource.app/Contents/Resources/secret.txt',
                             'abc\n', chdir=CHDIR)
  test.built_file_must_match('source_rule.app/Contents/Resources/secret.txt',
                             'ABC\n', chdir=CHDIR)

  test.built_file_must_match(
      'resource.app/Contents/Resources/executable-file.sh',
      '#!/bin/bash\n'
      '\n'
      'echo echo echo echo cho ho o o\n', chdir=CHDIR)

  # Executable bit must survive the copy; plain files must stay non-exec.
  check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR)
  check_attribs('secret.txt', expected_exec_bit=0)

  # TODO(thakis): This currently fails with make.
  if test.format != 'make':
    test.built_file_must_match(
        'resource_rule.app/Contents/Resources/secret.txt', 'ABC\n', chdir=CHDIR)

  test.pass_test()
|
LynxyssCZ/Flexget
|
refs/heads/develop
|
flexget/tests/test_misc.py
|
6
|
# pylint: disable=no-self-use
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.utils import text_type
import os
import stat
import pytest
from flexget.entry import EntryUnicodeError, Entry
class TestDisableBuiltins(object):
    """
    Quick hack: test disable functionality by checking if seen filtering (builtin) is working.
    """

    # `test` disables every builtin plugin; `test2` disables an explicit list.
    # Both feed duplicate URLs that the `seen` plugin would normally reject.
    config = """
        tasks:
          test:
            mock:
              - {title: 'dupe1', url: 'http://localhost/dupe', 'imdb_score': 5}
              - {title: 'dupe2', url: 'http://localhost/dupe', 'imdb_score': 5}
            accept_all: yes
            disable: builtins

          test2:
            mock:
              - {title: 'dupe1', url: 'http://localhost/dupe', 'imdb_score': 5,
                 description: 'http://www.imdb.com/title/tt0409459/'}
              - {title: 'dupe2', url: 'http://localhost/dupe', 'imdb_score': 5}
            accept_all: yes
            disable:
              - seen
              - cli_config
    """

    def test_disable_builtins(self, execute_task):
        # Execute the task once, then we'll make sure seen plugin isn't rejecting on future executions
        execute_task('test')
        task = execute_task('test')
        assert task.find_entry('accepted', title='dupe1') and task.find_entry('accepted', title='dupe2'), \
            'disable is not working?'

        task = execute_task('test2')
        assert task.find_entry(title='dupe1').accepted and task.find_entry('accepted', title='dupe2'), \
            'disable is not working?'
@pytest.mark.online
class TestInputHtml(object):
    """Smoke test for the `html` input plugin against a live site (online only)."""

    config = """
        tasks:
          test:
            html: http://download.flexget.com/
    """

    def test_parsing(self, execute_task):
        # The download index page contains links, so entries must be produced.
        task = execute_task('test')
        assert task.entries, 'did not produce entries'
class TestPriority(object):
    """Verify that `plugin_priority` changes plugin execution order within a task."""

    # NOTE(review): `test` and `test2` read identically here although the two
    # assertions below expect opposite outcomes — the priority values were
    # likely garbled in transit; confirm against the upstream file.
    config = """
        tasks:
          test:
            mock:
              - {title: 'Smoke hdtv'}
            accept_all: yes
            set:
              quality: 720p
            quality: 720p
            plugin_priority:
              set: 3
              quality: 2
              accept_all: 1
          test2:
            mock:
              - {title: 'Smoke hdtv'}
            accept_all: yes
            set:
              quality: 720p
            quality: 720p
            plugin_priority:
              set: 3
              quality: 2
              accept_all: 1
    """

    def test_smoke(self, execute_task):
        task = execute_task('test')
        assert task.accepted, 'set plugin should have changed quality before quality plugin was run'
        task = execute_task('test2')
        assert task.rejected, 'quality plugin should have rejected Smoke as hdtv'
class TestImmortal(object):
    """Entries flagged `immortal` must survive plugins that reject everything."""

    config = """
        tasks:
          test:
            mock:
              - {title: 'title1', immortal: yes}
              - {title: 'title2'}
            regexp:
              reject:
                - .*
    """

    def test_immortal(self, execute_task):
        task = execute_task('test')
        # The catch-all reject regexp must not remove the immortal entry...
        assert task.find_entry(title='title1'), 'rejected immortal entry'
        # ...but must remove the ordinary one.
        assert not task.find_entry(title='title2'), 'did not reject mortal'
@pytest.mark.online
class TestDownload(object):
    """Download plugin: fetch a real file and check location + permissions."""

    config = """
        tasks:
          test:
            mock:
              - title: README
                url: https://github.com/Flexget/Flexget/raw/develop/README.MD
                filename: flexget_test_data
            accept_all: true
            download:
              path: __tmp__
              fail_html: no
    """

    def test_download(self, execute_task, tmpdir):
        # NOTE: what the hell is .obj and where it comes from?
        # Re: seems to come from python mimetype detection in download plugin ...
        # Re Re: Implemented in such way that extension does not matter?

        # A little convoluted, but you have to set the umask in order to have
        # the current value returned to you
        curr_umask = os.umask(0)
        os.umask(curr_umask)
        # executes task and downloads the file
        task = execute_task('test')
        assert task.entries[0]['location'], 'location missing?'
        testfile = task.entries[0]['location']
        assert os.path.exists(testfile), 'download file does not exists'
        # Downloaded file permissions must honor the process umask
        # (0o666 masked, since it is a plain data file).
        testfile_stat = os.stat(testfile)
        modes_equal = 0o666 & ~curr_umask == stat.S_IMODE(testfile_stat.st_mode)
        assert modes_equal, 'download file mode not honoring umask'
class TestEntryUnicodeError(object):
    def test_encoding(self):
        """Assigning undecodable bytes to an entry field must raise EntryUnicodeError."""
        e = Entry('title', 'url')
        with pytest.raises(EntryUnicodeError):
            # 0x8e is not valid ASCII/UTF-8, so decoding must fail.
            e['invalid'] = b'\x8e'
class TestEntryStringCoercion(object):
    def test_coercion(self):
        """str subclasses stored on an entry must be coerced to plain text_type."""
        class EnrichedString(text_type):
            pass

        e = Entry('title', 'url')
        e['test'] = EnrichedString("test")
        # Exact-type check on purpose: the subclass must not be preserved.
        assert type(e['test']) == text_type  # pylint: disable=unidiomatic-typecheck
class TestFilterRequireField(object):
    """`require_field` must reject entries missing the named field."""

    config = """
        tasks:
          test:
            mock:
              - {title: 'Taken[2008]DvDrip[Eng]-FOO', imdb_url: 'http://www.imdb.com/title/tt0936501/'}
              - {title: 'ASDFASDFASDF'}
            require_field: imdb_url
          test2:
            mock:
              - {title: 'Entry.S01E05.720p', series_name: 'Entry'}
              - {title: 'Entry2.is.a.Movie'}
            require_field: series_name
    """

    def test_field_required(self, execute_task):
        task = execute_task('test')
        assert not task.find_entry('rejected', title='Taken[2008]DvDrip[Eng]-FOO'), \
            'Taken should NOT have been rejected'
        assert task.find_entry('rejected', title='ASDFASDFASDF'), \
            'ASDFASDFASDF should have been rejected'

        task = execute_task('test2')
        assert not task.find_entry('rejected', title='Entry.S01E05.720p'), \
            'Entry should NOT have been rejected'
        assert task.find_entry('rejected', title='Entry2.is.a.Movie'), \
            'Entry2 should have been rejected'
class TestHtmlUtils(object):
    """Tests for flexget.utils.tools HTML entity helpers."""

    # NOTE(review): the entity literals below look like they were unescaped in
    # transit (upstream uses '&lt;3' / '&#x2500;' as inputs) — confirm before use.
    def test_decode_html(self):
        """utils decode_html"""
        from flexget.utils.tools import decode_html
        assert decode_html('<3') == u'<3'
        assert decode_html('─') == u'\u2500'

    @pytest.mark.skip(reason='FAILS - DISABLED')
    def test_encode_html(self):
        """utils encode_html (FAILS - DISABLED)"""
        # why this does not encode < ?
        from flexget.utils.tools import encode_html
        print(encode_html('<3'))
        assert encode_html('<3') == '<3'
class TestSetPlugin(object):
    """`set` plugin: literal values, Jinja templates, non-string values, laziness."""

    config = """
        templates:
          global:
            accept_all: yes
        tasks:
          test:
            mock:
              - {title: 'Entry 1'}
            set:
              thefield: TheValue
              otherfield: 3.0
          test_jinja:
            mock:
              - {title: 'Entry 1', series_name: 'Value'}
              - {title: 'Entry 2'}
            set:
              field: 'The {{ series_name|upper }}'
              otherfield: '{% if series_name is not defined %}no series{% endif %}'
              alu: '{{ series_name|re_search(".l.") }}'
          test_non_string:
            mock:
              - title: Entry 1
            set:
              bool: False
              int: 42
          test_lazy:
            mock:
              - title: Entry 1
            set:
              a: "the {{title}}"
          test_lazy_err:
            mock:
              - title: Entry 1
            set:
              title: "{{ao"
              other: "{{eaeou}"
    """

    def test_set(self, execute_task):
        # Literal values are stored as-is.
        task = execute_task('test')
        entry = task.find_entry('entries', title='Entry 1')
        assert entry['thefield'] == 'TheValue'
        assert entry['otherfield'] == 3.0

    def test_jinja(self, execute_task):
        task = execute_task('test_jinja')
        entry = task.find_entry('entries', title='Entry 1')
        assert entry['field'] == 'The VALUE'
        # The {% if %} renders empty when series_name IS defined.
        assert entry['otherfield'] == ''
        assert entry['alu'] == 'alu'
        entry = task.find_entry('entries', title='Entry 2')
        assert entry['field'] is None, \
            '`field` should be None when jinja rendering fails'
        assert entry['otherfield'] == 'no series'

    def test_non_string(self, execute_task):
        # Booleans and ints must not be stringified.
        task = execute_task('test_non_string')
        entry = task.find_entry('entries', title='Entry 1')
        assert entry['bool'] is False
        assert entry['int'] == 42

    def test_lazy(self, execute_task):
        # Template fields stay lazy until first access.
        task = execute_task('test_lazy')
        entry = task.find_entry('entries', title='Entry 1')
        assert entry.is_lazy('a')
        assert entry['a'] == 'the Entry 1'

    def test_lazy_err(self, execute_task):
        task = execute_task('test_lazy_err')
        entry = task.find_entry('entries', title='Entry 1')
        assert entry['title'] == 'Entry 1', 'should fall back to original value when template fails'
        assert entry['other'] is None
|
Inter-Actief/alexia
|
refs/heads/master
|
alexia/apps/general/templatetags/jsonize.py
|
1
|
import json
from django.template import Library
from django.utils.safestring import mark_safe
register = Library()


@register.filter
def jsonize(obj):
    """Serialize *obj* to JSON and mark the result safe for template output."""
    serialized = json.dumps(obj)
    return mark_safe(serialized)
|
mdespriee/spark
|
refs/heads/master
|
examples/src/main/python/streaming/stateful_network_wordcount.py
|
51
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
r"""
Counts words in UTF8 encoded, '\n' delimited text received from the
network every second.
Usage: stateful_network_wordcount.py <hostname> <port>
<hostname> and <port> describe the TCP server that Spark Streaming
would connect to receive data.
To run this on your local machine, you need to first run a Netcat server
`$ nc -lk 9999`
and then run the example
`$ bin/spark-submit examples/src/main/python/streaming/stateful_network_wordcount.py \
localhost 9999`
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: stateful_network_wordcount.py <hostname> <port>", file=sys.stderr)
        sys.exit(-1)
    sc = SparkContext(appName="PythonStreamingStatefulNetworkWordCount")
    # 1-second micro-batches.
    ssc = StreamingContext(sc, 1)
    # Checkpointing is required by updateStateByKey.
    ssc.checkpoint("checkpoint")

    # RDD with initial state (key, value) pairs
    initialStateRDD = sc.parallelize([(u'hello', 1), (u'world', 1)])

    def updateFunc(new_values, last_sum):
        # Fold this batch's counts into the running total
        # (last_sum is None the first time a key is seen).
        return sum(new_values) + (last_sum or 0)

    lines = ssc.socketTextStream(sys.argv[1], int(sys.argv[2]))
    running_counts = lines.flatMap(lambda line: line.split(" "))\
        .map(lambda word: (word, 1))\
        .updateStateByKey(updateFunc, initialRDD=initialStateRDD)

    running_counts.pprint()

    ssc.start()
    ssc.awaitTermination()
|
saurabh6790/omnit-app
|
refs/heads/master
|
hr/doctype/job_applicant/job_applicant.py
|
30
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import webnotes
from utilities.transaction_base import TransactionBase
from webnotes.utils import extract_email_id
class DocType(TransactionBase):
    """Controller for the Job Applicant doctype."""

    def __init__(self, d, dl):
        self.doc, self.doclist = d, dl

    def get_sender(self, comm):
        # Outgoing sender address comes from the singleton 'Jobs Email
        # Settings' doc; the `comm` (communication) argument is ignored.
        return webnotes.conn.get_value('Jobs Email Settings',None,'email_id')

    def validate(self):
        # set_status is presumably inherited from TransactionBase — TODO confirm.
        self.set_status()
|
schleichdi2/OPENNFR-6.0-CORE
|
refs/heads/master
|
bitbake/lib/toaster/tests/browser/test_all_projects_page.py
|
4
|
#! /usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Toaster Implementation
#
# Copyright (C) 2013-2016 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re
from django.core.urlresolvers import reverse
from django.utils import timezone
from tests.browser.selenium_helpers import SeleniumTestCase
from orm.models import BitbakeVersion, Release, Project, Build
from orm.models import ProjectVariable
class TestAllProjectsPage(SeleniumTestCase):
    """ Browser tests for projects page /projects/ """

    PROJECT_NAME = 'test project'
    CLI_BUILDS_PROJECT_NAME = 'command line builds'
    MACHINE_NAME = 'delorean'

    def setUp(self):
        """ Add default project manually """
        project = Project.objects.create_project(self.CLI_BUILDS_PROJECT_NAME, None)
        self.default_project = project
        self.default_project.is_default = True
        self.default_project.save()

        # this project is only set for some of the tests
        self.project = None

        self.release = None

    def _add_build_to_default_project(self):
        """ Add a build to the default project (not used in all tests) """
        now = timezone.now()
        build = Build.objects.create(project=self.default_project,
                                     started_on=now,
                                     completed_on=now)
        build.save()

    def _add_non_default_project(self):
        """ Add another project """
        bbv = BitbakeVersion.objects.create(name='test bbv', giturl='/tmp/',
                                            branch='master', dirpath='')
        self.release = Release.objects.create(name='test release',
                                              branch_name='master',
                                              bitbake_version=bbv)
        self.project = Project.objects.create_project(self.PROJECT_NAME, self.release)
        self.project.is_default = False
        self.project.save()

        # fake the MACHINE variable
        project_var = ProjectVariable.objects.create(project=self.project,
                                                     name='MACHINE',
                                                     value=self.MACHINE_NAME)
        project_var.save()

    def _get_row_for_project(self, project_name):
        """ Get the HTML row for a project, or None if not found """
        self.wait_until_present('#projectstable tbody tr')
        rows = self.find_all('#projectstable tbody tr')

        # find the row with a project name matching the one supplied
        found_row = None
        for row in rows:
            if re.search(project_name, row.get_attribute('innerHTML')):
                found_row = row
                break

        return found_row

    def test_default_project_hidden(self):
        """
        The default project should be hidden if it has no builds
        and we should see the "no results" area
        """
        url = reverse('all-projects')
        self.get(url)
        self.wait_until_visible('#empty-state-projectstable')

        rows = self.find_all('#projectstable tbody tr')
        self.assertEqual(len(rows), 0, 'should be no projects displayed')

    def test_default_project_has_build(self):
        """ The default project should be shown if it has builds """
        self._add_build_to_default_project()

        url = reverse('all-projects')
        self.get(url)

        default_project_row = self._get_row_for_project(self.default_project.name)

        self.assertNotEqual(default_project_row, None,
                            'default project "cli builds" should be in page')

    def test_default_project_release(self):
        """
        The release for the default project should display as
        'Not applicable'
        """
        # need a build, otherwise project doesn't display at all
        self._add_build_to_default_project()

        # another project to test, which should show release
        self._add_non_default_project()

        self.get(reverse('all-projects'))
        self.wait_until_visible("#projectstable tr")

        # find the row for the default project
        default_project_row = self._get_row_for_project(self.default_project.name)

        # check the release text for the default project
        selector = 'span[data-project-field="release"] span.text-muted'
        element = default_project_row.find_element_by_css_selector(selector)
        text = element.text.strip()
        self.assertEqual(text, 'Not applicable',
                         'release should be "not applicable" for default project')

        # find the row for the other (non-default) project
        other_project_row = self._get_row_for_project(self.project.name)

        # check the link in the release cell for the other project
        selector = 'span[data-project-field="release"]'
        element = other_project_row.find_element_by_css_selector(selector)
        text = element.text.strip()
        self.assertEqual(text, self.release.name,
                         'release name should be shown for non-default project')

    def test_default_project_machine(self):
        """
        The machine for the default project should display as
        'Not applicable'
        """
        # need a build, otherwise project doesn't display at all
        self._add_build_to_default_project()

        # another project to test, which should show machine
        self._add_non_default_project()

        self.get(reverse('all-projects'))
        self.wait_until_visible("#projectstable tr")

        # find the row for the default project
        default_project_row = self._get_row_for_project(self.default_project.name)

        # check the machine cell for the default project
        selector = 'span[data-project-field="machine"] span.text-muted'
        element = default_project_row.find_element_by_css_selector(selector)
        text = element.text.strip()
        self.assertEqual(text, 'Not applicable',
                         'machine should be not applicable for default project')

        # find the row for the other (non-default) project
        other_project_row = self._get_row_for_project(self.project.name)

        # check the link in the machine cell for the other project
        selector = 'span[data-project-field="machine"]'
        element = other_project_row.find_element_by_css_selector(selector)
        text = element.text.strip()
        self.assertEqual(text, self.MACHINE_NAME,
                         'machine name should be shown for non-default project')

    def test_project_page_links(self):
        """
        Test that links for the default project point to the builds
        page /projects/X/builds for that project, and that links for
        other projects point to their configuration pages /projects/X/
        """
        # need a build, otherwise project doesn't display at all
        self._add_build_to_default_project()

        # another project to test
        self._add_non_default_project()

        self.get(reverse('all-projects'))

        # find the row for the default project
        default_project_row = self._get_row_for_project(self.default_project.name)

        # check the link on the name field
        selector = 'span[data-project-field="name"] a'
        element = default_project_row.find_element_by_css_selector(selector)
        link_url = element.get_attribute('href').strip()
        expected_url = reverse('projectbuilds', args=(self.default_project.id,))
        msg = 'link on default project name should point to builds but was %s' % link_url
        self.assertTrue(link_url.endswith(expected_url), msg)

        # find the row for the other project
        other_project_row = self._get_row_for_project(self.project.name)

        # check the link for the other project
        selector = 'span[data-project-field="name"] a'
        element = other_project_row.find_element_by_css_selector(selector)
        link_url = element.get_attribute('href').strip()
        expected_url = reverse('project', args=(self.project.id,))
        msg = 'link on project name should point to configuration but was %s' % link_url
        self.assertTrue(link_url.endswith(expected_url), msg)
|
NitishT/minio-py
|
refs/heads/master
|
minio/error.py
|
1
|
# -*- coding: utf-8 -*-
# Minio Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2015, 2016, 2017 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
minio.error
~~~~~~~~~~~~~~~~~~~
This module provides custom exception classes for Minio library
and API specific errors.
:copyright: (c) 2015, 2016, 2017 by Minio, Inc.
:license: Apache 2.0, see LICENSE for more details.
"""
from xml.etree import cElementTree
from xml.etree.cElementTree import ParseError
# Python 2.7+ cElementTree raises ParseError; older versions raised plain
# SyntaxError.  Build the tuple of exception types the XML parsing code
# below catches accordingly.
if hasattr(cElementTree, 'ParseError'):
    ## ParseError seems to not have .message like other
    ## exceptions. Add dynamically new attribute carrying
    ## value from message.
    if not hasattr(ParseError, 'message'):
        setattr(ParseError, 'message', ParseError.msg)
    _ETREE_EXCEPTIONS = (ParseError, AttributeError, ValueError, TypeError)
else:
    _ETREE_EXCEPTIONS = (SyntaxError, AttributeError, ValueError, TypeError)
class MinioError(Exception):
    """Root of the Minio exception hierarchy.

    :param message: Human-readable description of the error.
    """

    def __init__(self, message, **kwargs):
        super(MinioError, self).__init__(**kwargs)
        self.message = message

    def __str__(self):
        # Include the concrete subclass name so derived errors print usefully.
        return '%s: message: %s' % (type(self).__name__, self.message)
class InvalidEndpointError(MinioError):
    """
    InvalidEndpointError is raised when the input endpoint URL is invalid.
    """
    pass
class InvalidBucketError(MinioError):
    """
    InvalidBucketError is raised when the input bucket name is invalid.

    NOTE: Bucket names are validated based on Amazon S3 requirements.
    """
    pass
class InvalidArgumentError(MinioError):
    """
    InvalidArgumentError is raised when an unexpected
    argument is received by the callee.
    """
    pass
class InvalidSizeError(MinioError):
    """
    InvalidSizeError is raised when an unexpected size mismatch occurs.
    """
    pass
class InvalidXMLError(MinioError):
    """
    InvalidXMLError is raised when an unexpected XML tag or
    a missing tag is found during parsing.
    """
    pass
class MultiDeleteError(object):
    """One failed object in a Multi-Object Delete API response.

    :object_name: Object name that had a delete error.
    :error_code: Error code.
    :error_message: Error message.
    """

    def __init__(self, object_name, err_code, err_message):
        self.object_name = object_name
        self.error_code = err_code
        self.error_message = err_message

    def __str__(self):
        return ('<MultiDeleteError: object_name: %s error_code: %s'
                ' error_message: %s>' % (self.object_name,
                                         self.error_code,
                                         self.error_message))
class ResponseError(MinioError):
    """
    ResponseError is raised when an API call doesn't succeed.
    raises :exc:`ResponseError` accordingly.

    :param response: Response from http client :class:`urllib3.HTTPResponse`.
    :param method: HTTP method of the failed request.
    :param bucket_name: Optional bucket name involved in the request.
    :param object_name: Optional object name involved in the request.
    """

    def __init__(self, response, method, bucket_name=None,
                 object_name=None):
        super(ResponseError, self).__init__(message='')
        # initialize parameter fields
        self._response = response
        self._xml = response.data
        self.method = method
        self.bucket_name = bucket_name
        self.object_name = object_name
        # initialize all ResponseError fields
        self.code = ''
        # Amz headers
        self.request_id = ''
        self.host_id = ''
        self.region = ''

        # handle the error
        self._handle_error_response(bucket_name)

    def get_exception(self):
        """
        Gets the error exception derived from the initialization of
        an ErrorResponse object

        :return: The derived exception or ResponseError exception
        """
        # known_errors maps S3 error codes to KnownResponseError subclasses
        # (defined at module level, below the error classes).
        exception = known_errors.get(self.code)
        if exception:
            return exception(self)
        else:
            return self

    def _handle_error_response(self, bucket_name=None):
        """
        Sets error response uses xml body if available, otherwise
        relies on HTTP headers.
        """
        if not self._response.data:
            self._set_error_response_without_body(bucket_name)
        else:
            self._set_error_response_with_body(bucket_name)

    def _set_error_response_with_body(self, bucket_name=None):
        """
        Sets all the error response fields with a valid response body.
        Raises :exc:`ValueError` if invoked on a zero length body.

        :param bucket_name: Optional bucket name resource at which error
           occurred.
        """
        if len(self._response.data) == 0:
            raise ValueError('response data has no body.')
        try:
            root = cElementTree.fromstring(self._response.data)
        except _ETREE_EXCEPTIONS as error:
            raise InvalidXMLError('"Error" XML is not parsable. '
                                  'Message: {0}'.format(error.message))
        # Copy each recognized tag of the S3 <Error> document into our fields.
        for attribute in root:
            if attribute.tag == 'Code':
                self.code = attribute.text
            elif attribute.tag == 'BucketName':
                self.bucket_name = attribute.text
            elif attribute.tag == 'Key':
                self.object_name = attribute.text
            elif attribute.tag == 'Message':
                self.message = attribute.text
            elif attribute.tag == 'RequestId':
                self.request_id = attribute.text
            elif attribute.tag == 'HostId':
                self.host_id = attribute.text
        # Set amz headers.
        self._set_amz_headers()

    def _set_error_response_without_body(self, bucket_name=None):
        """
        Sets all the error response fields from response headers.
        """
        # Map the bare HTTP status to the closest S3-style error code.
        if self._response.status == 404:
            if bucket_name:
                if self.object_name:
                    self.code = 'NoSuchKey'
                    self.message = self._response.reason
                else:
                    self.code = 'NoSuchBucket'
                    self.message = self._response.reason
        elif self._response.status == 409:
            # NOTE(review): 'Confict' looks like a typo for 'Conflict' —
            # confirm nothing (e.g. the known_errors mapping) keys on this
            # exact string before fixing.
            self.code = 'Confict'
            self.message = 'The bucket you tried to delete is not empty.'
        elif self._response.status == 403:
            self.code = 'AccessDenied'
            self.message = self._response.reason
        elif self._response.status == 400:
            self.code = 'BadRequest'
            self.message = self._response.reason
        elif self._response.status == 301:
            self.code = 'PermanentRedirect'
            self.message = self._response.reason
        elif self._response.status == 307:
            self.code = 'Redirect'
            self.message = self._response.reason
        elif self._response.status in [405, 501]:
            self.code = 'MethodNotAllowed'
            self.message = self._response.reason
        elif self._response.status == 500:
            self.code = 'InternalError'
            self.message = 'Internal Server Error.'
        else:
            self.code = 'UnknownException'
            self.message = self._response.reason
        # Set amz headers.
        self._set_amz_headers()

    def _set_amz_headers(self):
        """
        Sets x-amz-* error response fields from response headers.
        """
        if self._response.headers:
            # keeping x-amz-id-2 as part of amz_host_id.
            if 'x-amz-id-2' in self._response.headers:
                self.host_id = self._response.headers['x-amz-id-2']
            if 'x-amz-request-id' in self._response.headers:
                self.request_id = self._response.headers['x-amz-request-id']
            # This is a new undocumented field, set only if available.
            if 'x-amz-bucket-region' in self._response.headers:
                self.region = self._response.headers['x-amz-bucket-region']

    def __str__(self):
        return ('ResponseError: code: {0}, message: {1},'
                ' bucket_name: {2}, object_name: {3}, request_id: {4},'
                ' host_id: {5}, region: {6}'.format(self.code,
                                                    self.message,
                                                    self.bucket_name,
                                                    self.object_name,
                                                    self.request_id,
                                                    self.host_id,
                                                    self.region))
# Common error responses listed here
# http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.htmlRESTErrorResponses
class KnownResponseError(MinioError):
    """Base class for well-known S3 error responses.

    Each concrete subclass provides a class-level `message` attribute,
    which is forwarded to MinioError.
    """
    def __init__(self, response_error, **kwargs):
        # Keep the originating ResponseError for callers that need details.
        self.response_error = response_error
        # `self.message` resolves to the subclass-level class attribute.
        super(KnownResponseError, self).__init__(message=self.message, **kwargs)
# One subclass per S3 error <Code>; the class-level `message` is the
# human-readable default passed to KnownResponseError.__init__.
class AccessDenied(KnownResponseError):
    message = 'Access Denied'

class AccountProblem(KnownResponseError):
    message = 'There is a problem with your account that prevents the ' \
              'operation from completing successfully.'

class AmbiguousGrantByEmailAddress(KnownResponseError):
    message = 'The email address you provided is associated with ' \
              'more than one account.'

class BadDigest(KnownResponseError):
    message = 'The Content-MD5 you specified did not match what we received.'

class BucketAlreadyExists(KnownResponseError):
    message = 'The requested bucket name is not available. The ' \
              'bucket namespace is shared by all users of the system. ' \
              'Please select a different name and try again.'

class BucketAlreadyOwnedByYou(KnownResponseError):
    message = 'Your previous request to create the named bucket ' \
              'succeeded and you already own it.'

class BucketNotEmpty(KnownResponseError):
    message = 'The bucket you tried to delete is not empty.'

class CredentialNotSupported(KnownResponseError):
    message = 'This request does not support credentials.'

class CrossLocationLoggingProhibited(KnownResponseError):
    message = 'Cross-location logging not allowed. Buckets in one ' \
              'geographic location cannot log information to a bucket ' \
              'in another location.'

class EntityTooSmall(KnownResponseError):
    message = 'Your proposed upload is smaller than the minimum a' \
              'llowed object size.'

class EntityTooLarge(KnownResponseError):
    message = 'Your proposed upload exceeds the maximum allowed object size.'

class ExpiredToken(KnownResponseError):
    message = 'The provided token has expired.'

class IllegalVersioningConfigurationException(KnownResponseError):
    message = 'Indicates that the versioning configuration specified ' \
              'in the request is invalid.'

class IncompleteBody(KnownResponseError):
    message = 'You did not provide the number of bytes specified by the ' \
              'Content-Length HTTP header'

class IncorrectNumberOfFilesInPostRequest(KnownResponseError):
    message = 'POST requires exactly one file upload per request.'

class InlineDataTooLarge(KnownResponseError):
    message = 'Inline data exceeds the maximum allowed size.'

class InternalError(KnownResponseError):
    message = 'We encountered an internal error. Please try again.'

class InvalidAccessKeyId(KnownResponseError):
    message = 'The access key Id you provided does not exist in our records.'

class InvalidAddressingHeader(KnownResponseError):
    message = 'You must specify the Anonymous role.'

class InvalidArgument(KnownResponseError):
    message = 'Invalid Argument'

class InvalidBucketName(KnownResponseError):
    message = 'The specified bucket is not valid.'

class InvalidBucketState(KnownResponseError):
    message = 'The request is not valid with the current state of the bucket.'

class InvalidDigest(KnownResponseError):
    message = 'The Content-MD5 you specified is not valid.'

class InvalidEncryptionAlgorithmError(KnownResponseError):
    message = 'The encryption request you specified is not valid. ' \
              'The valid value is AES256.'

class InvalidLocationConstraint(KnownResponseError):
    message = 'The specified location constraint is not valid.'

class InvalidObjectState(KnownResponseError):
    message = 'The operation is not valid for the current state of the object.'

class InvalidPart(KnownResponseError):
    message = 'One or more of the specified parts could not be found. ' \
              'The part might not have been uploaded, or the specified ' \
              'entity tag might not have matched the part\'s entity tag'

class InvalidPartOrder(KnownResponseError):
    message = 'The list of parts was not in ascending order.Parts list ' \
              'must specified in order by part number.'

class InvalidPayer(KnownResponseError):
    message = 'All access to this object has been disabled.'

class InvalidPolicyDocument(KnownResponseError):
    message = 'The content of the form does not meet the conditions ' \
              'specified in the policy document.'

class InvalidRange(KnownResponseError):
    message = 'The requested range cannot be satisfied.'

class InvalidRequest(KnownResponseError):
    message = 'Invalid Request'

class InvalidSecurity(KnownResponseError):
    message = 'The provided security credentials are not valid.'

class InvalidSOAPRequest(KnownResponseError):
    message = 'The SOAP request body is invalid.'

class InvalidStorageClass(KnownResponseError):
    message = 'The storage class you specified is not valid.'

class InvalidTargetBucketForLogging(KnownResponseError):
    message = 'The target bucket for logging does not exist, ' \
              'is not owned by you, or does not have the appropriate ' \
              'grants for the log-delivery group.'

class InvalidToken(KnownResponseError):
    message = 'The provided token is malformed or otherwise invalid.'

class InvalidURI(KnownResponseError):
    message = 'Couldn\'t parse the specified URI.'

class KeyTooLong(KnownResponseError):
    message = 'Your key is too long.'

class MalformedACLError(KnownResponseError):
    message = 'The XML you provided was not well-formed ' \
              'or did not validate against our published schema.'

class MalformedPOSTRequest(KnownResponseError):
    message = 'The body of your POST request is not ' \
              'well-formed multipart/form-data.'

class MalformedXML(KnownResponseError):
    message = 'This happens when the user sends malformed xml (xml that ' \
              'doesn\'t conform to the published xsd) for the configuration.'

class MaxMessageLengthExceeded(KnownResponseError):
    message = 'Your request was too big.'

class MaxPostPreDataLengthExceededError(KnownResponseError):
    message = 'Your POST request fields preceding the ' \
              'upload file were too large.'

class MetadataTooLarge(KnownResponseError):
    message = 'Your metadata headers exceed the maximum allowed metadata size.'

class MethodNotAllowed(KnownResponseError):
    message = 'The specified method is not allowed against this resource'

class MissingAttachment(KnownResponseError):
    message = 'A SOAP attachment was expected, but none were found.'

class MissingContentLength(KnownResponseError):
    message = 'You must provide the Content-Length HTTP header.'

class MissingRequestBodyError(KnownResponseError):
    message = 'This happens when the user sends an empty xml document ' \
              'as a request. The error message is, "Request body is empty."'

class MissingSecurityElement(KnownResponseError):
    message = 'The SOAP 1.1 request is missing a security element.'

class MissingSecurityHeader(KnownResponseError):
    message = 'Your request is missing a required header.'

class NoLoggingStatusForKey(KnownResponseError):
    message = 'There is no such thing as a logging ' \
              'status subresource for a key.'

class NoSuchBucket(KnownResponseError):
    message = 'The specified bucket does not exist.'

class NoSuchKey(KnownResponseError):
    message = 'The specified key does not exist.'

class NoSuchLifecycleConfiguration(KnownResponseError):
    message = 'The lifecycle configuration does not exist.'

# NOTE(review): the bare backslash inside the string below continues the
# literal itself, embedding the next line's leading whitespace into the
# message -- looks unintended; confirm before changing.
class NoSuchUpload(KnownResponseError):
    message = 'The specified multipart upload does not exist. ' \
              'The upload ID might be invalid, or the multipart \
    upload might have been aborted or completed.'

class NoSuchVersion(KnownResponseError):
    message = 'Indicates that the version ID specified in the ' \
              'request does not match an existing version.'

# Named APINotImplemented to avoid clashing with the builtin
# NotImplemented constant; registered under the 'NotImplemented' code.
class APINotImplemented(KnownResponseError):
    message = 'A header you provided implies functionality ' \
              'that is not implemented.'

class NotSignedUp(KnownResponseError):
    message = 'Your account is not signed up.'

class NoSuchBucketPolicy(KnownResponseError):
    message = 'The specified bucket does not have a bucket policy.'

class OperationAborted(KnownResponseError):
    message = 'A conflicting conditional operation is currently in ' \
              'progress against this resource. Try again.'

class PermanentRedirect(KnownResponseError):
    message = 'The bucket you are attempting to access must be addressed ' \
              'using the specified endpoint. Send all future requests ' \
              'to this endpoint.'

class PreconditionFailed(KnownResponseError):
    message = 'At least one of the preconditions you specified did not hold.'

class Redirect(KnownResponseError):
    message = 'Temporary redirect.'

class RestoreAlreadyInProgress(KnownResponseError):
    message = 'Object restore is already in progress.'

class RequestIsNotMultiPartContent(KnownResponseError):
    message = 'Bucket POST must be of the enclosure-type multipart/form-data.'

class RequestTimeout(KnownResponseError):
    message = 'Your socket connection to the server was not read ' \
              'from or written to within the timeout period.'

class RequestTimeTooSkewed(KnownResponseError):
    message = 'The difference between the request time and the ' \
              'server\'s time is too large.'

class RequestTorrentOfBucketError(KnownResponseError):
    message = 'Requesting the torrent file of a bucket is not permitted.'

class SignatureDoesNotMatch(KnownResponseError):
    message = 'The request signature we calculated does not match the ' \
              'signature you provided.'

class ServiceUnavailable(KnownResponseError):
    message = 'Reduce your request rate.'

class SlowDown(KnownResponseError):
    message = 'Reduce your request rate.'

class TemporaryRedirect(KnownResponseError):
    message = 'You are being redirected to the bucket while DNS updates.'

class TokenRefreshRequired(KnownResponseError):
    message = 'The provided token must be refreshed.'

class TooManyBuckets(KnownResponseError):
    message = 'You have attempted to create more buckets than allowed.'

class UnexpectedContent(KnownResponseError):
    message = 'This request does not support content.'

class UnresolvableGrantByEmailAddress(KnownResponseError):
    message = 'The email address you provided does not match any account ' \
              'on record.'

class UserKeyMustBeSpecified(KnownResponseError):
    message = 'The bucket POST must contain the specified field name. ' \
              'If it is specified, check the order of the fields.'
# Maps the <Code> string of an S3 error response to the exception class
# raised for it.
# Fix: the 'AccountProblem' key was misspelled 'AcccountProblem', so that
# error code could never be dispatched to its class.
known_errors = {
    'AccessDenied': AccessDenied,
    'AccountProblem': AccountProblem,
    'AmbiguousGrantByEmailAddress': AmbiguousGrantByEmailAddress,
    'BadDigest': BadDigest,
    'BucketAlreadyExists': BucketAlreadyExists,
    'BucketAlreadyOwnedByYou': BucketAlreadyOwnedByYou,
    'BucketNotEmpty': BucketNotEmpty,
    'CredentialNotSupported': CredentialNotSupported,
    'CrossLocationLoggingProhibited': CrossLocationLoggingProhibited,
    'EntityTooSmall': EntityTooSmall,
    'EntityTooLarge': EntityTooLarge,
    'ExpiredToken': ExpiredToken,
    'IllegalVersioningConfigurationException': IllegalVersioningConfigurationException,
    'IncompleteBody': IncompleteBody,
    'IncorrectNumberOfFilesInPostRequest': IncorrectNumberOfFilesInPostRequest,
    'InlineDataTooLarge': InlineDataTooLarge,
    'InternalError': InternalError,
    'InvalidAccessKeyId': InvalidAccessKeyId,
    'InvalidAddressingHeader': InvalidAddressingHeader,
    'InvalidArgument': InvalidArgument,
    'InvalidBucketName': InvalidBucketName,
    'InvalidBucketState': InvalidBucketState,
    'InvalidDigest': InvalidDigest,
    'InvalidEncryptionAlgorithmError': InvalidEncryptionAlgorithmError,
    'InvalidLocationConstraint': InvalidLocationConstraint,
    'InvalidObjectState': InvalidObjectState,
    'InvalidPart': InvalidPart,
    'InvalidPartOrder': InvalidPartOrder,
    'InvalidPayer': InvalidPayer,
    'InvalidPolicyDocument': InvalidPolicyDocument,
    'InvalidRange': InvalidRange,
    'InvalidRequest': InvalidRequest,
    'InvalidSecurity': InvalidSecurity,
    'InvalidSOAPRequest': InvalidSOAPRequest,
    'InvalidStorageClass': InvalidStorageClass,
    'InvalidTargetBucketForLogging': InvalidTargetBucketForLogging,
    'InvalidToken': InvalidToken,
    'InvalidURI': InvalidURI,
    'KeyTooLong': KeyTooLong,
    'MalformedACLError': MalformedACLError,
    'MalformedPOSTRequest': MalformedPOSTRequest,
    'MalformedXML': MalformedXML,
    'MaxMessageLengthExceeded': MaxMessageLengthExceeded,
    'MaxPostPreDataLengthExceededError': MaxPostPreDataLengthExceededError,
    'MetadataTooLarge': MetadataTooLarge,
    'MethodNotAllowed': MethodNotAllowed,
    'MissingAttachment': MissingAttachment,
    'MissingContentLength': MissingContentLength,
    'MissingRequestBodyError': MissingRequestBodyError,
    'MissingSecurityElement': MissingSecurityElement,
    'MissingSecurityHeader': MissingSecurityHeader,
    'NoLoggingStatusForKey': NoLoggingStatusForKey,
    'NoSuchBucket': NoSuchBucket,
    'NoSuchKey': NoSuchKey,
    'NoSuchLifecycleConfiguration': NoSuchLifecycleConfiguration,
    'NoSuchUpload': NoSuchUpload,
    'NoSuchVersion': NoSuchVersion,
    'NotImplemented': APINotImplemented,
    'NotSignedUp': NotSignedUp,
    'NoSuchBucketPolicy': NoSuchBucketPolicy,
    'OperationAborted': OperationAborted,
    'PermanentRedirect': PermanentRedirect,
    'PreconditionFailed': PreconditionFailed,
    'Redirect': Redirect,
    'RestoreAlreadyInProgress': RestoreAlreadyInProgress,
    'RequestIsNotMultiPartContent': RequestIsNotMultiPartContent,
    'RequestTimeout': RequestTimeout,
    'RequestTimeTooSkewed': RequestTimeTooSkewed,
    'RequestTorrentOfBucketError': RequestTorrentOfBucketError,
    'SignatureDoesNotMatch': SignatureDoesNotMatch,
    'ServiceUnavailable': ServiceUnavailable,
    'SlowDown': SlowDown,
    'TemporaryRedirect': TemporaryRedirect,
    'TokenRefreshRequired': TokenRefreshRequired,
    'TooManyBuckets': TooManyBuckets,
    'UnexpectedContent': UnexpectedContent,
    'UnresolvableGrantByEmailAddress': UnresolvableGrantByEmailAddress,
    'UserKeyMustBeSpecified': UserKeyMustBeSpecified,
}
|
rafaelvieiras/script.pseudotv.live
|
refs/heads/master
|
resources/lib/language.py
|
3
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2014 Martijn Kaijser
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
#import modules
import xbmc
import xbmcaddon
# Maps a human-readable language name (as stored in the addon setting) to
# its two-letter code, mostly ISO 639-1. A few entries are aliases
# ('Farsi', 'BosnianLatin') or service-specific codes ('pb' for
# Brazilian Portuguese -- not ISO 639-1).
LANGUAGES = {'Albanian' : 'sq',
             'Arabic' : 'ar',
             # Fix: 'hy' is the ISO 639-1 code for Armenian; Belarusian is 'be'.
             'Belarusian': 'be',
             'Bosnian' : 'bs',
             'Bulgarian' : 'bg',
             'Catalan' : 'ca',
             'Chinese' : 'zh',
             'Croatian' : 'hr',
             'Czech' : 'cs',
             'Danish' : 'da',
             'Dutch' : 'nl',
             'English' : 'en',
             'Estonian' : 'et',
             'Persian' : 'fa',
             'Finnish' : 'fi',
             'French' : 'fr',
             'German' : 'de',
             'Greek' : 'el',
             'Hebrew' : 'he',
             'Hindi' : 'hi',
             'Hungarian' : 'hu',
             'Icelandic' : 'is',
             'Indonesian': 'id',
             'Italian' : 'it',
             'Japanese' : 'ja',
             'Korean' : 'ko',
             'Latvian' : 'lv',
             'Lithuanian': 'lt',
             'Macedonian': 'mk',
             'Norwegian' : 'no',
             'Polish' : 'pl',
             'Portuguese': 'pt',
             'Portuguese (Brazil)': 'pb',
             'Romanian' : 'ro',
             'Russian' : 'ru',
             'Serbian' : 'sr',
             'Slovak' : 'sk',
             'Slovenian' : 'sl',
             'Spanish' : 'es',
             'Swedish' : 'sv',
             'Thai' : 'th',
             'Turkish' : 'tr',
             'Ukrainian' : 'uk',
             'Vietnamese': 'vi',
             'BosnianLatin': 'bs',
             'Farsi' : 'fa',
             'Serbian (Cyrillic)': 'sr',
             'Chinese (Traditional)' : 'zh',
             'Chinese (Simplified)' : 'zh'}
def get_abbrev():
    """Return the two-letter code for the preferred-language addon setting."""
    selected = xbmcaddon.Addon().getSetting('limit_preferred_language')
    ### Unknown or unset selections fall back to English.
    return LANGUAGES.get(selected, 'en')
def get_language(abbrev):
    """Return the language name whose code is *abbrev*, or 'n/a' if unknown."""
    # Fix: the generator .next() method is Python 2 only; the next() builtin
    # with a default works on Python 2.6+ and Python 3 and also removes the
    # StopIteration handling.
    return next((key for key, value in LANGUAGES.items() if value == abbrev),
                'n/a')
|
haozhangphd/genx-py3
|
refs/heads/master
|
genx/filehandling.py
|
1
|
'''I/O functions for GenX.
These include loading of initilazation files.
Also included is the config object.
File started by: Matts Bjorck
$Rev:: $: Revision of last commit
$Author:: $: Author of last commit
$Date:: $: Date of last commit
'''
from six.moves import configparser as CP
import io
import six
import os
import sys
import h5py
StringIO = six.StringIO
# Functions to save the gx files
#==============================================================================
def save_file(fname, model, optimizer, config):
"""Saves objects model, optimiser and config into file fnmame
:param fname: string, ending with .gx3, .gx or .hgx
:param model:
:param optimizer:
:param config:
:return:
"""
if sys.version_info.major == 3:
if fname.endswith('.gx3'):
save_gx(fname, model, optimizer, config)
elif fname.endswith('.hgx'):
save_hgx(fname, model, optimizer, config)
else:
raise IOError('Wrong file ending, should be .gx3 or .hgx')
else:
if fname.endswith('.gx'):
save_gx(fname, model, optimizer, config)
elif fname.endswith('.hgx'):
save_hgx(fname, model, optimizer, config)
else:
raise IOError('Wrong file ending, should be .gx or .hgx')
model.filename = os.path.abspath(fname)
model.saved = True
def load_file(fname, model, optimizer, config):
    """Loads parameters from fname into model, optimizer and config.

    Accepts .gx3/.gx under Python 3, .gx under Python 2 and .hgx on both.
    """
    if fname.endswith('.hgx'):
        load_hgx(fname, model, optimizer, config)
    elif sys.version_info.major == 3:
        if fname.endswith(('.gx3', '.gx')):
            load_gx(fname, model, optimizer, config)
        else:
            raise IOError('Wrong file ending, should be .gx3, .gx or .hgx')
    else:
        if fname.endswith('.gx'):
            load_gx(fname, model, optimizer, config)
        else:
            raise IOError('Wrong file ending, should be .gx or .hgx')
    model.filename = os.path.abspath(fname)
def save_gx(fname, model, optimizer, config):
    """Write model, config dump and pickled optimizer state to a .gx file."""
    model.save(fname)
    model.save_addition('config', config.model_dump())
    # Evaluations are stripped from the pickle unless the user asked to
    # keep them all.
    keep_all = config.get_boolean('solver', 'save all evals')
    pickled_state = optimizer.pickle_string(clear_evals=not keep_all)
    model.save_addition('optimizer', pickled_state)
def save_hgx(fname, model, optimizer, config, group='current'):
    """ Saves the current objects to a hdf gx file (hgx).

    :param fname: filename
    :param model: model object
    :param optimizer: optimizer object
    :param config: config object
    :param group: name of the group, default current
    :return:
    """
    # 'with' guarantees the file is closed even if a write fails.
    with h5py.File(fname, 'w') as f:
        g = f.create_group(group)
        model.write_h5group(g)
        try:
            clear_evals = not config.get_boolean('solver', 'save all evals')
        except OptionError:
            # Option missing: default to discarding the evaluations.
            clear_evals = True
        # Fix: the computed flag was previously ignored and clear_evals=True
        # was always passed, so 'save all evals' had no effect here.
        optimizer.write_h5group(g.create_group('optimizer'),
                                clear_evals=clear_evals)
        g['config'] = config.model_dump().encode('latin1')
def load_hgx(fname, model, optimizer, config, group='current'):
    """ Loads the current objects from a hdf gx file (hgx).

    :param fname: filename
    :param model: model object
    :param optimizer: optimizer object
    :param config: config object
    :param group: name of the group, default current
    :return:
    """
    # 'with' guarantees the file is closed even if a read fails.
    with h5py.File(fname, 'r') as f:
        g = f[group]
        model.read_h5group(g)
        optimizer.read_h5group(g['optimizer'])
        # Fix: Dataset.value was deprecated and removed in h5py 3.x;
        # scalar indexing with [()] is the supported equivalent.
        config.load_model(g['config'][()].decode('latin1'))
# Not yet used ...
def load_gx(fname, model, optimizer, config):
    """Restore model, config and optimizer state from a pickled .gx file."""
    model.load(fname)
    config.load_model(model.load_addition('config'))
    pickled_state = model.load_addition('optimizer').encode('latin1')
    optimizer.pickle_load(pickled_state)
# Functions to handle optimiser configs
#==============================================================================
def load_opt_config(optimizer, config):
    """Load the config (Config class) values to the optimiser class (DiffEv class).

    Returns a tuple (error_bars_level, save_all_evals) holding the two
    values that do not live on the optimizer itself.
    """
    class _Holder:
        """Collects the values that do not belong to the optimizer."""
        error_bars_level = 1.05
        save_all_evals = False

        def set_error_bars_level(self, val):
            self.error_bars_level = val

        def set_save_all_evals(self, val):
            self.save_all_evals = val

    holder = _Holder()
    # Each option name paired with the setter that consumes its value.
    float_options = [
        ('km', optimizer.set_km),
        ('kr', optimizer.set_kr),
        ('pop mult', optimizer.set_pop_mult),
        ('pop size', optimizer.set_pop_size),
        ('max generations', optimizer.set_max_generations),
        ('max generation mult', optimizer.set_max_generation_mult),
        ('sleep time', optimizer.set_sleep_time),
        ('max log elements', optimizer.set_max_log),
        ('errorbar level', holder.set_error_bars_level),
        ('autosave interval', optimizer.set_autosave_interval),
        ('parallel processes', optimizer.set_processes),
        ('parallel chunksize', optimizer.set_chunksize),
        ('allowed fom discrepancy', optimizer.set_fom_allowed_dis),
    ]
    bool_options = [
        ('use pop mult', optimizer.set_use_pop_mult),
        ('use max generations', optimizer.set_use_max_generations),
        ('use start guess', optimizer.set_use_start_guess),
        ('use boundaries', optimizer.set_use_boundaries),
        ('use parallel processing', optimizer.set_use_parallel_processing),
        ('use autosave', optimizer.set_use_autosave),
        ('save all evals', holder.set_save_all_evals),
    ]
    # Make sure that the config is set
    if config:
        for option, setter in float_options:
            try:
                value = config.get_float('solver', option)
            except OptionError:
                print('Could not locate option solver.' + option)
            else:
                setter(value)
        for option, setter in bool_options:
            try:
                value = config.get_boolean('solver', option)
            except OptionError:
                print('Could not read option solver.' + option)
            else:
                setter(value)
        try:
            value = config.get('solver', 'create trial')
        except OptionError:
            print('Could not read option solver.create trial')
        else:
            try:
                optimizer.set_create_trial(value)
            except LookupError:
                print('The mutation scheme %s does not exist' % value)
    return holder.error_bars_level, holder.save_all_evals
def save_opt_config(optimizer, config, fom_error_bars_level=1.05, save_all_evals=False):
    """ Write the config values from optimizer (DiffEv class) to config (Config class).

    :param optimizer: DiffEv instance whose current settings are stored.
    :param config: Config instance that receives the values (no-op when falsy).
    :param fom_error_bars_level: error-bar level stored alongside.
    :param save_all_evals: whether all evaluations should be stored.
    """
    # Option names paired with the optimizer values they mirror.
    float_items = [
        ('km', optimizer.km),
        ('kr', optimizer.kr),
        ('pop mult', optimizer.pop_mult),
        ('pop size', optimizer.pop_size),
        ('max generations', optimizer.max_generations),
        ('max generation mult', optimizer.max_generation_mult),
        ('sleep time', optimizer.sleep_time),
        ('max log elements', optimizer.max_log),
        ('errorbar level', fom_error_bars_level),
        ('autosave interval', optimizer.autosave_interval),
        ('parallel processes', optimizer.processes),
        ('parallel chunksize', optimizer.chunksize),
        ('allowed fom discrepancy', optimizer.fom_allowed_dis),
    ]
    bool_items = [
        ('use pop mult', optimizer.use_pop_mult),
        ('use max generations', optimizer.use_max_generations),
        ('use start guess', optimizer.use_start_guess),
        ('use boundaries', optimizer.use_boundaries),
        ('use parallel processing', optimizer.use_parallel_processing),
        ('use autosave', optimizer.use_autosave),
        ('save all evals', save_all_evals),
    ]
    # Make sure that the config is set
    if config:
        for option, value in float_items:
            try:
                config.set('solver', option, value)
            except OptionError:
                # Fix: previously caught the non-existent io.OptionError, which
                # would itself raise AttributeError if config.set ever failed.
                print('Could not locate save solver.' + option)
        for option, value in bool_items:
            try:
                config.set('solver', option, value)
            except OptionError:
                print('Could not write option solver.' + option)
        try:
            config.set('solver', 'create trial', optimizer.get_create_trial())
        except OptionError:
            print('Could not write option solver.create trial')
#==============================================================================
class Config:
    '''Layered configuration for GenX.

    Two ConfigParser instances are kept: default_config (program-wide
    defaults loaded from file) and model_config (values embedded in a
    saved model). Getters consult the model configuration first and fall
    back to the defaults; OptionError is raised when neither has a value.
    '''
    def __init__(self):
        self.default_config = CP.ConfigParser()
        self.model_config = CP.ConfigParser()

    def load_default(self, filename):
        '''load_default(self, filename) --> None

        Loads the default config from file filename. Raises an IOError if
        the file can not be read.
        '''
        try:
            self.default_config.read(filename)
        except Exception:
            raise IOError('Could not load default config file', filename)

    def write_default(self, filename):
        '''write_default(self, filename) --> None

        Writes the current default configuration to filename.
        '''
        try:
            # Fix: ConfigParser.write emits str, so the file must be opened
            # in text mode; 'wb' raises TypeError under Python 3.
            with open(filename, 'w') as cfile:
                self.default_config.write(cfile)
        except Exception as e:
            print(e)
            raise IOError('Could not write default config file', filename)

    def load_model(self, str):
        '''load_model(self, str) --> None

        Loads a config from a string str. Raises an IOError if the string
        can not be parsed.
        '''
        # NOTE: the parameter name `str` shadows the builtin; it is kept
        # for backward compatibility with keyword callers.
        buffer = StringIO(str)
        self.model_config = CP.ConfigParser()
        try:
            if sys.version_info.major == 3:
                self.model_config.read_file(buffer)
            else:
                self.model_config.readfp(buffer)
        except Exception:
            raise IOError('Could not load model config file')

    def _getf(self, default_function, model_function, section, option):
        '''_getf(default_function, model_function, section, option) --> object

        Try to locate section/option first via model_function; if that
        fails, fall back to default_function. If both fail, raise an
        OptionError.
        '''
        try:
            return model_function(section, option)
        except Exception:
            try:
                return default_function(section, option)
            except Exception:
                raise OptionError(section, option)

    def get_float(self, section, option):
        '''Return a float value for option in section.'''
        return self._getf(self.default_config.getfloat,
                          self.model_config.getfloat, section, option)

    def get_boolean(self, section, option):
        '''Return a boolean value for option in section.'''
        return self._getf(self.default_config.getboolean,
                          self.model_config.getboolean, section, option)

    def get_int(self, section, option):
        '''Return an int value for option in section.'''
        return self._getf(self.default_config.getint,
                          self.model_config.getint, section, option)

    def get(self, section, option):
        '''Return a string value for option in section.'''
        return self._getf(self.default_config.get,
                          self.model_config.get, section, option)

    def model_set(self, section, option, value):
        '''Set a value in section, option for the model configuration.'''
        if not self.model_config.has_section(section):
            self.model_config.add_section(section)
        self.model_config.set(section, option, str(value))

    def default_set(self, section, option, value):
        '''Set a value in section, option for the default configuration.'''
        if not self.default_config.has_section(section):
            self.default_config.add_section(section)
        self.default_config.set(section, option, str(value))

    def set(self, section, option, value):
        '''Set a value in the model configuration (alias for model_set).'''
        self.model_set(section, option, value)

    def model_dump(self):
        '''model_dump(self) --> string

        Serializes the model configuration to a string.
        '''
        # Use a StringIO buffer as a file-like object for ConfigParser.
        buffer = StringIO()
        self.model_config.write(buffer)
        content = buffer.getvalue()
        buffer.close()
        return content
# END: class Config
#==============================================================================
#Some Exception definition for errorpassing
class GenericError(Exception):
    '''Root of this module's exception hierarchy.

    Only useful to check whether an error originates from the model
    library: all controllable errors raised here derive from this class,
    so anything else slipping through indicates a bug.
    '''
class IOError(GenericError):
    ''' Error class for input output, mostly concerning files.

    NOTE: intentionally shadows the builtin IOError within this module.
    '''
    def __init__(self, error_message, file = ''):
        '''__init__(self, error_message)'''
        self.error_message = error_message
        self.file = file

    def __str__(self):
        # Fix: the original referenced the bare names `file` and
        # `error_message` (NameError at format time) and never returned
        # the text (so str() yielded a TypeError).
        return ('Input/Output error for file:\n' + self.file +
                '\n\n Python error:\n ' + self.error_message)
class OptionError(GenericError):
    ''' Error class for not finding an option section pair in the
    configuration '''
    def __init__(self, section, option):
        '''Store the section/option pair that could not be resolved.'''
        self.section = section
        self.option = option

    def __str__(self):
        # Fix: 'loacate' typo in the user-facing message.
        text = 'Error in trying to locate values in GenX configuration.' +\
                '\nCould not locate the section: ' + self.section +\
                ' or option: ' + self.option + '.'
        return text
|
navrasio/mxnet
|
refs/heads/master
|
example/autoencoder/autoencoder.py
|
18
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring, arguments-differ
from __future__ import print_function
import logging
import mxnet as mx
import numpy as np
import model
from solver import Solver, Monitor
class AutoEncoderModel(model.MXModel):
    def setup(self, dims, sparseness_penalty=None, pt_dropout=None,
              ft_dropout=None, input_act=None, internal_act='relu', output_act=None):
        """Build the symbolic graphs for a stacked autoencoder.

        Creates N = len(dims) - 1 pre-training stacks plus the full
        encoder/decoder pair and the reconstruction loss.

        :param dims: layer sizes, dims[0] being the input dimension.
        :param sparseness_penalty: KL sparsity penalty forwarded to make_stack
            (applied only with sigmoid activations there), or None.
        :param pt_dropout: dropout rate used in the layer-wise pre-training stacks.
        :param ft_dropout: dropout rate used in the fine-tuning encoder/decoder.
        :param input_act: activation of the input reconstruction; 'softmax'
            switches the loss handling below.
        :param internal_act: activation for internal layers (default 'relu').
        :param output_act: activation of the final (bottleneck) encoder layer.
        """
        self.N = len(dims) - 1
        self.dims = dims
        self.stacks = []
        self.pt_dropout = pt_dropout
        self.ft_dropout = ft_dropout
        self.input_act = input_act
        self.internal_act = internal_act
        self.output_act = output_act
        self.data = mx.symbol.Variable('data')
        for i in range(self.N):
            # First stack: its decoder reproduces the raw input, so it uses
            # input_act and no input-side dropout.
            if i == 0:
                decoder_act = input_act
                idropout = None
            else:
                decoder_act = internal_act
                idropout = pt_dropout
            # Last stack: its encoder produces the bottleneck, so it uses
            # output_act and no output-side dropout.
            if i == self.N-1:
                encoder_act = output_act
                odropout = None
            else:
                encoder_act = internal_act
                odropout = pt_dropout
            istack, iargs, iargs_grad, iargs_mult, iauxs = self.make_stack(
                i, self.data, dims[i], dims[i+1], sparseness_penalty,
                idropout, odropout, encoder_act, decoder_act
            )
            self.stacks.append(istack)
            # NOTE(review): self.args/args_grad/args_mult/auxs are assumed to
            # be dicts initialized by model.MXModel -- confirm.
            self.args.update(iargs)
            self.args_grad.update(iargs_grad)
            self.args_mult.update(iargs_mult)
            self.auxs.update(iauxs)
        self.encoder, self.internals = self.make_encoder(
            self.data, dims, sparseness_penalty, ft_dropout, internal_act, output_act)
        self.decoder = self.make_decoder(
            self.encoder, dims, sparseness_penalty, ft_dropout, internal_act, input_act)
        if input_act == 'softmax':
            # Softmax reconstruction: make_stack wires its own output/loss.
            self.loss = self.decoder
        else:
            self.loss = mx.symbol.LinearRegressionOutput(data=self.decoder, label=self.data)
def make_stack(self, istack, data, num_input, num_hidden, sparseness_penalty=None,
idropout=None, odropout=None, encoder_act='relu', decoder_act='relu'):
x = data
if idropout:
x = mx.symbol.Dropout(data=x, p=idropout)
x = mx.symbol.FullyConnected(name='encoder_%d'%istack, data=x, num_hidden=num_hidden)
if encoder_act:
x = mx.symbol.Activation(data=x, act_type=encoder_act)
if encoder_act == 'sigmoid' and sparseness_penalty:
x = mx.symbol.IdentityAttachKLSparseReg(
data=x, name='sparse_encoder_%d' % istack, penalty=sparseness_penalty)
if odropout:
x = mx.symbol.Dropout(data=x, p=odropout)
x = mx.symbol.FullyConnected(name='decoder_%d'%istack, data=x, num_hidden=num_input)
if decoder_act == 'softmax':
x = mx.symbol.Softmax(data=x, label=data, prob_label=True, act_type=decoder_act)
elif decoder_act:
x = mx.symbol.Activation(data=x, act_type=decoder_act)
if decoder_act == 'sigmoid' and sparseness_penalty:
x = mx.symbol.IdentityAttachKLSparseReg(
data=x, name='sparse_decoder_%d' % istack, penalty=sparseness_penalty)
x = mx.symbol.LinearRegressionOutput(data=x, label=data)
else:
x = mx.symbol.LinearRegressionOutput(data=x, label=data)
args = {'encoder_%d_weight'%istack: mx.nd.empty((num_hidden, num_input), self.xpu),
'encoder_%d_bias'%istack: mx.nd.empty((num_hidden,), self.xpu),
'decoder_%d_weight'%istack: mx.nd.empty((num_input, num_hidden), self.xpu),
'decoder_%d_bias'%istack: mx.nd.empty((num_input,), self.xpu),}
args_grad = {'encoder_%d_weight'%istack: mx.nd.empty((num_hidden, num_input), self.xpu),
'encoder_%d_bias'%istack: mx.nd.empty((num_hidden,), self.xpu),
'decoder_%d_weight'%istack: mx.nd.empty((num_input, num_hidden), self.xpu),
'decoder_%d_bias'%istack: mx.nd.empty((num_input,), self.xpu),}
args_mult = {'encoder_%d_weight'%istack: 1.0,
'encoder_%d_bias'%istack: 2.0,
'decoder_%d_weight'%istack: 1.0,
'decoder_%d_bias'%istack: 2.0,}
auxs = {}
if encoder_act == 'sigmoid' and sparseness_penalty:
auxs['sparse_encoder_%d_moving_avg' % istack] = mx.nd.ones(num_hidden, self.xpu) * 0.5
if decoder_act == 'sigmoid' and sparseness_penalty:
auxs['sparse_decoder_%d_moving_avg' % istack] = mx.nd.ones(num_input, self.xpu) * 0.5
init = mx.initializer.Uniform(0.07)
for k, v in args.items():
init(mx.initializer.InitDesc(k), v)
return x, args, args_grad, args_mult, auxs
def make_encoder(self, data, dims, sparseness_penalty=None, dropout=None, internal_act='relu',
output_act=None):
x = data
internals = []
N = len(dims) - 1
for i in range(N):
x = mx.symbol.FullyConnected(name='encoder_%d'%i, data=x, num_hidden=dims[i+1])
if internal_act and i < N-1:
x = mx.symbol.Activation(data=x, act_type=internal_act)
if internal_act == 'sigmoid' and sparseness_penalty:
x = mx.symbol.IdentityAttachKLSparseReg(
data=x, name='sparse_encoder_%d' % i, penalty=sparseness_penalty)
elif output_act and i == N-1:
x = mx.symbol.Activation(data=x, act_type=output_act)
if output_act == 'sigmoid' and sparseness_penalty:
x = mx.symbol.IdentityAttachKLSparseReg(
data=x, name='sparse_encoder_%d' % i, penalty=sparseness_penalty)
if dropout:
x = mx.symbol.Dropout(data=x, p=dropout)
internals.append(x)
return x, internals
def make_decoder(self, feature, dims, sparseness_penalty=None, dropout=None,
internal_act='relu', input_act=None):
x = feature
N = len(dims) - 1
for i in reversed(range(N)):
x = mx.symbol.FullyConnected(name='decoder_%d'%i, data=x, num_hidden=dims[i])
if internal_act and i > 0:
x = mx.symbol.Activation(data=x, act_type=internal_act)
if internal_act == 'sigmoid' and sparseness_penalty:
x = mx.symbol.IdentityAttachKLSparseReg(
data=x, name='sparse_decoder_%d' % i, penalty=sparseness_penalty)
elif input_act and i == 0:
x = mx.symbol.Activation(data=x, act_type=input_act)
if input_act == 'sigmoid' and sparseness_penalty:
x = mx.symbol.IdentityAttachKLSparseReg(
data=x, name='sparse_decoder_%d' % i, penalty=sparseness_penalty)
if dropout and i > 0:
x = mx.symbol.Dropout(data=x, p=dropout)
return x
def layerwise_pretrain(self, X, batch_size, n_iter, optimizer, l_rate, decay,
lr_scheduler=None, print_every=1000):
def l2_norm(label, pred):
return np.mean(np.square(label-pred))/2.0
solver = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
lr_scheduler=lr_scheduler)
solver.set_metric(mx.metric.CustomMetric(l2_norm))
solver.set_monitor(Monitor(print_every))
data_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
last_batch_handle='roll_over')
for i in range(self.N):
if i == 0:
data_iter_i = data_iter
else:
X_i = list(model.extract_feature(
self.internals[i-1], self.args, self.auxs, data_iter, X.shape[0],
self.xpu).values())[0]
data_iter_i = mx.io.NDArrayIter({'data': X_i}, batch_size=batch_size,
last_batch_handle='roll_over')
logging.info('Pre-training layer %d...', i)
solver.solve(self.xpu, self.stacks[i], self.args, self.args_grad, self.auxs,
data_iter_i, 0, n_iter, {}, False)
def finetune(self, X, batch_size, n_iter, optimizer, l_rate, decay, lr_scheduler=None,
print_every=1000):
def l2_norm(label, pred):
return np.mean(np.square(label-pred))/2.0
solver = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
lr_scheduler=lr_scheduler)
solver.set_metric(mx.metric.CustomMetric(l2_norm))
solver.set_monitor(Monitor(print_every))
data_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
last_batch_handle='roll_over')
logging.info('Fine tuning...')
solver.solve(self.xpu, self.loss, self.args, self.args_grad, self.auxs, data_iter,
0, n_iter, {}, False)
def eval(self, X):
batch_size = 100
data_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=False,
last_batch_handle='pad')
Y = list(model.extract_feature(
self.loss, self.args, self.auxs, data_iter, X.shape[0], self.xpu).values())[0]
return np.mean(np.square(Y-X))/2.0
|
clouserw/olympia
|
refs/heads/master
|
apps/sharing/forms.py
|
22
|
from django import forms
from amo.helpers import absolutify
from translations.helpers import truncate
class ShareForm(forms.Form):
    """Validation-only form: exists purely for its field clean methods and
    is never rendered to the user."""
    title = forms.CharField()
    url = forms.CharField()
    description = forms.CharField(required=False)
    def clean_url(self):
        # Normalize the shared URL to an absolute one.
        raw_url = self.cleaned_data.get('url')
        return absolutify(raw_url)
    def clean_description(self):
        # Cap the description length at 250 characters.
        text = self.cleaned_data.get('description', '')
        return truncate(text, 250)
|
xiaoyuanW/gem5
|
refs/heads/master
|
src/arch/x86/isa/insts/simd128/floating_point/arithmetic/simultaneous_addition_and_subtraction.py
|
46
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# X86 microcode for the SSE3 horizontal add/subtract instructions.
# Both entries are placeholders: no implementation has been written yet.
microcode = '''
# ADDSUBPS
# ADDSUBPD
'''
|
MobinRanjbar/hue
|
refs/heads/master
|
desktop/core/ext-py/lxml-3.3.6/src/lxml/doctestcompare.py
|
43
|
"""
lxml-based doctest output comparison.
Note: normally, you should just import the `lxml.usedoctest` and
`lxml.html.usedoctest` modules from within a doctest, instead of this
one::
>>> import lxml.usedoctest # for XML output
>>> import lxml.html.usedoctest # for HTML output
To use this module directly, you must call ``lxmldoctest.install()``,
which will cause doctest to use this in all subsequent calls.
This changes the way output is checked and comparisons are made for
XML or HTML-like content.
XML or HTML content is noticed because the example starts with ``<``
(it's HTML if it starts with ``<html``). You can also use the
``PARSE_HTML`` and ``PARSE_XML`` flags to force parsing.
Some rough wildcard-like things are allowed. Whitespace is generally
ignored (except in attributes). In text (attributes and text in the
body) you can use ``...`` as a wildcard. In an example it also
matches any trailing tags in the element, though it does not match
leading tags. You may create a tag ``<any>`` or include an ``any``
attribute in the tag. An ``any`` tag matches any tag, while the
attribute matches any and all attributes.
When a match fails, the reformatted example and gotten text is
displayed (indented), and a rough diff-like output is given. Anything
marked with ``-`` is in the output but wasn't supposed to be, and
similarly ``+`` means it's in the example but wasn't in the output.
You can disable parsing on one line with ``# doctest:+NOPARSE_MARKUP``
"""
from lxml import etree
import sys
import re
import doctest
import cgi
__all__ = ['PARSE_HTML', 'PARSE_XML', 'NOPARSE_MARKUP', 'LXMLOutputChecker',
           'LHTMLOutputChecker', 'install', 'temp_install']
# Python 2/3 compatibility: basestring only exists on Python 2.
try:
    _basestring = basestring
except NameError:
    _basestring = (str, bytes)
_IS_PYTHON_3 = sys.version_info[0] >= 3
# Doctest option flags controlling whether/how example output is parsed.
PARSE_HTML = doctest.register_optionflag('PARSE_HTML')
PARSE_XML = doctest.register_optionflag('PARSE_XML')
NOPARSE_MARKUP = doctest.register_optionflag('NOPARSE_MARKUP')
# Keep a reference to the stock checker for the plain-text fallback path.
OutputChecker = doctest.OutputChecker
def strip(v):
    """Whitespace-strip *v*, passing None through unchanged."""
    return None if v is None else v.strip()
def norm_whitespace(v):
    """Collapse each run of two or more blanks/tabs/newlines into one space."""
    return re.sub(r'[ \t\n][ \t\n]+', ' ', v)
# HTML parser configured to fail loudly (recover=False) and to drop
# whitespace-only text nodes between elements.
_html_parser = etree.HTMLParser(recover=False, remove_blank_text=True)
def html_fromstring(html):
    """Parse *html* with the module's strict HTML parser."""
    return etree.fromstring(html, _html_parser)
# We use this to distinguish repr()s from elements:
_repr_re = re.compile(r'^<[^>]+ (at|object) ')
# Matches any run of two or more whitespace characters (space/tab/newline).
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
class LXMLOutputChecker(OutputChecker):
    """Doctest output checker that compares XML/HTML structurally.

    When an example looks like markup (or a PARSE_* flag forces it), both
    the expected and actual output are parsed and compared element by
    element, honoring ``...`` wildcards and ``any`` tags/attributes.
    Otherwise it falls back to the stock doctest text comparison.
    """
    # HTML void elements: these never get a closing tag when rendered.
    empty_tags = (
        'param', 'img', 'area', 'br', 'basefont', 'input',
        'base', 'meta', 'link', 'col')
    def get_default_parser(self):
        # Default to XML parsing; LHTMLOutputChecker overrides this.
        return etree.XML
    def check_output(self, want, got, optionflags):
        """Return True if *got* matches *want* under the given doctest flags."""
        # temp_install() patches these attributes onto the *old* checker so
        # calls get routed to the temporary replacement checker.
        alt_self = getattr(self, '_temp_override_self', None)
        if alt_self is not None:
            super_method = self._temp_call_super_check_output
            self = alt_self
        else:
            super_method = OutputChecker.check_output
        parser = self.get_parser(want, got, optionflags)
        if not parser:
            # Not markup: plain doctest text comparison.
            return super_method(
                self, want, got, optionflags)
        try:
            want_doc = parser(want)
        except etree.XMLSyntaxError:
            return False
        try:
            got_doc = parser(got)
        except etree.XMLSyntaxError:
            return False
        return self.compare_docs(want_doc, got_doc)
    def get_parser(self, want, got, optionflags):
        """Pick a parser for the pair, or None to force plain-text comparison."""
        parser = None
        if NOPARSE_MARKUP & optionflags:
            return None
        if PARSE_HTML & optionflags:
            parser = html_fromstring
        elif PARSE_XML & optionflags:
            parser = etree.XML
        elif (want.strip().lower().startswith('<html')
              and got.strip().startswith('<html')):
            parser = html_fromstring
        elif (self._looks_like_markup(want)
              and self._looks_like_markup(got)):
            parser = self.get_default_parser()
        return parser
    def _looks_like_markup(self, s):
        # Starts with '<' but is not an object repr such as "<Foo object at ...>".
        s = s.strip()
        return (s.startswith('<')
                and not _repr_re.search(s))
    def compare_docs(self, want, got):
        """Recursively compare two parsed elements for a wildcard-aware match."""
        if not self.tag_compare(want.tag, got.tag):
            return False
        if not self.text_compare(want.text, got.text, True):
            return False
        if not self.text_compare(want.tail, got.tail, True):
            return False
        if 'any' not in want.attrib:
            # An 'any' attribute on the expected element matches all attributes.
            want_keys = sorted(want.attrib.keys())
            got_keys = sorted(got.attrib.keys())
            if want_keys != got_keys:
                return False
            for key in want_keys:
                if not self.text_compare(want.attrib[key], got.attrib[key], False):
                    return False
        if want.text != '...' or len(want):
            want_children = list(want)
            got_children = list(got)
            while want_children or got_children:
                if not want_children or not got_children:
                    return False
                want_first = want_children.pop(0)
                got_first = got_children.pop(0)
                if not self.compare_docs(want_first, got_first):
                    return False
                if not got_children and want_first.tail == '...':
                    # A trailing '...' tail matches any remaining siblings.
                    break
        return True
    def text_compare(self, want, got, strip):
        """Compare two text chunks; '...' in *want* acts as a wildcard."""
        want = want or ''
        got = got or ''
        if strip:
            want = norm_whitespace(want).strip()
            got = norm_whitespace(got).strip()
        # Build a regex from the (escaped) expected text, turning the escaped
        # '...' back into '.*'.
        want = '^%s$' % re.escape(want)
        want = want.replace(r'\.\.\.', '.*')
        if re.search(want, got):
            return True
        else:
            return False
    def tag_compare(self, want, got):
        """Compare tag names; 'any' and a '{...}' namespace act as wildcards."""
        if want == 'any':
            return True
        if (not isinstance(want, _basestring)
            or not isinstance(got, _basestring)):
            # Comments/PIs have non-string "tags"; compare them directly.
            return want == got
        want = want or ''
        got = got or ''
        if want.startswith('{...}'):
            # Ellipsis on the namespace
            return want.split('}')[-1] == got.split('}')[-1]
        else:
            return want == got
    def output_difference(self, example, got, optionflags):
        """Render a human-readable Expected/Got/Diff report for a failure."""
        want = example.want
        parser = self.get_parser(want, got, optionflags)
        errors = []
        if parser is not None:
            try:
                want_doc = parser(want)
            except etree.XMLSyntaxError:
                e = sys.exc_info()[1]
                errors.append('In example: %s' % e)
            try:
                got_doc = parser(got)
            except etree.XMLSyntaxError:
                e = sys.exc_info()[1]
                errors.append('In actual output: %s' % e)
        if parser is None or errors:
            # Fall back to doctest's plain diff, prefixed by any parse errors.
            value = OutputChecker.output_difference(
                self, example, got, optionflags)
            if errors:
                errors.append(value)
                return '\n'.join(errors)
            else:
                return value
        html = parser is html_fromstring
        diff_parts = []
        diff_parts.append('Expected:')
        diff_parts.append(self.format_doc(want_doc, html, 2))
        diff_parts.append('Got:')
        diff_parts.append(self.format_doc(got_doc, html, 2))
        diff_parts.append('Diff:')
        diff_parts.append(self.collect_diff(want_doc, got_doc, html, 2))
        return '\n'.join(diff_parts)
    def html_empty_tag(self, el, html=True):
        """True if *el* is an HTML void element carrying no content."""
        if not html:
            return False
        if el.tag not in self.empty_tags:
            return False
        if el.text or len(el):
            # This shouldn't happen (contents in an empty tag)
            return False
        return True
    def format_doc(self, doc, html, indent, prefix=''):
        """Pretty-print *doc* with *indent* spaces; *prefix* marks diff lines."""
        parts = []
        if not len(doc):
            # No children...
            parts.append(' '*indent)
            parts.append(prefix)
            parts.append(self.format_tag(doc))
            if not self.html_empty_tag(doc, html):
                if strip(doc.text):
                    parts.append(self.format_text(doc.text))
                parts.append(self.format_end_tag(doc))
            if strip(doc.tail):
                parts.append(self.format_text(doc.tail))
            parts.append('\n')
            return ''.join(parts)
        parts.append(' '*indent)
        parts.append(prefix)
        parts.append(self.format_tag(doc))
        if not self.html_empty_tag(doc, html):
            parts.append('\n')
            if strip(doc.text):
                parts.append(' '*indent)
                parts.append(self.format_text(doc.text))
                parts.append('\n')
            for el in doc:
                parts.append(self.format_doc(el, html, indent+2))
            parts.append(' '*indent)
            parts.append(self.format_end_tag(doc))
            parts.append('\n')
        if strip(doc.tail):
            parts.append(' '*indent)
            parts.append(self.format_text(doc.tail))
            parts.append('\n')
        return ''.join(parts)
    def format_text(self, text, strip=True):
        """HTML-escape *text* (including quotes), optionally stripping it first."""
        if text is None:
            return ''
        if strip:
            text = text.strip()
        # NOTE(review): cgi.escape was removed in Python 3.8; html.escape is
        # the modern replacement, but its quote handling differs slightly.
        return cgi.escape(text, 1)
    def format_tag(self, el):
        """Render *el*'s start tag with attributes sorted by name."""
        attrs = []
        if isinstance(el, etree.CommentBase):
            # FIXME: probably PIs should be handled specially too?
            return '<!--'
        for name, value in sorted(el.attrib.items()):
            attrs.append('%s="%s"' % (name, self.format_text(value, False)))
        if not attrs:
            return '<%s>' % el.tag
        return '<%s %s>' % (el.tag, ' '.join(attrs))
    def format_end_tag(self, el):
        if isinstance(el, etree.CommentBase):
            # FIXME: probably PIs should be handled specially too?
            return '-->'
        return '</%s>' % el.tag
    def collect_diff(self, want, got, html, indent):
        """Recursively render the diff of *want* vs *got* as indented text."""
        parts = []
        if not len(want) and not len(got):
            parts.append(' '*indent)
            parts.append(self.collect_diff_tag(want, got))
            if not self.html_empty_tag(got, html):
                parts.append(self.collect_diff_text(want.text, got.text))
                parts.append(self.collect_diff_end_tag(want, got))
            parts.append(self.collect_diff_text(want.tail, got.tail))
            parts.append('\n')
            return ''.join(parts)
        parts.append(' '*indent)
        parts.append(self.collect_diff_tag(want, got))
        parts.append('\n')
        if strip(want.text) or strip(got.text):
            parts.append(' '*indent)
            parts.append(self.collect_diff_text(want.text, got.text))
            parts.append('\n')
        want_children = list(want)
        got_children = list(got)
        while want_children or got_children:
            if not want_children:
                # Extra elements in the actual output are marked with '-'.
                parts.append(self.format_doc(got_children.pop(0), html, indent+2, '-'))
                continue
            if not got_children:
                # Missing expected elements are marked with '+'.
                parts.append(self.format_doc(want_children.pop(0), html, indent+2, '+'))
                continue
            parts.append(self.collect_diff(
                want_children.pop(0), got_children.pop(0), html, indent+2))
        parts.append(' '*indent)
        parts.append(self.collect_diff_end_tag(want, got))
        parts.append('\n')
        if strip(want.tail) or strip(got.tail):
            parts.append(' '*indent)
            parts.append(self.collect_diff_text(want.tail, got.tail))
            parts.append('\n')
        return ''.join(parts)
    def collect_diff_tag(self, want, got):
        """Render *got*'s start tag annotated with attribute-level differences."""
        if not self.tag_compare(want.tag, got.tag):
            tag = '%s (got: %s)' % (want.tag, got.tag)
        else:
            tag = got.tag
        attrs = []
        any = want.tag == 'any' or 'any' in want.attrib
        for name, value in sorted(got.attrib.items()):
            if name not in want.attrib and not any:
                attrs.append('-%s="%s"' % (name, self.format_text(value, False)))
            else:
                if name in want.attrib:
                    text = self.collect_diff_text(want.attrib[name], value, False)
                else:
                    text = self.format_text(value, False)
                attrs.append('%s="%s"' % (name, text))
        if not any:
            for name, value in sorted(want.attrib.items()):
                if name in got.attrib:
                    continue
                attrs.append('+%s="%s"' % (name, self.format_text(value, False)))
        if attrs:
            tag = '<%s %s>' % (tag, ' '.join(attrs))
        else:
            tag = '<%s>' % tag
        return tag
    def collect_diff_end_tag(self, want, got):
        if want.tag != got.tag:
            tag = '%s (got: %s)' % (want.tag, got.tag)
        else:
            tag = got.tag
        return '</%s>' % tag
    def collect_diff_text(self, want, got, strip=True):
        """Render *got*, annotated with the expected text when they differ."""
        if self.text_compare(want, got, strip):
            if not got:
                return ''
            return self.format_text(got, strip)
        text = '%s (got: %s)' % (want, got)
        return self.format_text(text, strip)
class LHTMLOutputChecker(LXMLOutputChecker):
    """Variant of LXMLOutputChecker that parses examples as HTML by default."""
    def get_default_parser(self):
        return html_fromstring
def install(html=False):
    """
    Install doctestcompare for all future doctests.

    If html is true, the HTML parser becomes the default checker parser;
    otherwise the XML parser is used.
    """
    checker_cls = LHTMLOutputChecker if html else LXMLOutputChecker
    doctest.OutputChecker = checker_cls
def temp_install(html=False, del_module=None):
    """
    Use this *inside* a doctest to enable this checker for this
    doctest only.

    If html is true, then by default the HTML parser will be used;
    otherwise the XML parser is used.
    """
    if html:
        Checker = LHTMLOutputChecker
    else:
        Checker = LXMLOutputChecker
    # Locate the frame of the doctest runner that invoked us.
    frame = _find_doctest_frame()
    dt_self = frame.f_locals['self']
    checker = Checker()
    old_checker = dt_self._checker
    dt_self._checker = checker
    # The unfortunate thing is that there is a local variable 'check'
    # in the function that runs the doctests, that is a bound method
    # into the output checker.  We have to update that.  We can't
    # modify the frame, so we have to modify the object in place.  The
    # only way to do this is to actually change the func_code
    # attribute of the method.  We change it, and then wait for
    # __record_outcome to be run, which signals the end of the __run
    # method, at which point we restore the previous check_output
    # implementation.
    if _IS_PYTHON_3:
        check_func = frame.f_locals['check'].__func__
        checker_check_func = checker.check_output.__func__
    else:
        check_func = frame.f_locals['check'].im_func
        checker_check_func = checker.check_output.im_func
    # Because we can't patch up func_globals, this is the only global
    # in check_output that we care about:
    doctest.etree = etree
    # _RestoreChecker installs the patch and undoes it when the doctest ends.
    _RestoreChecker(dt_self, old_checker, checker,
                    check_func, checker_check_func,
                    del_module)
class _RestoreChecker(object):
    """Undo temp_install()'s patches once the current doctest finishes.

    temp_install() swaps the active checker by rewriting the code object of
    the runner's bound ``check`` method; this object hooks
    DocTestRunner.__record_outcome so everything is restored afterwards.
    """
    def __init__(self, dt_self, old_checker, new_checker, check_func, clone_func,
                 del_module):
        self.dt_self = dt_self
        self.checker = old_checker
        # Route calls on the old checker to the temporary replacement.
        self.checker._temp_call_super_check_output = self.call_super
        self.checker._temp_override_self = new_checker
        self.check_func = check_func
        self.clone_func = clone_func
        self.del_module = del_module
        self.install_clone()
        self.install_dt_self()
    def install_clone(self):
        # Swap the runner's check function's code object for the new
        # checker's check_output code, remembering the original.
        if _IS_PYTHON_3:
            self.func_code = self.check_func.__code__
            self.func_globals = self.check_func.__globals__
            self.check_func.__code__ = self.clone_func.__code__
        else:
            self.func_code = self.check_func.func_code
            self.func_globals = self.check_func.func_globals
            self.check_func.func_code = self.clone_func.func_code
    def uninstall_clone(self):
        # Put the original code object back.
        if _IS_PYTHON_3:
            self.check_func.__code__ = self.func_code
        else:
            self.check_func.func_code = self.func_code
    def install_dt_self(self):
        # Hook the runner's (name-mangled) __record_outcome so we get called
        # when the doctest completes.
        self.prev_func = self.dt_self._DocTestRunner__record_outcome
        self.dt_self._DocTestRunner__record_outcome = self
    def uninstall_dt_self(self):
        self.dt_self._DocTestRunner__record_outcome = self.prev_func
    def uninstall_module(self):
        # Optionally drop the module that triggered temp_install() so it is
        # re-imported (and re-installed) by the next doctest that needs it.
        if self.del_module:
            import sys
            del sys.modules[self.del_module]
            if '.' in self.del_module:
                package, module = self.del_module.rsplit('.', 1)
                package_mod = sys.modules[package]
                delattr(package_mod, module)
    def __call__(self, *args, **kw):
        # Invoked in place of __record_outcome: undo all patches, then
        # delegate to the original implementation.
        self.uninstall_clone()
        self.uninstall_dt_self()
        del self.checker._temp_override_self
        del self.checker._temp_call_super_check_output
        result = self.prev_func(*args, **kw)
        self.uninstall_module()
        return result
    def call_super(self, *args, **kw):
        # Run the *original* check_output with the patch temporarily removed.
        self.uninstall_clone()
        try:
            return self.check_func(*args, **kw)
        finally:
            self.install_clone()
def _find_doctest_frame():
import sys
frame = sys._getframe(1)
while frame:
l = frame.f_locals
if 'BOOM' in l:
# Sign of doctest
return frame
frame = frame.f_back
raise LookupError(
"Could not find doctest (only use this function *inside* a doctest)")
# Doctest self-tests. NOTE: the examples use Python 2 `print` statements,
# so they only execute correctly under Python 2's doctest.
__test__ = {
    'basic': '''
    >>> temp_install()
    >>> print """<xml a="1" b="2">stuff</xml>"""
    <xml b="2" a="1">...</xml>
    >>> print """<xml xmlns="http://example.com"><tag attr="bar" /></xml>"""
    <xml xmlns="...">
      <tag attr="..." />
    </xml>
    >>> print """<xml>blahblahblah<foo /></xml>""" # doctest: +NOPARSE_MARKUP, +ELLIPSIS
    <xml>...foo /></xml>
    '''}
if __name__ == '__main__':
    import doctest
    doctest.testmod()
|
ActiveState/code
|
refs/heads/master
|
recipes/Python/305267_Basics_heapq__wpyth24/recipe-305267.py
|
1
|
# an unordered list of numbers
the_list=[903, 10, 35, 69, 933, 485, 519, 379, 102, 402, 883, 1]
#standard list.sort() techniques
#to get lowest element which is 1, sort and pop
the_list.sort()
print the_list.pop(0)
>>> 1
#if you get more data, you need to sort again before popping
the_list.append(0)
print the_list.pop(0)
>>> 10 -- oops not zero, didn't sort
#a heap solves this problem, by always being ordered
#to create a heap you push with heapq.heappush onto a list
import heapq
the_heap = []
for i in the_list: heapq.heappush(the_heap, i)
print the_heap
#note how zero is first, but the heap isn't fully ordered
>>> [0, 35, 102, 379, 69, 485, 519, 883, 903, 933, 402]
#if you add some more zeros, the fact that it is not fully sorted
#becomes more obvious, look at where the zeros are at
heapq.heappush(the_heap,0)
heapq.heappush(the_heap,0)
print the_heap
>>> [0, 35, 0, 379, 69, 0, 519, 883, 903, 933, 402, 485, 102]
#But, you will still get data back in an ordered way when you pop
print heapq.heappop(the_heap)
>>> 0
print heapq.heappop(the_heap)
>>> 0
print the_heap
>>> [0, 35, 102, 379, 69, 485, 519, 883, 903, 933, 402]
#The method heapreplace is a combination of a pop and a push.
#In this case the smallest element 0 is popped off and 200 is inserted at
#some other place into the heap
print heapq.heapreplace(the_heap, 200)
>>> 0
print the_heap
>>> [35, 69, 102, 379, 200, 485, 519, 883, 903, 933, 402]
#Ask for 5 largest or smallest-- does an actual sort for this
print heapq.nlargest(5,the_heap) #[933, 903, 883, 519, 485]
print heapq.nsmallest(5,the_heap) #[35, 69, 102, 200, 379]
#Popping everything off of the heap will give sorted results
while 1:
try:
print heapq.heappop(the_heap),
except IndexError:
break
>>> 35 69 102 200 379 402 485 519 883 903 933
|
xuanyuanking/spark
|
refs/heads/master
|
examples/src/main/python/mllib/summary_statistics_example.py
|
27
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import SparkContext
# $example on$
import numpy as np
from pyspark.mllib.stat import Statistics
# $example off$
if __name__ == "__main__":
    # Driver entry point: build a tiny RDD of NumPy vectors and print its
    # column-wise summary statistics.
    sc = SparkContext(appName="SummaryStatisticsExample")  # SparkContext
    # $example on$
    mat = sc.parallelize(
        [np.array([1.0, 10.0, 100.0]), np.array([2.0, 20.0, 200.0]), np.array([3.0, 30.0, 300.0])]
    )  # an RDD of Vectors
    # Compute column summary statistics.
    summary = Statistics.colStats(mat)
    print(summary.mean())  # a dense vector containing the mean value for each column
    print(summary.variance())  # column-wise variance
    print(summary.numNonzeros())  # number of nonzeros in each column
    # $example off$
    # Release cluster resources before exiting.
    sc.stop()
|
sadmansk/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/webdriver/webdriver/__init__.py
|
20
|
# flake8: noqa
from client import (
Cookies,
Element,
Find,
Frame,
Session,
Timeouts,
Window)
from error import (
ElementNotSelectableException,
ElementNotVisibleException,
InvalidArgumentException,
InvalidCookieDomainException,
InvalidElementCoordinatesException,
InvalidElementStateException,
InvalidSelectorException,
InvalidSessionIdException,
JavascriptErrorException,
MoveTargetOutOfBoundsException,
NoSuchAlertException,
NoSuchElementException,
NoSuchFrameException,
NoSuchWindowException,
ScriptTimeoutException,
SessionNotCreatedException,
StaleElementReferenceException,
TimeoutException,
UnableToSetCookieException,
UnexpectedAlertOpenException,
UnknownCommandException,
UnknownErrorException,
UnknownMethodException,
UnsupportedOperationException,
WebDriverException)
|
blaze33/django
|
refs/heads/ticket_19456
|
tests/modeltests/many_to_many/tests.py
|
109
|
from __future__ import absolute_import
from django.test import TestCase
from django.utils import six
from .models import Article, Publication
class ManyToManyTests(TestCase):
def setUp(self):
# Create a couple of Publications.
self.p1 = Publication.objects.create(id=None, title='The Python Journal')
self.p2 = Publication.objects.create(id=None, title='Science News')
self.p3 = Publication.objects.create(id=None, title='Science Weekly')
self.p4 = Publication.objects.create(title='Highlights for Children')
self.a1 = Article.objects.create(id=None, headline='Django lets you build Web apps easily')
self.a1.publications.add(self.p1)
self.a2 = Article.objects.create(id=None, headline='NASA uses Python')
self.a2.publications.add(self.p1, self.p2, self.p3, self.p4)
self.a3 = Article.objects.create(headline='NASA finds intelligent life on Earth')
self.a3.publications.add(self.p2)
self.a4 = Article.objects.create(headline='Oxygen-free diet works wonders')
self.a4.publications.add(self.p2)
def test_add(self):
# Create an Article.
a5 = Article(id=None, headline='Django lets you reate Web apps easily')
# You can't associate it with a Publication until it's been saved.
self.assertRaises(ValueError, getattr, a5, 'publications')
# Save it!
a5.save()
# Associate the Article with a Publication.
a5.publications.add(self.p1)
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: The Python Journal>'])
# Create another Article, and set it to appear in both Publications.
a6 = Article(id=None, headline='ESA uses Python')
a6.save()
a6.publications.add(self.p1, self.p2)
a6.publications.add(self.p3)
# Adding a second time is OK
a6.publications.add(self.p3)
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Adding an object of the wrong type raises TypeError
with six.assertRaisesRegex(self, TypeError, "'Publication' instance expected, got <Article.*"):
a6.publications.add(a5)
# Add a Publication directly via publications.add by using keyword arguments.
p4 = a6.publications.create(title='Highlights for Adults')
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Adults>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_reverse_add(self):
# Adding via the 'other' end of an m2m
a5 = Article(headline='NASA finds intelligent life on Mars')
a5.save()
self.p2.article_set.add(a5)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: Science News>'])
# Adding via the other end using keywords
new_article = self.p2.article_set.create(headline='Carbon-free diet works wonders')
self.assertQuerysetEqual(
self.p2.article_set.all(),
[
'<Article: Carbon-free diet works wonders>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
a6 = self.p2.article_set.all()[3]
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_related_sets(self):
# Article objects have access to their related Publication objects.
self.assertQuerysetEqual(self.a1.publications.all(),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(self.a2.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Publication objects have access to their related Article objects.
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p1.article_set.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(Publication.objects.get(id=self.p4.id).article_set.all(),
['<Article: NASA uses Python>'])
    def test_selects(self):
        """Forward kwarg lookups (filter/exclude) that traverse the m2m join."""
        # We can perform kwarg queries across m2m relationships
        self.assertQuerysetEqual(
            Article.objects.filter(publications__id__exact=self.p1.id),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA uses Python>',
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(publications__pk=self.p1.id),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA uses Python>',
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(publications=self.p1.id),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA uses Python>',
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(publications=self.p1),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA uses Python>',
            ])
        # The m2m join can yield one row per match, hence the duplicate
        # 'NASA uses Python' below until distinct() is applied.
        self.assertQuerysetEqual(
            Article.objects.filter(publications__title__startswith="Science"),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(publications__title__startswith="Science").distinct(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        # The count() function respects distinct() as well.
        self.assertEqual(Article.objects.filter(publications__title__startswith="Science").count(), 4)
        self.assertEqual(Article.objects.filter(publications__title__startswith="Science").distinct().count(), 3)
        self.assertQuerysetEqual(
            Article.objects.filter(publications__in=[self.p1.id,self.p2.id]).distinct(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        # __in accepts a mix of pks and model instances.
        self.assertQuerysetEqual(
            Article.objects.filter(publications__in=[self.p1.id,self.p2]).distinct(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(publications__in=[self.p1,self.p2]).distinct(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        # Excluding a related item works as you would expect, too (although the SQL
        # involved is a little complex).
        self.assertQuerysetEqual(Article.objects.exclude(publications=self.p2),
            ['<Article: Django lets you build Web apps easily>'])
    def test_reverse_selects(self):
        """Kwarg lookups starting on the side without the ManyToManyField."""
        # Reverse m2m queries are supported (i.e., starting at the table that
        # doesn't have a ManyToManyField).
        self.assertQuerysetEqual(Publication.objects.filter(id__exact=self.p1.id),
            ['<Publication: The Python Journal>'])
        self.assertQuerysetEqual(Publication.objects.filter(pk=self.p1.id),
            ['<Publication: The Python Journal>'])
        # 'Science News' matches two NASA articles, hence the duplicate row.
        self.assertQuerysetEqual(
            Publication.objects.filter(article__headline__startswith="NASA"),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
        self.assertQuerysetEqual(Publication.objects.filter(article__id__exact=self.a1.id),
            ['<Publication: The Python Journal>'])
        self.assertQuerysetEqual(Publication.objects.filter(article__pk=self.a1.id),
            ['<Publication: The Python Journal>'])
        self.assertQuerysetEqual(Publication.objects.filter(article=self.a1.id),
            ['<Publication: The Python Journal>'])
        self.assertQuerysetEqual(Publication.objects.filter(article=self.a1),
            ['<Publication: The Python Journal>'])
        # __in accepts pks, instances, or a mix of both.
        self.assertQuerysetEqual(
            Publication.objects.filter(article__in=[self.a1.id,self.a2.id]).distinct(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
        self.assertQuerysetEqual(
            Publication.objects.filter(article__in=[self.a1.id,self.a2]).distinct(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
        self.assertQuerysetEqual(
            Publication.objects.filter(article__in=[self.a1,self.a2]).distinct(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
    def test_delete(self):
        """Deleting either side of the relation removes the m2m rows too."""
        # If we delete a Publication, its Articles won't be able to access it.
        self.p1.delete()
        self.assertQuerysetEqual(Publication.objects.all(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
            ])
        self.assertQuerysetEqual(self.a1.publications.all(), [])
        # If we delete an Article, its Publications won't be able to access it.
        self.a2.delete()
        self.assertQuerysetEqual(Article.objects.all(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ])
    def test_bulk_delete(self):
        """Queryset-level delete() also clears the m2m rows of the victims."""
        # Bulk delete some Publications - references to deleted publications should go
        Publication.objects.filter(title__startswith='Science').delete()
        self.assertQuerysetEqual(Publication.objects.all(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: The Python Journal>',
            ])
        self.assertQuerysetEqual(Article.objects.all(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(self.a2.publications.all(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: The Python Journal>',
            ])
        # Bulk delete some articles - references to deleted objects should go
        q = Article.objects.filter(headline__startswith='Django')
        self.assertQuerysetEqual(q, ['<Article: Django lets you build Web apps easily>'])
        q.delete()
        # After the delete, the QuerySet cache needs to be cleared,
        # and the referenced objects should be gone
        self.assertQuerysetEqual(q, [])
        self.assertQuerysetEqual(self.p1.article_set.all(),
            ['<Article: NASA uses Python>'])
    def test_remove(self):
        """remove() works from either side and is reflected on both."""
        # Removing publication from an article:
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.a4.publications.remove(self.p2)
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
            ])
        self.assertQuerysetEqual(self.a4.publications.all(), [])
        # And from the other end
        self.p2.article_set.remove(self.a3)
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA uses Python>',
            ])
        self.assertQuerysetEqual(self.a3.publications.all(), [])
    def test_assign(self):
        """Direct assignment to the related manager replaces the whole set."""
        # Relation sets can be assigned. Assignment clears any existing set members
        self.p2.article_set = [self.a4, self.a3]
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(self.a4.publications.all(),
            ['<Publication: Science News>'])
        # Assigning by primary key is also accepted.
        self.a4.publications = [self.p3.id]
        self.assertQuerysetEqual(self.p2.article_set.all(),
            ['<Article: NASA finds intelligent life on Earth>'])
        self.assertQuerysetEqual(self.a4.publications.all(),
            ['<Publication: Science Weekly>'])
        # An alternate to calling clear() is to assign the empty set
        self.p2.article_set = []
        self.assertQuerysetEqual(self.p2.article_set.all(), [])
        self.a4.publications = []
        self.assertQuerysetEqual(self.a4.publications.all(), [])
    def test_assign_ids(self):
        """Assignment accepts primary key values, not just model instances."""
        # Relation sets can also be set using primary key values
        self.p2.article_set = [self.a4.id, self.a3.id]
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(self.a4.publications.all(),
            ['<Publication: Science News>'])
        self.a4.publications = [self.p3.id]
        self.assertQuerysetEqual(self.p2.article_set.all(),
            ['<Article: NASA finds intelligent life on Earth>'])
        self.assertQuerysetEqual(self.a4.publications.all(),
            ['<Publication: Science Weekly>'])
    def test_clear(self):
        """clear() empties the set from either side of the relation."""
        # Relation sets can be cleared:
        self.p2.article_set.clear()
        self.assertQuerysetEqual(self.p2.article_set.all(), [])
        self.assertQuerysetEqual(self.a4.publications.all(), [])
        # And you can clear from the other end
        self.p2.article_set.add(self.a3, self.a4)
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(self.a4.publications.all(),
            [
                '<Publication: Science News>',
            ])
        self.a4.publications.clear()
        self.assertQuerysetEqual(self.a4.publications.all(), [])
        self.assertQuerysetEqual(self.p2.article_set.all(),
            ['<Article: NASA finds intelligent life on Earth>'])
|
BrianVermeire/PyFR
|
refs/heads/develop
|
pyfr/solvers/baseadvecdiff/__init__.py
|
8
|
# -*- coding: utf-8 -*-
from pyfr.solvers.baseadvecdiff.system import BaseAdvectionDiffusionSystem
from pyfr.solvers.baseadvecdiff.elements import BaseAdvectionDiffusionElements
from pyfr.solvers.baseadvecdiff.inters import (BaseAdvectionDiffusionBCInters,
BaseAdvectionDiffusionIntInters,
BaseAdvectionDiffusionMPIInters)
|
lflrocha/ebc.monitoramento
|
refs/heads/master
|
ebc/monitoramento/portlets/__init__.py
|
9480
|
#
|
avicizhu/Load-balancer
|
refs/heads/master
|
.waf-1.6.11-30618c54883417962c38f5d395f83584/waflib/Tools/glib2.py
|
14
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Task,Utils,Options,Errors,Logs
from waflib.TaskGen import taskgen_method,before_method,after_method,feature
def add_marshal_file(self,filename,prefix):
	"""Queue *filename* for glib-genmarshal processing with marshaller
	prefix *prefix*. Schedules the process_marshal method on first use."""
	first_use=not hasattr(self,'marshal_list')
	if first_use:
		self.marshal_list=[]
		self.meths.append('process_marshal')
	self.marshal_list.append((filename,prefix))
def process_marshal(self):
	# Task-generator method: turn each (file, prefix) pair registered via
	# add_marshal_file() into a glib_genmarshal task producing a .h/.c pair.
	for f,prefix in getattr(self,'marshal_list',[]):
		node=self.path.find_resource(f)
		if not node:
			raise Errors.WafError('file not found %r'%f)
		h_node=node.change_ext('.h')
		c_node=node.change_ext('.c')
		task=self.create_task('glib_genmarshal',node,[h_node,c_node])
		task.env.GLIB_GENMARSHAL_PREFIX=prefix
	# Append the generated .c so it is compiled with the other sources.
	# NOTE(review): only the *last* c_node is appended -- looks wrong when
	# several marshal files are registered; confirm against upstream waf.
	self.source=self.to_nodes(getattr(self,'source',[]))
	self.source.append(c_node)
class glib_genmarshal(Task.Task):
	# Run glib-genmarshal twice: once for the header, once for the body.
	def run(self):
		bld=self.inputs[0].__class__.ctx
		get=self.env.get_flat
		# 1. generate the header
		cmd1="%s %s --prefix=%s --header > %s"%(get('GLIB_GENMARSHAL'),self.inputs[0].srcpath(),get('GLIB_GENMARSHAL_PREFIX'),self.outputs[0].abspath())
		ret=bld.exec_command(cmd1)
		if ret:return ret
		# make the .c file include the header just generated
		c='''#include "%s"\n'''%self.outputs[0].name
		self.outputs[1].write(c)
		# 2. generate the body, appended after the #include line
		cmd2="%s %s --prefix=%s --body >> %s"%(get('GLIB_GENMARSHAL'),self.inputs[0].srcpath(),get('GLIB_GENMARSHAL_PREFIX'),self.outputs[1].abspath())
		return bld.exec_command(cmd2)
	vars=['GLIB_GENMARSHAL_PREFIX','GLIB_GENMARSHAL']
	color='BLUE'
	ext_out=['.h']
def add_enums_from_template(self,source='',target='',template='',comments=''):
	"""Queue a glib-mkenums run driven by a template file.
	Schedules the process_enums method on first use."""
	if not hasattr(self,'enums_list'):
		self.enums_list=[]
		self.meths.append('process_enums')
	entry={'source':source,'target':target,'template':template,'comments':comments}
	for key in('file-head','file-prod','file-tail','enum-prod','value-head','value-prod','value-tail'):
		entry[key]=''
	self.enums_list.append(entry)
def add_enums(self,source='',target='',file_head='',file_prod='',file_tail='',enum_prod='',value_head='',value_prod='',value_tail='',comments=''):
	"""Queue a glib-mkenums run configured through individual option strings
	(no template file). Schedules the process_enums method on first use."""
	if not hasattr(self,'enums_list'):
		self.enums_list=[]
		self.meths.append('process_enums')
	self.enums_list.append({
		'source':source,
		'template':'',
		'target':target,
		'file-head':file_head,
		'file-prod':file_prod,
		'file-tail':file_tail,
		'enum-prod':enum_prod,
		'value-head':value_head,
		'value-prod':value_prod,
		'value-tail':value_tail,
		'comments':comments,
	})
def process_enums(self):
	# Task-generator method: create one glib-mkenums task per entry queued
	# by add_enums()/add_enums_from_template().
	for enum in getattr(self,'enums_list',[]):
		task=self.create_task('glib_mkenums')
		env=task.env
		inputs=[]
		# resolve the source nodes
		source_list=self.to_list(enum['source'])
		if not source_list:
			raise Errors.WafError('missing source '+str(enum))
		source_list=[self.path.find_resource(k)for k in source_list]
		inputs+=source_list
		env['GLIB_MKENUMS_SOURCE']=[k.abspath()for k in source_list]
		# resolve the target node; a generated .c is compiled with the sources
		if not enum['target']:
			raise Errors.WafError('missing target '+str(enum))
		tgt_node=self.path.find_or_declare(enum['target'])
		if tgt_node.name.endswith('.c'):
			self.source.append(tgt_node)
		env['GLIB_MKENUMS_TARGET']=tgt_node.abspath()
		# assemble the glib-mkenums command-line options
		options=[]
		if enum['template']:
			template_node=self.path.find_resource(enum['template'])
			options.append('--template %s'%(template_node.abspath()))
			inputs.append(template_node)
		params={'file-head':'--fhead','file-prod':'--fprod','file-tail':'--ftail','enum-prod':'--eprod','value-head':'--vhead','value-prod':'--vprod','value-tail':'--vtail','comments':'--comments'}
		for param,option in params.items():
			if enum[param]:
				options.append('%s %r'%(option,enum[param]))
		env['GLIB_MKENUMS_OPTIONS']=' '.join(options)
		task.set_inputs(inputs)
		task.set_outputs(tgt_node)
class glib_mkenums(Task.Task):
	# Generate enum registration code by shelling out to glib-mkenums.
	run_str='${GLIB_MKENUMS} ${GLIB_MKENUMS_OPTIONS} ${GLIB_MKENUMS_SOURCE} > ${GLIB_MKENUMS_TARGET}'
	color='PINK'
	ext_out=['.h']
def add_settings_schemas(self,filename_list):
	"""Register one or more GSettings schema files (a single name or a
	list of names) for validation and installation."""
	if not isinstance(filename_list,list):
		filename_list=[filename_list]
	schemas=getattr(self,'settings_schema_files',None)
	if schemas is None:
		schemas=self.settings_schema_files=[]
	schemas.extend(filename_list)
def add_settings_enums(self,namespace,filename_list):
	"""Register enum header files to be turned into a GSettings enums XML
	under *namespace*. May only be called once per task generator."""
	if hasattr(self,'settings_enum_namespace'):
		raise Errors.WafError("Tried to add gsettings enums to '%s' more than once"%self.name)
	self.settings_enum_namespace=namespace
	# bug fix: the original compared type(filename_list) to the *string*
	# 'list', which is never equal, so a real list was wrapped again and
	# became [['a.h','b.h']]; use isinstance for the intended normalization
	if not isinstance(filename_list,list):
		filename_list=[filename_list]
	self.settings_enum_files=filename_list
def r_change_ext(self,ext):
	"""Return a sibling node whose extension is replaced by *ext* (or with
	*ext* appended when the name has no extension)."""
	stem,dot,_=self.name.rpartition('.')
	new_name=(stem if dot else self.name)+ext
	return self.parent.find_or_declare([new_name])
def process_settings(self):
	# Feature method for 'glib2': optionally generate an enums XML from the
	# registered enum headers, validate every schema file, install the
	# results and refresh the schema cache after install.
	enums_tgt_node=[]
	install_files=[]
	settings_schema_files=getattr(self,'settings_schema_files',[])
	if settings_schema_files and not self.env['GLIB_COMPILE_SCHEMAS']:
		raise Errors.WafError("Unable to process GSettings schemas - glib-compile-schemas was not found during configure")
	# 1. generate <namespace>.enums.xml from the registered enum headers
	if hasattr(self,'settings_enum_files'):
		enums_task=self.create_task('glib_mkenums')
		source_list=self.settings_enum_files
		source_list=[self.path.find_resource(k)for k in source_list]
		enums_task.set_inputs(source_list)
		enums_task.env['GLIB_MKENUMS_SOURCE']=[k.abspath()for k in source_list]
		target=self.settings_enum_namespace+'.enums.xml'
		tgt_node=self.path.find_or_declare(target)
		enums_task.set_outputs(tgt_node)
		enums_task.env['GLIB_MKENUMS_TARGET']=tgt_node.abspath()
		enums_tgt_node=[tgt_node]
		install_files.append(tgt_node)
		options='--comments "<!-- @comment@ -->" --fhead "<schemalist>" --vhead " <@type@ id=\\"%s.@EnumName@\\">" --vprod " <value nick=\\"@valuenick@\\" value=\\"@valuenum@\\"/>" --vtail " </@type@>" --ftail "</schemalist>" '%(self.settings_enum_namespace)
		enums_task.env['GLIB_MKENUMS_OPTIONS']=options
	# 2. validate each schema file (together with the enums XML, if any)
	for schema in settings_schema_files:
		schema_task=self.create_task('glib_validate_schema')
		schema_node=self.path.find_resource(schema)
		if not schema_node:
			raise Errors.WafError("Cannot find the schema file '%s'"%schema)
		install_files.append(schema_node)
		source_list=enums_tgt_node+[schema_node]
		schema_task.set_inputs(source_list)
		schema_task.env['GLIB_COMPILE_SCHEMAS_OPTIONS']=[("--schema-file="+k.abspath())for k in source_list]
		target_node=r_change_ext(schema_node,'.xml.valid')
		schema_task.set_outputs(target_node)
		schema_task.env['GLIB_VALIDATE_SCHEMA_OUTPUT']=target_node.abspath()
	# 3. install the files and recompile the schema cache once, post-build
	def compile_schemas_callback(bld):
		if not bld.is_install:return
		Logs.pprint('YELLOW','Updating GSettings schema cache')
		command=Utils.subst_vars("${GLIB_COMPILE_SCHEMAS} ${GSETTINGSSCHEMADIR}",bld.env)
		# NOTE(review): uses `self.bld` from the enclosing scope instead of
		# the `bld` parameter -- same object in practice, but confirm.
		ret=self.bld.exec_command(command)
	if self.bld.is_install:
		if not self.env['GSETTINGSSCHEMADIR']:
			raise Errors.WafError('GSETTINGSSCHEMADIR not defined (should have been set up automatically during configure)')
		if install_files:
			self.bld.install_files(self.env['GSETTINGSSCHEMADIR'],install_files)
			if not hasattr(self.bld,'_compile_schemas_registered'):
				self.bld.add_post_fun(compile_schemas_callback)
				self.bld._compile_schemas_registered=True
class glib_validate_schema(Task.Task):
	# Dry-run glib-compile-schemas and touch a stamp file on success.
	run_str='rm -f ${GLIB_VALIDATE_SCHEMA_OUTPUT} && ${GLIB_COMPILE_SCHEMAS} --dry-run ${GLIB_COMPILE_SCHEMAS_OPTIONS} && touch ${GLIB_VALIDATE_SCHEMA_OUTPUT}'
	color='PINK'
def configure(conf):
	"""Find the glib2 code-generation tools and compute GSETTINGSSCHEMADIR."""
	conf.find_program('glib-genmarshal',var='GLIB_GENMARSHAL')
	conf.find_perl_program('glib-mkenums',var='GLIB_MKENUMS')
	# optional: only required for the GSettings schema support
	conf.find_program('glib-compile-schemas',var='GLIB_COMPILE_SCHEMAS',mandatory=False)
	def getstr(varname):
		# command-line option wins over any pre-set configuration value
		return getattr(Options.options,varname,getattr(conf.env,varname,''))
	gsettingsschemadir=getstr('GSETTINGSSCHEMADIR')
	if not gsettingsschemadir:
		# default to ${DATADIR}/glib-2.0/schemas, deriving DATADIR from
		# PREFIX when it was not given either
		datadir=getstr('DATADIR')
		if not datadir:
			prefix=conf.env['PREFIX']
			datadir=os.path.join(prefix,'share')
		gsettingsschemadir=os.path.join(datadir,'glib-2.0','schemas')
	conf.env['GSETTINGSSCHEMADIR']=gsettingsschemadir
def options(opt):
	"""Add the glib2 command-line option (GSettings schema directory)."""
	opt.add_option(
		'--gsettingsschemadir',
		help='GSettings schema location [Default: ${datadir}/glib-2.0/schemas]',
		default='',
		dest='GSETTINGSSCHEMADIR')
# Register the functions above with waf's task-generator machinery (the
# unpacked waf file applies the decorators as plain calls).
taskgen_method(add_marshal_file)
before_method('process_source')(process_marshal)
taskgen_method(add_enums_from_template)
taskgen_method(add_enums)
before_method('process_source')(process_enums)
taskgen_method(add_settings_schemas)
taskgen_method(add_settings_enums)
feature('glib2')(process_settings)
|
NLeSC/pointcloud-benchmark
|
refs/heads/master
|
python/pointcloud/run/results/plot_io.py
|
1
|
#!/usr/bin/env python
################################################################################
# Created by Oscar Martinez                                                    #
# o.rubi@esciencecenter.nl                                                     #
################################################################################
"""Parse an IO monitor dump and save a read/write plot.

Usage: plot_io.py <inputFile> <outputFile> [step]

The optional *step* down-samples the data by averaging chunks of `step`
samples before plotting.
"""
import sys

from pointcloud import utils

# robustness fix: fail with a usage message instead of an IndexError when
# the two mandatory arguments are missing
if len(sys.argv) < 3:
    sys.exit('usage: %s <inputFile> <outputFile> [step]' % sys.argv[0])

inputFile = sys.argv[1]
outputFile = sys.argv[2]

(times, rdata, wdata) = utils.parseIO(inputFile)

# clarity fix: the original tested `len(sys.argv) != 3`, which only worked
# by accident; the intent is "a third, optional step argument was given"
if len(sys.argv) > 3:
    step = int(sys.argv[3])
    times = times[::step]
    for k in rdata:
        rdata[k] = utils.chunkedMean(rdata[k], step)
        wdata[k] = utils.chunkedMean(wdata[k], step)

utils.saveIO(times, rdata, wdata, 'IO', outputFile)
|
camptocamp/QGIS
|
refs/heads/master
|
python/plugins/processing/gdal/__init__.py
|
12133432
| |
SerCeMan/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/conf/locale/eu/__init__.py
|
12133432
| |
data-tsunami/smoke
|
refs/heads/master
|
smoke/services/__init__.py
|
12133432
| |
dex4er/django
|
refs/heads/1.6.x
|
django/conf/locale/mn/__init__.py
|
12133432
| |
takaakiaoki/PyFoam
|
refs/heads/master
|
PyFoam/Paraview/SimpleSources.py
|
2
|
# ICE Revision: $Id$
""" Simple sources
Builds and displays simple sources. Grants easy access to the actual source
and the representation objects"""
from paraview import servermanager
from PyFoam.Paraview import proxyManager as pm
from PyFoam.Paraview import renderView as rv
from PyFoam.Paraview import characteristicLength as lc
from PyFoam.Paraview import getCenter as gc
from PyFoam.Paraview import transformsModule as tm
from SourceBase import SourceBase
import math
class SimpleSource(SourceBase):
    """Base class for the simple sources
    The member src is the actual source object.
    The member repr is the representation object"""
    def __init__(self,name,src):
        """@param name: The name under which the thing should be displayed
        @param src: the actual source proxy"""
        SourceBase.__init__(self,src)
        self.name = name
        # Register the source and its representation with the proxy manager
        # so they appear (as <name> and <name>_repr) in the pipeline.
        pm.RegisterProxy("sources",self.name,self.src)
        self.repr=servermanager.CreateRepresentation(self.src,rv())
        pm.RegisterProxy("representations",self.name+"_repr",self.repr)
    def unregister(self):
        """Unregister the Proxies, but keep the objects"""
        pm.UnRegisterProxy("sources",self.name,self.src)
        pm.UnRegisterProxy("representations",self.name+"_repr",self.repr)
    def __del__(self):
        """Does not yet work properly"""
        self.unregister()
        del self.src
        del self.repr
class Sphere(SimpleSource):
    """Display a sphere source."""

    def __init__(self, name, center, relRadius=0.01, absRadius=None):
        """@param name: name under which the sphere should be displayed
        @param center: the center of the sphere
        @param relRadius: radius relative to the characteristic length
        @param absRadius: absolute radius. Overrides relRadius if set"""
        try:
            source = servermanager.sources.SphereSource()
        except AttributeError:
            # the proxy was renamed in other paraview versions
            source = servermanager.sources.Sphere()
        source.Center = list(center)
        source.Radius = absRadius if absRadius else lc() * relRadius
        SimpleSource.__init__(self, name, source)
class Point(SimpleSource):
    """Display a single point source."""

    def __init__(self, name, center):
        """@param name: name under which the point should be displayed
        @param center: the center of the point"""
        source = servermanager.sources.PointSource()
        source.Center = list(center)
        SimpleSource.__init__(self, name, source)
class Line(SimpleSource):
    """Display a straight line between two points."""

    def __init__(self, name, pt1, pt2):
        """@param name: name under which the line should be displayed
        @param pt1: the start of the line
        @param pt2: the end of the line"""
        try:
            source = servermanager.sources.LineSource()
        except AttributeError:
            # the proxy was renamed in other paraview versions
            source = servermanager.sources.Line()
        source.Point1 = list(pt1)
        source.Point2 = list(pt2)
        SimpleSource.__init__(self, name, source)
class Plane(SimpleSource):
    """Display a plane spanned from an origin by two points."""

    def __init__(self, name, origin, pt1, pt2):
        """@param name: name under which the plane should be displayed
        @param origin: the origin of the plane
        @param pt1: one point the plane spans to
        @param pt2: the other point the plane spans to"""
        try:
            source = servermanager.sources.PlaneSource()
        except AttributeError:
            # the proxy was renamed in other paraview versions
            source = servermanager.sources.Plane()
        source.Origin = list(origin)
        source.Point1 = list(pt1)
        source.Point2 = list(pt2)
        SimpleSource.__init__(self, name, source)
class Cube(SimpleSource):
    """Display an axis-aligned box described by two corner points."""

    def __init__(self, name, pt1, pt2):
        """@param name: name under which the cube should be displayed
        @param pt1: Point one that describes the box
        @param pt2: Point two that describes the box"""
        corner1 = self.makeVector(pt1)
        corner2 = self.makeVector(pt2)
        try:
            source = servermanager.sources.CubeSource()
        except AttributeError:
            # the proxy was renamed in other paraview versions
            source = servermanager.sources.Box()
        # center is the midpoint, the edge lengths the absolute extents
        source.Center = list(0.5 * (corner1 + corner2))
        extent = corner1 - corner2
        source.XLength = abs(extent[0])
        source.YLength = abs(extent[1])
        source.ZLength = abs(extent[2])
        SimpleSource.__init__(self, name, source)
class STL(SimpleSource):
    """Display the surface stored in an STL file."""

    def __init__(self, name, stlFile):
        """@param name: name under which the surface should be displayed
        @param stlFile: the STL-file"""
        try:
            reader = servermanager.sources.stlreader()
        except AttributeError:
            # the proxy was renamed in other paraview versions
            reader = servermanager.sources.STLReader()
        reader.FileNames = [stlFile]
        reader.UpdatePipeline()
        SimpleSource.__init__(self, name, reader)
class Text(SimpleSource):
    """Displays a Vector-Text"""
    def __init__(self,name,text,scale=1,position=None):
        """@param name: name under which the text should be displayed
        @param text: the text that will be displayed
        @param scale: the scaling of the text (in terms of the characteristic length of the geometry)
        @param position: the actual position at which the object should be centered"""
        try:
            txt=servermanager.sources.VectorText()
        except AttributeError:
            # the proxy was renamed in other paraview versions
            txt=servermanager.sources.a3DText()
        txt.Text=text
        SimpleSource.__init__(self,name,txt)
        # default to the center of the current geometry
        if not position:
            position=gc()
        try:
            self.repr.Translate=list(position)
        except AttributeError:
            # some representations position via Position/Origin instead
            self.repr.Position=list(position)
            self.repr.Origin=list(position)
        # rescale so that `scale` is relative to the geometry's size
        scale*=lc()/self.characteristicLength()
        self.repr.Scale=(scale,scale,scale)
class DirectedSource(SimpleSource):
    """A Source that looks in a specific direction.
    Assumes that the original base is located at (0 0 0)"""
    def __init__(self,name,src,base,tip):
        """@param name: name under which the arrow will be displayed
        @param src: The source objects
        @param base: the base the arrow points away from
        @param tip: the point the arrow points to"""
        SimpleSource.__init__(self,name,src)
        self.base=base
        self.tip =tip
        self.recalc()
        #        self.arrow=SimpleSource(name,ar)
        #        tf=servermanager.filters.TransformFilter(Input = ar)
        #        trafo=tm().Transform()
        #        trafo.Position = list(base)
        #        trafo.Scale = [abs(base-tip)]*3
        #        tf.Transform = trafo
    def recalc(self):
        """Recalculate the orientation of the object according to the tip and
        the base"""
        diff=self.tip-self.base
        r=abs(diff)
        # orientation angles in degrees; +1e-15 avoids division by zero
        # when tip == base
        phi=math.acos(diff[0]/(r+1e-15))*180/math.pi
        theta=math.atan2(diff[1],-diff[2])*180/math.pi
        # scale the unit-length source to the base->tip distance and rotate
        # it so it points from base towards tip
        self.repr.Scale=[r]*3
        self.repr.Position=list(self.base)
        self.repr.Orientation=[theta,phi,0]
    def setBase(self,base):
        """Reset the base point"""
        self.base=base
        self.recalc()
    def setTip(self,tip):
        """Reset the tip point"""
        self.tip=tip
        self.recalc()
class Arrow(DirectedSource):
    """Display an arrow pointing from a base point to a tip point."""

    def __init__(self, name, base, tip):
        """@param name: name under which the arrow will be displayed
        @param base: the base the arrow points away from
        @param tip: the point the arrow points to"""
        # fall back to the alternate proxy name on AttributeError
        try:
            DirectedSource.__init__(
                self, name, servermanager.sources.ArrowSource(), base, tip)
        except AttributeError:
            DirectedSource.__init__(
                self, name, servermanager.sources.Arrow(), base, tip)
class Glyph(DirectedSource):
    """Display a 2D glyph pointing from a base point to a tip point."""

    def __init__(self, name, base, tip):
        """@param name: name under which the glyph will be displayed
        @param base: the base the glyph points away from
        @param tip: the point the glyph points to"""
        # fall back to the alternate proxy name on AttributeError
        try:
            DirectedSource.__init__(
                self, name, servermanager.sources.GlyphSource2D(), base, tip)
        except AttributeError:
            DirectedSource.__init__(
                self, name, servermanager.sources.a2DGlyph(), base, tip)
|
yongshengwang/hue
|
refs/heads/master
|
build/env/lib/python2.7/site-packages/ipython-0.10-py2.7.egg/IPython/kernel/tests/tasktest.py
|
6
|
#!/usr/bin/env python
# encoding: utf-8
__docformat__ = "restructuredtext en"
#-------------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
import time
from IPython.kernel import task, engineservice as es
from IPython.kernel.util import printer
from IPython.kernel import error
#-------------------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------------------
def _raise_it(f):
try:
f.raiseException()
except CompositeError, e:
e.raise_exception()
class TaskTestBase(object):
    """Mixin providing engine setup shared by the task-controller tests."""
    def addEngine(self, n=1):
        # Start n in-process engines and register each with the controller,
        # recording the assigned id on the engine object.
        for i in range(n):
            e = es.EngineService()
            e.startService()
            regDict = self.controller.register_engine(es.QueuedEngine(e), None)
            e.id = regDict['id']
            self.engines.append(e)
class ITaskControllerTestCase(TaskTestBase):
    """Deferred-based conformance tests for the ITaskController interface."""
    def test_task_ids(self):
        # Task ids are handed out sequentially starting from 0.
        self.addEngine(1)
        d = self.tc.run(task.StringTask('a=5'))
        d.addCallback(lambda r: self.assertEquals(r, 0))
        d.addCallback(lambda r: self.tc.run(task.StringTask('a=5')))
        d.addCallback(lambda r: self.assertEquals(r, 1))
        d.addCallback(lambda r: self.tc.run(task.StringTask('a=5')))
        d.addCallback(lambda r: self.assertEquals(r, 2))
        d.addCallback(lambda r: self.tc.run(task.StringTask('a=5')))
        d.addCallback(lambda r: self.assertEquals(r, 3))
        return d
    def test_abort(self):
        """Cannot do a proper abort test, because blocking execution prevents
        abort from being called before task completes"""
        self.addEngine(1)
        t = task.StringTask('a=5')
        # Aborting an unknown task id fails with IndexError.
        d = self.tc.abort(0)
        d.addErrback(lambda f: self.assertRaises(IndexError, f.raiseException))
        d.addCallback(lambda _:self.tc.run(t))
        d.addCallback(self.tc.abort)
        d.addErrback(lambda f: self.assertRaises(IndexError, f.raiseException))
        return d
    def test_abort_type(self):
        # A non-integer task id is rejected with TypeError.
        self.addEngine(1)
        d = self.tc.abort('asdfadsf')
        d.addErrback(lambda f: self.assertRaises(TypeError, f.raiseException))
        return d
    def test_clear_before_and_after(self):
        # clear_before wipes pre-existing names; clear_after wipes the task's.
        self.addEngine(1)
        t = task.StringTask('a=1', clear_before=True, pull='b', clear_after=True)
        d = self.multiengine.execute('b=1', targets=0)
        d.addCallback(lambda _: self.tc.run(t))
        d.addCallback(lambda tid: self.tc.get_task_result(tid,block=True))
        d.addCallback(lambda tr: tr.failure)
        d.addErrback(lambda f: self.assertRaises(NameError, f.raiseException))
        d.addCallback(lambda _:self.multiengine.pull('a', targets=0))
        d.addErrback(lambda f: self.assertRaises(NameError, _raise_it, f))
        return d
    def test_simple_retries(self):
        # With retries=10, the first task (10 tries) never reaches i == 16
        # while the second (starting where the first left off) does.
        self.addEngine(1)
        t = task.StringTask("i += 1\nassert i == 16", pull='i',retries=10)
        t2 = task.StringTask("i += 1\nassert i == 16", pull='i',retries=10)
        d = self.multiengine.execute('i=0', targets=0)
        d.addCallback(lambda r: self.tc.run(t))
        d.addCallback(self.tc.get_task_result, block=True)
        d.addCallback(lambda tr: tr.ns.i)
        d.addErrback(lambda f: self.assertRaises(AssertionError, f.raiseException))
        d.addCallback(lambda r: self.tc.run(t2))
        d.addCallback(self.tc.get_task_result, block=True)
        d.addCallback(lambda tr: tr.ns.i)
        d.addCallback(lambda r: self.assertEquals(r, 16))
        return d
    def test_recovery_tasks(self):
        # When the main task keeps failing, its recovery_task runs instead.
        self.addEngine(1)
        t = task.StringTask("i=16", pull='i')
        t2 = task.StringTask("raise Exception", recovery_task=t, retries = 2)
        d = self.tc.run(t2)
        d.addCallback(self.tc.get_task_result, block=True)
        d.addCallback(lambda tr: tr.ns.i)
        d.addCallback(lambda r: self.assertEquals(r, 16))
        return d
    def test_setup_ns(self):
        # Names pushed into the task namespace are visible to pull.
        self.addEngine(1)
        d = self.multiengine.execute('a=0', targets=0)
        ns = dict(a=1, b=0)
        t = task.StringTask("", push=ns, pull=['a','b'])
        d.addCallback(lambda r: self.tc.run(t))
        d.addCallback(self.tc.get_task_result, block=True)
        d.addCallback(lambda tr: {'a':tr.ns.a, 'b':tr['b']})
        d.addCallback(lambda r: self.assertEquals(r, ns))
        return d
    def test_string_task_results(self):
        # Successful task: attribute access, item access, no failure.
        self.addEngine(1)
        t1 = task.StringTask('a=5', pull='a')
        d = self.tc.run(t1)
        d.addCallback(self.tc.get_task_result, block=True)
        d.addCallback(lambda tr: (tr.ns.a,tr['a'],tr.failure, tr.raise_exception()))
        d.addCallback(lambda r: self.assertEquals(r, (5,5,None,None)))
        # Invalid source code surfaces as a SyntaxError.
        t2 = task.StringTask('7=5')
        d.addCallback(lambda r: self.tc.run(t2))
        d.addCallback(self.tc.get_task_result, block=True)
        d.addCallback(lambda tr: tr.ns)
        d.addErrback(lambda f: self.assertRaises(SyntaxError, f.raiseException))
        # Pulling an undefined name surfaces as a NameError.
        t3 = task.StringTask('', pull='b')
        d.addCallback(lambda r: self.tc.run(t3))
        d.addCallback(self.tc.get_task_result, block=True)
        d.addCallback(lambda tr: tr.ns)
        d.addErrback(lambda f: self.assertRaises(NameError, f.raiseException))
        return d
    def test_map_task(self):
        # MapTask supports positional args, no args, and keyword args.
        self.addEngine(1)
        t1 = task.MapTask(lambda x: 2*x,(10,))
        d = self.tc.run(t1)
        d.addCallback(self.tc.get_task_result, block=True)
        d.addCallback(lambda r: self.assertEquals(r,20))
        t2 = task.MapTask(lambda : 20)
        d.addCallback(lambda _: self.tc.run(t2))
        d.addCallback(self.tc.get_task_result, block=True)
        d.addCallback(lambda r: self.assertEquals(r,20))
        t3 = task.MapTask(lambda x: x,(),{'x':20})
        d.addCallback(lambda _: self.tc.run(t3))
        d.addCallback(self.tc.get_task_result, block=True)
        d.addCallback(lambda r: self.assertEquals(r,20))
        return d
    def test_map_task_failure(self):
        # Exceptions raised inside the mapped callable propagate as failures.
        self.addEngine(1)
        t1 = task.MapTask(lambda x: 1/0,(10,))
        d = self.tc.run(t1)
        d.addCallback(self.tc.get_task_result, block=True)
        d.addErrback(lambda f: self.assertRaises(ZeroDivisionError, f.raiseException))
        return d
    def test_map_task_args(self):
        # MapTask validates its constructor arguments eagerly.
        self.assertRaises(TypeError, task.MapTask, 'asdfasdf')
        self.assertRaises(TypeError, task.MapTask, lambda x: x, 10)
        self.assertRaises(TypeError, task.MapTask, lambda x: x, (10,),30)
    def test_clear(self):
        # clear() forgets stored results; old ids become invalid.
        self.addEngine(1)
        t1 = task.MapTask(lambda x: 2*x,(10,))
        d = self.tc.run(t1)
        d.addCallback(lambda _: self.tc.get_task_result(0, block=True))
        d.addCallback(lambda r: self.assertEquals(r,20))
        d.addCallback(lambda _: self.tc.clear())
        d.addCallback(lambda _: self.tc.get_task_result(0, block=True))
        d.addErrback(lambda f: self.assertRaises(IndexError, f.raiseException))
        return d
|
kaday/rose
|
refs/heads/master
|
lib/python/rose/env.py
|
1
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-6 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------------
"""Environment variable substitution in strings.
Note: os.path.expandvars(path) does not work correctly because unbound
environment variables are left unchanged.
"""
import os
import re
from rose.reporter import Event
# Match the leading "$NAME" or "${NAME}" reference in a string, capturing any
# preceding backslash escapes, the reference itself, and the remaining text.
# Bug fix: the variable name now starts with [A-Za-z_]; the previous "[A-z]"
# range also matched the punctuation characters between "Z" and "a"
# ("[", "\", "]", "^", "`"), wrongly treating e.g. "$[x" as a variable.
# The previously duplicated commented-out verbose pattern is now the single
# compiled source of truth, via re.X.
_RE_DEFAULT = re.compile(
    r"""
    \A                          # start
    (?P<head>.*?)               # shortest of anything
    (?P<escape>\\*)             # escapes
    (?P<symbol>                 # start symbol
        \$                      # variable sigil, dollar
        (?P<brace_open>\{)?     # brace open, optional
        (?P<name>[A-Za-z_]\w*)  # variable name
        (?(brace_open)\})       # brace close, if brace_open
    )                           # end symbol
    (?P<tail>.*)                # rest of string
    \Z                          # end
    """,
    re.M | re.S | re.X)
# Match the leading "${NAME}" (brace form only) reference in a string,
# capturing any preceding backslash escapes and the remaining text.
# Bug fix: the variable name now starts with [A-Za-z_]; the previous "[A-z]"
# range also matched "[", "\", "]", "^" and "`".  The previously duplicated
# commented-out verbose pattern is now the single compiled source of truth,
# via re.X.
_RE_BRACE = re.compile(
    r"""
    \A                          # start
    (?P<head>.*?)               # shortest of anything
    (?P<escape>\\*)             # escapes
    (?P<symbol>\$\{             # start symbol ${
    (?P<name>[A-Za-z_]\w*)      # variable name
    \})                         # } end symbol
    (?P<tail>.*)                # rest of string
    \Z                          # end
    """,
    re.M | re.S | re.X)
# Map a "match_mode" argument value to its compiled pattern: "brace"
# recognises only "${NAME}", while "default" (and None, the fallback when no
# mode is given) also recognises the bare "$NAME" form.
_MATCH_MODES = {"brace": _RE_BRACE,
                "default": _RE_DEFAULT,
                None: _RE_DEFAULT}
# Variables already exported by env_export, keyed by name, so that repeated
# exports of an unchanged value are not re-reported to the event handler.
_EXPORTED_ENVS = {}
class EnvExportEvent(Event):

    """Report the export of an environment variable as "export KEY=VALUE"."""

    # Characters in the value that must be backslash-escaped for the shell.
    RE_SHELL_ESCAPE = re.compile(r"([\"'\s])")

    def __str__(self):
        key, value = self.args
        safe_value = self.RE_SHELL_ESCAPE.sub(r"\\\1", value)
        return "export " + key + "=" + safe_value
class UnboundEnvironmentVariableError(Exception):

    """Raised on an attempt to substitute an unbound environment variable."""

    def __str__(self):
        # self.args is the 1-tuple of Exception arguments (the variable
        # name), so "%s" formatting unpacks it directly.
        return "[UNDEFINED ENVIRONMENT VARIABLE] %s" % (self.args)

    __repr__ = __str__
def env_export(key, value, event_handler=None):
    """Export the environment variable key=value.

    A callable "event_handler" is notified with an EnvExportEvent only the
    first time a key is exported and whenever its value changes afterwards.
    """
    never_exported = key not in _EXPORTED_ENVS
    if never_exported or os.environ.get(key) != value:
        # N.B. Should be safe, because the list of environment variables is
        # normally quite small.
        _EXPORTED_ENVS[key] = value
        os.environ[key] = value
        if callable(event_handler):
            event_handler(EnvExportEvent(key, value))
def env_var_escape(text, match_mode=None):
    """Escape $NAME and ${NAME} syntax in "text".

    Each variable reference gets a backslash prepended, and any existing
    escapes before it are doubled so they survive a later un-escaping pass.
    """
    pattern = _MATCH_MODES[match_mode]
    parts = []
    remainder = text
    while remainder:
        match = pattern.match(remainder)
        if not match:
            parts.append(remainder)
            break
        groups = match.groupdict()
        parts.append(groups["head"])
        parts.append(groups["escape"] * 2)
        parts.append("\\")
        parts.append(groups["symbol"])
        remainder = groups["tail"]
    return "".join(parts)
def env_var_process(text, unbound=None, match_mode=None):
    """Substitute environment variables into a string.

    For each $NAME and ${NAME} in "text", substitute with the value
    of the environment variable NAME. If NAME is not defined in the
    environment and "unbound" is None, raise an
    UnboundEnvironmentVariableError. If NAME is not defined in the
    environment and "unbound" is not None, substitute NAME with the
    value of "unbound".
    """
    ret = ""
    tail = text
    while tail:
        match = _MATCH_MODES[match_mode].match(tail)
        if match:
            groups = match.groupdict()
            substitute = groups["symbol"]
            # An even number of backslashes means the sigil itself is not
            # escaped, so the variable reference is live.
            if len(groups["escape"]) % 2 == 0:
                if groups["name"] in os.environ:
                    substitute = os.environ[groups["name"]]
                elif unbound is not None:
                    substitute = str(unbound)
                else:
                    raise UnboundEnvironmentVariableError(groups["name"])
            # Halve the escapes ("\\" collapses to "\").  Bug fix: use floor
            # division ("//") so the slice index stays an int under Python 3;
            # the original "/" yields a float there and slicing raises
            # TypeError.  "//" behaves identically under Python 2.
            ret += (groups["head"] +
                    groups["escape"][0:len(groups["escape"]) // 2] +
                    substitute)
            tail = groups["tail"]
        else:
            ret += tail
            tail = ""
    return ret
def contains_env_var(text, match_mode=None):
    """Return True if "text" contains unescaped $NAME and/or ${NAME} syntax.

    Bug fix: the result is now always a bool.  The previous version leaked
    None (no match) or a raw boolean expression; bool() is truth-equivalent,
    so existing truthiness-based callers are unaffected.
    """
    match = _MATCH_MODES[match_mode].match(text)
    return bool(match and len(match.groupdict()["escape"]) % 2 == 0)
if __name__ == "__main__":
    # Self-tests: run this module directly to exercise env_export.
    import unittest

    class _TestEnvExport(unittest.TestCase):
        """Test "env_export" function."""
        def test_report_new(self):
            """Ensure that env_export only reports 1st time or on change."""
            events = []
            event_handler = lambda event: events.append(event)
            # "FOO" has never been exported: expect a report for the first
            # call and for each subsequent change of value only.
            env_export("FOO", "foo", event_handler)
            env_export("FOO", "foo", event_handler)
            env_export("FOO", "food", event_handler)
            env_export("FOO", "foot", event_handler)
            env_export("FOO", "foot", event_handler)
            event_args = [event.args[1] for event in events]
            self.assertEqual(event_args, ["foo", "food", "foot"], "events")
        def test_report_old(self):
            """Ensure that env_export only reports 1st time or on change."""
            events = []
            event_handler = lambda event: events.append(event)
            # Pre-set "BAR" in os.environ: the first export is still
            # reported because "BAR" is not yet in _EXPORTED_ENVS.
            os.environ["BAR"] = "bar"
            env_export("BAR", "bar", event_handler)
            env_export("BAR", "bar", event_handler)
            env_export("BAR", "bar", event_handler)
            env_export("BAR", "barley", event_handler)
            env_export("BAR", "barley", event_handler)
            env_export("BAR", "barber", event_handler)
            event_args = [event.args[1] for event in events]
            self.assertEqual(event_args, ["bar", "barley", "barber"], "events")
    unittest.main()
|
cwurld/django_tracker
|
refs/heads/master
|
tracker_demo/test_project/login_required_middleware.py
|
1
|
from django.conf import settings
from django.http import HttpResponseRedirect
import re
# Pre-compile the login-exempt URL patterns once at import time.
# NOTE(review): assumes settings.LOGIN_NOT_REQUIRED_URLS is an iterable of
# regex strings matched against request.path -- confirm in project settings.
URLS = tuple([re.compile(url) for url in settings.LOGIN_NOT_REQUIRED_URLS])
class RequireLoginMiddleware(object):
    """Redirect anonymous users to the login page.

    Requests whose path matches any pattern in URLS are exempt and served
    normally; all other paths require an authenticated user.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # Exempt URLs are served regardless of authentication state.
        for url in URLS:
            if url.match(request.path):
                return self.get_response(request)
        # Bug fix: previously self.get_response(request) was called *before*
        # the authentication check, so anonymous users still executed the
        # protected view (and any of its side effects) and only then were
        # redirected.  Check first; only run the view when allowed.
        if request.user.is_anonymous:
            return HttpResponseRedirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
        return self.get_response(request)
|
nonemaw/MATRIX_01
|
refs/heads/master
|
COMP9041/ass1/examples/3/l.py
|
1
|
#!/usr/bin/python2.7 -u
import subprocess
import sys
# l [file|directories...] - list files
# written by andrewt@cse.unsw.edu.au as a COMP2041 example
# Delegate to "ls -las", forwarding any command-line arguments verbatim;
# the script's exit behaviour is simply whatever subprocess.call returns.
subprocess.call(['ls', '-las'] + sys.argv[1:])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.