| repo_name | ref | path | copies | content |
|---|---|---|---|---|
| postrational/django | refs/heads/master | django/contrib/humanize/tests.py | 4 |
from __future__ import unicode_literals
import datetime
from decimal import Decimal
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
from django.contrib.humanize.templatetags import humanize
from django.template import Template, Context, defaultfilters
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.html import escape
from django.utils.timezone import utc
from django.utils import translation
from django.utils.translation import ugettext as _
from django.utils import tzinfo
from django.utils.unittest import skipIf
# Mock out datetime in some tests so they don't fail occasionally when they
# run too slow. Use a fixed datetime for datetime.now(). DST change in
# America/Chicago (the default time zone) happened on March 11th in 2012.
now = datetime.datetime(2012, 3, 9, 22, 30)
class MockDateTime(datetime.datetime):
@classmethod
def now(cls, tz=None):
if tz is None or tz.utcoffset(now) is None:
return now
else:
# equals now.replace(tzinfo=utc)
return now.replace(tzinfo=tz) + tz.utcoffset(now)
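# Hedged illustration (not part of the original tests): with this patch,
# MockDateTime.now() returns the fixed naive value above, and
# MockDateTime.now(utc) returns the same instant as an aware datetime,
# i.e. 2012-03-09 22:30:00+00:00.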
class HumanizeTests(TestCase):
def humanize_tester(self, test_list, result_list, method):
for test_content, result in zip(test_list, result_list):
t = Template('{%% load humanize %%}{{ test_content|%s }}' % method)
rendered = t.render(Context(locals())).strip()
self.assertEqual(rendered, escape(result),
msg="%s test failed, produced '%s', should've produced '%s'" % (method, rendered, result))
def test_ordinal(self):
test_list = ('1', '2', '3', '4', '11', '12',
'13', '101', '102', '103', '111',
'something else', None)
result_list = ('1st', '2nd', '3rd', '4th', '11th',
'12th', '13th', '101st', '102nd', '103rd',
'111th', 'something else', None)
with translation.override('en'):
self.humanize_tester(test_list, result_list, 'ordinal')
def test_intcomma(self):
test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
'100', '1000', '10123', '10311', '1000000', '1234567.1234567', Decimal('1234567.1234567'),
None)
result_list = ('100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.25',
'100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.1234567', '1,234,567.1234567',
None)
with translation.override('en'):
self.humanize_tester(test_list, result_list, 'intcomma')
def test_l10n_intcomma(self):
test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
'100', '1000', '10123', '10311', '1000000', '1234567.1234567', Decimal('1234567.1234567'),
None)
result_list = ('100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.25',
'100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.1234567', '1,234,567.1234567',
None)
with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=False):
with translation.override('en'):
self.humanize_tester(test_list, result_list, 'intcomma')
def test_intcomma_without_number_grouping(self):
# Regression for #17414
with translation.override('ja'):
with self.settings(USE_L10N=True):
self.humanize_tester([100], ['100'], 'intcomma')
def test_intword(self):
test_list = ('100', '1000000', '1200000', '1290000',
'1000000000', '2000000000', '6000000000000',
'1300000000000000', '3500000000000000000000',
'8100000000000000000000000000000000', None)
result_list = ('100', '1.0 million', '1.2 million', '1.3 million',
'1.0 billion', '2.0 billion', '6.0 trillion',
'1.3 quadrillion', '3.5 sextillion',
'8.1 decillion', None)
with translation.override('en'):
self.humanize_tester(test_list, result_list, 'intword')
def test_i18n_intcomma(self):
test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
'100', '1000', '10123', '10311', '1000000', None)
result_list = ('100', '1.000', '10.123', '10.311', '1.000.000', '1.234.567,25',
'100', '1.000', '10.123', '10.311', '1.000.000', None)
with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True):
with translation.override('de'):
self.humanize_tester(test_list, result_list, 'intcomma')
def test_i18n_intword(self):
test_list = ('100', '1000000', '1200000', '1290000',
'1000000000', '2000000000', '6000000000000')
result_list = ('100', '1,0 Million', '1,2 Millionen', '1,3 Millionen',
'1,0 Milliarde', '2,0 Milliarden', '6,0 Billionen')
with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True):
with translation.override('de'):
self.humanize_tester(test_list, result_list, 'intword')
def test_apnumber(self):
test_list = [str(x) for x in range(1, 11)]
test_list.append(None)
result_list = ('one', 'two', 'three', 'four', 'five', 'six',
'seven', 'eight', 'nine', '10', None)
with translation.override('en'):
self.humanize_tester(test_list, result_list, 'apnumber')
def test_naturalday(self):
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
tomorrow = today + datetime.timedelta(days=1)
someday = today - datetime.timedelta(days=10)
notdate = "I'm not a date value"
test_list = (today, yesterday, tomorrow, someday, notdate, None)
someday_result = defaultfilters.date(someday)
result_list = (_('today'), _('yesterday'), _('tomorrow'),
someday_result, "I'm not a date value", None)
self.humanize_tester(test_list, result_list, 'naturalday')
def test_naturalday_tz(self):
today = datetime.date.today()
tz_one = tzinfo.FixedOffset(datetime.timedelta(hours=-12))
tz_two = tzinfo.FixedOffset(datetime.timedelta(hours=12))
# Can be today or yesterday
date_one = datetime.datetime(today.year, today.month, today.day, tzinfo=tz_one)
naturalday_one = humanize.naturalday(date_one)
# Can be today or tomorrow
date_two = datetime.datetime(today.year, today.month, today.day, tzinfo=tz_two)
naturalday_two = humanize.naturalday(date_two)
# As 24h of difference they will never be the same
self.assertNotEqual(naturalday_one, naturalday_two)
@skipIf(settings.TIME_ZONE != "America/Chicago" and pytz is None,
"this test requires pytz when a non-default time zone is set")
def test_naturalday_uses_localtime(self):
# Regression for #18504
# This is 2012-03-08T19:30:00-06:00 in America/Chicago
dt = datetime.datetime(2012, 3, 9, 1, 30, tzinfo=utc)
orig_humanize_datetime, humanize.datetime = humanize.datetime, MockDateTime
try:
with override_settings(TIME_ZONE="America/Chicago", USE_TZ=True):
with translation.override('en'):
self.humanize_tester([dt], ['yesterday'], 'naturalday')
finally:
humanize.datetime = orig_humanize_datetime
def test_naturaltime(self):
class naive(datetime.tzinfo):
def utcoffset(self, dt):
return None
test_list = [
now,
now - datetime.timedelta(seconds=1),
now - datetime.timedelta(seconds=30),
now - datetime.timedelta(minutes=1, seconds=30),
now - datetime.timedelta(minutes=2),
now - datetime.timedelta(hours=1, minutes=30, seconds=30),
now - datetime.timedelta(hours=23, minutes=50, seconds=50),
now - datetime.timedelta(days=1),
now - datetime.timedelta(days=500),
now + datetime.timedelta(seconds=1),
now + datetime.timedelta(seconds=30),
now + datetime.timedelta(minutes=1, seconds=30),
now + datetime.timedelta(minutes=2),
now + datetime.timedelta(hours=1, minutes=30, seconds=30),
now + datetime.timedelta(hours=23, minutes=50, seconds=50),
now + datetime.timedelta(days=1),
now + datetime.timedelta(days=2, hours=6),
now + datetime.timedelta(days=500),
now.replace(tzinfo=naive()),
now.replace(tzinfo=utc),
]
result_list = [
'now',
'a second ago',
'30 seconds ago',
'a minute ago',
'2 minutes ago',
'an hour ago',
'23 hours ago',
'1 day ago',
'1 year, 4 months ago',
'a second from now',
'30 seconds from now',
'a minute from now',
'2 minutes from now',
'an hour from now',
'23 hours from now',
'1 day from now',
'2 days, 6 hours from now',
'1 year, 4 months from now',
'now',
'now',
]
# Because of the DST change, 2 days and 6 hours after the chosen
# date in naive arithmetic is only 2 days and 5 hours after in
# aware arithmetic.
result_list_with_tz_support = result_list[:]
assert result_list_with_tz_support[-4] == '2 days, 6 hours from now'
result_list_with_tz_support[-4] = '2 days, 5 hours from now'
orig_humanize_datetime, humanize.datetime = humanize.datetime, MockDateTime
try:
with translation.override('en'):
self.humanize_tester(test_list, result_list, 'naturaltime')
with override_settings(USE_TZ=True):
self.humanize_tester(
test_list, result_list_with_tz_support, 'naturaltime')
finally:
humanize.datetime = orig_humanize_datetime
|
| studio666/gnuradio | refs/heads/master | gr-utils/python/modtool/gr-newmod/docs/doxygen/doxyxml/generated/index.py | 344 |
#!/usr/bin/env python
"""
Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
"""
from xml.dom import minidom
import os
import sys
import compound
import indexsuper as supermod
class DoxygenTypeSub(supermod.DoxygenType):
def __init__(self, version=None, compound=None):
supermod.DoxygenType.__init__(self, version, compound)
def find_compounds_and_members(self, details):
"""
Returns a list of all compounds and their members which match details
"""
results = []
for compound in self.compound:
members = compound.find_members(details)
if members:
results.append([compound, members])
else:
if details.match(compound):
results.append([compound, []])
return results
supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
class CompoundTypeSub(supermod.CompoundType):
def __init__(self, kind=None, refid=None, name='', member=None):
supermod.CompoundType.__init__(self, kind, refid, name, member)
def find_members(self, details):
"""
Returns a list of all members which match details
"""
results = []
for member in self.member:
if details.match(member):
results.append(member)
return results
supermod.CompoundType.subclass = CompoundTypeSub
# end class CompoundTypeSub
class MemberTypeSub(supermod.MemberType):
def __init__(self, kind=None, refid=None, name=''):
supermod.MemberType.__init__(self, kind, refid, name)
supermod.MemberType.subclass = MemberTypeSub
# end class MemberTypeSub
def parse(inFilename):
doc = minidom.parse(inFilename)
rootNode = doc.documentElement
rootObj = supermod.DoxygenType.factory()
rootObj.build(rootNode)
return rootObj
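# Hedged usage sketch (not part of the generated file): parse a Doxygen
# 'index.xml' and search it; the file path and the 'details' matcher object
# below are hypothetical placeholders.
#
#   index = parse('xml/index.xml')
#   for compound, members in index.find_compounds_and_members(details):
#       print(compound.name, [m.name for m in members])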
|
| robertmattmueller/sdac-compiler | refs/heads/master | sympy/physics/vector/tests/__init__.py | 12133432 | |
| AmrThabet/CouchPotatoServer | refs/heads/master | libs/oauth2/clients/__init__.py | 12133432 | |
| OndrejIT/pyload | refs/heads/stable | module/lib/__init__.py | 12133432 | |
| android-ia/platform_external_chromium_org | refs/heads/master | media/tools/layout_tests/test_expectations_history_unittest.py | 156 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from datetime import datetime
import calendar
import unittest
from test_expectations_history import TestExpectationsHistory
class TestTestExpectationsHistory(unittest.TestCase):
"""Unit tests for the TestExpectationsHistory class."""
def AssertTestName(self, result_list, testname):
"""Assert test name in the result_list.
Args:
result_list: a result list of tuples returned by
|GetDiffBetweenTimesOnly1Diff()|. Each tuple consists of
(old_rev, new_rev, author, date, message, lines) where
|lines| are the entries in the test expectation file.
testname: a testname string.
Returns:
True if the result contains the testname, False otherwise.
"""
for (_, _, _, _, _, lines) in result_list:
if any([testname in line for line in lines]):
return True
return False
# These tests use the following commit.
# commit 235788e3a4fc71342a5c9fefe67ce9537706ce35
# Author: rniwa@webkit.org
# Date: Sat Aug 20 06:19:11 2011 +0000
def testGetDiffBetweenTimes(self):
ptime = calendar.timegm((2011, 8, 20, 0, 0, 0, 0, 0, 0))
ctime = calendar.timegm((2011, 8, 21, 0, 0, 0, 0, 0, 0))
testname = 'fast/css/getComputedStyle/computed-style-without-renderer.html'
testname_list = [testname]
result_list = TestExpectationsHistory.GetDiffBetweenTimes(
ptime, ctime, testname_list)
self.assertTrue(self.AssertTestName(result_list, testname))
def testGetDiffBetweenTimesOnly1Diff(self):
ptime = calendar.timegm((2011, 8, 20, 6, 0, 0, 0, 0, 0))
ctime = calendar.timegm((2011, 8, 20, 7, 0, 0, 0, 0, 0))
testname = 'fast/css/getComputedStyle/computed-style-without-renderer.html'
testname_list = [testname]
result_list = TestExpectationsHistory.GetDiffBetweenTimes(
ptime, ctime, testname_list)
self.assertTrue(self.AssertTestName(result_list, testname))
def testGetDiffBetweenTimesOnly1DiffWithGobackSeveralDays(self):
ptime = calendar.timegm((2011, 9, 12, 1, 0, 0, 0, 0, 0))
ctime = calendar.timegm((2011, 9, 12, 2, 0, 0, 0, 0, 0))
testname = 'media/video-zoom-controls.html'
testname_list = [testname]
result_list = TestExpectationsHistory.GetDiffBetweenTimes(
ptime, ctime, testname_list)
self.assertTrue(self.AssertTestName(result_list, testname))
if __name__ == '__main__':
unittest.main()
|
| xin3liang/platform_external_chromium_org | refs/heads/master | third_party/cython/src/pyximport/__init__.py | 110 |
from pyximport import *
# replicate docstring
from pyximport import __doc__
|
| marcosbontempo/inatelos | refs/heads/master | poky-daisy/scripts/lib/bsp/__init__.py | 3 |
#
# Yocto BSP tools library
#
# Copyright (c) 2012, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] intel.com>
#
|
| souravbadami/zulip | refs/heads/master | zerver/webhooks/transifex/tests.py | 25 |
# -*- coding: utf-8 -*-
from typing import Any, Dict, Text
from zerver.lib.test_classes import WebhookTestCase
class TransifexHookTests(WebhookTestCase):
STREAM_NAME = 'transifex'
URL_TEMPLATE = u"/api/v1/external/transifex?stream={stream}&api_key={api_key}&{data_template}"
URL_DATA_TEMPLATE = "project={project}&language={language}&resource={resource}&{method}"
URL_REVIEWED_METHOD_TEMPLATE = "reviewed=100"
URL_TRANSLATED_METHOD_TEMPLATE = "translated=100"
FIXTURE_DIR_NAME = 'transifex'
PROJECT = 'project-title'
LANGUAGE = 'en'
RESOURCE = 'file'
REVIEWED = True
def test_transifex_reviewed_message(self):
# type: () -> None
self.REVIEWED = True
expected_subject = "{} in {}".format(self.PROJECT, self.LANGUAGE)
expected_message = "Resource {} fully reviewed.".format(self.RESOURCE)
self.url = self.build_webhook_url()
self.send_and_test_stream_message(None, expected_subject, expected_message)
def test_transifex_translated_message(self):
# type: () -> None
self.REVIEWED = False
expected_subject = "{} in {}".format(self.PROJECT, self.LANGUAGE)
expected_message = "Resource {} fully translated.".format(self.RESOURCE)
self.url = self.build_webhook_url()
self.send_and_test_stream_message(None, expected_subject, expected_message)
self.REVIEWED = True
def build_webhook_url(self):
# type: () -> Text
url_data = self.URL_DATA_TEMPLATE.format(
project=self.PROJECT,
language=self.LANGUAGE,
resource=self.RESOURCE,
method=self.URL_REVIEWED_METHOD_TEMPLATE if self.REVIEWED else self.URL_TRANSLATED_METHOD_TEMPLATE
)
api_key = self.get_api_key(self.TEST_USER_EMAIL)
return self.URL_TEMPLATE.format(api_key=api_key, stream=self.STREAM_NAME, data_template=url_data)
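# Hedged illustration (not part of the original tests): with the class
# constants above and REVIEWED = True, build_webhook_url() yields
# "/api/v1/external/transifex?stream=transifex&api_key=<api_key>&project=project-title&language=en&resource=file&reviewed=100".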
def get_body(self, fixture_name):
# type: (Text) -> Dict[str, Any]
return {}
|
| Distrotech/bzr | refs/heads/distrotech-bzr | bzrlib/plugins/changelog_merge/tests/test_changelog_merge.py | 2 |
# Copyright (C) 2011 by Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from bzrlib import (
merge,
tests,
)
from bzrlib.tests import test_merge_core
from bzrlib.plugins.changelog_merge import changelog_merge
sample_base_entries = [
'Base entry B1',
'Base entry B2',
'Base entry B3',
]
sample_this_entries = [
'This entry T1',
'This entry T2',
#'Base entry B1 updated',
'Base entry B1',
'Base entry B2',
'Base entry B3',
]
sample_other_entries = [
'Other entry O1',
#'Base entry B1',
'Base entry B1',
'Base entry B2 updated',
'Base entry B3',
]
sample2_base_entries = [
'Base entry B1',
'Base entry B2',
'Base entry B3',
]
sample2_this_entries = [
'This entry T1',
'This entry T2',
#'Base entry B1 updated',
'Base entry B1',
'Base entry B2',
]
sample2_other_entries = [
'Other entry O1',
#'Base entry B1',
'Base entry B1 edit', # > 80% similar according to difflib
'Base entry B2',
]
class TestMergeCoreLogic(tests.TestCase):
def test_new_in_other_floats_to_top(self):
"""Changes at the top of 'other' float to the top.
Given a changelog in THIS containing::
NEW-1
OLD-1
and a changelog in OTHER containing::
NEW-2
OLD-1
it will merge as::
NEW-2
NEW-1
OLD-1
"""
base_entries = ['OLD-1']
this_entries = ['NEW-1', 'OLD-1']
other_entries = ['NEW-2', 'OLD-1']
result_entries = changelog_merge.merge_entries(
base_entries, this_entries, other_entries)
self.assertEqual(
['NEW-2', 'NEW-1', 'OLD-1'], result_entries)
def test_acceptance_bug_723968(self):
"""Merging a branch that:
1. adds a new entry, and
2. edits an old entry (e.g. to fix a typo or twiddle formatting)
will:
1. add the new entry to the top
2. keep the edit, without duplicating the edited entry or moving it.
"""
result_entries = changelog_merge.merge_entries(
sample_base_entries, sample_this_entries, sample_other_entries)
self.assertEqual([
'Other entry O1',
'This entry T1',
'This entry T2',
'Base entry B1',
'Base entry B2 updated',
'Base entry B3',
],
list(result_entries))
def test_more_complex_conflict(self):
"""Like test_acceptance_bug_723968, but with a more difficult conflict:
the new entry and the edited entry are adjacent.
"""
def guess_edits(new, deleted):
#import pdb; pdb.set_trace()
return changelog_merge.default_guess_edits(new, deleted,
entry_as_str=lambda x: x)
result_entries = changelog_merge.merge_entries(
sample2_base_entries, sample2_this_entries, sample2_other_entries,
guess_edits=guess_edits)
self.assertEqual([
'Other entry O1',
'This entry T1',
'This entry T2',
'Base entry B1 edit',
'Base entry B2',
],
list(result_entries))
def test_too_hard(self):
"""A conflict this plugin cannot resolve raises EntryConflict.
"""
# An entry edited in other but deleted in this is a conflict we can't
# resolve. (Ideally perhaps we'd generate a nice conflict file, but
# for now we just give up.)
self.assertRaises(changelog_merge.EntryConflict,
changelog_merge.merge_entries,
sample2_base_entries, [], sample2_other_entries)
def test_default_guess_edits(self):
"""default_guess_edits matches a new entry only once.
(Even when that entry is the best match for multiple old entries.)
"""
new_in_other = [('AAAAA',), ('BBBBB',)]
deleted_in_other = [('DDDDD',), ('BBBBBx',), ('BBBBBxx',)]
# BBBBB is the best match for both BBBBBx and BBBBBxx
result = changelog_merge.default_guess_edits(
new_in_other, deleted_in_other)
self.assertEqual(
([('AAAAA',)], # new
[('DDDDD',), ('BBBBBxx',)], # deleted
[(('BBBBBx',), ('BBBBB',))]), # edits
result)
class TestChangeLogMerger(tests.TestCaseWithTransport):
"""Tests for ChangeLogMerger class.
Most tests should be unit tests for merge_entries (and its helpers).
This class is just to cover the handful of lines of code in ChangeLogMerger
itself.
"""
def make_builder(self):
builder = test_merge_core.MergeBuilder(self.test_base_dir)
self.addCleanup(builder.cleanup)
return builder
def make_changelog_merger(self, base_text, this_text, other_text):
builder = self.make_builder()
builder.add_file('clog-id', builder.tree_root, 'ChangeLog',
base_text, True)
builder.change_contents('clog-id', other=other_text, this=this_text)
merger = builder.make_merger(merge.Merge3Merger, ['clog-id'])
# The following can't use config stacks until the plugin itself does
# ('this_branch' is already write locked at this point and as such
# won't write the new value to disk where get_user_option can get it).
merger.this_branch.get_config().set_user_option(
'changelog_merge_files', 'ChangeLog')
merge_hook_params = merge.MergeFileHookParams(merger, 'clog-id', None,
'file', 'file', 'conflict')
changelog_merger = changelog_merge.ChangeLogMerger(merger)
return changelog_merger, merge_hook_params
def test_merge_text_returns_not_applicable(self):
"""A conflict this plugin cannot resolve returns (not_applicable, None).
"""
# Build same example as TestMergeCoreLogic.test_too_hard: edit an entry
# in other but delete it in this.
def entries_as_str(entries):
return ''.join(entry + '\n' for entry in entries)
changelog_merger, merge_hook_params = self.make_changelog_merger(
entries_as_str(sample2_base_entries),
'',
entries_as_str(sample2_other_entries))
self.assertEqual(
('not_applicable', None),
changelog_merger.merge_contents(merge_hook_params))
def test_merge_text_returns_success(self):
"""A successful merge returns ('success', lines)."""
changelog_merger, merge_hook_params = self.make_changelog_merger(
'', 'this text\n', 'other text\n')
status, lines = changelog_merger.merge_contents(merge_hook_params)
self.assertEqual(
('success', ['other text\n', 'this text\n']),
(status, list(lines)))
|
| shravan-achar/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pytest/_pytest/standalonetemplate.py | 203 |
#! /usr/bin/env python
# Hi There!
# You may be wondering what this giant blob of binary data here is, you might
# even be worried that we're up to something nefarious (good for you for being
# paranoid!). This is a base64 encoding of a zip file, this zip file contains
# a fully functional basic pytest script.
#
# Pytest is a thing that tests packages, pytest itself is a package that some-
# one might want to install, especially if they're looking to run tests inside
# some package they want to install. Pytest has a lot of code to collect and
# execute tests, and other such sort of "tribal knowledge" that has been en-
# coded in its code base. Because of this we basically include a basic copy
# of pytest inside this blob. We do this because it lets you as a maintainer
# or application developer who wants people who don't deal with python much to
# easily run tests without installing the complete pytest package.
#
# If you're wondering how this is created: you can create it yourself if you
# have a complete pytest installation by using this command on the command-
# line: ``py.test --genscript=runtests.py``.
sources = """
@SOURCES@"""
import sys
import base64
import zlib
class DictImporter(object):
def __init__(self, sources):
self.sources = sources
def find_module(self, fullname, path=None):
if fullname == "argparse" and sys.version_info >= (2,7):
# we were generated with <python2.7 (which pulls in argparse)
# but we are running now on a stdlib which has it, so use that.
return None
if fullname in self.sources:
return self
if fullname + '.__init__' in self.sources:
return self
return None
def load_module(self, fullname):
# print "load_module:", fullname
from types import ModuleType
try:
s = self.sources[fullname]
is_pkg = False
except KeyError:
s = self.sources[fullname + '.__init__']
is_pkg = True
co = compile(s, fullname, 'exec')
module = sys.modules.setdefault(fullname, ModuleType(fullname))
module.__file__ = "%s/%s" % (__file__, fullname)
module.__loader__ = self
if is_pkg:
module.__path__ = [fullname]
do_exec(co, module.__dict__) # noqa
return sys.modules[fullname]
def get_source(self, name):
res = self.sources.get(name)
if res is None:
res = self.sources.get(name + '.__init__')
return res
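# Hedged sketch (not part of the generated template): DictImporter can be
# exercised with a tiny in-memory mapping once a do_exec helper exists (it is
# normally defined in the __main__ block below); the module name 'demo_mod'
# is a hypothetical placeholder.
#
#   importer = DictImporter({'demo_mod': 'VALUE = 42\n'})
#   sys.meta_path.insert(0, importer)
#   import demo_mod            # resolved from the in-memory sources
#   assert demo_mod.VALUE == 42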
if __name__ == "__main__":
try:
import pkg_resources # noqa
except ImportError:
sys.stderr.write("ERROR: setuptools not installed\n")
sys.exit(2)
if sys.version_info >= (3, 0):
exec("def do_exec(co, loc): exec(co, loc)\n")
import pickle
sources = sources.encode("ascii") # ensure bytes
sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
else:
import cPickle as pickle
exec("def do_exec(co, loc): exec co in loc\n")
sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))
importer = DictImporter(sources)
sys.meta_path.insert(0, importer)
entry = "@ENTRY@"
do_exec(entry, locals()) # noqa
|
| marqueedev/django | refs/heads/master | tests/logging_tests/views.py | 342 |
from __future__ import unicode_literals
from django.core.exceptions import DisallowedHost, SuspiciousOperation
def suspicious(request):
raise SuspiciousOperation('dubious')
def suspicious_spec(request):
raise DisallowedHost('dubious')
|
| jmighion/ansible | refs/heads/devel | lib/ansible/modules/cloud/docker/docker_image.py | 28 |
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_image
short_description: Manage docker images.
version_added: "1.5"
description:
- Build, load or pull an image, making the image available for creating containers. Also supports tagging an
image into a repository and archiving an image to a .tar file.
options:
archive_path:
description:
- Use with state C(present) to archive an image to a .tar file.
required: false
version_added: "2.1"
load_path:
description:
- Use with state C(present) to load an image from a .tar file.
required: false
version_added: "2.2"
dockerfile:
description:
- Use with state C(present) to provide an alternate name for the Dockerfile to use when building an image.
default: Dockerfile
required: false
version_added: "2.0"
force:
description:
- Use with state I(absent) to un-tag and remove all images matching the specified name. Use with state
C(present) to build, load or pull an image when the image already exists.
default: false
required: false
version_added: "2.1"
type: bool
http_timeout:
description:
- Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
seconds.
required: false
version_added: "2.1"
name:
description:
- "Image name. Name format will be one of: name, repository/name, registry_server:port/name.
When pushing or pulling an image the name can optionally include the tag by appending ':tag_name'."
required: true
path:
description:
- Use with state 'present' to build an image. Will be the path to a directory containing the context and
Dockerfile for building an image.
aliases:
- build_path
required: false
pull:
description:
- When building an image, download any updates to the FROM image in the Dockerfile.
default: true
required: false
version_added: "2.1"
type: bool
push:
description:
- Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter.
default: false
required: false
version_added: "2.2"
type: bool
rm:
description:
- Remove intermediate containers after build.
default: true
required: false
version_added: "2.1"
type: bool
nocache:
description:
- Do not use cache when building an image.
default: false
required: false
type: bool
repository:
description:
- Full path to a repository. Use with state C(present) to tag the image into the repository. Expects
format I(repository:tag). If no tag is provided, will use the value of the C(tag) parameter or I(latest).
required: false
version_added: "2.1"
state:
description:
- Make assertions about the state of an image.
- When C(absent) an image will be removed. Use the force option to un-tag and remove all images
matching the provided name.
- When C(present) check if an image exists using the provided name and tag. If the image is not found or the
force option is used, the image will either be pulled, built or loaded. By default the image will be pulled
from Docker Hub. To build the image, provide a path value set to a directory containing a context and
Dockerfile. To load an image, specify load_path to provide a path to an archive file. To tag an image to a
repository, provide a repository path. If the name contains a repository path, it will be pushed.
- "NOTE: C(build) is DEPRECATED and will be removed in release 2.3. Specifying C(build) will behave the
same as C(present)."
required: false
default: present
choices:
- absent
- present
- build
tag:
description:
- Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to
I(latest).
- If C(name) parameter format is I(name:tag), then tag value from C(name) will take precedence.
default: latest
required: false
buildargs:
description:
- Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
- Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
- Requires Docker API >= 1.21 and docker-py >= 1.7.0.
required: false
version_added: "2.2"
container_limits:
description:
- A dictionary of limits applied to each container created by the build process.
required: false
version_added: "2.1"
suboptions:
memory:
description:
- Set memory limit for build.
memswap:
description:
- Total memory (memory + swap), -1 to disable swap.
cpushares:
description:
- CPU shares (relative weight).
cpusetcpus:
description:
- CPUs in which to allow execution, e.g., "0-3", "0,1".
use_tls:
description:
- "DEPRECATED. Whether to use tls to connect to the docker server. Set to C(no) when TLS will not be used. Set to
C(encrypt) to use TLS. And set to C(verify) to use TLS and verify that the server's certificate is valid for the
server. NOTE: If you specify this option, it will set the value of the tls or tls_verify parameters."
choices:
- no
- encrypt
- verify
default: no
required: false
version_added: "2.0"
extends_documentation_fragment:
- docker
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "Docker API >= 1.20"
author:
- Pavel Antonov (@softzilla)
- Chris Houseknecht (@chouseknecht)
- James Tanner (@jctanner)
'''
EXAMPLES = '''
- name: pull an image
docker_image:
name: pacur/centos-7
- name: Tag and push to docker hub
docker_image:
name: pacur/centos-7
repository: dcoppenhagan/myimage
tag: 7.0
push: yes
- name: Tag and push to local registry
docker_image:
name: centos
repository: localhost:5000/centos
tag: 7
push: yes
- name: Remove image
docker_image:
state: absent
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
- name: Build an image and push it to a private repo
docker_image:
path: ./sinatra
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
push: yes
- name: Archive image
docker_image:
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
archive_path: my_sinatra.tar
- name: Load image from archive and push to a private registry
docker_image:
name: localhost:5000/myimages/sinatra
tag: v1
push: yes
load_path: my_sinatra.tar
- name: Build an image with buildargs
docker_image:
path: /path/to/build/dir
name: myimage
buildargs:
log_volume: /var/log/myapp
listen_port: 8080
'''
RETURN = '''
image:
description: Image inspection results for the affected image.
returned: success
type: dict
sample: {}
'''
import os
import re
from ansible.module_utils.docker_common import HAS_DOCKER_PY_2, AnsibleDockerClient, DockerBaseClass
from ansible.module_utils._text import to_native
try:
if HAS_DOCKER_PY_2:
from docker.auth import resolve_repository_name
else:
from docker.auth.auth import resolve_repository_name
from docker.utils.utils import parse_repository_tag
except ImportError:
# missing docker-py handled in docker_common
pass
class ImageManager(DockerBaseClass):
def __init__(self, client, results):
super(ImageManager, self).__init__()
self.client = client
self.results = results
parameters = self.client.module.params
self.check_mode = self.client.check_mode
self.archive_path = parameters.get('archive_path')
self.container_limits = parameters.get('container_limits')
self.dockerfile = parameters.get('dockerfile')
self.force = parameters.get('force')
self.load_path = parameters.get('load_path')
self.name = parameters.get('name')
self.nocache = parameters.get('nocache')
self.path = parameters.get('path')
self.pull = parameters.get('pull')
self.repository = parameters.get('repository')
self.rm = parameters.get('rm')
self.state = parameters.get('state')
self.tag = parameters.get('tag')
self.http_timeout = parameters.get('http_timeout')
self.push = parameters.get('push')
self.buildargs = parameters.get('buildargs')
# If name contains a tag, it takes precedence over tag parameter.
repo, repo_tag = parse_repository_tag(self.name)
if repo_tag:
self.name = repo
self.tag = repo_tag
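# Hedged illustration (not from the original module): for example,
# parse_repository_tag('registry.example.com:5000/app:v2') yields
# ('registry.example.com:5000/app', 'v2'), so the 'v2' tag wins here.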
if self.state in ['present', 'build']:
self.present()
elif self.state == 'absent':
self.absent()
def fail(self, msg):
self.client.fail(msg)
def present(self):
'''
Handles state = 'present', which includes building, loading or pulling an image,
depending on user provided parameters.
:returns None
'''
image = self.client.find_image(name=self.name, tag=self.tag)
if not image or self.force:
if self.path:
# Build the image
if not os.path.isdir(self.path):
self.fail("Requested build path %s could not be found or you do not have access." % self.path)
image_name = self.name
if self.tag:
image_name = "%s:%s" % (self.name, self.tag)
self.log("Building image %s" % image_name)
self.results['actions'].append("Built image %s from %s" % (image_name, self.path))
self.results['changed'] = True
if not self.check_mode:
self.results['image'] = self.build_image()
elif self.load_path:
# Load the image from an archive
if not os.path.isfile(self.load_path):
self.fail("Error loading image %s. Specified path %s does not exist." % (self.name,
self.load_path))
image_name = self.name
if self.tag:
image_name = "%s:%s" % (self.name, self.tag)
self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path))
self.results['changed'] = True
if not self.check_mode:
self.results['image'] = self.load_image()
else:
# pull the image
self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
self.results['changed'] = True
if not self.check_mode:
self.results['image'] = self.client.pull_image(self.name, tag=self.tag)
if image and image == self.results['image']:
self.results['changed'] = False
if self.archive_path:
self.archive_image(self.name, self.tag)
if self.push and not self.repository:
self.push_image(self.name, self.tag)
elif self.repository:
self.tag_image(self.name, self.tag, self.repository, force=self.force, push=self.push)
def absent(self):
'''
Handles state = 'absent', which removes an image.
:return None
'''
image = self.client.find_image(self.name, self.tag)
if image:
name = self.name
if self.tag:
name = "%s:%s" % (self.name, self.tag)
if not self.check_mode:
try:
self.client.remove_image(name, force=self.force)
except Exception as exc:
self.fail("Error removing image %s - %s" % (name, str(exc)))
self.results['changed'] = True
self.results['actions'].append("Removed image %s" % (name))
self.results['image']['state'] = 'Deleted'
def archive_image(self, name, tag):
'''
Archive an image to a .tar file. Called when archive_path is passed.
:param name - name of the image. Type: str
:param tag - tag of the image. Type: str
:return None
'''
if not tag:
tag = "latest"
image = self.client.find_image(name=name, tag=tag)
if not image:
self.log("archive image: image %s:%s not found" % (name, tag))
return
image_name = "%s:%s" % (name, tag)
self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path))
self.results['changed'] = True
if not self.check_mode:
self.log("Getting archive of image %s" % image_name)
try:
image = self.client.get_image(image_name)
except Exception as exc:
self.fail("Error getting image %s - %s" % (image_name, str(exc)))
try:
with open(self.archive_path, 'w') as fd:
for chunk in image.stream(2048, decode_content=False):
fd.write(chunk)
except Exception as exc:
self.fail("Error writing image archive %s - %s" % (self.archive_path, str(exc)))
image = self.client.find_image(name=name, tag=tag)
if image:
self.results['image'] = image
def push_image(self, name, tag=None):
'''
If the name of the image contains a repository path, then push the image.
:param name Name of the image to push.
:param tag Use a specific tag.
:return: None
'''
repository = name
if not tag:
repository, tag = parse_repository_tag(name)
registry, repo_name = resolve_repository_name(repository)
self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
if registry:
self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
self.results['changed'] = True
if not self.check_mode:
status = None
try:
for line in self.client.push(repository, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('errorDetail'):
raise Exception(line['errorDetail']['message'])
status = line.get('status')
except Exception as exc:
if re.search('unauthorized', str(exc)):
if re.search('authentication required', str(exc)):
self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
(registry, repo_name, tag, str(exc), registry))
else:
self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
(registry, repo_name, tag, str(exc)))
self.fail("Error pushing image %s: %s" % (repository, str(exc)))
self.results['image'] = self.client.find_image(name=repository, tag=tag)
if not self.results['image']:
self.results['image'] = dict()
self.results['image']['push_status'] = status
def tag_image(self, name, tag, repository, force=False, push=False):
'''
Tag an image into a repository.
:param name: name of the image. required.
:param tag: image tag.
:param repository: path to the repository. required.
:param force: bool. force tagging, even if the image already exists with the repository path.
:param push: bool. push the image once it's tagged.
:return: None
'''
repo, repo_tag = parse_repository_tag(repository)
if not repo_tag:
repo_tag = "latest"
if tag:
repo_tag = tag
image = self.client.find_image(name=repo, tag=repo_tag)
found = 'found' if image else 'not found'
self.log("image %s was %s" % (repo, found))
if not image or force:
self.log("tagging %s:%s to %s:%s" % (name, tag, repo, repo_tag))
self.results['changed'] = True
self.results['actions'].append("Tagged image %s:%s to %s:%s" % (name, tag, repo, repo_tag))
if not self.check_mode:
try:
# Finding the image does not always work, especially running a localhost registry. In those
# cases, if we don't set force=True, it errors.
image_name = name
if tag and not re.search(tag, name):
image_name = "%s:%s" % (name, tag)
tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True)
if not tag_status:
raise Exception("Tag operation failed.")
except Exception as exc:
self.fail("Error: failed to tag image - %s" % str(exc))
self.results['image'] = self.client.find_image(name=repo, tag=repo_tag)
if push:
self.push_image(repo, repo_tag)
def build_image(self):
'''
Build an image
:return: image dict
'''
params = dict(
path=self.path,
tag=self.name,
rm=self.rm,
nocache=self.nocache,
stream=True,
timeout=self.http_timeout,
pull=self.pull,
forcerm=self.rm,
dockerfile=self.dockerfile,
decode=True
)
build_output = []
if self.tag:
params['tag'] = "%s:%s" % (self.name, self.tag)
if self.container_limits:
params['container_limits'] = self.container_limits
if self.buildargs:
for key, value in self.buildargs.items():
self.buildargs[key] = to_native(value)
params['buildargs'] = self.buildargs
for line in self.client.build(**params):
# line = json.loads(line)
self.log(line, pretty_print=True)
if "stream" in line:
build_output.append(line["stream"])
if line.get('error'):
if line.get('errorDetail'):
errorDetail = line.get('errorDetail')
self.fail(
"Error building %s - code: %s, message: %s, logs: %s" % (
self.name,
errorDetail.get('code'),
errorDetail.get('message'),
build_output))
else:
self.fail("Error building %s - message: %s, logs: %s" % (
self.name, line.get('error'), build_output))
return self.client.find_image(name=self.name, tag=self.tag)
def load_image(self):
'''
Load an image from a .tar archive
:return: image dict
'''
try:
self.log("Opening image %s" % self.load_path)
image_tar = open(self.load_path, 'r')
except Exception as exc:
self.fail("Error opening image %s - %s" % (self.load_path, str(exc)))
try:
self.log("Loading image from %s" % self.load_path)
self.client.load_image(image_tar)
except Exception as exc:
self.fail("Error loading image %s - %s" % (self.name, str(exc)))
try:
image_tar.close()
except Exception as exc:
self.fail("Error closing image %s - %s" % (self.name, str(exc)))
return self.client.find_image(self.name, self.tag)
def main():
argument_spec = dict(
archive_path=dict(type='path'),
container_limits=dict(type='dict'),
dockerfile=dict(type='str'),
force=dict(type='bool', default=False),
http_timeout=dict(type='int'),
load_path=dict(type='path'),
name=dict(type='str', required=True),
nocache=dict(type='bool', default=False),
path=dict(type='path', aliases=['build_path']),
pull=dict(type='bool', default=True),
push=dict(type='bool', default=False),
repository=dict(type='str'),
rm=dict(type='bool', default=True),
state=dict(type='str', choices=['absent', 'present', 'build'], default='present'),
tag=dict(type='str', default='latest'),
use_tls=dict(type='str', default='no', choices=['no', 'encrypt', 'verify']),
buildargs=dict(type='dict', default=None),
)
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True,
)
results = dict(
changed=False,
actions=[],
image={}
)
ImageManager(client, results)
client.module.exit_json(**results)
if __name__ == '__main__':
main()
|
| mayblue9/scikit-learn | refs/heads/master | sklearn/decomposition/online_lda.py | 17 |
"""
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import (check_random_state, check_array,
gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.validation import NotFittedError, check_non_negative
from ..utils.extmath import logsumexp
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
EPS = np.finfo(np.float).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
max_iters,
mean_change_tol, cal_sstats, random_state):
"""E-step: update document-topic distribution.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
Exponential value of expectation of log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
doc_topic_prior : float
Prior of document topic distribution `theta`.
max_iters : int
Max number of iterations for updating document topic distribution in
the E-step.
mean_change_tol : float
Stopping tolerance for updating document topic distribution in E-step.
cal_sstats : boolean
Parameter that indicates whether to calculate sufficient statistics or not.
Set `cal_sstats` to `True` when we need to run M-step.
random_state : RandomState instance or None
Parameter that indicates how to initialize the document topic distribution.
Setting `random_state` to None will initialize the document topic
distribution to a constant number.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each document.
In the literature, this is `gamma`. We can calculate `E[log(theta)]`
from it.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, this will be None.
"""
is_sparse_x = sp.issparse(X)
n_samples, n_features = X.shape
n_topics = exp_topic_word_distr.shape[0]
if random_state:
doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
else:
doc_topic_distr = np.ones((n_samples, n_topics))
# In the literature, this is `exp(E[log(theta)])`
exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
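# (Added clarification: for a Dirichlet(gamma) vector,
#  E[log(theta_k)] = psi(gamma_k) - psi(sum_k gamma_k), which is what
#  _dirichlet_expectation_2d computes row-wise before exponentiation.)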
# diff on `component_` (only calculate it when `cal_diff` is True)
suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in xrange(n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
doc_topic_d = doc_topic_distr[idx_d, :]
# The next one is a copy, since the inner loop overwrites it.
exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
exp_topic_word_d = exp_topic_word_distr[:, ids]
# Iterate between `doc_topic_d` and `norm_phi` until convergence
for _ in xrange(0, max_iters):
last_d = doc_topic_d
# The optimal phi_{dwk} is proportional to
# exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
doc_topic_d = (exp_doc_topic_d *
np.dot(cnts / norm_phi, exp_topic_word_d.T))
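# (Added clarification: together with the prior added by the next call, this
#  is the standard variational update gamma_dk = alpha + sum_w n_dw * phi_dwk,
#  where phi_dwk is proportional to exp(E[log theta_dk]) * exp(E[log beta_kw]).)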
# Note: adds doc_topic_prior to doc_topic_d, in-place.
_dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,
exp_doc_topic_d)
if mean_change(last_d, doc_topic_d) < mean_change_tol:
break
doc_topic_distr[idx_d, :] = doc_topic_d
# Contribution of document d to the expected sufficient
# statistics for the M step.
if cal_sstats:
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
return (doc_topic_distr, suff_stats)
class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
"""Latent Dirichlet Allocation with online variational Bayes algorithm
Parameters
----------
n_topics : int, optional (default=10)
Number of topics.
doc_topic_prior : float, optional (default=None)
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_topics`.
In the literature, this is called `alpha`.
topic_word_prior : float, optional (default=None)
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_topics`.
In the literature, this is called `eta`.
learning_method : 'batch' | 'online', default='online'
Method used to update ``components_``. Only used in the `fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
Valid options::
'batch': Batch variational Bayes method. Use all training data in
each EM update.
Old `components_` will be overwritten in each iteration.
'online': Online variational Bayes method. In each EM update, use
mini-batch of training data to update the ``components_``
variable incrementally. The learning rate is controlled by the
``learning_decay`` and the ``learning_offset`` parameters.
learning_decay : float, optional (default=0.7)
It is a parameter that controls the learning rate in the online learning
method. The value should be set between (0.5, 1.0] to guarantee
asymptotic convergence. When the value is 0.0 and batch_size is
``n_samples``, the update method is same as batch learning. In the
literature, this is called kappa.
learning_offset : float, optional (default=10.)
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : integer, optional (default=10)
The maximum number of iterations.
total_samples : int, optional (default=1e6)
Total number of documents. Only used in the `partial_fit` method.
batch_size : int, optional (default=128)
Number of documents to use in each EM iteration. Only used in online
learning.
evaluate_every : int, optional (default=0)
How often to evaluate perplexity. Only used in the `fit` method.
Set it to 0 or a negative number to not evaluate perplexity in
training at all. Evaluating perplexity can help you check convergence
in the training process, but it will also increase total training time.
Evaluating perplexity in every iteration might increase training time
up to two-fold.
perp_tol : float, optional (default=1e-1)
Perplexity tolerance in batch learning. Only used when
``evaluate_every`` is greater than 0.
mean_change_tol : float, optional (default=1e-3)
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int (default=100)
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, optional (default=1)
The number of jobs to use in the E-step. If -1, all CPUs are used. For
``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used.
verbose : int, optional (default=0)
Verbosity level.
random_state : int or RandomState instance or None, optional (default=None)
Pseudo-random number generator seed control.
Attributes
----------
components_ : array, [n_topics, n_features]
Topic word distribution. ``components_[i, j]`` represents word j in
topic `i`. In the literature, this is called lambda.
n_batch_iter_ : int
Number of iterations of the EM step.
n_iter_ : int
Number of passes over the dataset.
References
----------
[1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman,
David M. Blei, Francis Bach, 2010
[2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
Chong Wang, John Paisley, 2013
[3] Matthew D. Hoffman's onlineldavb code. Link:
http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
def __init__(self, n_topics=10, doc_topic_prior=None,
topic_word_prior=None, learning_method='online',
learning_decay=.7, learning_offset=10., max_iter=10,
batch_size=128, evaluate_every=-1, total_samples=1e6,
perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
n_jobs=1, verbose=0, random_state=None):
self.n_topics = n_topics
self.doc_topic_prior = doc_topic_prior
self.topic_word_prior = topic_word_prior
self.learning_method = learning_method
self.learning_decay = learning_decay
self.learning_offset = learning_offset
self.max_iter = max_iter
self.batch_size = batch_size
self.evaluate_every = evaluate_every
self.total_samples = total_samples
self.perp_tol = perp_tol
self.mean_change_tol = mean_change_tol
self.max_doc_update_iter = max_doc_update_iter
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
def _check_params(self):
"""Check model parameters."""
if self.n_topics <= 0:
raise ValueError("Invalid 'n_topics' parameter: %r"
% self.n_topics)
if self.total_samples <= 0:
raise ValueError("Invalid 'total_samples' parameter: %r"
% self.total_samples)
if self.learning_offset < 0:
raise ValueError("Invalid 'learning_offset' parameter: %r"
% self.learning_offset)
if self.learning_method not in ("batch", "online"):
raise ValueError("Invalid 'learning_method' parameter: %r"
% self.learning_method)
def _init_latent_vars(self, n_features):
"""Initialize latent variables."""
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1. / self.n_topics
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1. / self.n_topics
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.
init_var = 1. / init_gamma
# In the literature, this is called `lambda`
self.components_ = self.random_state_.gamma(
init_gamma, init_var, (self.n_topics, n_features))
# In the literature, this is `exp(E[log(beta)])`
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
def _e_step(self, X, cal_sstats, random_init, parallel=None):
"""E-step in EM update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
cal_sstats : boolean
Parameter that indicates whether to calculate sufficient statistics
or not. Set ``cal_sstats`` to True when we need to run M-step.
random_init : boolean
Parameter that indicates whether to initialize the document topic
distribution randomly in the E-step. Set it to True in training
steps.
parallel : joblib.Parallel (optional)
Pre-initialized instance of joblib.Parallel.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each
document. In the literature, this is called `gamma`.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, it will be None.
"""
# Run e-step in parallel
random_state = self.random_state_ if random_init else None
# TODO: make Parallel._effective_n_jobs public instead?
n_jobs = _get_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=self.verbose)
results = parallel(
delayed(_update_doc_distribution)(X[idx_slice, :],
self.exp_dirichlet_component_,
self.doc_topic_prior_,
self.max_doc_update_iter,
self.mean_change_tol, cal_sstats,
random_state)
for idx_slice in gen_even_slices(X.shape[0], n_jobs))
# merge result
doc_topics, sstats_list = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if cal_sstats:
# This step finishes computing the sufficient statistics for the
# M-step.
suff_stats = np.zeros(self.components_.shape)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
return (doc_topic_distr, suff_stats)
def _em_step(self, X, total_samples, batch_update, parallel=None):
"""EM update for 1 iteration.
Update ``components_`` by batch VB or online VB.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
total_samples : integer
Total number of documents. It is only used when
batch_update is `False`.
batch_update : boolean
Parameter that controls updating method.
`True` for batch learning, `False` for online learning.
parallel : joblib.Parallel
Pre-initialized instance of joblib.Parallel
Returns
-------
doc_topic_distr : array, shape=(n_samples, n_topics)
Unnormalized document topic distribution.
"""
# E-step
_, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
parallel=parallel)
# M-step
if batch_update:
self.components_ = self.topic_word_prior_ + suff_stats
else:
# online update
# In the literature, the weight is `rho`
weight = np.power(self.learning_offset + self.n_batch_iter_,
-self.learning_decay)
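# (Added worked example: with the defaults learning_offset=10.,
#  learning_decay=0.7 and n_batch_iter_=1, weight = 11 ** -0.7 ~= 0.187,
#  and it decays toward 0 as more mini-batches are seen.)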
doc_ratio = float(total_samples) / X.shape[0]
self.components_ *= (1 - weight)
self.components_ += (weight * (self.topic_word_prior_
+ doc_ratio * suff_stats))
# update `components_`-related variables
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
return
def _check_non_neg_array(self, X, whom):
"""check X format
check X format and make sure no negative value in X.
Parameters
----------
X : array-like or sparse matrix
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, whom)
return X
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.partial_fit")
n_samples, n_features = X.shape
batch_size = self.batch_size
# initialize parameters or check
if not hasattr(self, 'components_'):
self._init_latent_vars(n_features)
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=self.verbose) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :],
total_samples=self.total_samples,
batch_update=False,
parallel=parallel)
return self
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit")
n_samples, n_features = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
batch_size = self.batch_size
# initialize parameters
self._init_latent_vars(n_features)
# change to perplexity later
last_bound = None
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=self.verbose) as parallel:
for i in xrange(max_iter):
if learning_method == 'online':
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :], total_samples=n_samples,
batch_update=False, parallel=parallel)
else:
# batch update
self._em_step(X, total_samples=n_samples,
batch_update=True, parallel=parallel)
# check perplexity
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
bound = self.perplexity(X, doc_topics_distr,
sub_sampling=False)
if self.verbose:
print('iteration: %d, perplexity: %.4f'
% (i + 1, bound))
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
self.n_iter_ += 1
return self
def transform(self, X):
"""Transform data X according to the fitted model.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : shape=(n_samples, n_topics)
Document topic distribution for X.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
# make sure feature size is the same in fitted model and in X
X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform")
n_samples, n_features = X.shape
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
return doc_topic_distr
def _approx_bound(self, X, doc_topic_distr, sub_sampling):
"""Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
doc_topic_distr : array, shape=(n_samples, n_topics)
Document topic distribution. In the literature, this is called
gamma.
sub_sampling : boolean, optional, (default=False)
Compensate for subsampling of documents.
            It is used to calculate the bound in online learning.
Returns
-------
score : float
"""
def _loglikelihood(prior, distr, dirichlet_distr, size):
# calculate log-likelihood
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
return score
is_sparse_x = sp.issparse(X)
n_samples, n_topics = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
# E[log p(docs | theta, beta)]
for idx_d in xrange(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
+ dirichlet_component_[:, ids])
norm_phi = logsumexp(temp)
score += np.dot(cnts, norm_phi)
# compute E[log p(theta | alpha) - log q(theta | gamma)]
score += _loglikelihood(doc_topic_prior, doc_topic_distr,
dirichlet_doc_topic, self.n_topics)
# Compensate for the subsampling of the population of documents
if sub_sampling:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
# E[log p(beta | eta) - log q (beta | lambda)]
score += _loglikelihood(topic_word_prior, self.components_,
dirichlet_component_, n_features)
return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
score : float
Use approximate bound as score.
"""
X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")
doc_topic_distr = self.transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
def perplexity(self, X, doc_topic_distr=None, sub_sampling=False):
"""Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_topics)
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.perplexity")
if doc_topic_distr is None:
doc_topic_distr = self.transform(X)
else:
n_samples, n_topics = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError("Number of samples in X and doc_topic_distr"
" do not match.")
if n_topics != self.n_topics:
raise ValueError("Number of topics does not match.")
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
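# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration). The toy counts below are made
# up, and the constructor arguments simply mirror the attribute names used in
# the methods above; adjust them to the actual signature of this class.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_toy = rng.randint(0, 5, size=(20, 30))  # 20 documents, 30 terms
    lda = LatentDirichletAllocation(n_topics=3, learning_method='batch',
                                    max_iter=5, random_state=0)
    lda.fit(X_toy)                            # batch variational Bayes
    doc_topics = lda.transform(X_toy)         # unnormalized `gamma`
    print('doc_topic_distr shape: %s' % (doc_topics.shape,))
    print('perplexity: %.3f' % lda.perplexity(X_toy))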
|
ShinyROM/android_external_chromium_org
|
refs/heads/master
|
tools/perf/measurements/startwithurl.py
|
23
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import startup
from metrics import startup_metric
class StartWithUrl(startup.Startup):
"""Measures Chromium's startup performance when started with a URL
This test inherits support for the --warm or --cold command line options -
see startup.py for details.
"""
def __init__(self):
super(StartWithUrl, self).__init__()
self.close_tabs_before_run = False
def AddCommandLineOptions(self, parser):
super(StartWithUrl, self).AddCommandLineOptions(parser)
parser.add_option('--url', action='store', default=None,
help='Start with a request to open a specific URL')
def CustomizeBrowserOptions(self, options):
super(StartWithUrl, self).CustomizeBrowserOptions(options)
if options.url:
browser_options = options.browser_options
browser_options.startup_url = options.url
options.AppendExtraBrowserArgs([
'--restore-last-session'
])
def CanRunForPage(self, page):
    # No matter how many pages are in the pageset, perform only one iteration.
return page.page_set.pages.index(page) == 0
def RunNavigateSteps(self, page, tab):
    # Overridden so that no page navigation occurs.
pass
def ValidatePageSet(self, page_set):
# Reject any pageset that contains more than one WPR archive.
wpr_archives = {}
for page in page_set:
wpr_archives[page_set.WprFilePathForPage(page)] = True
if len(wpr_archives.keys()) > 1:
raise Exception("Invalid pageset: more than 1 WPR archive found.: " +
', '.join(wpr_archives.keys()))
def MeasurePage(self, page, tab, results):
# Wait for all tabs to finish loading.
for i in xrange(len(tab.browser.tabs)):
t = tab.browser.tabs[i]
t.WaitForDocumentReadyStateToBeComplete()
startup_metric.StartupMetric().AddResults(tab, results)
|
HybridF5/nova
|
refs/heads/master
|
nova/tests/unit/virt/test_virt_drivers.py
|
6
|
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import sys
import traceback
import fixtures
import mock
import netaddr
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from nova.compute import manager
from nova.console import type as ctype
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_block_device
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit import utils as test_utils
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.virt import block_device as driver_block_device
from nova.virt import event as virtevent
from nova.virt import fake
from nova.virt import hardware
from nova.virt import libvirt
from nova.virt.libvirt import imagebackend
LOG = logging.getLogger(__name__)
def catch_notimplementederror(f):
"""Decorator to simplify catching drivers raising NotImplementedError
If a particular call makes a driver raise NotImplementedError, we
log it so that we can extract this information afterwards as needed.
"""
def wrapped_func(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except NotImplementedError:
frame = traceback.extract_tb(sys.exc_info()[2])[-1]
LOG.error("%(driver)s does not implement %(method)s "
"required for test %(test)s" %
{'driver': type(self.connection),
'method': frame[2], 'test': f.__name__})
wrapped_func.__name__ = f.__name__
wrapped_func.__doc__ = f.__doc__
return wrapped_func
class _FakeDriverBackendTestCase(object):
def _setup_fakelibvirt(self):
        # So that the _supports_direct_io check is done based
        # on the current working directory, instead of the
        # default instances_path which doesn't exist
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
# Put fakelibvirt in place
if 'libvirt' in sys.modules:
self.saved_libvirt = sys.modules['libvirt']
else:
self.saved_libvirt = None
import nova.tests.unit.virt.libvirt.fake_imagebackend as \
fake_imagebackend
import nova.tests.unit.virt.libvirt.fake_libvirt_utils as \
fake_libvirt_utils
import nova.tests.unit.virt.libvirt.fakelibvirt as fakelibvirt
import nova.tests.unit.virt.libvirt.fake_os_brick_connector as \
fake_os_brick_connector
sys.modules['libvirt'] = fakelibvirt
import nova.virt.libvirt.driver
import nova.virt.libvirt.firewall
import nova.virt.libvirt.host
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.imagebackend',
fake_imagebackend))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt',
fakelibvirt))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.host.libvirt',
fakelibvirt))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.firewall.libvirt',
fakelibvirt))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.connector',
fake_os_brick_connector))
fakelibvirt.disable_event_thread(self)
self.flags(rescue_image_id="2",
rescue_kernel_id="3",
rescue_ramdisk_id=None,
snapshots_directory='./',
sysinfo_serial='none',
group='libvirt')
def fake_extend(image, size):
pass
def fake_migrateToURI(*a):
pass
def fake_make_drive(_self, _path):
pass
def fake_get_instance_disk_info(_self, instance, xml=None,
block_device_info=None):
return '[]'
def fake_delete_instance_files(_self, _instance):
pass
def fake_wait():
pass
def fake_detach_device_with_retry(_self, get_device_conf_func, device,
persistent, live,
max_retry_count=7,
inc_sleep_time=2,
max_sleep_time=30):
            # Still calling detach, but instead of returning a function
            # that actually checks whether the device is gone from the XML,
            # just continue because the XML never gets updated in these tests
_self.detach_device(get_device_conf_func(device),
persistent=persistent,
live=live)
return fake_wait
self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
'_get_instance_disk_info',
fake_get_instance_disk_info)
self.stubs.Set(nova.virt.libvirt.driver.disk,
'extend', fake_extend)
self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
'delete_instance_files',
fake_delete_instance_files)
self.stubs.Set(nova.virt.libvirt.guest.Guest,
'detach_device_with_retry',
fake_detach_device_with_retry)
# Like the existing fakelibvirt.migrateToURI, do nothing,
# but don't fail for these tests.
self.stubs.Set(nova.virt.libvirt.driver.libvirt.Domain,
'migrateToURI', fake_migrateToURI)
# We can't actually make a config drive v2 because ensure_tree has
# been faked out
self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
'make_drive', fake_make_drive)
def _teardown_fakelibvirt(self):
# Restore libvirt
if self.saved_libvirt:
sys.modules['libvirt'] = self.saved_libvirt
def setUp(self):
super(_FakeDriverBackendTestCase, self).setUp()
# TODO(sdague): it would be nice to do this in a way that only
        # the relevant backends were replaced for tests, though this
# should not harm anything by doing it for all backends
fake_image.stub_out_image_service(self)
self._setup_fakelibvirt()
def tearDown(self):
fake_image.FakeImageService_reset()
self._teardown_fakelibvirt()
super(_FakeDriverBackendTestCase, self).tearDown()
class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
"""Test that ComputeManager can successfully load both
old style and new style drivers and end up with the correct
final class.
"""
# if your driver supports being tested in a fake way, it can go here
#
# both long form and short form drivers are supported
new_drivers = {
'nova.virt.fake.FakeDriver': 'FakeDriver',
'nova.virt.libvirt.LibvirtDriver': 'LibvirtDriver',
'fake.FakeDriver': 'FakeDriver',
'libvirt.LibvirtDriver': 'LibvirtDriver'
}
def test_load_new_drivers(self):
for cls, driver in six.iteritems(self.new_drivers):
self.flags(compute_driver=cls)
# NOTE(sdague) the try block is to make it easier to debug a
# failure by knowing which driver broke
try:
cm = manager.ComputeManager()
except Exception as e:
self.fail("Couldn't load driver %s - %s" % (cls, e))
self.assertEqual(cm.driver.__class__.__name__, driver,
"Could't load driver %s" % cls)
def test_fail_to_load_new_drivers(self):
self.flags(compute_driver='nova.virt.amiga')
def _fake_exit(error):
raise test.TestingException()
self.stubs.Set(sys, 'exit', _fake_exit)
self.assertRaises(test.TestingException, manager.ComputeManager)
class _VirtDriverTestCase(_FakeDriverBackendTestCase):
def setUp(self):
super(_VirtDriverTestCase, self).setUp()
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
self.connection = importutils.import_object(self.driver_module,
fake.FakeVirtAPI())
self.ctxt = test_utils.get_test_admin_context()
self.image_service = fake_image.FakeImageService()
# NOTE(dripton): resolve_driver_format does some file reading and
# writing and chowning that complicate testing too much by requiring
# using real directories with proper permissions. Just stub it out
# here; we test it in test_imagebackend.py
self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
imagebackend.Image._get_driver_format)
def _get_running_instance(self, obj=True):
instance_ref = test_utils.get_test_instance(obj=obj)
network_info = test_utils.get_test_network_info()
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
'1.1.1.1'
image_meta = test_utils.get_test_image_object(None, instance_ref)
self.connection.spawn(self.ctxt, instance_ref, image_meta,
[], 'herp', network_info=network_info)
return instance_ref, network_info
@catch_notimplementederror
def test_init_host(self):
self.connection.init_host('myhostname')
@catch_notimplementederror
def test_list_instances(self):
self.connection.list_instances()
@catch_notimplementederror
def test_list_instance_uuids(self):
self.connection.list_instance_uuids()
@catch_notimplementederror
def test_spawn(self):
instance_ref, network_info = self._get_running_instance()
domains = self.connection.list_instances()
self.assertIn(instance_ref['name'], domains)
num_instances = self.connection.get_num_instances()
self.assertEqual(1, num_instances)
@catch_notimplementederror
def test_snapshot_not_running(self):
instance_ref = test_utils.get_test_instance()
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
self.assertRaises(exception.InstanceNotRunning,
self.connection.snapshot,
self.ctxt, instance_ref, img_ref['id'],
lambda *args, **kwargs: None)
@catch_notimplementederror
def test_snapshot_running(self):
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
instance_ref, network_info = self._get_running_instance()
self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'],
lambda *args, **kwargs: None)
@catch_notimplementederror
def test_post_interrupted_snapshot_cleanup(self):
instance_ref, network_info = self._get_running_instance()
self.connection.post_interrupted_snapshot_cleanup(self.ctxt,
instance_ref)
@catch_notimplementederror
def test_reboot(self):
reboot_type = "SOFT"
instance_ref, network_info = self._get_running_instance()
self.connection.reboot(self.ctxt, instance_ref, network_info,
reboot_type)
@catch_notimplementederror
def test_get_host_ip_addr(self):
host_ip = self.connection.get_host_ip_addr()
# Will raise an exception if it's not a valid IP at all
ip = netaddr.IPAddress(host_ip)
# For now, assume IPv4.
self.assertEqual(ip.version, 4)
@catch_notimplementederror
def test_set_admin_password(self):
instance, network_info = self._get_running_instance(obj=True)
self.connection.set_admin_password(instance, 'p4ssw0rd')
@catch_notimplementederror
def test_inject_file(self):
instance_ref, network_info = self._get_running_instance()
self.connection.inject_file(instance_ref,
base64.b64encode('/testfile'),
base64.b64encode('testcontents'))
@catch_notimplementederror
def test_resume_state_on_host_boot(self):
instance_ref, network_info = self._get_running_instance()
self.connection.resume_state_on_host_boot(self.ctxt, instance_ref,
network_info)
@catch_notimplementederror
def test_rescue(self):
image_meta = objects.ImageMeta.from_dict({})
instance_ref, network_info = self._get_running_instance()
self.connection.rescue(self.ctxt, instance_ref, network_info,
image_meta, '')
@catch_notimplementederror
def test_unrescue_unrescued_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.unrescue(instance_ref, network_info)
@catch_notimplementederror
def test_unrescue_rescued_instance(self):
image_meta = objects.ImageMeta.from_dict({})
instance_ref, network_info = self._get_running_instance()
self.connection.rescue(self.ctxt, instance_ref, network_info,
image_meta, '')
self.connection.unrescue(instance_ref, network_info)
@catch_notimplementederror
def test_poll_rebooting_instances(self):
instances = [self._get_running_instance()]
self.connection.poll_rebooting_instances(10, instances)
@catch_notimplementederror
def test_migrate_disk_and_power_off(self):
instance_ref, network_info = self._get_running_instance()
flavor_ref = test_utils.get_test_flavor()
self.connection.migrate_disk_and_power_off(
self.ctxt, instance_ref, 'dest_host', flavor_ref,
network_info)
@catch_notimplementederror
def test_power_off(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_off(instance_ref)
@catch_notimplementederror
def test_power_on_running(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_on(self.ctxt, instance_ref,
network_info, None)
@catch_notimplementederror
def test_power_on_powered_off(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_off(instance_ref)
self.connection.power_on(self.ctxt, instance_ref, network_info, None)
@catch_notimplementederror
def test_trigger_crash_dump(self):
instance_ref, network_info = self._get_running_instance()
self.connection.trigger_crash_dump(instance_ref)
@catch_notimplementederror
def test_soft_delete(self):
instance_ref, network_info = self._get_running_instance(obj=True)
self.connection.soft_delete(instance_ref)
@catch_notimplementederror
def test_restore_running(self):
instance_ref, network_info = self._get_running_instance()
self.connection.restore(instance_ref)
@catch_notimplementederror
def test_restore_soft_deleted(self):
instance_ref, network_info = self._get_running_instance()
self.connection.soft_delete(instance_ref)
self.connection.restore(instance_ref)
@catch_notimplementederror
def test_pause(self):
instance_ref, network_info = self._get_running_instance()
self.connection.pause(instance_ref)
@catch_notimplementederror
def test_unpause_unpaused_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.unpause(instance_ref)
@catch_notimplementederror
def test_unpause_paused_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.pause(instance_ref)
self.connection.unpause(instance_ref)
@catch_notimplementederror
def test_suspend(self):
instance_ref, network_info = self._get_running_instance()
self.connection.suspend(self.ctxt, instance_ref)
@catch_notimplementederror
def test_resume_unsuspended_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.resume(self.ctxt, instance_ref, network_info)
@catch_notimplementederror
def test_resume_suspended_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.suspend(self.ctxt, instance_ref)
self.connection.resume(self.ctxt, instance_ref, network_info)
@catch_notimplementederror
def test_destroy_instance_nonexistent(self):
fake_instance = test_utils.get_test_instance(obj=True)
network_info = test_utils.get_test_network_info()
self.connection.destroy(self.ctxt, fake_instance, network_info)
@catch_notimplementederror
def test_destroy_instance(self):
instance_ref, network_info = self._get_running_instance()
self.assertIn(instance_ref['name'],
self.connection.list_instances())
self.connection.destroy(self.ctxt, instance_ref, network_info)
self.assertNotIn(instance_ref['name'],
self.connection.list_instances())
@catch_notimplementederror
def test_get_volume_connector(self):
result = self.connection.get_volume_connector({'id': 'fake'})
self.assertIn('ip', result)
self.assertIn('initiator', result)
self.assertIn('host', result)
@catch_notimplementederror
def test_get_volume_connector_storage_ip(self):
ip = 'my_ip'
storage_ip = 'storage_ip'
self.flags(my_block_storage_ip=storage_ip, my_ip=ip)
result = self.connection.get_volume_connector({'id': 'fake'})
self.assertIn('ip', result)
self.assertIn('initiator', result)
self.assertIn('host', result)
self.assertEqual(storage_ip, result['ip'])
@catch_notimplementederror
def test_attach_detach_volume(self):
instance_ref, network_info = self._get_running_instance()
connection_info = {
"driver_volume_type": "fake",
"serial": "fake_serial",
"data": {}
}
self.assertIsNone(
self.connection.attach_volume(None, connection_info, instance_ref,
'/dev/sda'))
self.assertIsNone(
self.connection.detach_volume(connection_info, instance_ref,
'/dev/sda'))
@catch_notimplementederror
def test_swap_volume(self):
instance_ref, network_info = self._get_running_instance()
self.assertIsNone(
self.connection.attach_volume(None, {'driver_volume_type': 'fake',
'data': {}},
instance_ref,
'/dev/sda'))
self.assertIsNone(
self.connection.swap_volume({'driver_volume_type': 'fake',
'data': {}},
{'driver_volume_type': 'fake',
'data': {}},
instance_ref,
'/dev/sda', 2))
@catch_notimplementederror
def test_attach_detach_different_power_states(self):
instance_ref, network_info = self._get_running_instance()
connection_info = {
"driver_volume_type": "fake",
"serial": "fake_serial",
"data": {}
}
self.connection.power_off(instance_ref)
self.connection.attach_volume(None, connection_info, instance_ref,
'/dev/sda')
bdm = {
'root_device_name': None,
'swap': None,
'ephemerals': [],
'block_device_mapping': driver_block_device.convert_volumes([
objects.BlockDeviceMapping(
self.ctxt,
**fake_block_device.FakeDbBlockDeviceDict(
{'id': 1, 'instance_uuid': instance_ref['uuid'],
'device_name': '/dev/sda',
'source_type': 'volume',
'destination_type': 'volume',
'delete_on_termination': False,
'snapshot_id': None,
'volume_id': 'abcdedf',
'volume_size': None,
'no_device': None
})),
])
}
bdm['block_device_mapping'][0]['connection_info'] = (
{'driver_volume_type': 'fake', 'data': {}})
with mock.patch.object(
driver_block_device.DriverVolumeBlockDevice, 'save'):
self.connection.power_on(
self.ctxt, instance_ref, network_info, bdm)
self.connection.detach_volume(connection_info,
instance_ref,
'/dev/sda')
@catch_notimplementederror
def test_get_info(self):
instance_ref, network_info = self._get_running_instance()
info = self.connection.get_info(instance_ref)
self.assertIsInstance(info, hardware.InstanceInfo)
@catch_notimplementederror
def test_get_info_for_unknown_instance(self):
fake_instance = test_utils.get_test_instance(obj=True)
self.assertRaises(exception.NotFound,
self.connection.get_info,
fake_instance)
@catch_notimplementederror
def test_get_diagnostics(self):
instance_ref, network_info = self._get_running_instance(obj=True)
self.connection.get_diagnostics(instance_ref)
@catch_notimplementederror
def test_get_instance_diagnostics(self):
instance_ref, network_info = self._get_running_instance(obj=True)
instance_ref['launched_at'] = timeutils.utcnow()
self.connection.get_instance_diagnostics(instance_ref)
@catch_notimplementederror
def test_block_stats(self):
instance_ref, network_info = self._get_running_instance()
stats = self.connection.block_stats(instance_ref, 'someid')
self.assertEqual(len(stats), 5)
@catch_notimplementederror
def test_get_console_output(self):
fake_libvirt_utils.files['dummy.log'] = ''
instance_ref, network_info = self._get_running_instance()
console_output = self.connection.get_console_output(self.ctxt,
instance_ref)
self.assertIsInstance(console_output, six.string_types)
@catch_notimplementederror
def test_get_vnc_console(self):
instance, network_info = self._get_running_instance(obj=True)
vnc_console = self.connection.get_vnc_console(self.ctxt, instance)
self.assertIsInstance(vnc_console, ctype.ConsoleVNC)
@catch_notimplementederror
def test_get_spice_console(self):
instance_ref, network_info = self._get_running_instance()
spice_console = self.connection.get_spice_console(self.ctxt,
instance_ref)
self.assertIsInstance(spice_console, ctype.ConsoleSpice)
@catch_notimplementederror
def test_get_rdp_console(self):
instance_ref, network_info = self._get_running_instance()
rdp_console = self.connection.get_rdp_console(self.ctxt, instance_ref)
self.assertIsInstance(rdp_console, ctype.ConsoleRDP)
@catch_notimplementederror
def test_get_serial_console(self):
instance_ref, network_info = self._get_running_instance()
serial_console = self.connection.get_serial_console(self.ctxt,
instance_ref)
self.assertIsInstance(serial_console, ctype.ConsoleSerial)
@catch_notimplementederror
def test_get_mks_console(self):
instance_ref, network_info = self._get_running_instance()
mks_console = self.connection.get_mks_console(self.ctxt,
instance_ref)
self.assertIsInstance(mks_console, ctype.ConsoleMKS)
@catch_notimplementederror
def test_get_console_pool_info(self):
instance_ref, network_info = self._get_running_instance()
console_pool = self.connection.get_console_pool_info(instance_ref)
self.assertIn('address', console_pool)
self.assertIn('username', console_pool)
self.assertIn('password', console_pool)
@catch_notimplementederror
def test_refresh_security_group_rules(self):
# FIXME: Create security group and add the instance to it
instance_ref, network_info = self._get_running_instance()
self.connection.refresh_security_group_rules(1)
@catch_notimplementederror
def test_refresh_instance_security_rules(self):
# FIXME: Create security group and add the instance to it
instance_ref, network_info = self._get_running_instance()
self.connection.refresh_instance_security_rules(instance_ref)
@catch_notimplementederror
def test_ensure_filtering_for_instance(self):
instance = test_utils.get_test_instance(obj=True)
network_info = test_utils.get_test_network_info()
self.connection.ensure_filtering_rules_for_instance(instance,
network_info)
@catch_notimplementederror
def test_unfilter_instance(self):
instance_ref = test_utils.get_test_instance()
network_info = test_utils.get_test_network_info()
self.connection.unfilter_instance(instance_ref, network_info)
def test_live_migration(self):
instance_ref, network_info = self._get_running_instance()
fake_context = context.RequestContext('fake', 'fake')
migration = objects.Migration(context=fake_context, id=1)
migrate_data = objects.LibvirtLiveMigrateData(
migration=migration, bdms=[], block_migration=False)
self.connection.live_migration(self.ctxt, instance_ref, 'otherhost',
lambda *a: None, lambda *a: None,
migrate_data=migrate_data)
@catch_notimplementederror
def test_live_migration_force_complete(self):
instance_ref, network_info = self._get_running_instance()
self.connection.live_migration_force_complete(instance_ref)
@catch_notimplementederror
def test_live_migration_abort(self):
instance_ref, network_info = self._get_running_instance()
self.connection.live_migration_abort(instance_ref)
@catch_notimplementederror
def _check_available_resource_fields(self, host_status):
keys = ['vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'hypervisor_type', 'hypervisor_version',
'hypervisor_hostname', 'cpu_info', 'disk_available_least',
'supported_instances']
for key in keys:
self.assertIn(key, host_status)
self.assertIsInstance(host_status['hypervisor_version'], int)
@catch_notimplementederror
def test_get_available_resource(self):
available_resource = self.connection.get_available_resource(
'myhostname')
self._check_available_resource_fields(available_resource)
@catch_notimplementederror
def test_get_available_nodes(self):
self.connection.get_available_nodes(False)
@catch_notimplementederror
def _check_host_cpu_status_fields(self, host_cpu_status):
self.assertIn('kernel', host_cpu_status)
self.assertIn('idle', host_cpu_status)
self.assertIn('user', host_cpu_status)
self.assertIn('iowait', host_cpu_status)
self.assertIn('frequency', host_cpu_status)
@catch_notimplementederror
def test_get_host_cpu_stats(self):
host_cpu_status = self.connection.get_host_cpu_stats()
self._check_host_cpu_status_fields(host_cpu_status)
@catch_notimplementederror
def test_set_host_enabled(self):
self.connection.set_host_enabled(True)
@catch_notimplementederror
def test_get_host_uptime(self):
self.connection.get_host_uptime()
@catch_notimplementederror
def test_host_power_action_reboot(self):
self.connection.host_power_action('reboot')
@catch_notimplementederror
def test_host_power_action_shutdown(self):
self.connection.host_power_action('shutdown')
@catch_notimplementederror
def test_host_power_action_startup(self):
self.connection.host_power_action('startup')
@catch_notimplementederror
def test_add_to_aggregate(self):
self.connection.add_to_aggregate(self.ctxt, 'aggregate', 'host')
@catch_notimplementederror
def test_remove_from_aggregate(self):
self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host')
def test_events(self):
got_events = []
def handler(event):
got_events.append(event)
self.connection.register_event_listener(handler)
event1 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STARTED)
event2 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_PAUSED)
self.connection.emit_event(event1)
self.connection.emit_event(event2)
want_events = [event1, event2]
self.assertEqual(want_events, got_events)
event3 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_RESUMED)
event4 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STOPPED)
self.connection.emit_event(event3)
self.connection.emit_event(event4)
want_events = [event1, event2, event3, event4]
self.assertEqual(want_events, got_events)
def test_event_bad_object(self):
# Passing in something which does not inherit
# from virtevent.Event
def handler(event):
pass
self.connection.register_event_listener(handler)
badevent = {
"foo": "bar"
}
self.assertRaises(ValueError,
self.connection.emit_event,
badevent)
def test_event_bad_callback(self):
# Check that if a callback raises an exception,
# it does not propagate back out of the
# 'emit_event' call
def handler(event):
raise Exception("Hit Me!")
self.connection.register_event_listener(handler)
event1 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STARTED)
self.connection.emit_event(event1)
def test_set_bootable(self):
self.assertRaises(NotImplementedError, self.connection.set_bootable,
'instance', True)
@catch_notimplementederror
def test_get_instance_disk_info(self):
        # This should be implemented by any driver that supports live migration.
instance_ref, network_info = self._get_running_instance()
self.connection.get_instance_disk_info(instance_ref,
block_device_info={})
@catch_notimplementederror
def test_get_device_name_for_instance(self):
instance, _ = self._get_running_instance()
self.connection.get_device_name_for_instance(
instance, [], mock.Mock(spec=objects.BlockDeviceMapping))
def test_network_binding_host_id(self):
# NOTE(jroll) self._get_running_instance calls spawn(), so we can't
# use it to test this method. Make a simple object instead; we just
# need instance.host.
instance = objects.Instance(self.ctxt, host='somehost')
self.assertEqual(instance.host,
self.connection.network_binding_host_id(self.ctxt, instance))
class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
self.driver_module = "nova.virt.driver.ComputeDriver"
super(AbstractDriverTestCase, self).setUp()
def test_live_migration(self):
self.skipTest('Live migration is not implemented in the base '
'virt driver.')
class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
self.driver_module = 'nova.virt.fake.FakeDriver'
fake.set_nodes(['myhostname'])
super(FakeConnectionTestCase, self).setUp()
def _check_available_resource_fields(self, host_status):
super(FakeConnectionTestCase, self)._check_available_resource_fields(
host_status)
hypervisor_type = host_status['hypervisor_type']
supported_instances = host_status['supported_instances']
try:
# supported_instances could be JSON wrapped
supported_instances = jsonutils.loads(supported_instances)
except TypeError:
pass
self.assertTrue(any(hypervisor_type in x for x in supported_instances))
class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
# Point _VirtDriverTestCase at the right module
self.driver_module = 'nova.virt.libvirt.LibvirtDriver'
super(LibvirtConnTestCase, self).setUp()
self.stubs.Set(self.connection,
'_set_host_enabled', mock.MagicMock())
self.useFixture(fixtures.MonkeyPatch(
'nova.context.get_admin_context',
self._fake_admin_context))
# This is needed for the live migration tests which spawn off the
# operation for monitoring.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
def _fake_admin_context(self, *args, **kwargs):
return self.ctxt
def test_force_hard_reboot(self):
self.flags(wait_soft_reboot_seconds=0, group='libvirt')
self.test_reboot()
def test_migrate_disk_and_power_off(self):
        # There is a lack of fake stuff to execute this method, so skip it.
        self.skipTest("Tests nothing, but this method is"
                      " needed to override the superclass.")
def test_internal_set_host_enabled(self):
self.mox.UnsetStubs()
service_mock = mock.MagicMock()
# Previous status of the service: disabled: False
service_mock.configure_mock(disabled_reason='None',
disabled=False)
with mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock):
self.connection._set_host_enabled(False, 'ERROR!')
self.assertTrue(service_mock.disabled)
self.assertEqual(service_mock.disabled_reason, 'AUTO: ERROR!')
def test_set_host_enabled_when_auto_disabled(self):
self.mox.UnsetStubs()
service_mock = mock.MagicMock()
# Previous status of the service: disabled: True, 'AUTO: ERROR'
service_mock.configure_mock(disabled_reason='AUTO: ERROR',
disabled=True)
with mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock):
self.connection._set_host_enabled(True)
self.assertFalse(service_mock.disabled)
self.assertIsNone(service_mock.disabled_reason)
def test_set_host_enabled_when_manually_disabled(self):
self.mox.UnsetStubs()
service_mock = mock.MagicMock()
# Previous status of the service: disabled: True, 'Manually disabled'
service_mock.configure_mock(disabled_reason='Manually disabled',
disabled=True)
with mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock):
self.connection._set_host_enabled(True)
self.assertTrue(service_mock.disabled)
self.assertEqual(service_mock.disabled_reason, 'Manually disabled')
def test_set_host_enabled_dont_override_manually_disabled(self):
self.mox.UnsetStubs()
service_mock = mock.MagicMock()
# Previous status of the service: disabled: True, 'Manually disabled'
service_mock.configure_mock(disabled_reason='Manually disabled',
disabled=True)
with mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock):
self.connection._set_host_enabled(False, 'ERROR!')
self.assertTrue(service_mock.disabled)
self.assertEqual(service_mock.disabled_reason, 'Manually disabled')
@catch_notimplementederror
@mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs')
def test_unplug_vifs_with_destroy_vifs_false(self, unplug_vifs_mock):
instance_ref, network_info = self._get_running_instance()
self.connection.cleanup(self.ctxt, instance_ref, network_info,
destroy_vifs=False)
self.assertEqual(unplug_vifs_mock.call_count, 0)
@catch_notimplementederror
@mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs')
def test_unplug_vifs_with_destroy_vifs_true(self, unplug_vifs_mock):
instance_ref, network_info = self._get_running_instance()
self.connection.cleanup(self.ctxt, instance_ref, network_info,
destroy_vifs=True)
self.assertEqual(unplug_vifs_mock.call_count, 1)
unplug_vifs_mock.assert_called_once_with(instance_ref,
network_info, True)
def test_get_device_name_for_instance(self):
self.skipTest("Tested by the nova.tests.unit.virt.libvirt suite")
@catch_notimplementederror
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch("nova.virt.libvirt.host.Host.has_min_version")
def test_set_admin_password(self, ver, mock_image):
self.flags(virt_type='kvm', group='libvirt')
mock_image.return_value = {"properties": {
"hw_qemu_guest_agent": "yes"}}
instance, network_info = self._get_running_instance(obj=True)
self.connection.set_admin_password(instance, 'p4ssw0rd')
|
CoherentLabs/depot_tools
|
refs/heads/master
|
third_party/boto/ses/__init__.py
|
72
|
# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Harry Marr http://hmarr.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from connection import SESConnection
from boto.regioninfo import RegionInfo
def regions():
"""
Get all available regions for the SES service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo` instances
"""
return [RegionInfo(name='us-east-1',
endpoint='email.us-east-1.amazonaws.com',
connection_cls=SESConnection)]
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.ses.connection.SESConnection`.
    :type region_name: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.ses.connection.SESConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
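# Illustrative usage (a hedged sketch; the credentials below are placeholders,
# not real keys):
#
#     conn = connect_to_region('us-east-1',
#                              aws_access_key_id='<access key>',
#                              aws_secret_access_key='<secret key>')
#     if conn is not None:
#         ...  # e.g. conn.send_email(source, subject, body, to_addresses)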
|
donspaulding/adspygoogle
|
refs/heads/master
|
examples/adspygoogle/dfp/v201203/get_order.py
|
2
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets an order by its id. To determine which orders exist,
run get_all_orders.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If the module was installed via the "setup.py"
# script, then the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
order_service = client.GetService(
'OrderService', 'https://www.google.com', 'v201203')
# Set the id of the order to get.
order_id = 'INSERT_ORDER_ID_HERE'
# Get order.
order = order_service.GetOrder(order_id)[0]
# Display results.
print ('Order with id \'%s\', name \'%s\', and advertiser id \'%s\' was '
'found.' % (order['id'], order['name'], order['advertiserId']))
|
PetePriority/home-assistant
|
refs/heads/dev
|
homeassistant/components/zha/core/helpers.py
|
1
|
"""
Helpers for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import asyncio
import logging
from .const import (
DEFAULT_BAUDRATE, REPORT_CONFIG_MAX_INT, REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_RPT_CHANGE, RadioType)
_LOGGER = logging.getLogger(__name__)
async def safe_read(cluster, attributes, allow_cache=True, only_cache=False,
manufacturer=None):
"""Swallow all exceptions from network read.
    If we throw during initialization, setup fails. It is better to have an
    entity that exists, but is possibly in a wrong state, than to have no
    entity at all. This method should probably only be used during
    initialization.
"""
try:
result, _ = await cluster.read_attributes(
attributes,
allow_cache=allow_cache,
only_cache=only_cache,
manufacturer=manufacturer
)
return result
except Exception: # pylint: disable=broad-except
return {}
async def bind_cluster(entity_id, cluster):
"""Bind a zigbee cluster.
This also swallows DeliveryError exceptions that are thrown when devices
are unreachable.
"""
from zigpy.exceptions import DeliveryError
cluster_name = cluster.ep_attribute
try:
res = await cluster.bind()
_LOGGER.debug(
"%s: bound '%s' cluster: %s", entity_id, cluster_name, res[0]
)
except DeliveryError as ex:
_LOGGER.debug(
"%s: Failed to bind '%s' cluster: %s",
entity_id, cluster_name, str(ex)
)
async def configure_reporting(entity_id, cluster, attr,
min_report=REPORT_CONFIG_MIN_INT,
max_report=REPORT_CONFIG_MAX_INT,
reportable_change=REPORT_CONFIG_RPT_CHANGE,
manufacturer=None):
"""Configure attribute reporting for a cluster.
This also swallows DeliveryError exceptions that are thrown when devices
are unreachable.
"""
from zigpy.exceptions import DeliveryError
attr_name = cluster.attributes.get(attr, [attr])[0]
attr_id = get_attr_id_by_name(cluster, attr_name)
cluster_name = cluster.ep_attribute
kwargs = {}
if manufacturer:
kwargs['manufacturer'] = manufacturer
try:
res = await cluster.configure_reporting(attr_id, min_report,
max_report, reportable_change,
**kwargs)
_LOGGER.debug(
"%s: reporting '%s' attr on '%s' cluster: %d/%d/%d: Result: '%s'",
entity_id, attr_name, cluster_name, min_report, max_report,
reportable_change, res
)
except DeliveryError as ex:
_LOGGER.debug(
"%s: failed to set reporting for '%s' attr on '%s' cluster: %s",
entity_id, attr_name, cluster_name, str(ex)
)
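# Illustrative note (a hedged example of how the reporting arguments combine):
#
#     await configure_reporting(entity_id, cluster, attr, min_report=0,
#                               max_report=600, reportable_change=1)
#
# asks the device to report `attr` whenever it changes by at least 1 and at
# least once every 600 seconds, subject to the device honouring the request.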
async def bind_configure_reporting(entity_id, cluster, attr, skip_bind=False,
min_report=REPORT_CONFIG_MIN_INT,
max_report=REPORT_CONFIG_MAX_INT,
reportable_change=REPORT_CONFIG_RPT_CHANGE,
manufacturer=None):
"""Bind and configure zigbee attribute reporting for a cluster.
This also swallows DeliveryError exceptions that are thrown when devices
are unreachable.
"""
if not skip_bind:
await bind_cluster(entity_id, cluster)
await configure_reporting(entity_id, cluster, attr,
min_report=min_report,
max_report=max_report,
reportable_change=reportable_change,
manufacturer=manufacturer)
async def check_zigpy_connection(usb_path, radio_type, database_path):
"""Test zigpy radio connection."""
if radio_type == RadioType.ezsp.name:
import bellows.ezsp
from bellows.zigbee.application import ControllerApplication
radio = bellows.ezsp.EZSP()
elif radio_type == RadioType.xbee.name:
import zigpy_xbee.api
from zigpy_xbee.zigbee.application import ControllerApplication
radio = zigpy_xbee.api.XBee()
elif radio_type == RadioType.deconz.name:
import zigpy_deconz.api
from zigpy_deconz.zigbee.application import ControllerApplication
radio = zigpy_deconz.api.Deconz()
try:
await radio.connect(usb_path, DEFAULT_BAUDRATE)
controller = ControllerApplication(radio, database_path)
await asyncio.wait_for(controller.startup(auto_form=True), timeout=30)
radio.close()
except Exception: # pylint: disable=broad-except
return False
return True
def convert_ieee(ieee_str):
"""Convert given ieee string to EUI64."""
from zigpy.types import EUI64, uint8_t
return EUI64([uint8_t(p, base=16) for p in ieee_str.split(':')])
def construct_unique_id(cluster):
"""Construct a unique id from a cluster."""
return "0x{:04x}:{}:0x{:04x}".format(
cluster.endpoint.device.nwk,
cluster.endpoint.endpoint_id,
cluster.cluster_id
)
def get_attr_id_by_name(cluster, attr_name):
"""Get the attribute id for a cluster attribute by its name."""
return next((attrid for attrid, (attrname, datatype) in
cluster.attributes.items() if attr_name == attrname), None)
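# Illustrative usage of the helpers above (hedged; the IEEE address is a
# made-up example):
#
#     ieee = convert_ieee('00:0d:6f:00:0a:90:69:e7')  # -> zigpy EUI64
#     unique_id = construct_unique_id(cluster)        # e.g. '0x1234:1:0x0006'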
|
attm2x/m2x-python
|
refs/heads/master
|
m2x/tests/common.py
|
1
|
from m2x.client import M2XClient
from m2x.api import APIBase, Response
class DummyResponse(Response):
def __init__(self, response):
super(DummyResponse, self).__init__(
response=response,
raw=response,
status=response.status_code,
headers=response.headers,
json=response.json
)
class _Response(object):
def __init__(self, request):
self.request = request
self.url = request.url
self.method = request.method
self.apikey = request.apikey
self.kwargs = request.kwargs
self.status_code = 200
self.headers = request.kwargs
self.json = None
class DummyRequest(object):
def __init__(self, url, method, apikey, **kwargs):
self.url = url
self.method = method
self.apikey = apikey
self.kwargs = kwargs
def response(self):
return _Response(self)
class DummyAPI(APIBase):
PATH = '/dummy'
def client_endpoint(self):
return self.client.endpoint
def request(self, path, apikey=None, method='GET', **kwargs):
apikey = apikey or self.apikey
request = DummyRequest(self.url(path), method, apikey, **kwargs)
response = request.response()
self.last_response = DummyResponse(response)
return response
class DummyClient(M2XClient):
ENDPOINT = 'http://api.m2x.com'
def __init__(self, key, api=DummyAPI, endpoint=None, **kwargs):
super(DummyClient, self).__init__(key, api, endpoint, **kwargs)
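# Minimal usage sketch for the test doubles above (the key and path are
# placeholder values):
#
#     request = DummyRequest('http://api.m2x.com/dummy/status', 'GET', 'fake-key')
#     response = request.response()
#     assert response.status_code == 200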
|
rednach/krill
|
refs/heads/master
|
test/test_freshness.py
|
18
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestFreshness(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_freshness.cfg')
    # Check that check_freshness is doing its job
def test_check_freshness(self):
self.print_header()
        # We want an event handler (the perfdata command) to be put in the
        # actions dict after we get a service check
now = time.time()
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.active_checks_enabled = False
self.assertEqual(True, svc.check_freshness)
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
# We do not want to be just a string but a real command
print "Additonal freshness latency", svc.__class__.additional_freshness_latency
self.scheduler_loop(1, [[svc, 0, 'OK | bibi=99%']])
print "Addi:", svc.last_state_update, svc.freshness_threshold, svc.check_freshness
        # By default check_freshness is set to False, so no new checks
self.assertEqual(0, len(svc.actions))
svc.do_check_freshness()
self.assertEqual(0, len(svc.actions))
# We make it 10s less than it was
svc.last_state_update = svc.last_state_update - 10
#svc.check_freshness = True
        # Now we activate it with a too small value: now - 10s is still higher
        # than now - (1 + 15), the threshold plus the additional latency
# So still no check
svc.freshness_threshold = 1
print "Addi:", svc.last_state_update, svc.freshness_threshold, svc.check_freshness
svc.do_check_freshness()
self.assertEqual(0, len(svc.actions))
        # Now activate the freshness checks globally
cmd = "[%lu] ENABLE_SERVICE_FRESHNESS_CHECKS" % now
self.sched.run_external_command(cmd)
        # Ok, now we remove another 10s. Here we will see the new entry
svc.last_state_update = svc.last_state_update - 10
svc.do_check_freshness()
self.assertEqual(1, len(svc.actions))
# And we check for the message in the log too
self.assert_any_log_match('The results of service.*')
if __name__ == '__main__':
unittest.main()
|
eddiel/Play
|
refs/heads/master
|
applications/welcome/languages/uk.py
|
134
|
# coding: utf8
{
'!langcode!': 'uk',
'!langname!': 'Українська',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"Оновити" це додатковий вираз, такий, як "field1=\'нове_значення\'". Ви не можете змінювати або вилучати дані об\'єднаних таблиць.',
'%d days ago': '%d %%{день} тому',
'%d hours ago': '%d %%{годину} тому',
'%d minutes ago': '%d %%{хвилину} тому',
'%d months ago': '%d %%{місяць} тому',
'%d secods ago': '%d %%{секунду} тому',
'%d weeks ago': '%d %%{тиждень} тому',
'%d years ago': '%d %%{рік} тому',
'%s %%{row} deleted': 'Вилучено %s %%{рядок}',
'%s %%{row} updated': 'Змінено %s %%{рядок}',
'%s selected': 'Вибрано %s %%{запис}',
'%Y-%m-%d': '%Y/%m/%d',
'%Y-%m-%d %H:%M:%S': '%Y/%m/%d %H:%M:%S',
'1 day ago': '1 день тому',
'1 hour ago': '1 годину тому',
'1 minute ago': '1 хвилину тому',
'1 month ago': '1 місяць тому',
'1 second ago': '1 секунду тому',
'1 week ago': '1 тиждень тому',
'1 year ago': '1 рік тому',
'@markmin\x01(**%.0d MB**)': '(**``%.0d``:red МБ**)',
'@markmin\x01**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{елемент(items)}, **%(bytes)s** %%{байт(bytes)}',
'@markmin\x01``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**нема в наявності**``:red (потребує Пітонівської бібліотеки [[guppy [посилання відкриється у новому вікні] http://pypi.python.org/pypi/guppy/ popup]])',
'@markmin\x01An error occured, please [[reload %s]] the page': 'Сталась помилка, будь-ласка [[перевантажте %s]] сторінку',
'@markmin\x01Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': "Час життя об'єктів в КЕШІ сягає **%(hours)02d** %%{годину(hours)} **%(min)02d** %%{хвилину(min)} та **%(sec)02d** %%{секунду(sec)}.",
'@markmin\x01DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': "Час життя об'єктів в ДИСКОВОМУ КЕШІ сягає **%(hours)02d** %%{годину(hours)} **%(min)02d** %%{хвилину(min)} та **%(sec)02d** %%{секунду(sec)}.",
'@markmin\x01Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Оцінка поцілювання: **%(ratio)s%%** (**%(hits)s** %%{поцілювання(hits)} та **%(misses)s** %%{схибнення(misses)})',
'@markmin\x01Number of entries: **%s**': 'Кількість входжень: ``**%s**``:red',
'@markmin\x01RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': "Час життя об'єктів в ОЗП-КЕШІ сягає **%(hours)02d** %%{годину(hours)} **%(min)02d** %%{хвилину(min)} та **%(sec)02d** %%{секунду(sec)}.",
'About': 'Про додаток',
'Access Control': 'Контроль доступу',
'Administrative Interface': 'Адміністративний інтерфейс',
'Ajax Recipes': 'Рецепти для Ajax',
'appadmin is disabled because insecure channel': 'використовується незахищенний канал (HTTP). Appadmin вимкнено',
'Are you sure you want to delete this object?': "Ви впевнені, що хочете вилучити цей об'єкт?",
'Available Databases and Tables': 'Доступні бази даних та таблиці',
'Buy this book': 'Купити книжку',
'cache': 'кеш',
'Cache': 'Кеш',
'Cache Keys': 'Ключі кешу',
'Cannot be empty': 'Порожнє значення неприпустиме',
'Change password': 'Змінити пароль',
'Check to delete': 'Позначити для вилучення',
'Check to delete:': 'Позначте для вилучення:',
'Clear CACHE?': 'Очистити ВЕСЬ кеш?',
'Clear DISK': 'Очистити ДИСКОВИЙ кеш',
'Clear RAM': "Очистити кеш В ПАМ'ЯТІ",
'Client IP': 'IP клієнта',
'Community': 'Спільнота',
'Components and Plugins': 'Компоненти та втулки',
'Controller': 'Контролер',
'Copyright': 'Правовласник',
'Created By': 'Створив(ла)',
'Created On': 'Створено в',
'Current request': 'Поточний запит (current request)',
'Current response': 'Поточна відповідь (current response)',
'Current session': 'Поточна сесія (current session)',
'customize me!': 'причепуріть мене!',
'data uploaded': 'дані завантажено',
'Database': 'База даних',
'Database %s select': 'Вибірка з бази даних %s',
'Database Administration (appadmin)': 'Адміністрування Бази Даних (appadmin)',
'db': 'база даних',
'DB Model': 'Модель БД',
'Delete:': 'Вилучити:',
'Demo': 'Демо',
'Deployment Recipes': 'Способи розгортання',
'Description': 'Опис',
'design': 'налаштування',
'DISK': 'ДИСК',
'Disk Cache Keys': 'Ключі дискового кешу',
'Disk Cleared': 'Дисковий кеш очищено',
'Documentation': 'Документація',
"Don't know what to do?": 'Не знаєте що робити далі?',
'done!': 'зроблено!',
'Download': 'Завантажити',
'E-mail': 'Ел.пошта',
'edit': 'редагувати',
'Edit current record': 'Редагувати поточний запис',
'Edit Page': 'Редагувати сторінку',
'Email and SMS': 'Ел.пошта та SMS',
'enter a value': 'введіть значення',
'enter an integer between %(min)g and %(max)g': 'введіть ціле число між %(min)g та %(max)g',
'Error!': 'Помилка!',
'Errors': 'Помилки',
'Errors in form, please check it out.': 'У формі є помилка. Виправте її, будь-ласка.',
'export as csv file': 'експортувати як файл csv',
'FAQ': 'ЧаПи (FAQ)',
'First name': "Ім'я",
'Forgot username?': "Забули ім'я користувача?",
'Forms and Validators': 'Форми та коректність даних',
'Free Applications': 'Вільні додатки',
'Graph Model': 'Графова Модель',
'Group %(group_id)s created': 'Групу %(group_id)s створено',
'Group ID': 'Ідентифікатор групи',
'Group uniquely assigned to user %(id)s': "Група унікально зв'язана з користувачем %(id)s",
'Groups': 'Групи',
'Hello World': 'Привіт, світ!',
'Home': 'Початок',
'How did you get here?': 'Як цього було досягнуто?',
'import': 'Імпортувати',
'Import/Export': 'Імпорт/Експорт',
'insert new': 'Створити новий запис',
'insert new %s': 'створити новий запис %s',
'Internal State': 'Внутрішній стан',
'Introduction': 'Введення',
'Invalid email': 'Невірна адреса ел.пошти',
'Invalid login': "Невірне ім'я користувача",
'Invalid password': 'Невірний пароль',
'Invalid Query': 'Помилковий запит',
'invalid request': 'хибний запит',
'Is Active': 'Активна',
'Key': 'Ключ',
'Last name': 'Прізвище',
'Layout': 'Макет (Layout)',
'Layout Plugins': 'Втулки макетів',
'Layouts': 'Макети',
'Live Chat': 'Чат',
'Logged in': 'Вхід здійснено',
'Logged out': 'Вихід здійснено',
'Login': 'Вхід',
'Logout': 'Вихід',
'Lost Password': 'Забули пароль',
'Lost password?': 'Забули пароль?',
'Manage Cache': 'Управління кешем',
'Menu Model': 'Модель меню',
'Modified By': 'Зміни провадив(ла)',
'Modified On': 'Змінено в',
'My Sites': 'Сайт (усі додатки)',
'Name': "Ім'я",
'New password': 'Новий пароль',
'New Record': 'Новий запис',
'new record inserted': 'новий рядок додано',
'next 100 rows': 'наступні 100 рядків',
'No databases in this application': 'Даний додаток не використовує базу даних',
'now': 'зараз',
'Object or table name': "Об'єкт або назва таблиці",
'Old password': 'Старий пароль',
'Online examples': 'Зразковий демо-сайт',
'or import from csv file': 'або імпортувати з csv-файлу',
'Origin': 'Походження',
'Other Plugins': 'Інші втулки',
'Other Recipes': 'Інші рецепти',
'Overview': 'Огляд',
'Page Not Found!': 'Сторінку не знайдено!',
'Page saved': 'Сторінку збережено',
'Password': 'Пароль',
'Password changed': 'Пароль змінено',
"Password fields don't match": 'Пароль не співпав',
'please input your password again': 'Будь-ласка введіть пароль ще раз',
'Plugins': 'Втулки (Plugins)',
'Powered by': 'Працює на',
'Preface': 'Передмова',
'previous 100 rows': 'попередні 100 рядків',
'Profile': 'Параметри',
'Profile updated': 'Параметри змінено',
'pygraphviz library not found': 'Бібліотека pygraphviz не знайдена (не встановлена)',
'Python': 'Мова Python',
'Query:': 'Запит:',
'Quick Examples': 'Швидкі приклади',
'RAM': "ОПЕРАТИВНА ПАМ'ЯТЬ (ОЗП)",
'RAM Cache Keys': 'Ключі ОЗП-кешу',
'Ram Cleared': 'ОЗП-кеш очищено',
'Recipes': 'Рецепти',
'Record': 'запис',
'Record %(id)s updated': 'Запис %(id)s змінено',
'record does not exist': 'запису не існує',
'Record ID': 'Ід.запису',
'Record id': 'ід. запису',
'Record Updated': 'Запис змінено',
'Register': 'Реєстрація',
'Registration identifier': 'Реєстраційний ідентифікатор',
'Registration key': 'Реєстраційний ключ',
'Registration successful': 'Реєстрація пройшла успішно',
'Remember me (for 30 days)': "Запам'ятати мене (на 30 днів)",
'Request reset password': 'Запит на зміну пароля',
'Reset Password key': 'Ключ скидання пароля',
'Role': 'Роль',
'Rows in Table': 'Рядки в таблиці',
'Rows selected': 'Відмічено рядків',
'Save profile': 'Зберегти параметри',
'Semantic': 'Семантика',
'Services': 'Сервіс',
'Size of cache:': 'Розмір кешу:',
'state': 'стан',
'Statistics': 'Статистика',
'Stylesheet': 'CSS-стилі',
'submit': 'застосувати',
'Submit': 'Застосувати',
'Support': 'Підтримка',
'Table': 'Таблиця',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Запит" це умова, на зразок "db.table1.field1==\'значення\'". Вираз "db.table1.field1==db.table2.field2" повертає результат об\'єднання (SQL JOIN) таблиць.',
'The Core': 'Ядро',
'The output of the file is a dictionary that was rendered by the view %s': 'Результат функції - словник пар (назва=значення) було відображено з допомогою відображення (view) %s',
'The Views': 'Відображення (Views)',
'This App': 'Цей додаток',
'This email already has an account': 'Вказана адреса ел.пошти вже зареєстрована',
'Time in Cache (h:m:s)': 'Час знаходження в кеші (h:m:s)',
'Timestamp': 'Відмітка часу',
'too short': 'Занадто короткий',
'Twitter': 'Твіттер',
'unable to parse csv file': 'не вдається розібрати csv-файл',
'Update:': 'Оновити:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Для створення складних запитів використовуйте (...)&(...) замість AND, (...)|(...) замість OR, та ~(...) замість NOT.',
'User %(id)s Logged-in': 'Користувач %(id)s увійшов',
'User %(id)s Logged-out': 'Користувач %(id)s вийшов',
'User %(id)s Password changed': 'Користувач %(id)s змінив свій пароль',
'User %(id)s Password reset': 'Користувач %(id)s скинув пароль',
'User %(id)s Profile updated': 'Параметри користувача %(id)s змінено',
'User %(id)s Registered': 'Користувач %(id)s зареєструвався',
'User ID': 'Ід.користувача',
'value already in database or empty': 'значення вже в базі даних або порожнє',
'Verify Password': 'Повторити пароль',
'Videos': 'Відео',
'View': 'Відображення (View)',
'Welcome': 'Ласкаво просимо',
'Welcome to web2py!': 'Ласкаво просимо до web2py!',
'Which called the function %s located in the file %s': 'Управління передалось функції %s, яка розташована у файлі %s',
'Working...': 'Працюємо...',
'You are successfully running web2py': 'Ви успішно запустили web2py',
'You can modify this application and adapt it to your needs': 'Ви можете модифікувати цей додаток і адаптувати його до своїх потреб',
'You visited the url %s': 'Ви відвідали наступну адресу: %s',
}
|
acsone/website
|
refs/heads/8.0
|
website_backend_views/__init__.py
|
71
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Therp BV <http://therp.nl>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import model
from . import controllers
|
mwarkentin/django-watchman
|
refs/heads/master
|
sample_project/sample_project/wsgi.py
|
2
|
"""
WSGI config for sample_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sample_project.settings")
application = get_wsgi_application()
|
ravello/ansible
|
refs/heads/devel
|
v2/samples/multi_queues.py
|
38
|
#!/usr/bin/env python
import sys
import time
import Queue
import traceback
import multiprocessing
from ansible.inventory import Inventory
from ansible.inventory.host import Host
from ansible.playbook.play import Play
from ansible.playbook.task import Task
from ansible.executor.connection_info import ConnectionInformation
from ansible.executor.task_executor import TaskExecutor
from ansible.executor.task_result import TaskResult
from ansible.parsing import DataLoader
from ansible.vars import VariableManager
from ansible.utils.debug import debug
NUM_WORKERS = 20
NUM_HOSTS = 1778
NUM_TASKS = 1
def results(final_q, workers):
cur_worker = 0
def _read_worker_result(cur_worker):
result = None
starting_point = cur_worker
while True:
(worker_prc, main_q, res_q) = workers[cur_worker]
cur_worker += 1
if cur_worker >= len(workers):
cur_worker = 0
try:
if not res_q.empty():
debug("worker %d has data to read" % cur_worker)
result = res_q.get()
debug("got a result from worker %d: %s" % (cur_worker, result))
break
except:
pass
if cur_worker == starting_point:
break
return (result, cur_worker)
while True:
result = None
try:
(result, cur_worker) = _read_worker_result(cur_worker)
if result is None:
time.sleep(0.01)
continue
final_q.put(result, block=False)
except (IOError, EOFError, KeyboardInterrupt) as e:
debug("got a breaking error: %s" % e)
break
except Exception as e:
debug("EXCEPTION DURING RESULTS PROCESSING: %s" % e)
traceback.print_exc()
break
def worker(main_q, res_q, loader):
while True:
task = None
try:
if not main_q.empty():
(host, task, task_vars, conn_info) = main_q.get(block=False)
executor_result = TaskExecutor(host, task, task_vars, conn_info, loader).run()
debug("executor result: %s" % executor_result)
task_result = TaskResult(host, task, executor_result)
res_q.put(task_result)
else:
time.sleep(0.01)
except Queue.Empty:
pass
except (IOError, EOFError, KeyboardInterrupt) as e:
debug("got a breaking error: %s" % e)
break
except Exception as e:
debug("EXCEPTION DURING WORKER PROCESSING: %s" % e)
traceback.print_exc()
break
loader = DataLoader()
workers = []
for i in range(NUM_WORKERS):
main_q = multiprocessing.Queue()
res_q = multiprocessing.Queue()
worker_p = multiprocessing.Process(target=worker, args=(main_q, res_q, loader))
worker_p.start()
workers.append((worker_p, main_q, res_q))
res_q = multiprocessing.Queue()
res_p = multiprocessing.Process(target=results, args=(res_q, workers))
res_p.start()
def send_data(obj):
global cur_worker
global workers
global pending_results
(w_proc, main_q, wrkr_q) = workers[cur_worker]
cur_worker += 1
if cur_worker >= len(workers):
cur_worker = 0
pending_results += 1
main_q.put(obj, block=False)
def _process_pending_results():
global res_q
global pending_results
while not res_q.empty():
try:
result = res_q.get(block=False)
debug("got final result: %s" % (result,))
pending_results -= 1
except Queue.Empty:
pass
def _wait_on_pending_results():
global pending_results
while pending_results > 0:
debug("waiting for pending results (%d left)" % pending_results)
_process_pending_results()
time.sleep(0.01)
debug("starting")
cur_worker = 0
pending_results = 0
var_manager = VariableManager()
debug("loading inventory")
inventory = Inventory(host_list='/tmp/med_inventory', loader=loader, variable_manager=var_manager)
hosts = inventory.get_hosts()[:]
debug("done loading inventory")
ci = ConnectionInformation()
ci.connection = 'local'
for i in range(NUM_TASKS):
#for j in range(NUM_HOSTS):
for h in hosts:
debug("queuing %s %d" % (h, i))
#h = Host(name="host%06d" % j)
t = Task().load(dict(name="task %d" % (i,), debug="msg='hello from %s, %d'" % (h,i)))
#t = Task().load(dict(name="task %d" % (i,), ping=""))
#task_vars = var_manager.get_vars(loader=loader, host=h, task=t)
task_vars = dict()
new_t = t.copy()
new_t.post_validate(task_vars)
send_data((h, t, task_vars, ci))
debug("done queuing %s %d" % (h, i))
_process_pending_results()
debug("waiting for the results to drain...")
_wait_on_pending_results()
res_q.close()
res_p.terminate()
for (w_p, main_q, wrkr_q) in workers:
main_q.close()
wrkr_q.close()
w_p.terminate()
debug("done")
|
happeninghq/happening
|
refs/heads/master
|
src/events/migrations/0003_migrate_locations.py
|
2
|
from __future__ import unicode_literals
from django.db import migrations
def migrate_locations(apps, schema_editor):
# {"city": "", "country": "", "longitude": "", "latitude": "", "state": "", "postcode": "", "line_1": "", "line_3": "", "line_2": ""}
Event = apps.get_model('events', 'Event')
for event in Event.objects.all():
if event.location:
if "city" in event.location:
# Old style location
fields = [
event.location[f] for f in ["line_1", "line_2", "line_3", "city", "state", "postcode", "country"] if event.location[f]
]
longitude = event.location["longitude"]
latitude = event.location["latitude"]
event.location = {
"title": ", ".join(fields)
}
if longitude:
event.location["longitude"] = longitude
event.location["latitude"] = latitude
event.save()
class Migration(migrations.Migration):
dependencies = [
('events', '0002_auto_20170422_1128'),
]
operations = [
migrations.RunPython(migrate_locations, migrations.RunPython.noop),
]
|
kalahbrown/HueBigSQL
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/comment_tests/urls_default.py
|
133
|
from django.conf.urls import patterns, include
urlpatterns = patterns('',
(r'^', include('django.contrib.comments.urls')),
# Provide the auth system login and logout views
(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
(r'^accounts/logout/$', 'django.contrib.auth.views.logout'),
)
|
martinmcbride/pytexture
|
refs/heads/master
|
generativepy/drawing.py
|
1
|
# Author: Martin McBride
# Created: 2018-10-22
# Copyright (C) 2018, Martin McBride
# License: MIT
import cairo
import generativepy.utils
import numpy as np
# Text align
CENTER = 0
MIDDLE = 0
LEFT = 1
RIGHT = 2
TOP = 3
BOTTOM = 4
BASELINE = 5
# Fill rule
EVEN_ODD=0
WINDING=1
## Line cap/join
MITER = 0 # join
ROUND = 1 # join/cap
BEVEL = 2 # join
BUTT = 3 # cap
SQUARE = 4 # cap
## Line extension
SEGMENT = 0
RAY = 1
LINE = 2
def setup(ctx, pixel_width, pixel_height, width=None, height=None, startx=0, starty=0, background=None, flip=False):
'''
    Set up the context's initial scaling and background color
:param ctx: The context
:param pixel_width: The device space width
    :param pixel_height: The device space height
:param width: The user space width
    :param height: The user space height
:param startx: The x offset of the top left corner from the origin
:param starty: The y offset of the top left corner from the origin
:param background: Color of the background
:param flip: If true, user space is flipped in the y direction.
:return:
'''
if not height and not width:
width = pixel_width
height = pixel_height
elif not height:
height = width * pixel_height / pixel_width
elif not width:
width = height * pixel_width / pixel_height
if flip:
ctx.scale(1, -1)
ctx.translate(0, -pixel_height)
ctx.scale(pixel_width / width, pixel_height / height)
ctx.translate(-startx, -starty)
if background:
ctx.set_source_rgba(*background)
ctx.paint()
def make_image(outfile, draw, width, height, channels=3):
'''
Create a PNG file using cairo
:param outfile: Name of output file
:param draw: the draw function
:param width: width in pixels, int
:param height: height in pixels, int
:param channels: 3 for rgb, 4 for rgba
:return:
'''
if outfile.lower().endswith('.png'):
outfile = outfile[:-4]
fmt = cairo.FORMAT_ARGB32 if channels==4 else cairo.FORMAT_RGB24
surface = cairo.ImageSurface(fmt, width, height)
ctx = cairo.Context(surface)
draw(ctx, width, height, 0, 1)
surface.write_to_png(outfile + '.png')
def make_images(outfile, draw, width, height, count, channels=3):
'''
Create a sequence of PNG files using cairo
:param outfile: Base name of output files
:param draw: the draw function
:param width: width in pixels, int
:param height: height in pixels, int
:param count: number of frames to create
:param channels: 3 for rgb, 4 for rgba
    :return:
'''
if outfile.lower().endswith('.png'):
outfile = outfile[:-4]
for i in range(count):
fmt = cairo.FORMAT_ARGB32 if channels==4 else cairo.FORMAT_RGB24
surface = cairo.ImageSurface(fmt, width, height)
ctx = cairo.Context(surface)
draw(ctx, width, height, i, count)
surface.write_to_png(outfile + str(i).zfill(8) + '.png')
def make_image_frames(draw, width, height, count, channels=3):
'''
    Create a sequence of numpy frames using cairo
:param draw: the draw function
:param width: width in pixels, int
:param height: height in pixels, int
:param count: number of frames to create
:param channels: 3 for rgb, 4 for rgba
:return: a lazy sequence of frame buffers
'''
fmt = cairo.FORMAT_ARGB32 if channels==4 else cairo.FORMAT_RGB24
for i in range(count):
surface = cairo.ImageSurface(fmt, width, height)
ctx = cairo.Context(surface)
draw(ctx, width, height, i, count)
buf = surface.get_data()
a = np.frombuffer(buf, np.uint8)
a.shape = (height, width, 4)
a = generativepy.utils.correct_pycairo_byte_order(a, channels)
yield a
def make_image_frame(draw, width, height, channels=3):
'''
    Create a single numpy frame using cairo
:param draw: the draw function
:param width: width in pixels, int
:param height: height in pixels, int
:param channels: 3 for rgb, 4 for rgba
:return:
'''
fmt = cairo.FORMAT_ARGB32 if channels==4 else cairo.FORMAT_RGB24
surface = cairo.ImageSurface(fmt, width, height)
ctx = cairo.Context(surface)
draw(ctx, width, height, 0, 1)
buf = surface.get_data()
a = np.frombuffer(buf, np.uint8)
a.shape = (height, width, 4)
if channels==3:
a[:, :, [0, 1, 2]] = a[:, :, [2, 1, 0]]
elif channels==4:
a[:, :, [0, 1, 2, 3]] = a[:, :, [2, 1, 0, 3]]
return a
def make_svg(outfile, draw, width, height):
'''
Create an SVG file using cairo
    :param outfile: Name of output file
    :param draw: the draw function
    :param width: width in pixels, int
    :param height: height in pixels, int
:return:
'''
if outfile.lower().endswith('.svg'):
outfile = outfile[:-4]
surface = cairo.SVGSurface(outfile + '.svg', width, height)
ctx = cairo.Context(surface)
draw(ctx, width, height, 0, 1)
ctx.show_page()
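# Illustrative usage sketch (added for clarity, not part of the original module):
# assuming the generativepy package and pycairo are installed, a draw callback
# receives (ctx, pixel_width, pixel_height, frame_no, frame_count) and can call
# setup() to establish user space. The function name, colours and output file
# below are arbitrary examples.
if __name__ == '__main__':
    def example_draw(ctx, pixel_width, pixel_height, frame_no, frame_count):
        # map user space to a 10x10 square and paint a white background
        setup(ctx, pixel_width, pixel_height, width=10, background=(1, 1, 1, 1))
        ctx.set_source_rgb(0.2, 0.4, 0.8)
        ctx.rectangle(1, 1, 8, 8)
        ctx.fill()
    # render a single 400x400 pixel PNG named example.png
    make_image('example', example_draw, 400, 400)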
|
webmedic/booker
|
refs/heads/master
|
src/chardet/universaldetector.py
|
47
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
"""
Module containing the UniversalDetector detector class, which is the primary
class a user of ``chardet`` should use.
:author: Mark Pilgrim (initial port to Python)
:author: Shy Shalom (original C code)
:author: Dan Blanchard (major refactoring for 3.0)
:author: Ian Cordasco
"""
import codecs
import logging
import re
from .enums import InputState, LanguageFilter, ProbingState
from .escprober import EscCharSetProber
from .latin1prober import Latin1Prober
from .mbcsgroupprober import MBCSGroupProber
from .sbcsgroupprober import SBCSGroupProber
class UniversalDetector(object):
"""
The ``UniversalDetector`` class underlies the ``chardet.detect`` function
and coordinates all of the different charset probers.
To get a ``dict`` containing an encoding and its confidence, you can simply
run:
.. code::
u = UniversalDetector()
u.feed(some_bytes)
u.close()
detected = u.result
"""
MINIMUM_THRESHOLD = 0.20
HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]')
ESC_DETECTOR = re.compile(b'(\033|~{)')
def __init__(self, lang_filter=LanguageFilter.all):
self._esc_charset_prober = None
self._charset_probers = []
self.result = None
self.done = None
self._got_data = None
self._input_state = None
self._last_char = None
self.lang_filter = lang_filter
self.logger = logging.getLogger(__name__)
self.reset()
def reset(self):
"""
Reset the UniversalDetector and all of its probers back to their
initial states. This is called by ``__init__``, so you only need to
call this directly in between analyses of different documents.
"""
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._got_data = False
self._input_state = InputState.pure_ascii
self._last_char = b''
if self._esc_charset_prober:
self._esc_charset_prober.reset()
for prober in self._charset_probers:
prober.reset()
def feed(self, byte_str):
"""
Takes a chunk of a document and feeds it through all of the relevant
charset probers.
After calling ``feed``, you can check the value of the ``done``
attribute to see if you need to continue feeding the
``UniversalDetector`` more data, or if it has made a prediction
(in the ``result`` attribute).
.. note::
You should always call ``close`` when you're done feeding in your
document if ``done`` is not already ``True``.
"""
if self.done:
return
if not len(byte_str):
return
# First check for known BOMs, since these are guaranteed to be correct
if not self._got_data:
# If the data starts with BOM, we know it is UTF
if byte_str.startswith(codecs.BOM_UTF8):
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
elif byte_str.startswith(codecs.BOM_UTF32_LE):
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif byte_str.startswith(codecs.BOM_UTF32_BE):
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif byte_str.startswith(b'\xFE\xFF\x00\x00'):
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0}
elif byte_str.startswith(b'\x00\x00\xFF\xFE'):
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0}
elif byte_str.startswith(codecs.BOM_LE):
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif byte_str.startswith(codecs.BOM_BE):
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._got_data = True
if self.result['encoding'] is not None:
self.done = True
return
            # If none of those matched and we've only seen ASCII so far, check
# for high bytes and escape sequences
if self._input_state == InputState.pure_ascii:
if self.HIGH_BYTE_DETECTOR.search(byte_str):
self._input_state = InputState.high_byte
elif self._input_state == InputState.pure_ascii and \
self.ESC_DETECTOR.search(self._last_char + byte_str):
self._input_state = InputState.esc_ascii
self._last_char = byte_str[-1:]
# If we've seen escape sequences, use the EscCharSetProber, which
# uses a simple state machine to check for known escape sequences in
# HZ and ISO-2022 encodings, since those are the only encodings that
# use such sequences.
if self._input_state == InputState.esc_ascii:
if not self._esc_charset_prober:
self._esc_charset_prober = EscCharSetProber(self.lang_filter)
if self._esc_charset_prober.feed(byte_str) == ProbingState.found_it:
self.result = {'encoding':
self._esc_charset_prober.charset_name,
'confidence':
self._esc_charset_prober.get_confidence()}
self.done = True
# If we've seen high bytes (i.e., those with values greater than 127),
# we need to do more complicated checks using all our multi-byte and
# single-byte probers that are left. The single-byte probers
# use character bigram distributions to determine the encoding, whereas
# the multi-byte probers use a combination of character unigram and
# bigram distributions.
elif self._input_state == InputState.high_byte:
if not self._charset_probers:
self._charset_probers = [MBCSGroupProber(self.lang_filter)]
# If we're checking non-CJK encodings, use single-byte prober
if self.lang_filter & LanguageFilter.non_cjk:
self._charset_probers.append(SBCSGroupProber())
self._charset_probers.append(Latin1Prober())
for prober in self._charset_probers:
if prober.feed(byte_str) == ProbingState.found_it:
self.result = {'encoding': prober.charset_name,
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
"""
Stop analyzing the current document and come up with a final
prediction.
:returns: The ``result`` attribute if a prediction was made, otherwise
``None``.
"""
if self.done:
return self.result
if not self._got_data:
self.logger.debug('no data received!')
return
self.done = True
if self._input_state == InputState.pure_ascii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._input_state == InputState.high_byte:
proberConfidence = None
max_prober_confidence = 0.0
max_prober = None
for prober in self._charset_probers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > max_prober_confidence:
max_prober_confidence = proberConfidence
max_prober = prober
if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
self.result = {'encoding': max_prober.charset_name,
'confidence': max_prober.get_confidence()}
return self.result
if self.logger.getEffectiveLevel() == logging.DEBUG:
                self.logger.debug('no probers hit minimum threshold')
for prober in self._charset_probers[0].mProbers:
if not prober:
continue
self.logger.debug('%s confidence = %s', prober.charset_name,
prober.get_confidence())
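# Illustrative usage sketch (added for clarity, not part of the original module):
# a caller of the package would typically feed a document in chunks and stop as
# soon as ``done`` is set, as described in the docstrings above. The file name
# is an arbitrary example.
#
#     from chardet.universaldetector import UniversalDetector
#     detector = UniversalDetector()
#     with open('some_document.txt', 'rb') as handle:
#         for chunk in iter(lambda: handle.read(4096), b''):
#             detector.feed(chunk)
#             if detector.done:
#                 break
#     detector.close()
#     print(detector.result)  # e.g. {'encoding': 'UTF-8-SIG', 'confidence': 1.0}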
|
baffolobill/mb_test_1
|
refs/heads/master
|
src/mbtest1/erp_client/urls.py
|
1
|
# coding: utf-8
from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^$', views.NodeListView.as_view(), name='client-index'),
url(r'^nodes/', include([
url(r'^$', views.NodeListView.as_view(), name='node-list'),
url(r'^create/$', views.NodeCreateView.as_view(), name='node-create'),
url(r'^(?P<pk>\d+)/$', views.NodeDetailView.as_view(), name='node-detail'),
url(r'^(?P<pk>\d+)/update/$', views.NodeUpdateView.as_view(), name='node-update'),
url(r'^(?P<pk>\d+)/delete/$', views.NodeDeleteView.as_view(), name='node-delete'),
])),
url(r'^floors/', include([
url(r'^$', views.FloorListView.as_view(), name='floor-list'),
url(r'^create/$', views.FloorCreateView.as_view(), name='floor-create'),
url(r'^(?P<pk>\d+)/$', views.FloorDetailView.as_view(), name='floor-detail'),
url(r'^(?P<pk>\d+)/update/$', views.FloorUpdateView.as_view(), name='floor-update'),
url(r'^(?P<pk>\d+)/delete/$', views.FloorDeleteView.as_view(), name='floor-delete'),
])),
url(r'^rooms/', include([
url(r'^$', views.RoomListView.as_view(), name='room-list'),
url(r'^create/$', views.RoomCreateView.as_view(), name='room-create'),
url(r'^(?P<pk>\d+)/$', views.RoomDetailView.as_view(), name='room-detail'),
url(r'^(?P<pk>\d+)/update/$', views.RoomUpdateView.as_view(), name='room-update'),
url(r'^(?P<pk>\d+)/delete/$', views.RoomDeleteView.as_view(), name='room-delete'),
])),
url(r'^rows/', include([
url(r'^$', views.RowListView.as_view(), name='row-list'),
url(r'^create/$', views.RowCreateView.as_view(), name='row-create'),
url(r'^(?P<pk>\d+)/$', views.RowDetailView.as_view(), name='row-detail'),
url(r'^(?P<pk>\d+)/update/$', views.RowUpdateView.as_view(), name='row-update'),
url(r'^(?P<pk>\d+)/delete/$', views.RowDeleteView.as_view(), name='row-delete'),
])),
url(r'^racks/', include([
url(r'^$', views.RackListView.as_view(), name='rack-list'),
url(r'^create/$', views.RackCreateView.as_view(), name='rack-create'),
url(r'^(?P<pk>\d+)/$', views.RackDetailView.as_view(), name='rack-detail'),
url(r'^(?P<pk>\d+)/update/$', views.RackUpdateView.as_view(), name='rack-update'),
url(r'^(?P<pk>\d+)/delete/$', views.RackDeleteView.as_view(), name='rack-delete'),
url(r'^(?P<pk>\d+)/actions/$', views.RackActionsView.as_view(), name='rack-actions'),
])),
url(r'^baskets/', include([
url(r'^$', views.BasketListView.as_view(), name='basket-list'),
url(r'^create/$', views.BasketCreateView.as_view(), name='basket-create'),
url(r'^(?P<pk>\d+)/$', views.BasketDetailView.as_view(), name='basket-detail'),
url(r'^(?P<pk>\d+)/update/$', views.BasketUpdateView.as_view(), name='basket-update'),
url(r'^(?P<pk>\d+)/delete/$', views.BasketDeleteView.as_view(), name='basket-delete'),
url(r'^(?P<pk>\d+)/actions/$', views.BasketActionsView.as_view(), name='basket-actions'),
])),
url(r'^servers/', include([
url(r'^$', views.ServerListView.as_view(), name='server-list'),
url(r'^create/$', views.ServerCreateView.as_view(), name='server-create'),
url(r'^(?P<pk>\d+)/$', views.ServerDetailView.as_view(), name='server-detail'),
url(r'^(?P<pk>\d+)/update/$', views.ServerUpdateView.as_view(), name='server-update'),
url(r'^(?P<pk>\d+)/delete/$', views.ServerDeleteView.as_view(), name='server-delete'),
url(r'^(?P<pk>\d+)/actions/$', views.ServerActionsView.as_view(), name='server-actions'),
])),
url(r'^server-templates/', include([
url(r'^$', views.ServerTemplateListView.as_view(), name='servertemplate-list'),
url(r'^create/$', views.ServerTemplateCreateView.as_view(), name='servertemplate-create'),
url(r'^(?P<pk>\d+)/$', views.ServerTemplateDetailView.as_view(), name='servertemplate-detail'),
url(r'^(?P<pk>\d+)/update/$', views.ServerTemplateUpdateView.as_view(), name='servertemplate-update'),
url(r'^(?P<pk>\d+)/delete/$', views.ServerTemplateDeleteView.as_view(), name='servertemplate-delete'),
])),
url(r'^components/', include([
url(r'^$', views.ComponentListView.as_view(), name='component-list'),
url(r'^create/$', views.ComponentCreateView.as_view(), name='component-create'),
url(r'^(?P<pk>\d+)/$', views.ComponentDetailView.as_view(), name='component-detail'),
url(r'^(?P<pk>\d+)/update/$', views.ComponentUpdateView.as_view(), name='component-update'),
url(r'^(?P<pk>\d+)/delete/$', views.ComponentDeleteView.as_view(), name='component-delete'),
url(r'^(?P<pk>\d+)/update-properties/$', views.ComponentUpdatePropertiesView.as_view(), name='component-update-properties'),
])),
]
|
marinho/geraldo
|
refs/heads/master
|
site/newsite/site-geraldo/gae_wiki/views.py
|
9
|
from google.appengine.api import users
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.views.decorators.cache import cache_page, never_cache
from django.core.cache import cache
from django.template.loader import render_to_string
from utils.decorators import page, admin_required
from utils.shortcuts import get_object_or_404
from utils.serialization import serialize
from models import Wiki
from forms import FormWiki
import app_settings
#@cache_page(60)
def wiki(request, slug):
ret = cache.get(Wiki.get_cache_key(slug), None)
if not ret:
wiki = get_object_or_404(Wiki, slug=slug)
tpl = wiki.template or 'gae_wiki/wiki.html'
ret = render_to_response(
tpl,
locals(),
context_instance=RequestContext(request),
)
ret = ret._get_content()
if wiki.cacheable:
cache.set(Wiki.get_cache_key(slug), ret, app_settings.WIKI_CACHE_TIMEOUT)
return HttpResponse(ret)
@page('gae_wiki/sequence.html')
def wiki_sequence(request):
wikis = Wiki.all().order('sequence').filter('show_in_rss =', True)
return locals()
# Admin
@login_required
@admin_required
@page('admin/gae_wiki/index.html')
def admin_index(request):
list = Wiki.all()
return locals()
@never_cache
@login_required
@admin_required
@page('admin/gae_wiki/wiki/edit.html')
def admin_wiki_edit(request, id=None):
wiki = id and Wiki.get_by_id(int(id)) or None
if request.POST:
form = FormWiki(request.POST, files=request.FILES, instance=wiki)
if form.is_valid():
wiki = form.save(False)
wiki.author = users.get_current_user()
wiki.save()
return HttpResponseRedirect(reverse('gae_wiki.views.admin_index'))
else:
form = FormWiki(instance=wiki)
return locals()
@never_cache
@login_required
@admin_required
def admin_wiki_delete(request, id=None):
wiki = id and Wiki.get_by_id(int(id)) or None
if not wiki:
raise Http404
wiki.delete()
return HttpResponseRedirect('/admin/gae_wiki/')
@never_cache
@login_required
@admin_required
def admin_wiki_export_all(request):
items = Wiki.all().order('title')
#ret = render_to_string('gae_wiki/export_all.txt', locals())
ret = serialize(items)
return HttpResponse(ret, mimetype='text/xml')
|
tareqalayan/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/openstack/os_project.py
|
30
|
#!/usr/bin/python
# Copyright (c) 2015 IBM Corporation
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_project
short_description: Manage OpenStack Projects
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Alberto Gireud (@agireud)"
description:
- Manage OpenStack Projects. Projects can be created,
updated or deleted using this module. A project will be updated
if I(name) matches an existing project and I(state) is present.
The value for I(name) cannot be updated without deleting and
re-creating the project.
options:
name:
description:
- Name for the project
required: true
description:
description:
- Description for the project
domain_id:
description:
- Domain id to create the project in if the cloud supports domains.
aliases: ['domain']
enabled:
description:
- Is the project enabled
type: bool
default: 'yes'
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Create a project
- os_project:
cloud: mycloud
endpoint_type: admin
state: present
name: demoproject
description: demodescription
domain_id: demoid
enabled: True
# Delete a project
- os_project:
cloud: mycloud
endpoint_type: admin
state: absent
name: demoproject
'''
RETURN = '''
project:
description: Dictionary describing the project.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Project ID
type: string
sample: "f59382db809c43139982ca4189404650"
name:
description: Project name
type: string
sample: "demoproject"
description:
description: Project description
type: string
sample: "demodescription"
enabled:
description: Boolean to indicate if project is enabled
type: bool
sample: True
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _needs_update(module, project):
keys = ('description', 'enabled')
for key in keys:
if module.params[key] is not None and module.params[key] != project.get(key):
return True
return False
def _system_state_change(module, project):
state = module.params['state']
if state == 'present':
if project is None:
changed = True
else:
if _needs_update(module, project):
changed = True
else:
changed = False
elif state == 'absent':
if project is None:
changed = False
else:
changed = True
return changed
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
description=dict(required=False, default=None),
domain_id=dict(required=False, default=None, aliases=['domain']),
enabled=dict(default=True, type='bool'),
state=dict(default='present', choices=['absent', 'present'])
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
**module_kwargs
)
name = module.params['name']
description = module.params['description']
domain = module.params.get('domain_id')
enabled = module.params['enabled']
state = module.params['state']
sdk, cloud = openstack_cloud_from_module(module)
try:
if domain:
try:
# We assume admin is passing domain id
dom = cloud.get_domain(domain)['id']
domain = dom
except:
# If we fail, maybe admin is passing a domain name.
# Note that domains have unique names, just like id.
try:
dom = cloud.search_domains(filters={'name': domain})[0]['id']
domain = dom
except:
# Ok, let's hope the user is non-admin and passing a sane id
pass
if domain:
project = cloud.get_project(name, domain_id=domain)
else:
project = cloud.get_project(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, project))
if state == 'present':
if project is None:
project = cloud.create_project(
name=name, description=description,
domain_id=domain,
enabled=enabled)
changed = True
else:
if _needs_update(module, project):
project = cloud.update_project(
project['id'], description=description,
enabled=enabled)
changed = True
else:
changed = False
module.exit_json(changed=changed, project=project)
elif state == 'absent':
if project is None:
changed = False
else:
cloud.delete_project(project['id'])
changed = True
module.exit_json(changed=changed)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=e.message, extra_data=e.extra_data)
if __name__ == '__main__':
main()
|
pengli09/Paddle
|
refs/heads/develop
|
benchmark/tensorflow/rnn/reader.py
|
13
|
import os.path
import io
import numpy as np
import tensorflow as tf
# tflearn
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.datasets import imdb
FLAGS = tf.app.flags.FLAGS
class DataSet(object):
def __init__(self, data, labels):
assert data.shape[0] == labels.shape[0], (
'data.shape: %s labels.shape: %s' % (data.shape, labels.shape))
self._num_examples = data.shape[0]
self._data = data
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def data(self):
return self._data
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size):
assert batch_size <= self._num_examples
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._data = self._data[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
end = self._index_in_epoch
return self._data[start:end], self._labels[start:end]
def create_datasets(file_path, vocab_size=30000, val_fraction=0.0):
# IMDB Dataset loading
train, test, _ = imdb.load_data(
path=file_path,
n_words=vocab_size,
valid_portion=val_fraction,
sort_by_len=False)
trainX, trainY = train
testX, testY = test
# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=FLAGS.max_len, value=0.)
testX = pad_sequences(testX, maxlen=FLAGS.max_len, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)
train_dataset = DataSet(trainX, trainY)
return train_dataset
def main():
create_datasets('imdb.pkl')
if __name__ == "__main__":
main()
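# Illustrative usage sketch (added for clarity, not part of the original script):
# assuming FLAGS.max_len has been defined by the training script, mini-batches
# can be drawn from the returned DataSet in a loop, e.g.
#
#     train_dataset = create_datasets('imdb.pkl')
#     for _ in range(10):
#         batch_data, batch_labels = train_dataset.next_batch(32)
#         # batch_data.shape == (32, FLAGS.max_len); batch_labels.shape == (32, 2)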
|
valexandersaulys/prudential_insurance_kaggle
|
refs/heads/master
|
venv/lib/python2.7/site-packages/numpy/distutils/command/autodist.py
|
148
|
"""This module implements additional tests ala autoconf which can be useful.
"""
from __future__ import division, absolute_import, print_function
# We put them here since they could be easily reused outside numpy.distutils
def check_inline(cmd):
"""Return the inline identifier (may be empty)."""
cmd._check_compiler()
body = """
#ifndef __cplusplus
static %(inline)s int static_func (void)
{
return 0;
}
%(inline)s int nostatic_func (void)
{
return 0;
}
#endif"""
for kw in ['inline', '__inline__', '__inline']:
st = cmd.try_compile(body % {'inline': kw}, None, None)
if st:
return kw
return ''
def check_restrict(cmd):
"""Return the restrict identifier (may be empty)."""
cmd._check_compiler()
body = """
static int static_func (char * %(restrict)s a)
{
return 0;
}
"""
for kw in ['restrict', '__restrict__', '__restrict']:
st = cmd.try_compile(body % {'restrict': kw}, None, None)
if st:
return kw
return ''
def check_compiler_gcc4(cmd):
"""Return True if the C compiler is GCC 4.x."""
cmd._check_compiler()
body = """
int
main()
{
#if (! defined __GNUC__) || (__GNUC__ < 4)
#error gcc >= 4 required
#endif
return 0;
}
"""
return cmd.try_compile(body, None, None)
def check_gcc_function_attribute(cmd, attribute, name):
"""Return True if the given function attribute is supported."""
cmd._check_compiler()
body = """
#pragma GCC diagnostic error "-Wattributes"
#pragma clang diagnostic error "-Wattributes"
int %s %s(void*);
int
main()
{
return 0;
}
""" % (attribute, name)
return cmd.try_compile(body, None, None) != 0
def check_gcc_variable_attribute(cmd, attribute):
"""Return True if the given variable attribute is supported."""
cmd._check_compiler()
body = """
#pragma GCC diagnostic error "-Wattributes"
#pragma clang diagnostic error "-Wattributes"
int %s foo;
int
main()
{
return 0;
}
""" % (attribute, )
return cmd.try_compile(body, None, None) != 0
|
bdh1011/wau
|
refs/heads/master
|
venv/lib/python2.7/site-packages/setuptools/command/install_scripts.py
|
505
|
from distutils import log
import distutils.command.install_scripts as orig
import os
from pkg_resources import Distribution, PathMetadata, ensure_directory
class install_scripts(orig.install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
orig.install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
import setuptools.command.easy_install as ei
self.run_command("egg_info")
if self.distribution.scripts:
orig.install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
exec_param = getattr(bs_cmd, 'executable', None)
bw_cmd = self.get_finalized_command("bdist_wininst")
is_wininst = getattr(bw_cmd, '_is_running', False)
writer = ei.ScriptWriter
if is_wininst:
exec_param = "python.exe"
writer = ei.WindowsScriptWriter
# resolve the writer to the environment
writer = writer.best()
cmd = writer.command_spec_class.best().from_param(exec_param)
for args in writer.get_args(dist, cmd.as_header()):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target, "w" + mode)
f.write(contents)
f.close()
chmod(target, 0o777 - mask)
|
amagdas/superdesk
|
refs/heads/master
|
server/apps/item_lock/components/__init__.py
|
395
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
|
nott/next.filmfest.by
|
refs/heads/master
|
cpm_data/models.py
|
1
|
from __future__ import unicode_literals
from django.db import models
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel
from wagtail.wagtailcore.models import Orderable # TODO: is this good?
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsearch import index
from modeladminutils.edit_handlers import AdminModelChooserPanel
from cpm_generic.constants import COUNTRIES
from cpm_generic.models import TranslatedField
from cpm_data.queryset import SearchableQuerySet
class BaseSearchableManager(models.Manager):
def get_queryset(self):
return SearchableQuerySet(self.model)
SearchableManager = BaseSearchableManager.from_queryset(SearchableQuerySet)
class Film(index.Indexed, ClusterableModel):
"""Model representing accepted film
Submissions contain raw data that need to be preprocessed/translated
before publishing. This model contains all the data about an accepted
submission that will be published.
"""
submission = models.ForeignKey(
'submissions.Submission',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
title_en = models.CharField(max_length=1000, default='', blank=True)
title_be = models.CharField(max_length=1000, default='', blank=True)
title_ru = models.CharField(max_length=1000, default='', blank=True)
title = TranslatedField('title_en', 'title_be', 'title_ru')
director_en = models.CharField(max_length=1000, default='', blank=True)
director_be = models.CharField(max_length=1000, default='', blank=True)
director_ru = models.CharField(max_length=1000, default='', blank=True)
director = TranslatedField('director_en', 'director_be', 'director_ru')
country = models.CharField(max_length=2, choices=COUNTRIES,
null=True, blank=True)
city_en = models.CharField(max_length=100, default='', blank=True)
city_be = models.CharField(max_length=100, default='', blank=True)
city_ru = models.CharField(max_length=100, default='', blank=True)
city = TranslatedField('city_en', 'city_be', 'city_ru')
year = models.IntegerField(null=True, blank=True)
duration_en = models.CharField(max_length=100, default='', blank=True)
duration_be = models.CharField(max_length=100, default='', blank=True)
duration_ru = models.CharField(max_length=100, default='', blank=True)
duration = TranslatedField('duration_en', 'duration_be', 'duration_ru')
genre_en = models.CharField(max_length=1000, default='', blank=True)
genre_be = models.CharField(max_length=1000, default='', blank=True)
genre_ru = models.CharField(max_length=1000, default='', blank=True)
genre = TranslatedField('genre_en', 'genre_be', 'genre_ru')
synopsis_short_en = RichTextField(default='', blank=True)
synopsis_short_be = RichTextField(default='', blank=True)
synopsis_short_ru = RichTextField(default='', blank=True)
synopsis_short = TranslatedField('synopsis_short_en',
'synopsis_short_be',
'synopsis_short_ru')
synopsis_en = RichTextField(default='', blank=True)
synopsis_be = RichTextField(default='', blank=True)
synopsis_ru = RichTextField(default='', blank=True)
synopsis = TranslatedField('synopsis_en', 'synopsis_be', 'synopsis_ru')
frame = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
objects = SearchableManager()
def __unicode__(self):
return u'"{}" / {}'.format(self.title, self.director)
panels = [
FieldPanel('submission'),
FieldPanel('title_en'),
FieldPanel('title_be'),
FieldPanel('title_ru'),
FieldPanel('director_en'),
FieldPanel('director_be'),
FieldPanel('director_ru'),
FieldPanel('country'),
FieldPanel('city_en'),
FieldPanel('city_be'),
FieldPanel('city_ru'),
FieldPanel('genre_en'),
FieldPanel('genre_be'),
FieldPanel('genre_ru'),
FieldPanel('year'),
FieldPanel('duration_en'),
FieldPanel('duration_be'),
FieldPanel('duration_ru'),
FieldPanel('synopsis_short_en'),
FieldPanel('synopsis_short_be'),
FieldPanel('synopsis_short_ru'),
FieldPanel('synopsis_en'),
FieldPanel('synopsis_be'),
FieldPanel('synopsis_ru'),
ImageChooserPanel('frame'),
]
search_fields = [
index.SearchField('title_en', partial_match=True, boost=2),
index.SearchField('title_be', partial_match=True, boost=2),
index.SearchField('title_ru', partial_match=True, boost=2),
index.SearchField('director_en', partial_match=True, boost=2),
index.SearchField('director_be', partial_match=True, boost=2),
index.SearchField('director_ru', partial_match=True, boost=2),
index.SearchField('synopsis_short_en', partial_match=True),
index.SearchField('synopsis_short_be', partial_match=True),
index.SearchField('synopsis_short_ru', partial_match=True),
index.SearchField('synopsis_en', partial_match=True),
index.SearchField('synopsis_be', partial_match=True),
index.SearchField('synopsis_ru', partial_match=True),
]
class JuryMember(ClusterableModel):
name_en = models.CharField(max_length=250)
name_be = models.CharField(max_length=250)
name_ru = models.CharField(max_length=250)
name = TranslatedField('name_en', 'name_be', 'name_ru')
info_en = models.CharField(max_length=5000)
info_be = models.CharField(max_length=5000)
info_ru = models.CharField(max_length=5000)
info = TranslatedField('info_en', 'info_be', 'info_ru')
photo = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
country = models.CharField(max_length=2, choices=COUNTRIES)
def __unicode__(self):
return self.name
panels = [
FieldPanel('name_en'),
FieldPanel('name_be'),
FieldPanel('name_ru'),
ImageChooserPanel('photo'),
FieldPanel('country'),
FieldPanel('info_en'),
FieldPanel('info_be'),
FieldPanel('info_ru'),
]
class SeasonRelatedJuryMember(Orderable):
season = ParentalKey('Season', related_name='related_jury_members')
jury_member = models.ForeignKey(
'cpm_data.JuryMember',
null=True,
blank=True,
related_name='+'
)
category_en = models.CharField(max_length=250, blank=True, default='')
category_be = models.CharField(max_length=250, blank=True, default='')
category_ru = models.CharField(max_length=250, blank=True, default='')
category = TranslatedField('category_en', 'category_be', 'category_ru')
name = property(lambda self: self.jury_member.name)
info = property(lambda self: self.jury_member.info)
photo = property(lambda self: self.jury_member.photo)
country = property(lambda self: self.jury_member.country)
panels = [
AdminModelChooserPanel('jury_member'),
FieldPanel('category_en'),
FieldPanel('category_be'),
FieldPanel('category_ru'),
]
class Partner(ClusterableModel):
name_en = models.CharField(max_length=250)
name_be = models.CharField(max_length=250)
name_ru = models.CharField(max_length=250)
name = TranslatedField('name_en', 'name_be', 'name_ru')
link = models.CharField(max_length=250, blank=True, default='')
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
def __unicode__(self):
return self.name
panels = [
FieldPanel('name_en'),
FieldPanel('name_be'),
FieldPanel('name_ru'),
FieldPanel('link'),
ImageChooserPanel('image'),
]
class SeasonRelatedPartner(Orderable):
season = ParentalKey('Season', related_name='related_partners')
partner = models.ForeignKey(
'cpm_data.Partner',
null=True,
blank=True,
related_name='+'
)
name = property(lambda self: self.partner.name)
link = property(lambda self: self.partner.link)
image = property(lambda self: self.partner.image)
panels = [
AdminModelChooserPanel('partner'),
]
class Season(ClusterableModel):
name_en = models.CharField(max_length=250)
name_be = models.CharField(max_length=250)
name_ru = models.CharField(max_length=250)
name = TranslatedField('name_en', 'name_be', 'name_ru')
panels = [
FieldPanel('name_en'),
FieldPanel('name_be'),
FieldPanel('name_ru'),
InlinePanel('related_jury_members', label="Jury members"),
InlinePanel('related_partners', label="Partners"),
]
def __unicode__(self):
return self.name
@classmethod
def get_current(cls):
return cls.objects.get(name_en=u'2017')
|
my7seven/ansible
|
refs/heads/devel
|
samples/lookup_pipe.py
|
255
|
- hosts: localhost
gather_facts: no
tasks:
- debug: msg="the date is {{ lookup('pipe', 'date') }}"
|
axbaretto/beam
|
refs/heads/master
|
sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query1.py
|
5
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Nexmark Query 1: Convert bid prices from dollars to euros.
The Nexmark suite is a series of queries (streaming pipelines) performed
on a simulation of auction events.
This query converts bid prices from dollars to euros.
It illustrates a simple map.
"""
# pytype: skip-file
import apache_beam as beam
from apache_beam.testing.benchmarks.nexmark.models import nexmark_model
from apache_beam.testing.benchmarks.nexmark.queries import nexmark_query_util
USD_TO_EURO = 0.89
def load(events, metadata=None, pipeline_options=None):
return (
events
| nexmark_query_util.JustBids()
| 'ConvertToEuro' >> beam.Map(
lambda bid: nexmark_model.Bid(
bid.auction,
bid.bidder,
bid.price * USD_TO_EURO,
bid.date_time,
bid.extra)))
|
kamcpp/tensorflow
|
refs/heads/master
|
tensorflow/python/client/__init__.py
|
12133432
| |
jabber-at/hp
|
refs/heads/master
|
hp/antispam/migrations/__init__.py
|
12133432
| |
dancingdan/tensorflow
|
refs/heads/master
|
tensorflow/python/estimator/canned/metric_keys.py
|
11
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Enum for model prediction keys."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator import model_fn
class MetricKeys(object):
"""Metric key strings."""
LOSS = model_fn.LOSS_METRIC_KEY
LOSS_MEAN = model_fn.AVERAGE_LOSS_METRIC_KEY
LOSS_REGULARIZATION = 'regularization_loss'
ACCURACY = 'accuracy'
PRECISION = 'precision'
RECALL = 'recall'
# This is the best the model could do by always predicting one class.
# Should be < ACCURACY in a trained model.
ACCURACY_BASELINE = 'accuracy_baseline'
AUC = 'auc'
AUC_PR = 'auc_precision_recall'
LABEL_MEAN = 'label/mean'
PREDICTION_MEAN = 'prediction/mean'
# The following require a threshold applied, should be float in range (0, 1).
ACCURACY_AT_THRESHOLD = 'accuracy/positive_threshold_%g'
PRECISION_AT_THRESHOLD = 'precision/positive_threshold_%g'
RECALL_AT_THRESHOLD = 'recall/positive_threshold_%g'
# The following require a class id applied.
PROBABILITY_MEAN_AT_CLASS = 'probability_mean/class%d'
AUC_AT_CLASS = 'auc/class%d'
AUC_PR_AT_CLASS = 'auc_precision_recall/class%d'
# The following require a class name applied.
PROBABILITY_MEAN_AT_NAME = 'probability_mean/%s'
AUC_AT_NAME = 'auc/%s'
AUC_PR_AT_NAME = 'auc_precision_recall/%s'
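# Illustrative note (added for clarity, not part of the original module): the
# templated keys above are filled in with the % operator, for example
#
#     MetricKeys.ACCURACY_AT_THRESHOLD % 0.5     -> 'accuracy/positive_threshold_0.5'
#     MetricKeys.PROBABILITY_MEAN_AT_CLASS % 3   -> 'probability_mean/class3'
#     MetricKeys.AUC_PR_AT_NAME % 'positive'     -> 'auc_precision_recall/positive'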
|
consulo/consulo-python
|
refs/heads/master
|
plugin/src/test/resources/override/typeAnnotations.py
|
83
|
class ArgsTest:
def __init__(self, key:str=None, value:str=None,
max_age=None, expires=None, path:str=None, domain:str=None,
secure:bool=False, httponly:bool=False, sync_expires:bool=True,
comment:str=None, version:int=None): pass
class Sub(ArgsTest):
pass
|
AllenDowney/ThinkStats2
|
refs/heads/master
|
solutions/nsfg.py
|
3
|
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import sys
import numpy as np
import thinkstats2
from collections import defaultdict
def ReadFemResp(dct_file='2002FemResp.dct',
dat_file='2002FemResp.dat.gz',
nrows=None):
"""Reads the NSFG respondent data.
dct_file: string file name
dat_file: string file name
returns: DataFrame
"""
dct = thinkstats2.ReadStataDct(dct_file)
df = dct.ReadFixedWidth(dat_file, compression='gzip', nrows=nrows)
CleanFemResp(df)
return df
def CleanFemResp(df):
"""Recodes variables from the respondent frame.
df: DataFrame
"""
pass
def ReadFemPreg(dct_file='2002FemPreg.dct',
dat_file='2002FemPreg.dat.gz'):
"""Reads the NSFG pregnancy data.
dct_file: string file name
dat_file: string file name
returns: DataFrame
"""
dct = thinkstats2.ReadStataDct(dct_file)
df = dct.ReadFixedWidth(dat_file, compression='gzip')
CleanFemPreg(df)
return df
def CleanFemPreg(df):
"""Recodes variables from the pregnancy frame.
df: DataFrame
"""
# mother's age is encoded in centiyears; convert to years
df.agepreg /= 100.0
# birthwgt_lb contains at least one bogus value (51 lbs)
# replace with NaN
df.loc[df.birthwgt_lb > 20, 'birthwgt_lb'] = np.nan
# replace 'not ascertained', 'refused', 'don't know' with NaN
na_vals = [97, 98, 99]
df.birthwgt_lb.replace(na_vals, np.nan, inplace=True)
df.birthwgt_oz.replace(na_vals, np.nan, inplace=True)
df.hpagelb.replace(na_vals, np.nan, inplace=True)
df.babysex.replace([7, 9], np.nan, inplace=True)
df.nbrnaliv.replace([9], np.nan, inplace=True)
# birthweight is stored in two columns, lbs and oz.
# convert to a single column in lb
# NOTE: creating a new column requires dictionary syntax,
# not attribute assignment (like df.totalwgt_lb)
df['totalwgt_lb'] = df.birthwgt_lb + df.birthwgt_oz / 16.0
# due to a bug in ReadStataDct, the last variable gets clipped;
# so for now set it to NaN
df.cmintvw = np.nan
def ValidatePregnum(resp, preg):
"""Validate pregnum in the respondent file.
resp: respondent DataFrame
preg: pregnancy DataFrame
"""
# make the map from caseid to list of pregnancy indices
preg_map = MakePregMap(preg)
# iterate through the respondent pregnum series
for index, pregnum in resp.pregnum.iteritems():
caseid = resp.caseid[index]
indices = preg_map[caseid]
# check that pregnum from the respondent file equals
# the number of records in the pregnancy file
if len(indices) != pregnum:
print(caseid, len(indices), pregnum)
return False
return True
def MakePregMap(df):
"""Make a map from caseid to list of preg indices.
df: DataFrame
returns: dict that maps from caseid to list of indices into `preg`
"""
d = defaultdict(list)
for index, caseid in df.caseid.iteritems():
d[caseid].append(index)
return d
def main():
"""Tests the functions in this module.
script: string script name
"""
# read and validate the respondent file
resp = ReadFemResp()
assert(len(resp) == 7643)
assert(resp.pregnum.value_counts()[1] == 1267)
# read and validate the pregnancy file
preg = ReadFemPreg()
print(preg.shape)
assert len(preg) == 13593
assert preg.caseid[13592] == 12571
assert preg.pregordr.value_counts()[1] == 5033
assert preg.nbrnaliv.value_counts()[1] == 8981
assert preg.babysex.value_counts()[1] == 4641
assert preg.birthwgt_lb.value_counts()[7] == 3049
assert preg.birthwgt_oz.value_counts()[0] == 1037
assert preg.prglngth.value_counts()[39] == 4744
assert preg.outcome.value_counts()[1] == 9148
assert preg.birthord.value_counts()[1] == 4413
assert preg.agepreg.value_counts()[22.75] == 100
assert preg.totalwgt_lb.value_counts()[7.5] == 302
weights = preg.finalwgt.value_counts()
key = max(weights.keys())
assert preg.finalwgt.value_counts()[key] == 6
# validate that the pregnum column in `resp` matches the number
# of entries in `preg`
assert(ValidatePregnum(resp, preg))
print('All tests passed.')
if __name__ == '__main__':
main()
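# A minimal interactive sketch of using the readers above, assuming the
# 2002FemPreg .dct/.dat.gz files are in the working directory (outcome == 1
# marks live births in the NSFG coding):
#
#     preg = ReadFemPreg()
#     live = preg[preg.outcome == 1]      # keep live births only
#     print(live.totalwgt_lb.mean())      # mean total birth weight in pounds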
|
etherkit/OpenBeacon2
|
refs/heads/master
|
client/linux-x86/venv/lib/python3.8/site-packages/setuptools/dep_util.py
|
316
|
from distutils.dep_util import newer_group
# yes, this was almost entirely copy-pasted from
# 'newer_pairwise()', this is just another convenience
# function.
def newer_pairwise_group(sources_groups, targets):
"""Walk both arguments in parallel, testing if each source group is newer
than its corresponding target. Returns a pair of lists (sources_groups,
targets) where sources is newer than target, according to the semantics
of 'newer_group()'.
"""
if len(sources_groups) != len(targets):
raise ValueError("'sources_group' and 'targets' must be the same length")
# build a pair of lists (sources_groups, targets) where source is newer
n_sources = []
n_targets = []
for i in range(len(sources_groups)):
if newer_group(sources_groups[i], targets[i]):
n_sources.append(sources_groups[i])
n_targets.append(targets[i])
return n_sources, n_targets
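# A minimal usage sketch (hypothetical file names; assumes these paths exist on
# disk so that newer_group() can compare modification times):
#
#     sources_groups = [['foo.c', 'foo.h'], ['bar.c', 'bar.h']]
#     targets = ['foo.o', 'bar.o']
#     stale_sources, stale_targets = newer_pairwise_group(sources_groups, targets)
#     # each entry in stale_targets has at least one source newer than the target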
|
weidnem/IntroPython2016
|
refs/heads/master
|
students/cowhey/session03/mailroom.py
|
3
|
#!/usr/bin/env python
def accept_donation():
amount = input("How much did this person donate? \n")
if amount == "x" or check_if_number(amount):
return amount
else:
print("You must enter a numeric amount.")
return accept_donation()
def check_if_number(num):
try:
float(num)
return True
    except (ValueError, TypeError):
return False
def create_reports():
donor_rows = []
for donor in donors:
new_row = []
new_row.append(donor[0])
new_row.append("{:.2f}".format(sum(donor[1:])))
new_row.append(str(len(donor[1:])))
new_row.append("{:.2f}".format(float(new_row[1]) / int(new_row[2])))
donor_rows.append(new_row)
return donor_rows
def format_row(row):
formatted = []
for item in row:
formatted.append('{:30}'.format(item))
return ' '.join(formatted)
def print_donor_list():
print("These are the donors you have in your database:")
for donor in donors:
print(donor[0])
print()
def print_report():
print("This will print a report")
header_row = ["Name", "Total Donated", "Number of Donations", "Average Donation"]
donor_rows = create_reports()
sorted_donors = sort_donors(donor_rows)
print(format_row(header_row))
for row in sorted_donors:
print(format_row(row))
print()
def send_thanks():
print("This will write a thank you note")
donor_names = [x[0] for x in donors]
thank_you_command = input("Type the full name of the person you would like to thank. \nOr type 'list' to see a list of donors.\n")
if thank_you_command == "x":
return
elif thank_you_command.lower() == "list":
print_donor_list()
else:
donation_amount = accept_donation()
if donation_amount == "x":
return
else:
donation_amount = float(donation_amount)
if thank_you_command in donor_names:
name = thank_you_command
donors[donor_names.index(name)].append(donation_amount)
print("Adding ${:02.2f} to {}'s donations.".format(donation_amount, name))
print_thank_you(name, donation_amount)
else:
print(donation_amount)
name = thank_you_command
donors.append([name, donation_amount])
print("Adding {} as a donor, with a donation of ${:02.2f}.".format(name, donation_amount))
print_thank_you(name, donation_amount)
# here is where triple quoted strings can be helpful
msg = """
What would you like to do?
To send a thank you: type "s"
To print a report: type "p"
To exit: type "x"
"""
donors = [["Wilmot Filipe", 18.00, 72.00], ["Neoptolemus Yaropolk", 36.00], ["Mahesha Diogenes", 90.00, 54.00, 18.00], ["Arthur Tjaz", 18.00], ["Luuk Sinclair", 180.00]]
def main():
"""
run the main interactive loop
"""
response = ""
# keep asking until the users responds with an 'x'
while True: # make sure there is a break if you have infinite loop!
print(msg)
response = input("==> ").strip().lower() # strip() in case there are any spaces
if response == 'p':
print_report()
elif response == 's':
send_thanks()
elif response == 'x':
break
else:
print('please type "s", "p", or "x"')
def print_thank_you(name, donation):
print("Sending this email: ")
print("Dear {},\nThank you for your gift of ${:02.2f} to the Fund for Unmatched Socks. Your help will be greatly appreciated by all those who partake in our holey work.".format(name, donation))
print()
def sort_donors(donor_list):
sorted_donors = []
sorted_donors.append(donor_list[0])
for donor in donor_list[1:]:
for x in range(len(sorted_donors)):
if float(donor[1]) > float(sorted_donors[x][1]):
sorted_donors.insert(x, donor)
break
elif x == len(sorted_donors) - 1:
sorted_donors.append(donor)
return sorted_donors
if __name__ == "__main__":
main()
|
Ban3/Limnoria
|
refs/heads/master
|
plugins/News/plugin.py
|
6
|
###
# Copyright (c) 2003-2005, Daniel DiPaolo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import time
import supybot.dbi as dbi
import supybot.conf as conf
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('News')
class DbiNewsDB(plugins.DbiChannelDB):
class DB(dbi.DB):
class Record(dbi.Record):
__fields__ = [
'subject',
'text',
'at',
'expires',
'by',
]
def __str__(self):
user = plugins.getUserName(self.by)
if self.expires == 0:
s = format(_('%s (Subject: %q, added by %s on %s)'),
                               self.text, self.subject, user,
utils.str.timestamp(self.at))
else:
s = format(_('%s (Subject: %q, added by %s on %s, '
'expires at %s)'),
self.text, self.subject, user,
utils.str.timestamp(self.at),
utils.str.timestamp(self.expires))
return s
def __init__(self, filename):
# We use self.__class__ here because apparently DB isn't in our
# scope. python--
self.__parent = super(self.__class__, self)
self.__parent.__init__(filename)
def add(self, subject, text, at, expires, by):
return self.__parent.add(self.Record(at=at, by=by, text=text,
subject=subject, expires=expires))
def getOld(self, id=None):
now = time.time()
if id:
return self.get(id)
else:
L = [R for R in self if R.expires < now and R.expires != 0]
if not L:
raise dbi.NoRecordError
else:
return L
def get(self, id=None):
now = time.time()
if id:
return self.__parent.get(id)
else:
L = [R for R in self if R.expires >= now or R.expires == 0]
if not L:
raise dbi.NoRecordError
return L
def change(self, id, f):
news = self.get(id)
s = '%s: %s' % (news.subject, news.text)
s = f(s)
(news.subject, news.text) = s.split(': ', 1)
self.set(id, news)
NewsDB = plugins.DB('News', {'flat': DbiNewsDB})
class News(callbacks.Plugin):
"""This plugin provides a means of maintaining News for a channel."""
def __init__(self, irc):
self.__parent = super(News, self)
self.__parent.__init__(irc)
self.db = NewsDB()
def die(self):
self.__parent.die()
self.db.close()
@internationalizeDocstring
def add(self, irc, msg, args, channel, user, at, expires, news):
"""[<channel>] <expires> <subject>: <text>
Adds a given news item of <text> to a channel with the given <subject>.
If <expires> isn't 0, that news item will expire <expires> seconds from
now. <channel> is only necessary if the message isn't sent in the
channel itself.
"""
try:
(subject, text) = news.split(': ', 1)
except ValueError:
raise callbacks.ArgumentError
id = self.db.add(channel, subject, text, at, expires, user.id)
irc.replySuccess(format(_('(News item #%i added)'), id))
add = wrap(add, ['channeldb', 'user', 'now', 'expiry', 'text'])
@internationalizeDocstring
def news(self, irc, msg, args, channel, id):
"""[<channel>] [<id>]
Display the news items for <channel> in the format of '(#id) subject'.
If <id> is given, retrieve only that news item; otherwise retrieve all
news items. <channel> is only necessary if the message isn't sent in
the channel itself.
"""
if not id:
try:
records = self.db.get(channel)
items = [format('(#%i) %s', R.id, R.subject) for R in records]
s = format(_('News for %s: %s'), channel, '; '.join(items))
irc.reply(s)
except dbi.NoRecordError:
irc.reply(format(_('No news for %s.'), channel))
else:
try:
record = self.db.get(channel, id)
irc.reply(str(record))
except dbi.NoRecordError as id:
irc.errorInvalid(_('news item id'), id)
news = wrap(news, ['channeldb', additional('positiveInt')])
@internationalizeDocstring
def remove(self, irc, msg, args, channel, id):
"""[<channel>] <id>
Removes the news item with <id> from <channel>. <channel> is only
necessary if the message isn't sent in the channel itself.
"""
try:
self.db.remove(channel, id)
irc.replySuccess()
except dbi.NoRecordError:
irc.errorInvalid(_('news item id'), id)
remove = wrap(remove, ['channeldb', 'positiveInt'])
@internationalizeDocstring
def change(self, irc, msg, args, channel, id, replacer):
"""[<channel>] <id> <regexp>
Changes the news item with <id> from <channel> according to the
regular expression <regexp>. <regexp> should be of the form
s/text/replacement/flags. <channel> is only necessary if the message
isn't sent on the channel itself.
"""
try:
self.db.change(channel, id, replacer)
irc.replySuccess()
except dbi.NoRecordError:
irc.errorInvalid(_('news item id'), id)
change = wrap(change, ['channeldb', 'positiveInt', 'regexpReplacer'])
@internationalizeDocstring
def old(self, irc, msg, args, channel, id):
"""[<channel>] [<id>]
Returns the old news item for <channel> with <id>. If no number is
given, returns all the old news items in reverse order. <channel> is
only necessary if the message isn't sent in the channel itself.
"""
if id:
try:
record = self.db.getOld(channel, id)
irc.reply(str(record))
except dbi.NoRecordError as id:
irc.errorInvalid(_('news item id'), id)
else:
try:
records = self.db.getOld(channel)
items = [format('(#%i) %s', R.id, R.subject) for R in records]
s = format(_('Old news for %s: %s'), channel, '; '.join(items))
irc.reply(s)
except dbi.NoRecordError:
irc.reply(format(_('No old news for %s.'), channel))
old = wrap(old, ['channeldb', additional('positiveInt')])
Class = News
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
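# Hypothetical IRC usage of the commands above, assuming '@' is the bot's
# command prefix and this plugin is loaded (channel names and text are
# illustrative):
#   @news add 0 Outage: services restart at 18:00 UTC
#   @news #example          (list current items as '(#id) subject')
#   @news #example 1        (show item #1 in full)
#   @news remove #example 1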
|
visualskyrim/bquick
|
refs/heads/master
|
bquick/bigquery/__init__.py
|
1
|
# -*- coding: utf-8 -*-
import httplib2
import urllib
import uuid
import time
import os
import json
import sys
from apiclient.discovery import build
from oauth2client.client import GoogleCredentials
from urllib2 import HTTPError
from bquick.bigquery import \
table_list_handler, table_delete_handler, table_copy_handler, data_delete_handler
from bquick.command_parser import \
ListCommand, \
ListRegexCommand, \
ListWildcardCommand, \
DeleteFileCommand, \
DeleteNamedCommand, \
DeleteRegexCommand, \
DeleteWildcardCommand, \
CopyFileCommand, \
CopyWildcardCommand, \
CopyRegexCommand, \
DataDeleteFileCommand, \
DataDeleteNamedCommand, \
DataDeleteRegexCommand, \
DataDeleteWildcardCommand
# Retry transport and file IO errors.
RETRYABLE_ERRORS = (httplib2.HttpLib2Error, IOError)
def list_table(list_command):
dataset = list_command.dataset
limit = list_command.limit
if isinstance(list_command, ListCommand):
return table_list_handler.list_all_table(GOOGLE_BIGQUERY_CLIENT,
dataset,
limit)
elif isinstance(list_command, ListRegexCommand):
return table_list_handler.list_regex_table(GOOGLE_BIGQUERY_CLIENT,
dataset,
list_command.table_name_pattern,
limit)
elif isinstance(list_command, ListWildcardCommand):
return table_list_handler.list_wildcard_table(GOOGLE_BIGQUERY_CLIENT,
dataset,
list_command.table_prefix,
list_command.start_date,
list_command.end_date,
limit)
else:
raise ValueError("Unrecognised command type.")
def delete_table(del_command):
dataset = del_command.dataset
if isinstance(del_command, DeleteNamedCommand):
table_delete_handler.delete_table_by_name(
GOOGLE_BIGQUERY_CLIENT, dataset, del_command.table_name)
elif isinstance(del_command, DeleteFileCommand):
table_delete_handler.delete_table_using_file(
GOOGLE_BIGQUERY_CLIENT, dataset, del_command.delete_file)
elif isinstance(del_command, DeleteWildcardCommand):
table_delete_handler.delete_table_with_wildcard(GOOGLE_BIGQUERY_CLIENT,
dataset,
del_command.table_prefix,
del_command.start_date,
del_command.end_date)
elif isinstance(del_command, DeleteRegexCommand):
table_delete_handler.delete_table_with_regex(GOOGLE_BIGQUERY_CLIENT,
dataset,
del_command.table_name_pattern)
else:
raise ValueError("Unrecognised command type.")
def copy_table(cp_command):
dataset = cp_command.dataset
dest = cp_command.dest
if isinstance(cp_command, CopyFileCommand):
copy_file_path = cp_command.copy_file
if not os.path.exists(copy_file_path):
raise ValueError("Given file path doesn't exist: %s" % copy_file_path)
table_copy_handler.copy_table_file(
GOOGLE_BIGQUERY_CLIENT, dataset, dest, copy_file_path)
elif isinstance(cp_command, CopyWildcardCommand):
table_copy_handler.copy_table_wildcard(
GOOGLE_BIGQUERY_CLIENT,
dataset,
dest,
cp_command.table_prefix,
cp_command.start_date,
cp_command.end_date)
elif isinstance(cp_command, CopyRegexCommand):
table_copy_handler.copy_table_regex(
GOOGLE_BIGQUERY_CLIENT, dataset, dest, cp_command.table_name_pattern)
else:
raise ValueError("Unrecognised delete command.")
def delete_data(ddel_command):
dataset = ddel_command.dataset
condition = ddel_command.condition
if isinstance(ddel_command, DataDeleteNamedCommand):
data_delete_handler.delete_data_by_name(
GOOGLE_BIGQUERY_CLIENT, dataset, condition, ddel_command.table_name)
elif isinstance(ddel_command, DataDeleteFileCommand):
data_delete_handler.delete_data_using_file(
GOOGLE_BIGQUERY_CLIENT, dataset, condition, ddel_command.delete_file)
elif isinstance(ddel_command, DataDeleteWildcardCommand):
data_delete_handler.delete_data_with_wildcard(GOOGLE_BIGQUERY_CLIENT,
dataset,
condition,
ddel_command.table_prefix,
ddel_command.start_date,
ddel_command.end_date)
elif isinstance(ddel_command, DataDeleteRegexCommand):
data_delete_handler.delete_data_with_regex(GOOGLE_BIGQUERY_CLIENT,
dataset,
condition,
ddel_command.table_name_pattern)
else:
raise ValueError("Unrecognised command type.")
def __get_bigquery_service():
credentials = GoogleCredentials.get_application_default()
http = httplib2.Http()
http = credentials.authorize(http)
return build('bigquery', 'v2', credentials=credentials)
GOOGLE_BIGQUERY_CLIENT = __get_bigquery_service()
|
rosmo/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/cnos/cnos_facts.py
|
37
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (C) 2019 Red Hat Inc.
# Copyright (C) 2019 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Module to Collect facts from Lenovo Switches running Lenovo CNOS commands
# Lenovo Networking
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_facts
version_added: "2.3"
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Collect facts from remote devices running Lenovo CNOS
description:
- Collects a base set of device facts from a remote Lenovo device
running on CNOS. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
notes:
- Tested against CNOS 10.8.1
options:
authorize:
version_added: "2.6"
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
type: bool
default: 'no'
auth_pass:
version_added: "2.6"
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
gather_subset:
version_added: "2.6"
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
        with an initial C(!) to specify that a specific subset should
not be collected.
required: false
default: '!config'
'''
EXAMPLES = '''
Tasks: The following are examples of using the module cnos_facts.
---
- name: Test cnos Facts
cnos_facts:
---
# Collect all facts from the device
- cnos_facts:
gather_subset: all
# Collect only the config and default facts
- cnos_facts:
gather_subset:
- config
# Do not collect hardware facts
- cnos_facts:
gather_subset:
- "!hardware"
'''
RETURN = '''
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_model:
description: The model name returned from the Lenovo CNOS device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the Lenovo CNOS device
returned: always
type: str
ansible_net_version:
description: The CNOS operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: str
ansible_net_image:
description: Indicates the active image for the device
returned: always
type: str
# hardware
ansible_net_memfree_mb:
description: The available free memory on the remote device in MB
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system.
This gives information on description, mac address, mtu, speed,
duplex and operstatus
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
'''
import re
from ansible.module_utils.network.cnos.cnos import run_commands
from ansible.module_utils.network.cnos.cnos import check_args
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import zip
class FactsBase(object):
COMMANDS = list()
def __init__(self, module):
self.module = module
self.facts = dict()
self.responses = None
self.PERSISTENT_COMMAND_TIMEOUT = 60
def populate(self):
self.responses = run_commands(self.module, self.COMMANDS,
check_rc=False)
def run(self, cmd):
return run_commands(self.module, cmd, check_rc=False)
class Default(FactsBase):
COMMANDS = ['show sys-info', 'show running-config']
def populate(self):
super(Default, self).populate()
data = self.responses[0]
data_run = self.responses[1]
if data:
self.facts['version'] = self.parse_version(data)
self.facts['serialnum'] = self.parse_serialnum(data)
self.facts['model'] = self.parse_model(data)
self.facts['image'] = self.parse_image(data)
if data_run:
self.facts['hostname'] = self.parse_hostname(data_run)
def parse_version(self, data):
for line in data.split('\n'):
line = line.strip()
match = re.match(r'System Software Revision (.*?)',
line, re.M | re.I)
if match:
vers = line.split(':')
ver = vers[1].strip()
return ver
return "NA"
def parse_hostname(self, data_run):
for line in data_run.split('\n'):
line = line.strip()
match = re.match(r'hostname (.*?)', line, re.M | re.I)
if match:
hosts = line.split()
hostname = hosts[1].strip('\"')
return hostname
return "NA"
def parse_model(self, data):
for line in data.split('\n'):
line = line.strip()
match = re.match(r'System Model (.*?)', line, re.M | re.I)
if match:
mdls = line.split(':')
mdl = mdls[1].strip()
return mdl
return "NA"
def parse_image(self, data):
match = re.search(r'(.*) image(.*)', data, re.M | re.I)
if match:
return "Image1"
else:
return "Image2"
def parse_serialnum(self, data):
for line in data.split('\n'):
line = line.strip()
match = re.match(r'System Serial Number (.*?)', line, re.M | re.I)
if match:
serNums = line.split(':')
ser = serNums[1].strip()
return ser
return "NA"
class Hardware(FactsBase):
COMMANDS = [
'show running-config'
]
def populate(self):
super(Hardware, self).populate()
data = self.run(['show process memory'])
data = to_text(data, errors='surrogate_or_strict').strip()
data = data.replace(r"\n", "\n")
if data:
for line in data.split('\n'):
line = line.strip()
match = re.match(r'Mem: (.*?)', line, re.M | re.I)
if match:
memline = line.split(':')
mems = memline[1].strip().split()
self.facts['memtotal_mb'] = int(mems[0]) / 1024
self.facts['memused_mb'] = int(mems[1]) / 1024
self.facts['memfree_mb'] = int(mems[2]) / 1024
self.facts['memshared_mb'] = int(mems[3]) / 1024
self.facts['memavailable_mb'] = int(mems[5]) / 1024
def parse_memtotal(self, data):
match = re.search(r'^MemTotal:\s*(.*) kB', data, re.M | re.I)
if match:
return int(match.group(1)) / 1024
def parse_memfree(self, data):
match = re.search(r'^MemFree:\s*(.*) kB', data, re.M | re.I)
if match:
return int(match.group(1)) / 1024
class Config(FactsBase):
COMMANDS = ['show running-config']
def populate(self):
super(Config, self).populate()
data = self.responses[0]
if data:
self.facts['config'] = data
class Interfaces(FactsBase):
COMMANDS = ['show interface brief']
def populate(self):
super(Interfaces, self).populate()
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
data1 = self.run(['show interface status'])
data1 = to_text(data1, errors='surrogate_or_strict').strip()
data1 = data1.replace(r"\n", "\n")
data2 = self.run(['show interface mac-address'])
data2 = to_text(data2, errors='surrogate_or_strict').strip()
data2 = data2.replace(r"\n", "\n")
lines1 = None
lines2 = None
if data1:
lines1 = self.parse_interfaces(data1)
if data2:
lines2 = self.parse_interfaces(data2)
if lines1 is not None and lines2 is not None:
self.facts['interfaces'] = self.populate_interfaces(lines1, lines2)
data3 = self.run(['show lldp neighbors'])
data3 = to_text(data3, errors='surrogate_or_strict').strip()
data3 = data3.replace(r"\n", "\n")
if data3:
lines3 = self.parse_neighbors(data3)
if lines3 is not None:
self.facts['neighbors'] = self.populate_neighbors(lines3)
data4 = self.run(['show ip interface brief vrf all'])
data5 = self.run(['show ipv6 interface brief vrf all'])
data4 = to_text(data4, errors='surrogate_or_strict').strip()
data4 = data4.replace(r"\n", "\n")
data5 = to_text(data5, errors='surrogate_or_strict').strip()
data5 = data5.replace(r"\n", "\n")
lines4 = None
lines5 = None
if data4:
lines4 = self.parse_ipaddresses(data4)
ipv4_interfaces = self.set_ip_interfaces(lines4)
self.facts['all_ipv4_addresses'] = ipv4_interfaces
if data5:
lines5 = self.parse_ipaddresses(data5)
ipv6_interfaces = self.set_ipv6_interfaces(lines5)
self.facts['all_ipv6_addresses'] = ipv6_interfaces
def parse_ipaddresses(self, data):
parsed = list()
for line in data.split('\n'):
if len(line) == 0:
continue
else:
line = line.strip()
match = re.match(r'^(Ethernet+)', line)
if match:
key = match.group(1)
parsed.append(line)
match = re.match(r'^(po+)', line)
if match:
key = match.group(1)
parsed.append(line)
match = re.match(r'^(mgmt+)', line)
if match:
key = match.group(1)
parsed.append(line)
match = re.match(r'^(loopback+)', line)
if match:
key = match.group(1)
parsed.append(line)
return parsed
def populate_interfaces(self, lines1, lines2):
interfaces = dict()
for line1, line2 in zip(lines1, lines2):
line = line1 + " " + line2
intfSplit = line.split()
innerData = dict()
innerData['description'] = intfSplit[1].strip()
innerData['macaddress'] = intfSplit[8].strip()
innerData['type'] = intfSplit[6].strip()
innerData['speed'] = intfSplit[5].strip()
innerData['duplex'] = intfSplit[4].strip()
innerData['operstatus'] = intfSplit[2].strip()
interfaces[intfSplit[0].strip()] = innerData
return interfaces
def parse_interfaces(self, data):
parsed = list()
for line in data.split('\n'):
if len(line) == 0:
continue
else:
line = line.strip()
match = re.match(r'^(Ethernet+)', line)
if match:
key = match.group(1)
parsed.append(line)
match = re.match(r'^(po+)', line)
if match:
key = match.group(1)
parsed.append(line)
match = re.match(r'^(mgmt+)', line)
if match:
key = match.group(1)
parsed.append(line)
return parsed
def set_ip_interfaces(self, line4):
ipv4_addresses = list()
for line in line4:
ipv4Split = line.split()
if 'Ethernet' in ipv4Split[0]:
ipv4_addresses.append(ipv4Split[1])
if 'mgmt' in ipv4Split[0]:
ipv4_addresses.append(ipv4Split[1])
if 'po' in ipv4Split[0]:
ipv4_addresses.append(ipv4Split[1])
if 'loopback' in ipv4Split[0]:
ipv4_addresses.append(ipv4Split[1])
return ipv4_addresses
def set_ipv6_interfaces(self, line4):
ipv6_addresses = list()
for line in line4:
ipv6Split = line.split()
if 'Ethernet' in ipv6Split[0]:
ipv6_addresses.append(ipv6Split[1])
if 'mgmt' in ipv6Split[0]:
ipv6_addresses.append(ipv6Split[1])
if 'po' in ipv6Split[0]:
ipv6_addresses.append(ipv6Split[1])
if 'loopback' in ipv6Split[0]:
ipv6_addresses.append(ipv6Split[1])
return ipv6_addresses
def populate_neighbors(self, lines3):
neighbors = dict()
device_name = ''
for line in lines3:
neighborSplit = line.split()
innerData = dict()
count = len(neighborSplit)
if count == 5:
local_interface = neighborSplit[1].strip()
innerData['Device Name'] = neighborSplit[0].strip()
innerData['Hold Time'] = neighborSplit[2].strip()
innerData['Capability'] = neighborSplit[3].strip()
innerData['Remote Port'] = neighborSplit[4].strip()
neighbors[local_interface] = innerData
elif count == 4:
local_interface = neighborSplit[0].strip()
innerData['Hold Time'] = neighborSplit[1].strip()
innerData['Capability'] = neighborSplit[2].strip()
innerData['Remote Port'] = neighborSplit[3].strip()
neighbors[local_interface] = innerData
return neighbors
def parse_neighbors(self, neighbors):
parsed = list()
for line in neighbors.split('\n'):
if len(line) == 0:
continue
else:
line = line.strip()
if 'Ethernet' in line:
parsed.append(line)
if 'mgmt' in line:
parsed.append(line)
if 'po' in line:
parsed.append(line)
if 'loopback' in line:
parsed.append(line)
return parsed
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,
interfaces=Interfaces,
config=Config,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
PERSISTENT_COMMAND_TIMEOUT = 60
def main():
"""main entry point for module execution
"""
argument_spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Bad subset')
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
warnings = list()
check_args(module, warnings)
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
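# Sketch of how gather_subset values resolve in main() above (derived from the
# subset-selection logic; each list shows the fact classes that will run):
#   ['all']        -> default, hardware, interfaces, config
#   ['!config']    -> default, hardware, interfaces   (the module default)
#   ['interfaces'] -> default, interfaces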
|
patrickstocklin/chattR
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/staticfiles/utils.py
|
248
|
import fnmatch
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def matches_patterns(path, patterns=None):
"""
Return True or False depending on whether the ``path`` should be
    ignored (if it matches any pattern in ``patterns``).
"""
if patterns is None:
patterns = []
for pattern in patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
def get_files(storage, ignore_patterns=None, location=''):
"""
Recursively walk the storage directories yielding the paths
of all files that should be copied.
"""
if ignore_patterns is None:
ignore_patterns = []
directories, files = storage.listdir(location)
for fn in files:
if matches_patterns(fn, ignore_patterns):
continue
if location:
fn = os.path.join(location, fn)
yield fn
for dir in directories:
if matches_patterns(dir, ignore_patterns):
continue
if location:
dir = os.path.join(location, dir)
for fn in get_files(storage, ignore_patterns, dir):
yield fn
def check_settings(base_url=None):
"""
Checks if the staticfiles settings have sane values.
"""
if base_url is None:
base_url = settings.STATIC_URL
if not base_url:
raise ImproperlyConfigured(
"You're using the staticfiles app "
"without having set the required STATIC_URL setting.")
if settings.MEDIA_URL == base_url:
raise ImproperlyConfigured("The MEDIA_URL and STATIC_URL "
"settings must have different values")
if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and
(settings.MEDIA_ROOT == settings.STATIC_ROOT)):
raise ImproperlyConfigured("The MEDIA_ROOT and STATIC_ROOT "
"settings must have different values")
|
hbrunn/bank-payment
|
refs/heads/8.0
|
account_payment_purchase/tests/__init__.py
|
8
|
# -*- coding: utf-8 -*-
# (c) 2013-2015 Serv. Tecnol. Avanzados - Pedro M. Baeza
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import test_account_payment_purchase
|
apache/incubator-allura
|
refs/heads/master
|
Allura/allura/model/repo.py
|
1
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import logging
from hashlib import sha1
from itertools import chain
from datetime import datetime
from collections import defaultdict, OrderedDict
from difflib import SequenceMatcher, unified_diff
import bson
from pylons import tmpl_context as c
import pymongo.errors
from ming import Field, collection, Index
from ming import schema as S
from ming.base import Object
from ming.utils import LazyProperty
from ming.orm import mapper, session
from allura.lib import utils
from allura.lib import helpers as h
from allura.lib.security import has_access
from .auth import User
from .project import AppConfig
from .session import main_doc_session
from .session import repository_orm_session
from .timeline import ActivityObject
log = logging.getLogger(__name__)
# Some schema types
SUser = dict(name=str, email=str, date=datetime)
SObjType = S.OneOf('blob', 'tree', 'submodule')
# Used for when we're going to batch queries using $in
QSIZE = 100
README_RE = re.compile('^README(\.[^.]*)?$', re.IGNORECASE)
VIEWABLE_EXTENSIONS = [
'.php', '.py', '.js', '.java', '.html', '.htm', '.yaml', '.sh',
'.rb', '.phtml', '.txt', '.bat', '.ps1', '.xhtml', '.css', '.cfm', '.jsp', '.jspx',
'.pl', '.php4', '.php3', '.rhtml', '.svg', '.markdown', '.json', '.ini', '.tcl', '.vbs', '.xsl']
PYPELINE_EXTENSIONS = utils.MARKDOWN_EXTENSIONS + ['.rst']
DIFF_SIMILARITY_THRESHOLD = .5 # used for determining file renames
# Basic commit information
# One of these for each commit in the physical repo on disk. The _id is the
# hexsha of the commit (for Git and Hg).
CommitDoc = collection(
'repo_ci', main_doc_session,
Field('_id', str),
Field('tree_id', str),
Field('committed', SUser),
Field('authored', SUser),
Field('message', str),
Field('parent_ids', [str], index=True),
Field('child_ids', [str], index=True),
Field('repo_ids', [S.ObjectId()], index=True))
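# Illustrative shape of one CommitDoc record (values are made up; field names
# follow the schema above):
#   {'_id': '1a2b3c...', 'tree_id': '4d5e6f...',
#    'committed': {'name': 'Alice', 'email': 'alice@example.com', 'date': datetime(...)},
#    'authored':  {'name': 'Alice', 'email': 'alice@example.com', 'date': datetime(...)},
#    'message': 'Fix typo in README', 'parent_ids': ['...'], 'child_ids': ['...'],
#    'repo_ids': [ObjectId('...')]}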
# Basic tree information (also see TreesDoc)
TreeDoc = collection(
'repo_tree', main_doc_session,
Field('_id', str),
Field('tree_ids', [dict(name=str, id=str)]),
Field('blob_ids', [dict(name=str, id=str)]),
Field('other_ids', [dict(name=str, id=str, type=SObjType)]))
# Information about the last commit to touch a tree
LastCommitDoc = collection(
'repo_last_commit', main_doc_session,
Field('_id', S.ObjectId()),
Field('commit_id', str),
Field('path', str),
Index('commit_id', 'path'),
Field('entries', [dict(
name=str,
commit_id=str)]))
# List of all trees contained within a commit
# TreesDoc._id = CommitDoc._id
# TreesDoc.tree_ids = [ TreeDoc._id, ... ]
TreesDoc = collection(
'repo_trees', main_doc_session,
Field('_id', str),
Field('tree_ids', [str]))
# Information about which things were added/removed in commit
# DiffInfoDoc._id = CommitDoc._id
DiffInfoDoc = collection(
'repo_diffinfo', main_doc_session,
Field('_id', str),
Field(
'differences',
[dict(name=str, lhs_id=str, rhs_id=str)]))
# List of commit runs (a run is a linear series of single-parent commits)
# CommitRunDoc.commit_ids = [ CommitDoc._id, ... ]
CommitRunDoc = collection(
'repo_commitrun', main_doc_session,
Field('_id', str),
Field('parent_commit_ids', [str], index=True),
Field('commit_ids', [str], index=True),
Field('commit_times', [datetime]))
class RepoObject(object):
def __repr__(self): # pragma no cover
return '<%s %s>' % (
self.__class__.__name__, self._id)
def primary(self):
return self
def index_id(self):
'''Globally unique artifact identifier. Used for
SOLR ID, shortlinks, and maybe elsewhere
'''
id = '%s.%s#%s' % (
self.__class__.__module__,
self.__class__.__name__,
self._id)
return id.replace('.', '/')
@classmethod
def upsert(cls, id, **kwargs):
isnew = False
r = cls.query.get(_id=id)
if r is not None:
return r, isnew
try:
r = cls(_id=id, **kwargs)
session(r).flush(r)
isnew = True
except pymongo.errors.DuplicateKeyError: # pragma no cover
session(r).expunge(r)
r = cls.query.get(_id=id)
return r, isnew
class Commit(RepoObject, ActivityObject):
type_s = 'Commit'
# Ephemeral attrs
repo = None
def __init__(self, **kw):
for k, v in kw.iteritems():
setattr(self, k, v)
@property
def activity_name(self):
return self.shorthand_id()
@property
def activity_extras(self):
d = ActivityObject.activity_extras.fget(self)
d.update(summary=self.summary)
if self.repo:
d.update(app_config_id=self.repo.app.config._id)
return d
def has_activity_access(self, perm, user, activity):
"""
Check access against the original app.
Commits have no ACLs and are therefore always viewable by any user, if
they have access to the tool.
"""
app_config_id = activity.obj.activity_extras.get('app_config_id')
if app_config_id:
app_config = AppConfig.query.get(_id=app_config_id)
return has_access(app_config, perm, user)
return True
def set_context(self, repo):
self.repo = repo
@LazyProperty
def author_url(self):
u = User.by_email_address(self.authored.email)
if u:
return u.url()
@LazyProperty
def committer_url(self):
u = User.by_email_address(self.committed.email)
if u:
return u.url()
@LazyProperty
def tree(self):
return self.get_tree(create=True)
def get_tree(self, create=True):
if self.tree_id is None and create:
self.tree_id = self.repo.compute_tree_new(self)
if self.tree_id is None:
return None
cache = getattr(c, 'model_cache', '') or ModelCache()
t = cache.get(Tree, dict(_id=self.tree_id))
if t is None and create:
self.tree_id = self.repo.compute_tree_new(self)
t = Tree.query.get(_id=self.tree_id)
cache.set(Tree, dict(_id=self.tree_id), t)
if t is not None:
t.set_context(self)
return t
@LazyProperty
def summary(self):
message = h.really_unicode(self.message)
first_line = message.split('\n')[0]
return h.text.truncate(first_line, 50)
def shorthand_id(self):
if self.repo is None:
self.repo = self.guess_repo()
if self.repo is None:
return repr(self)
return self.repo.shorthand_for_commit(self._id)
@LazyProperty
def symbolic_ids(self):
return self.repo.symbolics_for_commit(self)
def get_parent(self, index=0):
'''Get the parent of this commit.
If there is no parent commit, or if an invalid index is given,
returns None.
'''
try:
cache = getattr(c, 'model_cache', '') or ModelCache()
ci = cache.get(Commit, dict(_id=self.parent_ids[index]))
if not ci:
return None
ci.set_context(self.repo)
return ci
except IndexError:
return None
def climb_commit_tree(self, predicate=None):
'''
Returns a generator that walks up the commit tree along
the first-parent ancestory, starting with this commit,
optionally filtering by a predicate.'''
ancestor = self
while ancestor:
if predicate is None or predicate(ancestor):
yield ancestor
ancestor = ancestor.get_parent()
def url(self):
if self.repo is None:
self.repo = self.guess_repo()
if self.repo is None:
return '#'
return self.repo.url_for_commit(self)
def guess_repo(self):
import traceback
log.error('guess_repo: should not be called: %s' %
''.join(traceback.format_stack()))
for ac in c.project.app_configs:
try:
app = c.project.app_instance(ac)
if app.repo._id in self.repo_ids:
return app.repo
except AttributeError:
pass
return None
def link_text(self):
'''The link text that will be used when a shortlink to this artifact
is expanded into an <a></a> tag.
By default this method returns type_s + shorthand_id(). Subclasses should
override this method to provide more descriptive link text.
'''
return self.shorthand_id()
def context(self):
result = dict(prev=None, next=None)
if self.parent_ids:
result['prev'] = self.query.find(
dict(_id={'$in': self.parent_ids})).all()
for ci in result['prev']:
ci.set_context(self.repo)
if self.child_ids:
result['next'] = self.query.find(
dict(_id={'$in': self.child_ids})).all()
for ci in result['next']:
ci.set_context(self.repo)
return result
@LazyProperty
def diffs(self):
return self.paged_diffs()
def paged_diffs(self, start=0, end=None):
di = DiffInfoDoc.m.get(_id=self._id)
if di is None:
return Object(added=[], removed=[], changed=[], copied=[], total=0)
added = []
removed = []
changed = []
copied = []
for change in di.differences[start:end]:
if change.rhs_id is None:
removed.append(change.name)
elif change.lhs_id is None:
added.append(change.name)
else:
changed.append(change.name)
copied = self._diffs_copied(added, removed)
return Object(
added=added, removed=removed,
changed=changed, copied=copied,
total=len(di.differences))
def _diffs_copied(self, added, removed):
'''Return list with file renames diffs.
Will change `added` and `removed` lists also.
'''
def _blobs_similarity(removed_blob, added):
best = dict(ratio=0, name='', blob=None)
for added_name in added:
added_blob = self.tree.get_obj_by_path(added_name)
if not isinstance(added_blob, Blob):
continue
diff = SequenceMatcher(None, removed_blob.text,
added_blob.text)
ratio = diff.quick_ratio()
if ratio > best['ratio']:
best['ratio'] = ratio
best['name'] = added_name
best['blob'] = added_blob
if ratio == 1:
                        break  # we won't find a better similarity than 100% :)
if best['ratio'] > DIFF_SIMILARITY_THRESHOLD:
diff = ''
if best['ratio'] < 1:
added_blob = best['blob']
rpath = ('a' + removed_blob.path()).encode('utf-8')
apath = ('b' + added_blob.path()).encode('utf-8')
diff = ''.join(unified_diff(list(removed_blob),
list(added_blob),
rpath, apath))
return dict(new=best['name'],
ratio=best['ratio'], diff=diff)
def _trees_similarity(removed_tree, added):
for added_name in added:
added_tree = self.tree.get_obj_by_path(added_name)
if not isinstance(added_tree, Tree):
continue
if removed_tree._id == added_tree._id:
return dict(new=added_name,
ratio=1, diff='')
if not removed:
return []
copied = []
prev_commit = self.get_parent()
for removed_name in removed[:]:
removed_blob = prev_commit.tree.get_obj_by_path(removed_name)
rename_info = None
if isinstance(removed_blob, Blob):
rename_info = _blobs_similarity(removed_blob, added)
elif isinstance(removed_blob, Tree):
rename_info = _trees_similarity(removed_blob, added)
if rename_info is not None:
rename_info['old'] = removed_name
copied.append(rename_info)
removed.remove(rename_info['old'])
added.remove(rename_info['new'])
return copied
def get_path(self, path, create=True):
path = path.lstrip('/')
parts = path.split('/')
cur = self.get_tree(create)
if cur is not None:
for part in parts:
if part != '':
cur = cur[part]
return cur
def has_path(self, path):
try:
self.get_path(path)
return True
except KeyError:
return False
@LazyProperty
def changed_paths(self):
'''
Returns a list of paths changed in this commit.
Leading and trailing slashes are removed, and
the list is complete, meaning that if a sub-path
is changed, all of the parent paths are included
(including '' to represent the root path).
Example:
If the file /foo/bar is changed in the commit,
this would return ['', 'foo', 'foo/bar']
'''
changes = self.repo.get_changes(self._id)
changed_paths = set()
for c in changes:
node = c.strip('/')
changed_paths.add(node)
node_path = os.path.dirname(node)
while node_path:
changed_paths.add(node_path)
node_path = os.path.dirname(node_path)
changed_paths.add('') # include '/' if there are any changes
return changed_paths
@LazyProperty
def added_paths(self):
'''
Returns a list of paths added in this commit.
Leading and trailing slashes are removed, and
the list is complete, meaning that if a directory
with subdirectories is added, all of the child
paths are included (this relies on the DiffInfoDoc
being complete).
Example:
If the directory /foo/bar/ is added in the commit
which contains a subdirectory /foo/bar/baz/ with
the file /foo/bar/baz/qux.txt, this would return:
['foo/bar', 'foo/bar/baz', 'foo/bar/baz/qux.txt']
'''
diff_info = DiffInfoDoc.m.get(_id=self._id)
diffs = set()
if diff_info:
for d in diff_info.differences:
if d.lhs_id is None:
diffs.add(d.name.strip('/'))
return diffs
@LazyProperty
def info(self):
return dict(
id=self._id,
author=self.authored.name,
author_email=self.authored.email,
date=self.authored.date,
author_url=self.author_url,
shortlink=self.shorthand_id(),
summary=self.summary
)
class Tree(RepoObject):
# Ephemeral attrs
repo = None
commit = None
parent = None
name = None
def compute_hash(self):
'''Compute a hash based on the contents of the tree. Note that this
hash does not necessarily correspond to any actual DVCS hash.
'''
lines = (
['tree' + x.name + x.id for x in self.tree_ids]
+ ['blob' + x.name + x.id for x in self.blob_ids]
+ [x.type + x.name + x.id for x in self.other_ids])
sha_obj = sha1()
for line in sorted(lines):
sha_obj.update(line)
return sha_obj.hexdigest()
def __getitem__(self, name):
cache = getattr(c, 'model_cache', '') or ModelCache()
obj = self.by_name[name]
if obj['type'] == 'blob':
return Blob(self, name, obj['id'])
if obj['type'] == 'submodule':
log.info('Skipping submodule "%s"' % name)
raise KeyError, name
obj = cache.get(Tree, dict(_id=obj['id']))
if obj is None:
oid = self.repo.compute_tree_new(
self.commit, self.path() + name + '/')
obj = cache.get(Tree, dict(_id=oid))
if obj is None:
raise KeyError, name
obj.set_context(self, name)
return obj
def get_obj_by_path(self, path):
if hasattr(path, 'get'):
path = path['new']
if path.startswith('/'):
path = path[1:]
path = path.split('/')
obj = self
for p in path:
try:
obj = obj[p]
except KeyError:
return None
return obj
def get_blob_by_path(self, path):
obj = self.get_obj_by_path(path)
return obj if isinstance(obj, Blob) else None
def set_context(self, commit_or_tree, name=None):
assert commit_or_tree is not self
self.repo = commit_or_tree.repo
if name:
self.commit = commit_or_tree.commit
self.parent = commit_or_tree
self.name = name
else:
self.commit = commit_or_tree
def readme(self):
'returns (filename, unicode text) if a readme file is found'
for x in self.blob_ids:
if README_RE.match(x.name):
name = x.name
blob = self[name]
return (x.name, h.really_unicode(blob.text))
return None, None
def ls(self):
'''
List the entries in this tree, with historical commit info for
each node.
'''
last_commit = LastCommit.get(self)
# ensure that the LCD is saved, even if
# there is an error later in the request
if last_commit:
session(last_commit).flush(last_commit)
return self._lcd_map(last_commit)
else:
return []
def _lcd_map(self, lcd):
if lcd is None:
return []
commit_ids = [e.commit_id for e in lcd.entries]
commits = list(Commit.query.find(dict(_id={'$in': commit_ids})))
for commit in commits:
commit.set_context(self.repo)
commit_infos = {c._id: c.info for c in commits}
by_name = lambda n: n.name
tree_names = sorted([n.name for n in self.tree_ids])
blob_names = sorted(
[n.name for n in chain(self.blob_ids, self.other_ids)])
results = []
for type, names in (('DIR', tree_names), ('BLOB', blob_names)):
for name in names:
commit_info = commit_infos.get(lcd.by_name.get(name))
if not commit_info:
commit_info = defaultdict(str)
elif 'id' in commit_info:
commit_info['href'] = self.repo.url_for_commit(
commit_info['id'])
results.append(dict(
kind=type,
name=name,
href=name,
last_commit=dict(
author=commit_info['author'],
author_email=commit_info['author_email'],
author_url=commit_info['author_url'],
date=commit_info.get('date'),
href=commit_info.get('href', ''),
shortlink=commit_info['shortlink'],
summary=commit_info['summary'],
),
))
return results
def path(self):
if self.parent:
assert self.parent is not self
return self.parent.path() + self.name + '/'
else:
return '/'
def url(self):
return self.commit.url() + 'tree' + self.path()
@LazyProperty
def by_name(self):
d = Object((x.name, x) for x in self.other_ids)
d.update(
(x.name, Object(x, type='tree'))
for x in self.tree_ids)
d.update(
(x.name, Object(x, type='blob'))
for x in self.blob_ids)
return d
def is_blob(self, name):
return self.by_name[name]['type'] == 'blob'
def get_blob(self, name):
x = self.by_name[name]
return Blob(self, name, x.id)
class Blob(object):
'''Lightweight object representing a file in the repo'''
def __init__(self, tree, name, _id):
self._id = _id
self.tree = tree
self.name = name
self.repo = tree.repo
self.commit = tree.commit
fn, ext = os.path.splitext(self.name)
self.extension = ext or fn
def path(self):
return self.tree.path() + h.really_unicode(self.name)
def url(self):
return self.tree.url() + h.really_unicode(self.name)
@LazyProperty
def _content_type_encoding(self):
return self.repo.guess_type(self.name)
@LazyProperty
def content_type(self):
return self._content_type_encoding[0]
@LazyProperty
def content_encoding(self):
return self._content_type_encoding[1]
@property
def has_pypeline_view(self):
if README_RE.match(self.name) or self.extension in PYPELINE_EXTENSIONS:
return True
return False
@property
def has_html_view(self):
if (self.content_type.startswith('text/') or
self.extension in VIEWABLE_EXTENSIONS or
self.extension in PYPELINE_EXTENSIONS or
self.extension in self.repo._additional_viewable_extensions or
utils.is_text_file(self.text)):
return True
return False
@property
def has_image_view(self):
return self.content_type.startswith('image/')
def open(self):
return self.repo.open_blob(self)
def __iter__(self):
return iter(self.open())
@LazyProperty
def size(self):
return self.repo.blob_size(self)
@LazyProperty
def text(self):
return self.open().read()
@classmethod
def diff(cls, v0, v1):
        differ = SequenceMatcher(None, v0, v1)
return differ.get_opcodes()
class LastCommit(RepoObject):
def __repr__(self):
return '<LastCommit /%s %s>' % (self.path, self.commit_id)
@classmethod
def _last_commit_id(cls, commit, path):
try:
rev = commit.repo.log(commit._id, path, id_only=True).next()
return commit.repo.rev_to_commit_id(rev)
except StopIteration:
log.error('Tree node not recognized by SCM: %s @ %s',
path, commit._id)
return commit._id
@classmethod
def _prev_commit_id(cls, commit, path):
if not commit.parent_ids or path in commit.added_paths:
return None # new paths by definition have no previous LCD
lcid_cache = getattr(c, 'lcid_cache', '')
if lcid_cache != '' and path in lcid_cache:
return lcid_cache[path]
try:
log_iter = commit.repo.log(commit._id, path, id_only=True)
log_iter.next()
rev = log_iter.next()
return commit.repo.rev_to_commit_id(rev)
except StopIteration:
return None
@classmethod
def get(cls, tree):
'''Find or build the LastCommitDoc for the given tree.'''
cache = getattr(c, 'model_cache', '') or ModelCache()
path = tree.path().strip('/')
last_commit_id = cls._last_commit_id(tree.commit, path)
lcd = cache.get(cls, {'path': path, 'commit_id': last_commit_id})
if lcd is None:
commit = cache.get(Commit, {'_id': last_commit_id})
commit.set_context(tree.repo)
lcd = cls._build(commit.get_path(path))
return lcd
@classmethod
def _build(cls, tree):
'''
Build the LCD record, presuming that this tree is where it was most
recently changed.
'''
model_cache = getattr(c, 'model_cache', '') or ModelCache()
path = tree.path().strip('/')
entries = []
prev_lcd = None
prev_lcd_cid = cls._prev_commit_id(tree.commit, path)
if prev_lcd_cid:
prev_lcd = model_cache.get(
cls, {'path': path, 'commit_id': prev_lcd_cid})
entries = {}
nodes = set(
[node.name for node in chain(tree.tree_ids, tree.blob_ids, tree.other_ids)])
changed = set(
[node for node in nodes if os.path.join(path, node) in tree.commit.changed_paths])
unchanged = [os.path.join(path, node) for node in nodes - changed]
if prev_lcd:
# get unchanged entries from previously computed LCD
entries = prev_lcd.by_name
elif unchanged:
# no previously computed LCD, so get unchanged entries from SCM
# (but only ask for the ones that we know we need)
entries = tree.commit.repo.last_commit_ids(tree.commit, unchanged)
if entries is None:
# something strange went wrong; still show the list of files
# and possibly try again later
entries = {}
# paths are fully-qualified; shorten them back to just node names
entries = {
os.path.basename(path): commit_id for path, commit_id in entries.iteritems()}
# update with the nodes changed in this tree's commit
entries.update({node: tree.commit._id for node in changed})
# convert to a list of dicts, since mongo doesn't handle arbitrary keys
# well (i.e., . and $ not allowed)
entries = [{'name': name, 'commit_id': value}
for name, value in entries.iteritems()]
lcd = cls(
commit_id=tree.commit._id,
path=path,
entries=entries,
)
model_cache.set(cls, {'path': path, 'commit_id': tree.commit._id}, lcd)
return lcd
@LazyProperty
def by_name(self):
return {n.name: n.commit_id for n in self.entries}
mapper(Commit, CommitDoc, repository_orm_session)
mapper(Tree, TreeDoc, repository_orm_session)
mapper(LastCommit, LastCommitDoc, repository_orm_session)
class ModelCache(object):
'''
Cache model instances based on query params passed to get.
'''
def __init__(self, max_instances=None, max_queries=None):
'''
By default, each model type can have 2000 instances and
8000 queries. You can override these for specific model
types by passing in a dict() for either max_instances or
max_queries keyed by the class(es) with the max values.
Classes not in the dict() will use the default 2000/8000
default.
If you pass in a number instead of a dict, that value will
be used as the max for all classes.
'''
max_instances_default = 2000
max_queries_default = 8000
if isinstance(max_instances, int):
max_instances_default = max_instances
if isinstance(max_queries, int):
max_queries_default = max_queries
self._max_instances = defaultdict(lambda: max_instances_default)
self._max_queries = defaultdict(lambda: max_queries_default)
if hasattr(max_instances, 'items'):
self._max_instances.update(max_instances)
if hasattr(max_queries, 'items'):
self._max_queries.update(max_queries)
# keyed by query, holds _id
self._query_cache = defaultdict(OrderedDict)
self._instance_cache = defaultdict(OrderedDict) # keyed by _id
self._synthetic_ids = defaultdict(set)
self._synthetic_id_queries = defaultdict(set)
def _normalize_query(self, query):
_query = query
if not isinstance(_query, tuple):
_query = tuple(sorted(_query.items(), key=lambda k: k[0]))
return _query
def _model_query(self, cls):
if hasattr(cls, 'query'):
return cls.query
elif hasattr(cls, 'm'):
return cls.m
else:
raise AttributeError(
'%s has neither "query" nor "m" attribute' % cls)
def get(self, cls, query):
_query = self._normalize_query(query)
self._touch(cls, _query)
if _query not in self._query_cache[cls]:
val = self._model_query(cls).get(**query)
self.set(cls, _query, val)
return val
_id = self._query_cache[cls][_query]
if _id is None:
return None
if _id not in self._instance_cache[cls]:
val = self._model_query(cls).get(**query)
self.set(cls, _query, val)
return val
return self._instance_cache[cls][_id]
def set(self, cls, query, val):
_query = self._normalize_query(query)
if val is not None:
_id = getattr(val, '_model_cache_id',
getattr(val, '_id',
self._query_cache[cls].get(_query,
None)))
if _id is None:
_id = val._model_cache_id = bson.ObjectId()
self._synthetic_ids[cls].add(_id)
            if _id in self._synthetic_ids[cls]:
self._synthetic_id_queries[cls].add(_query)
self._query_cache[cls][_query] = _id
self._instance_cache[cls][_id] = val
else:
self._query_cache[cls][_query] = None
self._touch(cls, _query)
self._check_sizes(cls)
def _touch(self, cls, query):
'''
Keep track of insertion order, prevent duplicates,
and expire from the cache in a FIFO manner.
'''
_query = self._normalize_query(query)
if _query not in self._query_cache[cls]:
return
_id = self._query_cache[cls].pop(_query)
self._query_cache[cls][_query] = _id
if _id not in self._instance_cache[cls]:
return
val = self._instance_cache[cls].pop(_id)
self._instance_cache[cls][_id] = val
def _check_sizes(self, cls):
if self.num_queries(cls) > self._max_queries[cls]:
_id = self._remove_least_recently_used(self._query_cache[cls])
if _id in self._instance_cache[cls]:
instance = self._instance_cache[cls][_id]
self._try_flush(instance, expunge=False)
if self.num_instances(cls) > self._max_instances[cls]:
instance = self._remove_least_recently_used(
self._instance_cache[cls])
self._try_flush(instance, expunge=True)
def _try_flush(self, instance, expunge=False):
try:
inst_session = session(instance)
except AttributeError:
inst_session = None
if inst_session:
inst_session.flush(instance)
if expunge:
inst_session.expunge(instance)
def _remove_least_recently_used(self, cache):
# last-used (most-recently-used) is last in cache, so take first
key, val = cache.popitem(last=False)
return val
def expire_new_instances(self, cls):
'''
Expire any instances that were "new" or had no _id value.
If a lot of new instances of a class are being created, it's possible
for a query to pull a copy from mongo when a copy keyed by the synthetic
ID is still in the cache, potentially causing de-sync between the copies
leading to one with missing data overwriting the other. Clear new
instances out of the cache relatively frequently (depending on the query
and instance cache sizes) to avoid this.
'''
for _query in self._synthetic_id_queries[cls]:
self._query_cache[cls].pop(_query)
self._synthetic_id_queries[cls] = set()
for _id in self._synthetic_ids[cls]:
instance = self._instance_cache[cls].pop(_id)
self._try_flush(instance, expunge=True)
self._synthetic_ids[cls] = set()
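    # Usage note (illustrative, not from the original code): a task that
    # creates many unsaved instances of a class could periodically call,
    # e.g., cache.expire_new_instances(Commit) so a later query cannot pull
    # a competing copy from mongo while a synthetic-ID copy is still cached.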
def num_queries(self, cls=None):
if cls is None:
return sum([len(c) for c in self._query_cache.values()])
else:
return len(self._query_cache[cls])
def num_instances(self, cls=None):
if cls is None:
return sum([len(c) for c in self._instance_cache.values()])
else:
return len(self._instance_cache[cls])
def instance_ids(self, cls):
return self._instance_cache[cls].keys()
def batch_load(self, cls, query, attrs=None):
'''
Load multiple results given a query.
Optionally takes a list of attribute names to use
as the cache key. If not given, uses the keys of
the given query.
'''
if attrs is None:
attrs = query.keys()
for result in self._model_query(cls).find(query):
keys = {a: getattr(result, a) for a in attrs}
self.set(cls, keys, result)
|
Neitsch/ASE4156
|
refs/heads/master
|
trading/migrations/0005_auto_20171015_1523.py
|
2
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-15 15:23
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trading', '0004_auto_20170920_0123'),
]
operations = [
migrations.AlterField(
model_name='trade',
name='quantity',
field=models.FloatField(validators=[django.core.validators.MinValueValidator(0, message='Daily stock quote can not be negative')]),
),
]
|
ilexius/odoo
|
refs/heads/master
|
addons/account_budget/__init__.py
|
47
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import account_budget
import report
import wizard
|
alxyang/mysql-5.6
|
refs/heads/fb-mysql-5.6.35
|
plugin/innodb_memcached/daemon_memcached/testsuite/breakdancer/breakdancer.py
|
201
|
#!/usr/bin/env python
import itertools
class Condition(object):
"""Something asserted to be true during the test.
A given condition may be used as a precondition or a
postcondition."""
def __call__(self, k, state):
"""Called with a key and a state. True if the condition is met."""
return True
class Effect(object):
"""The affect an action will perform."""
def __call__(self, k, state):
"""Called with a key and a state.
The effect modifies the state as appropriate."""
class Action(object):
"""Actions are the operations that will be permuted into test cases.
Each action has a collection of preconditions and postconditions
that will be evaluated for checking input and output state for the
action.
Action.preconditions is the collection of conditions that must all
be true upon input to the action. If any condition is not true,
the effect is not executed and the action state is considered
"errored."
Action.effect is the callable that is expected to alter the state
to satisfy the postconditions of the action.
Action.postconditions is the collection of conditions that must
all be true after the effect of the action completes.
"""
preconditions = []
effect = None
postconditions = []
enabled = True
@property
def name(self):
"""The name of this action (default derived from class name)"""
n = self.__class__.__name__
return n[0].lower() + n[1:]
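# Illustrative example (not part of the original suite): a minimal action and
# condition. The names SetFlag/FlagIsSet are invented; note that runTest()
# below calls conditions and effects with the state alone.
#
#   class FlagIsSet(Condition):
#       def __call__(self, state):
#           return state.get('flag', False)
#
#   class SetFlag(Action):
#       postconditions = [FlagIsSet()]
#       def effect(self, state):
#           state['flag'] = True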
class Driver(object):
"""The driver "performs" the test."""
def newState(self):
"""Initialize and return the state for a test."""
return {}
def preSuite(self, seq):
"""Invoked with the sequence of tests before any are run."""
def startSequence(self, seq):
"""Invoked with the sequence of actions in a single test
before it is performed."""
def startAction(self, action):
"""Invoked when before starting an action."""
def endAction(self, action, state, errored):
"""Invoked after the action is performed."""
def endSequence(self, seq, state):
"""Invoked at the end of a sequence of tests."""
def postSuite(self, seq):
"""Invoked with the sequence of tests after all of them are run."""
def runTest(actions, driver, duplicates=3, length=4):
"""Run a test with the given collection of actions and driver.
The optional argument `duplicates' specifies how many times a
given action may be duplicated in a sequence.
The optional argument `length` specifies how long each test
sequence is.
"""
instances = itertools.chain(*itertools.repeat([a() for a in actions],
duplicates))
tests = set(itertools.permutations(instances, length))
driver.preSuite(tests)
for seq in sorted(tests):
state = driver.newState()
driver.startSequence(seq)
for a in seq:
driver.startAction(a)
haserror = not all(p(state) for p in a.preconditions)
if not haserror:
try:
a.effect(state)
haserror = not all(p(state) for p in a.postconditions)
except:
haserror = True
driver.endAction(a, state, haserror)
driver.endSequence(seq, state)
driver.postSuite(tests)
def findActions(classes):
"""Helper function to extract action subclasses from a collection
of classes."""
actions = []
for __t in (t for t in classes if isinstance(type, type(t))):
if Action in __t.__mro__ and __t != Action and __t.enabled:
actions.append(__t)
return actions
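# Hypothetical usage sketch (names and numbers invented for illustration):
# gather the Action subclasses defined in a test module and run the
# permuted suite with a plain Driver.
#
#   actions = findActions(globals().values())
#   runTest(actions, Driver(), duplicates=2, length=3)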
|
2014c2g5/2014c2
|
refs/heads/master
|
wsgi/static/Brython2.1.0-20140419-113919/Lib/unittest/__init__.py
|
900
|
"""
Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
Smalltalk testing framework.
This module contains the core framework classes that form the basis of
specific test cases and suites (TestCase, TestSuite etc.), and also a
text-based utility class for running the tests and reporting the results
(TextTestRunner).
Simple usage:
import unittest
class IntegerArithmeticTestCase(unittest.TestCase):
def testAdd(self): ## test method names begin 'test*'
self.assertEqual((1 + 2), 3)
self.assertEqual(0 + 1, 1)
def testMultiply(self):
self.assertEqual((0 * 10), 0)
self.assertEqual((5 * 8), 40)
if __name__ == '__main__':
unittest.main()
Further information is available in the bundled documentation, and from
http://docs.python.org/library/unittest.html
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
__all__ = ['TestResult', 'TestCase', 'TestSuite',
'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
'expectedFailure', 'TextTestResult', 'installHandler',
'registerResult', 'removeResult', 'removeHandler']
# Expose obsolete functions for backwards compatibility
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
__unittest = True
from .result import TestResult
from .case import (TestCase, FunctionTestCase, SkipTest, skip, skipIf,
skipUnless, expectedFailure)
from .suite import BaseTestSuite, TestSuite
from .loader import (TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,
findTestCases)
from .main import TestProgram, main
from .runner import TextTestRunner, TextTestResult
from .signals import installHandler, registerResult, removeResult, removeHandler
# deprecated
_TextTestResult = TextTestResult
|
eunchong/build
|
refs/heads/master
|
third_party/twisted_10_2/twisted/internet/_threadedselect.py
|
61
|
# -*- test-case-name: twisted.test.test_internet -*-
# $Id: default.py,v 1.90 2004/01/06 22:35:22 warner Exp $
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import generators
"""
Threaded select reactor
Maintainer: Bob Ippolito
The threadedselectreactor is a specialized reactor for integrating with
arbitrary foreign event loops, such as those you find in GUI toolkits.
There are three things you'll need to do to use this reactor.
Install the reactor at the beginning of your program, before importing
the rest of Twisted::
| from twisted.internet import _threadedselect
| _threadedselect.install()
Interleave this reactor with your foreign event loop, at some point after
your event loop is initialized::
| from twisted.internet import reactor
| reactor.interleave(foreignEventLoopWakerFunction)
| self.addSystemEventTrigger('after', 'shutdown', foreignEventLoopStop)
Instead of shutting down the foreign event loop directly, shut down the
reactor::
| from twisted.internet import reactor
| reactor.stop()
In order for Twisted to do its work in the main thread (the thread that
interleave is called from), a waker function is necessary. The waker function
will be called from a "background" thread with one argument: func.
The waker function's purpose is to call func() from the main thread.
Many GUI toolkits ship with appropriate waker functions.
Some examples of this are wxPython's wx.callAfter (may be wxCallAfter in
older versions of wxPython) or PyObjC's PyObjCTools.AppHelper.callAfter.
These would be used in place of "foreignEventLoopWakerFunction" in the above
example.
The other integration point at which the foreign event loop and this reactor
must integrate is shutdown. In order to ensure clean shutdown of Twisted,
you must allow for Twisted to come to a complete stop before quitting the
application. Typically, you will do this by setting up an after shutdown
trigger to stop your foreign event loop, and call reactor.stop() where you
would normally have initiated the shutdown procedure for the foreign event
loop. Shutdown functions that could be used in place of
"foreignEventloopStop" would be the ExitMainLoop method of the wxApp instance
with wxPython, or the PyObjCTools.AppHelper.stopEventLoop function.
"""
from threading import Thread
from Queue import Queue, Empty
from time import sleep
import sys
from zope.interface import implements
from twisted.internet.interfaces import IReactorFDSet
from twisted.internet import error
from twisted.internet import posixbase
from twisted.python import log, failure, threadable
from twisted.persisted import styles
from twisted.python.runtime import platformType
import select
from errno import EINTR, EBADF
from twisted.internet.selectreactor import _select
# Exceptions that doSelect might return frequently
_NO_FILENO = error.ConnectionFdescWentAway('Handler has no fileno method')
_NO_FILEDESC = error.ConnectionFdescWentAway('Filedescriptor went away')
def dictRemove(dct, value):
try:
del dct[value]
except KeyError:
pass
def raiseException(e):
raise e
class ThreadedSelectReactor(posixbase.PosixReactorBase):
"""A threaded select() based reactor - runs on all POSIX platforms and on
Win32.
"""
implements(IReactorFDSet)
def __init__(self):
threadable.init(1)
self.reads = {}
self.writes = {}
self.toThreadQueue = Queue()
self.toMainThread = Queue()
self.workerThread = None
self.mainWaker = None
posixbase.PosixReactorBase.__init__(self)
self.addSystemEventTrigger('after', 'shutdown', self._mainLoopShutdown)
def wakeUp(self):
# we want to wake up from any thread
self.waker.wakeUp()
def callLater(self, *args, **kw):
tple = posixbase.PosixReactorBase.callLater(self, *args, **kw)
self.wakeUp()
return tple
def _sendToMain(self, msg, *args):
#print >>sys.stderr, 'sendToMain', msg, args
self.toMainThread.put((msg, args))
if self.mainWaker is not None:
self.mainWaker()
def _sendToThread(self, fn, *args):
#print >>sys.stderr, 'sendToThread', fn, args
self.toThreadQueue.put((fn, args))
def _preenDescriptorsInThread(self):
log.msg("Malformed file descriptor found. Preening lists.")
readers = self.reads.keys()
writers = self.writes.keys()
self.reads.clear()
self.writes.clear()
for selDict, selList in ((self.reads, readers), (self.writes, writers)):
for selectable in selList:
try:
select.select([selectable], [selectable], [selectable], 0)
except:
log.msg("bad descriptor %s" % selectable)
else:
selDict[selectable] = 1
def _workerInThread(self):
try:
while 1:
fn, args = self.toThreadQueue.get()
#print >>sys.stderr, "worker got", fn, args
fn(*args)
except SystemExit:
pass # exception indicates this thread should exit
except:
f = failure.Failure()
self._sendToMain('Failure', f)
#print >>sys.stderr, "worker finished"
def _doSelectInThread(self, timeout):
"""Run one iteration of the I/O monitor loop.
This will run all selectables who had input or output readiness
waiting for them.
"""
reads = self.reads
writes = self.writes
while 1:
try:
r, w, ignored = _select(reads.keys(),
writes.keys(),
[], timeout)
break
except ValueError, ve:
# Possibly a file descriptor has gone negative?
log.err()
self._preenDescriptorsInThread()
except TypeError, te:
# Something *totally* invalid (object w/o fileno, non-integral
# result) was passed
log.err()
self._preenDescriptorsInThread()
except (select.error, IOError), se:
# select(2) encountered an error
if se.args[0] in (0, 2):
# windows does this if it got an empty list
if (not reads) and (not writes):
return
else:
raise
elif se.args[0] == EINTR:
return
elif se.args[0] == EBADF:
self._preenDescriptorsInThread()
else:
# OK, I really don't know what's going on. Blow up.
raise
self._sendToMain('Notify', r, w)
def _process_Notify(self, r, w):
#print >>sys.stderr, "_process_Notify"
reads = self.reads
writes = self.writes
_drdw = self._doReadOrWrite
_logrun = log.callWithLogger
for selectables, method, dct in ((r, "doRead", reads), (w, "doWrite", writes)):
for selectable in selectables:
# if this was disconnected in another thread, kill it.
if selectable not in dct:
continue
# This for pausing input when we're not ready for more.
_logrun(selectable, _drdw, selectable, method, dct)
#print >>sys.stderr, "done _process_Notify"
def _process_Failure(self, f):
f.raiseException()
_doIterationInThread = _doSelectInThread
def ensureWorkerThread(self):
if self.workerThread is None or not self.workerThread.isAlive():
self.workerThread = Thread(target=self._workerInThread)
self.workerThread.start()
def doThreadIteration(self, timeout):
self._sendToThread(self._doIterationInThread, timeout)
self.ensureWorkerThread()
#print >>sys.stderr, 'getting...'
msg, args = self.toMainThread.get()
#print >>sys.stderr, 'got', msg, args
getattr(self, '_process_' + msg)(*args)
doIteration = doThreadIteration
def _interleave(self):
while self.running:
#print >>sys.stderr, "runUntilCurrent"
self.runUntilCurrent()
t2 = self.timeout()
t = self.running and t2
self._sendToThread(self._doIterationInThread, t)
#print >>sys.stderr, "yielding"
yield None
#print >>sys.stderr, "fetching"
msg, args = self.toMainThread.get_nowait()
getattr(self, '_process_' + msg)(*args)
def interleave(self, waker, *args, **kw):
"""
interleave(waker) interleaves this reactor with the
current application by moving the blocking parts of
the reactor (select() in this case) to a separate
thread. This is typically useful for integration with
GUI applications which have their own event loop
already running.
See the module docstring for more information.
"""
self.startRunning(*args, **kw)
loop = self._interleave()
def mainWaker(waker=waker, loop=loop):
#print >>sys.stderr, "mainWaker()"
waker(loop.next)
self.mainWaker = mainWaker
loop.next()
self.ensureWorkerThread()
def _mainLoopShutdown(self):
self.mainWaker = None
if self.workerThread is not None:
#print >>sys.stderr, 'getting...'
self._sendToThread(raiseException, SystemExit)
self.wakeUp()
try:
while 1:
msg, args = self.toMainThread.get_nowait()
#print >>sys.stderr, "ignored:", (msg, args)
except Empty:
pass
self.workerThread.join()
self.workerThread = None
try:
while 1:
fn, args = self.toThreadQueue.get_nowait()
if fn is self._doIterationInThread:
log.msg('Iteration is still in the thread queue!')
elif fn is raiseException and args[0] is SystemExit:
pass
else:
fn(*args)
except Empty:
pass
def _doReadOrWrite(self, selectable, method, dict):
try:
why = getattr(selectable, method)()
handfn = getattr(selectable, 'fileno', None)
if not handfn:
why = _NO_FILENO
elif handfn() == -1:
why = _NO_FILEDESC
except:
why = sys.exc_info()[1]
log.err()
if why:
self._disconnectSelectable(selectable, why, method == "doRead")
def addReader(self, reader):
"""Add a FileDescriptor for notification of data available to read.
"""
self._sendToThread(self.reads.__setitem__, reader, 1)
self.wakeUp()
def addWriter(self, writer):
"""Add a FileDescriptor for notification of data available to write.
"""
self._sendToThread(self.writes.__setitem__, writer, 1)
self.wakeUp()
def removeReader(self, reader):
"""Remove a Selectable for notification of data available to read.
"""
self._sendToThread(dictRemove, self.reads, reader)
def removeWriter(self, writer):
"""Remove a Selectable for notification of data available to write.
"""
self._sendToThread(dictRemove, self.writes, writer)
def removeAll(self):
return self._removeAll(self.reads, self.writes)
def getReaders(self):
return self.reads.keys()
def getWriters(self):
return self.writes.keys()
def run(self, installSignalHandlers=1):
self.startRunning(installSignalHandlers=installSignalHandlers)
self.mainLoop()
def mainLoop(self):
q = Queue()
self.interleave(q.put)
while self.running:
try:
q.get()()
except StopIteration:
break
def install():
"""Configure the twisted mainloop to be run using the select() reactor.
"""
reactor = ThreadedSelectReactor()
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
__all__ = ['install']
|
kissmetrics/spark
|
refs/heads/master
|
examples/src/main/python/mllib/elementwise_product_example.py
|
106
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark import SparkContext
# $example on$
from pyspark.mllib.feature import ElementwiseProduct
from pyspark.mllib.linalg import Vectors
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="ElementwiseProductExample") # SparkContext
# $example on$
data = sc.textFile("data/mllib/kmeans_data.txt")
parsedData = data.map(lambda x: [float(t) for t in x.split(" ")])
# Create weight vector.
transformingVector = Vectors.dense([0.0, 1.0, 2.0])
transformer = ElementwiseProduct(transformingVector)
# Batch transform
transformedData = transformer.transform(parsedData)
# Single-row transform
transformedData2 = transformer.transform(parsedData.first())
# $example off$
print("transformedData:")
for each in transformedData.collect():
print(each)
print("transformedData2:")
for each in transformedData2:
print(each)
sc.stop()
|
apache/spark
|
refs/heads/master
|
resource-managers/kubernetes/integration-tests/tests/decommissioning_cleanup.py
|
23
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import time
from pyspark.sql import SparkSession
if __name__ == "__main__":
"""
Usage: decommissioning
"""
print("Starting decom test")
spark = SparkSession \
.builder \
.appName("DecomTest") \
.getOrCreate()
sc = spark._sc
acc = sc.accumulator(0)
def addToAcc(x):
acc.add(1)
return x
initialRdd = sc.parallelize(range(100), 5)
accRdd = initialRdd.map(addToAcc)
# Trigger a shuffle so there are shuffle blocks to migrate
rdd = accRdd.map(lambda x: (x, x)).groupByKey()
# Make enough shuffle files to increase the chance of the race condition.
for i in range(1, 2):
shuffleRdd = sc.parallelize(range(1, 10), 5).map(lambda x: (x, x)).groupByKey()
shuffleRdd.collect()
rdd.collect()
print("1st accumulator value is: " + str(acc.value))
print("Waiting to give nodes time to finish migration, decom exec 1.")
print("...")
time.sleep(30)
rdd.count()
rdd.collect()
print("Final accumulator value is: " + str(acc.value))
print("Finished waiting, stopping Spark.")
spark.stop()
print("Done, exiting Python")
sys.exit(0)
|
bitcity/django
|
refs/heads/master
|
tests/cache/liberal_backend.py
|
446
|
from django.core.cache.backends.locmem import LocMemCache
class LiberalKeyValidationMixin(object):
def validate_key(self, key):
pass
class CacheClass(LiberalKeyValidationMixin, LocMemCache):
pass
|
nickthecoder/itchy
|
refs/heads/master
|
resources/The-Mings/scripts/director.py
|
1
|
from common import *
# The size of the pixelations
PIXELATION_SIZE = 6
game = Itchy.getGame()
class Director(AbstractDirector) :
def onStarted( self ) :
self.inputQuit = Input.find("quit")
guiHeight = 100
playRect = Rect(0, 0, game.getWidth(), game.getHeight() - guiHeight)
guiRect = Rect(0, game.getHeight() - guiHeight, game.getWidth(), guiHeight )
def onMessage(self,message) :
if message == Director.SPRITE_SHEETS_LOADED :
self.processSpriteSheet()
if message == Director.POSES_LOADED :
self.processPoses()
def onKeyDown(self,kevent) :
if self.inputQuit.matches(kevent) :
scene = game.sceneName
if scene == "menu" :
game.end()
else :
game.startScene( "menu" )
def scrollTo( self, x, y ) :
game.layout.findView("main").centerOn( x,y )
game.layout.findView("grid").centerOn( x,y )
def processSpriteSheet(self) :
start_time = time.time()
self.pixelator = Pixelator( PIXELATION_SIZE, RGBA(255,255,255,128), RGBA(0,0,0,128) )
self.buttonPixelator = Pixelator( 4, RGBA(255,255,255,128), RGBA(0,0,0,128) )
self.pixelateSprites( "tiny-blocker-", "blocker-", 16 )
self.pixelateSprites( "tiny-bomber-", "bomber-", 9 )
self.pixelateSprites( "tiny-builder-", "builderR-", 16 )
self.pixelateSprites( "tiny-builder-", "builderL-", 16, True )
self.pixelateSprites( "tiny-digger-", "diggerA-", 8, False, -PIXELATION_SIZE/2, 0 )
self.pixelateSprites( "tiny-digger-", "diggerB-", 8, True , PIXELATION_SIZE/2, 0 )
self.pixelateSprites( "tiny-explode-", "explode-", 14 )
self.pixelateSprites( "tiny-faller-", "fallerA-", 4 )
self.pixelateSprites( "tiny-faller-", "fallerB-", 4, True )
self.pixelateSprites( "tiny-floaterA-", "floaterA-", 4 )
self.pixelateSprites( "tiny-floaterB-", "floaterB-", 4 )
self.pixelateSprites( "tiny-floaterB-", "floaterC-", 4, True )
self.pixelateSprites( "tiny-miner-", "minerR-", 24 )
self.pixelateSprites( "tiny-miner-", "minerL-", 24, True )
self.pixelateSprites( "tiny-splat-", "splat-", 16 )
self.pixelateSprites( "tiny-smasher-", "smasherR-", 32 )
self.pixelateSprites( "tiny-smasher-", "smasherL-", 32, True )
self.pixelateSprites( "tiny-walker-", "walkerR-", 8 )
self.pixelateSprites( "tiny-walker-", "walkerL-", 8, True )
self.createButton( "tiny-buttonBlocker", "buttonBlocker" )
self.createButton( "tiny-buttonBuilder", "buttonBuilder" )
self.createButton( "tiny-buttonClimber", "buttonClimber" )
self.createButton( "tiny-buttonDigger" , "buttonDigger" )
self.createButton( "tiny-buttonSmasher", "buttonSmasher" )
self.createButton( "tiny-buttonFloater", "buttonFloater" )
self.pixelateSprite( "tiny-brick", "brick" )
self.pixelateSprite( "tiny-highlight", "highlight" )
self.pixelateSprite( "tiny-lookDown", "lookDown" )
self.pixelateSprite( "tiny-lookLeft", "lookLeft" )
self.pixelateSprite( "tiny-lookRight", "lookRight" )
print "Processed images in", time.time() - start_time, "seconds"
def processPoses(self) :
self.convertToMask( "crater", "craterMask" )
self.convertToMask( "smashL", "smashLMask" )
self.convertToMask( "smashR", "smashRMask" )
self.convertToMask( "dig", "digMask" )
def convertToMask( self, poseName, newName ) :
# Convert the image, so that it is suitable as a mask.
srcPose = game.resources.getPose( poseName )
if srcPose is None :
print "Pose", poseName, "not found."
return
srcSurface = srcPose.surface
newSurface = Surface( srcSurface.width, srcSurface.height, True )
newSurface.fill( RGBA( 255,255,255,255 ) )
srcSurface.blit( newSurface, Surface.BlendMode.RGBA_SUB )
newPose = ImagePose( newSurface, srcPose.offsetX, srcPose.offsetY )
game.resources.addPose( DynamicPoseResource( newName, newPose) )
def createButton( self, sourceName, destinationName ) :
source = game.resources.getPose( sourceName )
if source is None :
print "Pose", sourceName, "not found"
else :
surface = self.buttonPixelator.pixelate( source.surface )
pose = ImagePose( surface )
pose.offsetX = surface.width/2
pose.offsetY = surface.height/2
poseResource = DynamicPoseResource( destinationName, pose )
game.resources.addPose( poseResource )
def pixelateSprites( self, sourcePrefix, destinationPrefix, amount, flip = False, dx = 0, dy = 0 ) :
for i in range(0,amount) :
sourceName = sourcePrefix + str(i+1).zfill(2)
destinationName = destinationPrefix + str(i+1).zfill(2)
self.pixelateSprite( sourceName, destinationName, flip, dx, dy )
def pixelateSprite( self, sourceName, destinationName, flip = False, dx = 0, dy = 0 ) :
pose = game.resources.getPose( sourceName )
if pose is None :
print "Pose", sourceName, "not found"
else :
newPose = self.pixelator.pixelate( pose, flip, dx, dy )
newPoseResource = DynamicPoseResource( destinationName, newPose )
game.resources.addPose( newPoseResource )
    # Boilerplate code - no need to change this
def getClassName(self):
return ClassName( Director, self.__module__ + ".py" )
|
golismero/golismero-fingerprinter
|
refs/heads/master
|
netaddr/strategy/ipv6.py
|
9
|
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2013, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""
IPv6 address logic.
"""
import struct as _struct
OPT_IMPORTS = False
# Check whether we need to use fallback code or not.
try:
import socket as _socket
# These might all generate exceptions on different platforms.
if not _socket.has_ipv6:
raise Exception('IPv6 disabled')
_socket.inet_pton
_socket.AF_INET6
from _socket import inet_pton as _inet_pton, \
inet_ntop as _inet_ntop, \
AF_INET6
OPT_IMPORTS = True
except:
from netaddr.fbsocket import inet_pton as _inet_pton, \
inet_ntop as _inet_ntop, \
AF_INET6
from netaddr.core import AddrFormatError
from netaddr.strategy import BYTES_TO_BITS as _BYTES_TO_BITS, \
valid_words as _valid_words, \
int_to_words as _int_to_words, \
words_to_int as _words_to_int, \
valid_bits as _valid_bits, \
bits_to_int as _bits_to_int, \
int_to_bits as _int_to_bits, \
valid_bin as _valid_bin, \
int_to_bin as _int_to_bin, \
bin_to_int as _bin_to_int
#: The width (in bits) of this address type.
width = 128
#: The individual word size (in bits) of this address type.
word_size = 16
#: The separator character used between each word.
word_sep = ':'
#: The AF_* constant value of this address type.
family = AF_INET6
#: A friendly string name for this address type.
family_name = 'IPv6'
#: The version of this address type.
version = 6
#: The number base to be used when interpreting word values as integers.
word_base = 16
#: The maximum integer value that can be represented by this address type.
max_int = 2 ** width - 1
#: The number of words in this address type.
num_words = width // word_size
#: The maximum integer value for an individual word in this address type.
max_word = 2 ** word_size - 1
#: A dictionary mapping IPv6 CIDR prefixes to the equivalent netmasks.
prefix_to_netmask = dict(
[(i, max_int ^ (2 ** (width - i) - 1)) for i in range(0, width+1)])
#: A dictionary mapping IPv6 netmasks to their equivalent CIDR prefixes.
netmask_to_prefix = dict(
[(max_int ^ (2 ** (width - i) - 1), i) for i in range(0, width+1)])
#: A dictionary mapping IPv6 CIDR prefixes to the equivalent hostmasks.
prefix_to_hostmask = dict(
[(i, (2 ** (width - i) - 1)) for i in range(0, width+1)])
#: A dictionary mapping IPv6 hostmasks to their equivalent CIDR prefixes.
hostmask_to_prefix = dict(
[((2 ** (width - i) - 1), i) for i in range(0, width+1)])
#-----------------------------------------------------------------------------
# Dialect classes.
#-----------------------------------------------------------------------------
class ipv6_compact(object):
"""An IPv6 dialect class - compact form."""
    #: The format string used to convert words into string values.
word_fmt = '%x'
#: Boolean flag indicating if IPv6 compaction algorithm should be used.
compact = True
class ipv6_full(ipv6_compact):
"""An IPv6 dialect class - 'all zeroes' form."""
#: Boolean flag indicating if IPv6 compaction algorithm should be used.
compact = False
class ipv6_verbose(ipv6_compact):
"""An IPv6 dialect class - extra wide 'all zeroes' form."""
    #: The format string used to convert words into string values.
word_fmt = '%.4x'
#: Boolean flag indicating if IPv6 compaction algorithm should be used.
compact = False
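# Illustrative examples (not part of the module): the loopback address
# rendered with each dialect via int_to_str(), defined below.
#
#   >>> int_to_str(1, ipv6_compact)
#   '::1'
#   >>> int_to_str(1, ipv6_full)
#   '0:0:0:0:0:0:0:1'
#   >>> int_to_str(1, ipv6_verbose)
#   '0000:0000:0000:0000:0000:0000:0000:0001'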
#-----------------------------------------------------------------------------
def valid_str(addr, flags=0):
"""
:param addr: An IPv6 address in presentation (string) format.
:param flags: decides which rules are applied to the interpretation of the
addr value. Future use - currently has no effect.
:return: ``True`` if IPv6 address is valid, ``False`` otherwise.
"""
if addr == '':
raise AddrFormatError('Empty strings are not supported!')
try:
_inet_pton(AF_INET6, addr)
except:
return False
return True
#-----------------------------------------------------------------------------
def str_to_int(addr, flags=0):
"""
:param addr: An IPv6 address in string form.
:param flags: decides which rules are applied to the interpretation of the
addr value. Future use - currently has no effect.
:return: The equivalent unsigned integer for a given IPv6 address.
"""
try:
packed_int = _inet_pton(AF_INET6, addr)
return packed_to_int(packed_int)
except Exception:
raise AddrFormatError('%r is not a valid IPv6 address string!' % addr)
#-----------------------------------------------------------------------------
def int_to_str(int_val, dialect=None):
"""
:param int_val: An unsigned integer.
:param dialect: (optional) a Python class defining formatting options.
:return: The IPv6 presentation (string) format address equivalent to the
unsigned integer provided.
"""
if dialect is None:
dialect = ipv6_compact
addr = None
try:
packed_int = int_to_packed(int_val)
if dialect.compact:
# Default return value.
addr = _inet_ntop(AF_INET6, packed_int)
else:
# Custom return value.
words = list(_struct.unpack('>8H', packed_int))
tokens = [dialect.word_fmt % word for word in words]
addr = word_sep.join(tokens)
except Exception:
raise ValueError('%r is not a valid 128-bit unsigned integer!' \
% int_val)
return addr
#-----------------------------------------------------------------------------
def int_to_arpa(int_val):
"""
:param int_val: An unsigned integer.
:return: The reverse DNS lookup for an IPv6 address in network byte
order integer form.
"""
addr = int_to_str(int_val, ipv6_verbose)
tokens = list(addr.replace(':', ''))
tokens.reverse()
# We won't support ip6.int here - see RFC 3152 for details.
tokens = tokens + ['ip6', 'arpa', '']
return '.'.join(tokens)
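# Illustrative example (not part of the module): int_to_arpa(1) yields the
# nibble-reversed name '1.' followed by thirty-one '0.' labels and the
# trailing 'ip6.arpa.' suffix.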
#-----------------------------------------------------------------------------
def int_to_packed(int_val):
"""
:param int_val: the integer to be packed.
:return: a packed string that is equivalent to value represented by an
unsigned integer.
"""
words = int_to_words(int_val, 4, 32)
return _struct.pack('>4I', *words)
#-----------------------------------------------------------------------------
def packed_to_int(packed_int):
"""
:param packed_int: a packed string containing an unsigned integer.
It is assumed that string is packed in network byte order.
:return: An unsigned integer equivalent to value of network address
represented by packed binary string.
"""
words = list(_struct.unpack('>4I', packed_int))
int_val = 0
for i, num in enumerate(reversed(words)):
word = num
word = word << 32 * i
int_val = int_val | word
return int_val
#-----------------------------------------------------------------------------
def valid_words(words):
return _valid_words(words, word_size, num_words)
#-----------------------------------------------------------------------------
def int_to_words(int_val, num_words=None, word_size=None):
if num_words is None:
num_words = globals()['num_words']
if word_size is None:
word_size = globals()['word_size']
return _int_to_words(int_val, word_size, num_words)
#-----------------------------------------------------------------------------
def words_to_int(words):
return _words_to_int(words, word_size, num_words)
#-----------------------------------------------------------------------------
def valid_bits(bits):
return _valid_bits(bits, width, word_sep)
#-----------------------------------------------------------------------------
def bits_to_int(bits):
return _bits_to_int(bits, width, word_sep)
#-----------------------------------------------------------------------------
def int_to_bits(int_val, word_sep=None):
if word_sep is None:
word_sep = globals()['word_sep']
return _int_to_bits(int_val, word_size, num_words, word_sep)
#-----------------------------------------------------------------------------
def valid_bin(bin_val):
return _valid_bin(bin_val, width)
#-----------------------------------------------------------------------------
def int_to_bin(int_val):
return _int_to_bin(int_val, width)
#-----------------------------------------------------------------------------
def bin_to_int(bin_val):
return _bin_to_int(bin_val, width)
|
phalax4/CarnotKE
|
refs/heads/master
|
jyhton/lib-python/2.7/test/test_thread.py
|
46
|
import os
import unittest
import random
from test import test_support
thread = test_support.import_module('thread')
import time
import sys
import weakref
from test import lock_tests
NUMTASKS = 10
NUMTRIPS = 3
_print_mutex = thread.allocate_lock()
def verbose_print(arg):
"""Helper function for printing out debugging output."""
if test_support.verbose:
with _print_mutex:
print arg
class BasicThreadTest(unittest.TestCase):
def setUp(self):
self.done_mutex = thread.allocate_lock()
self.done_mutex.acquire()
self.running_mutex = thread.allocate_lock()
self.random_mutex = thread.allocate_lock()
self.created = 0
self.running = 0
self.next_ident = 0
class ThreadRunningTests(BasicThreadTest):
def newtask(self):
with self.running_mutex:
self.next_ident += 1
verbose_print("creating task %s" % self.next_ident)
thread.start_new_thread(self.task, (self.next_ident,))
self.created += 1
self.running += 1
def task(self, ident):
with self.random_mutex:
delay = random.random() / 10000.0
verbose_print("task %s will run for %sus" % (ident, round(delay*1e6)))
time.sleep(delay)
verbose_print("task %s done" % ident)
with self.running_mutex:
self.running -= 1
if self.created == NUMTASKS and self.running == 0:
self.done_mutex.release()
def test_starting_threads(self):
# Basic test for thread creation.
for i in range(NUMTASKS):
self.newtask()
verbose_print("waiting for tasks to complete...")
self.done_mutex.acquire()
verbose_print("all tasks done")
def test_stack_size(self):
# Various stack size tests.
self.assertEqual(thread.stack_size(), 0, "initial stack size is not 0")
thread.stack_size(0)
self.assertEqual(thread.stack_size(), 0, "stack_size not reset to default")
if os.name not in ("nt", "os2", "posix"):
return
tss_supported = True
try:
thread.stack_size(4096)
except ValueError:
verbose_print("caught expected ValueError setting "
"stack_size(4096)")
except thread.error:
tss_supported = False
verbose_print("platform does not support changing thread stack "
"size")
if tss_supported:
fail_msg = "stack_size(%d) failed - should succeed"
for tss in (262144, 0x100000, 0):
thread.stack_size(tss)
self.assertEqual(thread.stack_size(), tss, fail_msg % tss)
verbose_print("successfully set stack_size(%d)" % tss)
for tss in (262144, 0x100000):
verbose_print("trying stack_size = (%d)" % tss)
self.next_ident = 0
self.created = 0
for i in range(NUMTASKS):
self.newtask()
verbose_print("waiting for all tasks to complete")
self.done_mutex.acquire()
verbose_print("all tasks done")
thread.stack_size(0)
def test__count(self):
# Test the _count() function.
orig = thread._count()
mut = thread.allocate_lock()
mut.acquire()
started = []
def task():
started.append(None)
mut.acquire()
mut.release()
thread.start_new_thread(task, ())
while not started:
time.sleep(0.01)
self.assertEqual(thread._count(), orig + 1)
# Allow the task to finish.
mut.release()
# The only reliable way to be sure that the thread ended from the
# interpreter's point of view is to wait for the function object to be
# destroyed.
done = []
wr = weakref.ref(task, lambda _: done.append(None))
del task
while not done:
time.sleep(0.01)
self.assertEqual(thread._count(), orig)
def test_save_exception_state_on_error(self):
# See issue #14474
def task():
started.release()
raise SyntaxError
def mywrite(self, *args):
try:
raise ValueError
except ValueError:
pass
real_write(self, *args)
c = thread._count()
started = thread.allocate_lock()
with test_support.captured_output("stderr") as stderr:
real_write = stderr.write
stderr.write = mywrite
started.acquire()
thread.start_new_thread(task, ())
started.acquire()
while thread._count() > c:
time.sleep(0.01)
self.assertIn("Traceback", stderr.getvalue())
class Barrier:
def __init__(self, num_threads):
self.num_threads = num_threads
self.waiting = 0
self.checkin_mutex = thread.allocate_lock()
self.checkout_mutex = thread.allocate_lock()
self.checkout_mutex.acquire()
def enter(self):
self.checkin_mutex.acquire()
self.waiting = self.waiting + 1
if self.waiting == self.num_threads:
self.waiting = self.num_threads - 1
self.checkout_mutex.release()
return
self.checkin_mutex.release()
self.checkout_mutex.acquire()
self.waiting = self.waiting - 1
if self.waiting == 0:
self.checkin_mutex.release()
return
self.checkout_mutex.release()
class BarrierTest(BasicThreadTest):
def test_barrier(self):
self.bar = Barrier(NUMTASKS)
self.running = NUMTASKS
for i in range(NUMTASKS):
thread.start_new_thread(self.task2, (i,))
verbose_print("waiting for tasks to end")
self.done_mutex.acquire()
verbose_print("tasks done")
def task2(self, ident):
for i in range(NUMTRIPS):
if ident == 0:
# give it a good chance to enter the next
# barrier before the others are all out
# of the current one
delay = 0
else:
with self.random_mutex:
delay = random.random() / 10000.0
verbose_print("task %s will run for %sus" %
(ident, round(delay * 1e6)))
time.sleep(delay)
verbose_print("task %s entering %s" % (ident, i))
self.bar.enter()
verbose_print("task %s leaving barrier" % ident)
with self.running_mutex:
self.running -= 1
# Must release mutex before releasing done, else the main thread can
# exit and set mutex to None as part of global teardown; then
# mutex.release() raises AttributeError.
finished = self.running == 0
if finished:
self.done_mutex.release()
class LockTests(lock_tests.LockTests):
locktype = thread.allocate_lock
class TestForkInThread(unittest.TestCase):
def setUp(self):
self.read_fd, self.write_fd = os.pipe()
@unittest.skipIf(sys.platform.startswith('win'),
"This test is only appropriate for POSIX-like systems.")
@test_support.reap_threads
def test_forkinthread(self):
def thread1():
try:
pid = os.fork() # fork in a thread
except RuntimeError:
sys.exit(0) # exit the child
if pid == 0: # child
os.close(self.read_fd)
os.write(self.write_fd, "OK")
sys.exit(0)
else: # parent
os.close(self.write_fd)
thread.start_new_thread(thread1, ())
self.assertEqual(os.read(self.read_fd, 2), "OK",
"Unable to fork() in thread")
def tearDown(self):
try:
os.close(self.read_fd)
except OSError:
pass
try:
os.close(self.write_fd)
except OSError:
pass
def test_main():
test_support.run_unittest(ThreadRunningTests, BarrierTest, LockTests,
TestForkInThread)
if __name__ == "__main__":
test_main()
|
antoinecarme/pyaf
|
refs/heads/master
|
tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_LinearTrend_Seasonal_Second_ARX.py
|
1
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Anscombe'] , ['LinearTrend'] , ['Seasonal_Second'] , ['ARX'] );
|
3L3N4/volatility
|
refs/heads/master
|
volatility/plugins/envars.py
|
12
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (c) 2012 Michael Ligh <michael.ligh@mnin.org>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.plugins.taskmods as taskmods
import volatility.plugins.registry.registryapi as registryapi
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
class Envars(taskmods.DllList):
"Display process environment variables"
def __init__(self, config, *args, **kwargs):
taskmods.DllList.__init__(self, config, *args, **kwargs)
config.add_option("SILENT", short_option = 's',
default = False,
help = "Suppress common and non-persistent variables",
action = "store_true")
def _get_silent_vars(self):
"""Enumerate persistent & common variables.
This function collects the global (all users) and
user-specific environment variables from the
        registry. Any variable in a process env block that
        does not exist in the persistent list was explicitly
set with the SetEnvironmentVariable() API.
"""
values = []
regapi = registryapi.RegistryApi(self._config)
ccs = regapi.reg_get_currentcontrolset()
## The global variables
for value, _ in regapi.reg_yield_values(
hive_name = 'system',
key = '{0}\\Control\\Session Manager\\Environment'.format(ccs)):
values.append(value)
## The user-specific variables
regapi.reset_current()
for value, _ in regapi.reg_yield_values(
hive_name = 'ntuser.dat', key = 'Environment'):
values.append(value)
## The volatile user variables
for value, _ in regapi.reg_yield_values(
hive_name = 'ntuser.dat', key = 'Volatile Environment'):
values.append(value)
## These are variables set explicitly but are
## common enough to ignore safely.
values.extend(["ProgramFiles", "CommonProgramFiles", "SystemDrive",
"SystemRoot", "ProgramData", "PUBLIC", "ALLUSERSPROFILE",
"COMPUTERNAME", "SESSIONNAME", "USERNAME", "USERPROFILE",
"PROMPT", "USERDOMAIN", "AppData", "CommonFiles", "CommonDesktop",
"CommonProgramGroups", "CommonStartMenu", "CommonStartUp",
"Cookies", "DesktopDirectory", "Favorites", "History", "NetHood",
"PersonalDocuments", "RecycleBin", "StartMenu", "Templates",
"AltStartup", "CommonFavorites", "ConnectionWizard",
"DocAndSettingRoot", "InternetCache", "windir", "Path", "HOMEDRIVE",
"PROCESSOR_ARCHITECTURE", "NUMBER_OF_PROCESSORS", "ProgramFiles(x86)",
"CommonProgramFiles(x86)", "CommonProgramW6432", "PSModulePath",
"PROCESSOR_IDENTIFIER", "FP_NO_HOST_CHECK", "LOCALAPPDATA", "TMP",
"ProgramW6432",
])
return values
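    # Usage note (illustrative, not from the original plugin): when -s/SILENT
    # is set, generator() and render_text() below skip every variable returned
    # by _get_silent_vars(), leaving only values set explicitly at runtime
    # (for example via SetEnvironmentVariable()).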
def unified_output(self, data):
return TreeGrid([("Pid", int),
("Process", str),
("Block", Address),
("Variable", str),
("Value", str)],
self.generator(data))
def generator(self, data):
if self._config.SILENT:
silent_vars = self._get_silent_vars()
for task in data:
for var, val in task.environment_variables():
if self._config.SILENT:
if var in silent_vars:
continue
yield (0, [int(task.UniqueProcessId),
str(task.ImageFileName),
Address(task.Peb.ProcessParameters.Environment),
str(var),
str(val)])
def render_text(self, outfd, data):
self.table_header(outfd,
[("Pid", "8"),
("Process", "20"),
("Block", "[addrpad]"),
("Variable", "30"),
("Value", ""),
])
if self._config.SILENT:
silent_vars = self._get_silent_vars()
for task in data:
for var, val in task.environment_variables():
if self._config.SILENT:
if var in silent_vars:
continue
self.table_row(outfd,
task.UniqueProcessId,
task.ImageFileName,
task.Peb.ProcessParameters.Environment,
var, val
)
|
myarjunar/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/grass7/ext/r_li_mps_ascii.py
|
7
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
r_li_mps_ascii.py
-----------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from __future__ import absolute_import
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from .r_li import checkMovingWindow, configFile, moveOutputTxtFile
def checkParameterValuesBeforeExecuting(alg):
return checkMovingWindow(alg, True)
def processCommand(alg):
configFile(alg, True)
def processOutputs(alg):
moveOutputTxtFile(alg)
|
mete0r/mete0r.olefilefs
|
refs/heads/master
|
mete0r_olefilefs/cli.py
|
1
|
# -*- coding: utf-8 -*-
#
# mete0r.olefilefs : PyFilesystem interface to olefile
# Copyright (C) 2015 mete0r <mete0r@sarangbang.or.kr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
from __future__ import unicode_literals
def main():
pass
|
lebabouin/CouchPotatoServer-develop
|
refs/heads/master
|
libs/tmdb3/cache_engine.py
|
10
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------
# Name: cache_engine.py
# Python Library
# Author: Raymond Wagner
# Purpose: Base cache engine class for collecting registered engines
#-----------------------
import time
from weakref import ref
class Engines( object ):
def __init__(self):
self._engines = {}
def register(self, engine):
self._engines[engine.__name__] = engine
self._engines[engine.name] = engine
def __getitem__(self, key):
return self._engines[key]
def __contains__(self, key):
return self._engines.__contains__(key)
Engines = Engines()
class CacheEngineType( type ):
"""
Cache Engine Metaclass that registers new engines against the cache
for named selection and use.
"""
def __init__(mcs, name, bases, attrs):
super(CacheEngineType, mcs).__init__(name, bases, attrs)
if name != 'CacheEngine':
# skip base class
Engines.register(mcs)
class CacheEngine( object ):
__metaclass__ = CacheEngineType
name = 'unspecified'
def __init__(self, parent):
self.parent = ref(parent)
def configure(self):
raise RuntimeError
def get(self, date):
raise RuntimeError
def put(self, key, value, lifetime):
raise RuntimeError
def expire(self, key):
raise RuntimeError
class CacheObject( object ):
"""
Cache object class, containing one stored record.
"""
def __init__(self, key, data, lifetime=0, creation=None):
self.key = key
self.data = data
self.lifetime = lifetime
self.creation = creation if creation is not None else time.time()
def __len__(self):
return len(self.data)
@property
def expired(self):
return (self.remaining == 0)
@property
def remaining(self):
return max((self.creation + self.lifetime) - time.time(), 0)
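# Illustrative sketch (not part of the module): a cache entry that expires one
# minute after creation.
#
#   obj = CacheObject('some-key', 'some-value', lifetime=60)
#   obj.remaining   # seconds left before expiry (at most 60)
#   obj.expired     # False until `remaining` reaches 0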
|
alcemirfernandes/irobotgame
|
refs/heads/master
|
lib/pyglet/resource.py
|
2
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Load application resources from a known path.
Loading resources by specifying relative paths to filenames is often
problematic in Python, as the working directory is not necessarily the same
directory as the application's script files.
This module allows applications to specify a search path for resources.
Relative paths are taken to be relative to the application's __main__ module.
ZIP files can appear on the path; they will be searched inside. The resource
module also behaves as expected when applications are bundled using py2exe or
py2app.
As well as providing file references (with the `file` function), the resource
module also contains convenience functions for loading images, textures,
fonts, media and documents.
3rd party modules or packages not bound to a specific application should
construct their own `Loader` instance and override the path to use the
resources in the module's directory.
Path format
^^^^^^^^^^^
The resource path `path` (see also `Loader.__init__` and `Loader.path`)
is a list of locations to search for resources. Locations are searched in the
order given in the path. If a location is not valid (for example, if the
directory does not exist), it is skipped.
Locations in the path beginning with an at sign (''@'' symbol) specify
Python packages. Other locations specify a ZIP archive or directory on the
filesystem. Locations that are not absolute are assumed to be relative to the
script home. Some examples::
# Search just the `res` directory, assumed to be located alongside the
# main script file.
path = ['res']
# Search the directory containing the module `levels.level1`, followed
# by the `res` directory.
path = ['@levels.level1', 'res']
Paths are always case-sensitive, even if the filesystem is not. This
avoids a common programmer error when porting applications between platforms.
The default path is ``['.']``. If you modify the path, you must call
`reindex`.
:since: pyglet 1.1
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import operator
import os
import weakref
import sys
import zipfile
import StringIO
import pyglet
class ResourceNotFoundException(Exception):
'''The named resource was not found on the search path.'''
def __init__(self, name):
message = ('Resource "%s" was not found on the path. '
'Ensure that the filename has the correct capitalisation.') % name
Exception.__init__(self, message)
def get_script_home():
'''Get the directory containing the program entry module.
For ordinary Python scripts, this is the directory containing the
``__main__`` module. For executables created with py2exe the result is
the directory containing the running executable file. For OS X bundles
created using Py2App the result is the Resources directory within the
running bundle.
If none of the above cases apply and the file for ``__main__`` cannot
be determined, the working directory is returned.
:rtype: str
'''
frozen = getattr(sys, 'frozen', None)
if frozen in ('windows_exe', 'console_exe'):
return os.path.dirname(sys.executable)
elif frozen == 'macosx_app':
return os.environ['RESOURCEPATH']
else:
main = sys.modules['__main__']
if hasattr(main, '__file__'):
return os.path.dirname(main.__file__)
# Probably interactive
return ''
def get_settings_path(name):
'''Get a directory to save user preferences.
Different platforms have different conventions for where to save user
preferences, saved games, and settings. This function implements those
conventions. Note that the returned path may not exist: applications
should use ``os.makedirs`` to construct it if desired.
On Linux, a hidden directory `name` in the user's home directory is
returned.
On Windows (including under Cygwin) the `name` directory in the user's
``Application Settings`` directory is returned.
On Mac OS X the `name` directory under ``~/Library/Application Support``
is returned.
:Parameters:
`name` : str
The name of the application.
:rtype: str
'''
if sys.platform in ('cygwin', 'win32'):
if 'APPDATA' in os.environ:
return os.path.join(os.environ['APPDATA'], name)
else:
return os.path.expanduser('~/%s' % name)
elif sys.platform == 'darwin':
return os.path.expanduser('~/Library/Application Support/%s' % name)
else:
return os.path.expanduser('~/.%s' % name)
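# Example use (the application name is illustrative); as noted above, the
# returned directory may need to be created first:
#
#   settings_dir = get_settings_path('MyGame')
#   if not os.path.exists(settings_dir):
#       os.makedirs(settings_dir)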
class Location(object):
'''Abstract resource location.
Given a location, a file can be loaded from that location with the `open`
method. This provides a convenient way to specify a path to load files
from, and not necessarily have that path reside on the filesystem.
'''
def open(self, filename, mode='rb'):
'''Open a file at this location.
:Parameters:
`filename` : str
The filename to open. Absolute paths are not supported.
Relative paths are not supported by most locations (you
should specify only a filename with no path component).
`mode` : str
The file mode to open with. Only files opened on the
filesystem make use of this parameter; others ignore it.
:rtype: file object
'''
raise NotImplementedError('abstract')
class FileLocation(Location):
'''Location on the filesystem.
'''
def __init__(self, path):
'''Create a location given a relative or absolute path.
:Parameters:
`path` : str
Path on the filesystem.
'''
self.path = path
def open(self, filename, mode='rb'):
return open(os.path.join(self.path, filename), mode)
class ZIPLocation(Location):
'''Location within a ZIP file.
'''
def __init__(self, zip, dir):
'''Create a location given an open ZIP file and a path within that
file.
:Parameters:
`zip` : ``zipfile.ZipFile``
An open ZIP file from the ``zipfile`` module.
`dir` : str
A path within that ZIP file. Can be empty to specify files at
the top level of the ZIP file.
'''
self.zip = zip
self.dir = dir
def open(self, filename, mode='rb'):
path = os.path.join(self.dir, filename)
text = self.zip.read(path)
return StringIO.StringIO(text)
class URLLocation(Location):
'''Location on the network.
This class uses the ``urlparse`` and ``urllib2`` modules to open files on
the network given a URL.
'''
def __init__(self, base_url):
'''Create a location given a base URL.
:Parameters:
`base_url` : str
URL string to prepend to filenames.
'''
self.base = base_url
def open(self, filename, mode='rb'):
import urlparse
import urllib2
url = urlparse.urljoin(self.base, filename)
return urllib2.urlopen(url)
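# Usage sketch (the base URL and filename are illustrative):
#
#   location = URLLocation('http://example.com/assets/')
#   f = location.open('sprite.png')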
class Loader(object):
'''Load program resource files from disk.
The loader contains a search path which can include filesystem
directories, ZIP archives and Python packages.
:Ivariables:
`path` : list of str
List of search locations. After modifying the path you must
call the `reindex` method.
`script_home` : str
Base resource location, defaulting to the location of the
application script.
'''
def __init__(self, path=None, script_home=None):
'''Create a loader for the given path.
If no path is specified it defaults to ``['.']``; that is, just the
program directory.
See the module documentation for details on the path format.
:Parameters:
`path` : list of str
List of locations to search for resources.
`script_home` : str
Base location of relative files. Defaults to the result of
`get_script_home`.
'''
if path is None:
path = ['.']
if type(path) in (str, unicode):
path = [path]
self.path = list(path)
if script_home is None:
script_home = get_script_home()
self._script_home = script_home
self.reindex()
# Map name to image
self._cached_textures = weakref.WeakValueDictionary()
self._cached_images = weakref.WeakValueDictionary()
self._cached_animations = weakref.WeakValueDictionary()
# Map bin size to list of atlases
self._texture_atlas_bins = {}
def reindex(self):
'''Refresh the file index.
You must call this method if `path` is changed or the filesystem
layout changes.
'''
self._index = {}
for path in self.path:
if path.startswith('@'):
# Module
name = path[1:]
try:
module = __import__(name)
except:
continue
for component in name.split('.')[1:]:
module = getattr(module, component)
if hasattr(module, '__file__'):
path = os.path.dirname(module.__file__)
else:
path = '' # interactive
elif not os.path.isabs(path):
# Add script base unless absolute
assert '\\' not in path, \
'Backslashes not permitted in relative path'
path = os.path.join(self._script_home, path)
if os.path.isdir(path):
# Filesystem directory
location = FileLocation(path)
for name in os.listdir(path):
self._index_file(name, location)
else:
# Find path component that is the ZIP file.
dir = ''
while path and not os.path.isfile(path):
path, tail_dir = os.path.split(path)
dir = '/'.join((tail_dir, dir))
dir = dir.rstrip('/')
# path is a ZIP file, dir resides within ZIP
if path and zipfile.is_zipfile(path):
zip = zipfile.ZipFile(path, 'r')
location = ZIPLocation(zip, dir)
for name_path in zip.namelist():
name_dir, name = os.path.split(name_path)
assert '\\' not in name_dir
assert not name_dir.endswith('/')
if name_dir == dir:
self._index_file(name, location)
def _index_file(self, name, location):
if name not in self._index:
self._index[name] = location
def file(self, name, mode='rb'):
'''Load a resource.
:Parameters:
`name` : str
Filename of the resource to load.
`mode` : str
Combination of ``r``, ``w``, ``a``, ``b`` and ``t`` characters
with the meaning as for the builtin ``open`` function.
:rtype: file object
'''
try:
location = self._index[name]
return location.open(name, mode)
except KeyError:
raise ResourceNotFoundException(name)
def location(self, name):
'''Get the location of a resource.
This method is useful for opening files referenced from a resource.
For example, an HTML file loaded as a resource might reference some
images. These images should be located relative to the HTML file, not
looked up individually in the loader's path.
:Parameters:
`name` : str
Filename of the resource to locate.
:rtype: `Location`
'''
try:
return self._index[name]
except KeyError:
raise ResourceNotFoundException(name)
def add_font(self, name):
'''Add a font resource to the application.
Fonts not installed on the system must be added to pyglet before they
can be used with `font.load`. Although the font is added with
its filename using this function, it is loaded by specifying its
family name. For example::
resource.add_font('action_man.ttf')
action_man = font.load('Action Man')
:Parameters:
`name` : str
Filename of the font resource to add.
'''
from pyglet import font
file = self.file(name)
font.add_file(file)
def _alloc_image(self, name):
file = self.file(name)
img = pyglet.image.load(name, file=file)
bin = self._get_texture_atlas_bin(img.width, img.height)
if bin is None:
return img.get_texture(True)
return bin.add(img)
def _get_texture_atlas_bin(self, width, height):
'''A heuristic for determining the atlas bin to use for a given image
size. Returns None if the image should not be placed in an atlas (too
big), otherwise the bin (a list of TextureAtlas).
'''
# Large images are not placed in an atlas
if width > 128 or height > 128:
return None
# Group images with small heights separately from those with larger heights
# (as the allocator can't stack within a single row).
bin_size = 1
if height > 32:
bin_size = 2
try:
bin = self._texture_atlas_bins[bin_size]
except KeyError:
bin = self._texture_atlas_bins[bin_size] = \
pyglet.image.atlas.TextureBin()
return bin
def image(self, name, flip_x=False, flip_y=False, rotate=0):
'''Load an image with optional transformation.
This is similar to `texture`, except the resulting image will be
packed into a `TextureBin` if it is an appropriate size for packing.
This is more efficient than loading images into separate textures.
:Parameters:
`name` : str
Filename of the image source to load.
`flip_x` : bool
If True, the returned image will be flipped horizontally.
`flip_y` : bool
If True, the returned image will be flipped vertically.
`rotate` : int
The returned image will be rotated clockwise by the given
number of degrees (a multiple of 90).
:rtype: `Texture`
:return: A complete texture if the image is large, otherwise a
`TextureRegion` of a texture atlas.
'''
if name in self._cached_images:
identity = self._cached_images[name]
else:
identity = self._cached_images[name] = self._alloc_image(name)
if not rotate and not flip_x and not flip_y:
return identity
return identity.get_transform(flip_x, flip_y, rotate)
def animation(self, name, flip_x=False, flip_y=False, rotate=0):
'''Load an animation with optional transformation.
Animations loaded from the same source but with different
transformations will use the same textures.
:Parameters:
`name` : str
Filename of the animation source to load.
`flip_x` : bool
If True, the returned image will be flipped horizontally.
`flip_y` : bool
If True, the returned image will be flipped vertically.
`rotate` : int
The returned image will be rotated clockwise by the given
number of degrees (a multiple of 90).
:rtype: `Animation`
'''
try:
identity = self._cached_animations[name]
except KeyError:
animation = pyglet.image.load_animation(name, self.file(name))
bin = self._get_texture_atlas_bin(animation.get_max_width(),
animation.get_max_height())
if bin:
animation.add_to_texture_bin(bin)
identity = self._cached_animations[name] = animation
if not rotate and not flip_x and not flip_y:
return identity
return identity.get_transform(flip_x, flip_y, rotate)
def get_cached_image_names(self):
'''Get a list of image filenames that have been cached.
This is useful for debugging and profiling only.
:rtype: list
:return: List of str
'''
return self._cached_images.keys()
def get_cached_animation_names(self):
'''Get a list of animation filenames that have been cached.
This is useful for debugging and profiling only.
:rtype: list
:return: List of str
'''
return self._cached_animations.keys()
def get_texture_bins(self):
'''Get a list of texture bins in use.
This is useful for debugging and profiling only.
:rtype: list
:return: List of `TextureBin`
'''
return self._texture_atlas_bins.values()
def media(self, name, streaming=True):
'''Load a sound or video resource.
The meaning of `streaming` is as for `media.load`. Compressed
sources cannot be streamed (that is, video and compressed audio
cannot be streamed from a ZIP archive).
:Parameters:
`name` : str
Filename of the media source to load.
`streaming` : bool
True if the source should be streamed from disk, False if
it should be entirely decoded into memory immediately.
:rtype: `media.Source`
'''
from pyglet import media
try:
location = self._index[name]
if isinstance(location, FileLocation):
# Don't open the file if it's streamed from disk -- AVbin
# needs to do it.
path = os.path.join(location.path, name)
return media.load(path, streaming=streaming)
else:
file = location.open(name)
return media.load(name, file=file, streaming=streaming)
except KeyError:
raise ResourceNotFoundException(name)
def texture(self, name):
'''Load a texture.
The named image will be loaded as a single OpenGL texture. If the
dimensions of the image are not powers of 2 a `TextureRegion` will
be returned.
:Parameters:
`name` : str
Filename of the image resource to load.
:rtype: `Texture`
'''
if name in self._cached_textures:
return self._cached_textures[name]
file = self.file(name)
texture = pyglet.image.load(name, file=file).get_texture()
self._cached_textures[name] = texture
return texture
def html(self, name):
'''Load an HTML document.
:Parameters:
`name` : str
Filename of the HTML resource to load.
:rtype: `FormattedDocument`
'''
file = self.file(name)
return pyglet.text.decode_html(file.read(), self.location(name))
def attributed(self, name):
'''Load an attributed text document.
See `pyglet.text.formats.attributed` for details on this format.
:Parameters:
`name` : str
Filename of the attribute text resource to load.
:rtype: `FormattedDocument`
'''
file = self.file(name)
return pyglet.text.load(name, file, 'text/vnd.pyglet-attributed')
def text(self, name):
'''Load a plain text document.
:Parameters:
`name` : str
Filename of the plain text resource to load.
:rtype: `UnformattedDocument`
'''
file = self.file(name)
return pyglet.text.load(name, file, 'text/plain')
def get_cached_texture_names(self):
'''Get the names of textures currently cached.
:rtype: list of str
'''
return self._cached_textures.keys()
#: Default resource search path.
#:
#: Locations in the search path are searched in order and are always
#: case-sensitive. After changing the path you must call `reindex`.
#:
#: See the module documentation for details on the path format.
#:
#: :type: list of str
path = []
class _DefaultLoader(Loader):
def _get_path(self):
return path
def _set_path(self, value):
global path
path = value
path = property(_get_path, _set_path)
_default_loader = _DefaultLoader()
reindex = _default_loader.reindex
file = _default_loader.file
location = _default_loader.location
add_font = _default_loader.add_font
image = _default_loader.image
animation = _default_loader.animation
get_cached_image_names = _default_loader.get_cached_image_names
get_cached_animation_names = _default_loader.get_cached_animation_names
get_texture_bins = _default_loader.get_texture_bins
media = _default_loader.media
texture = _default_loader.texture
html = _default_loader.html
attributed = _default_loader.attributed
text = _default_loader.text
get_cached_texture_names = _default_loader.get_cached_texture_names
|
OCA/l10n-spain
|
refs/heads/12.0
|
l10n_es_aeat_mod349/models/aeat_349_map_line.py
|
2
|
# Copyright 2017 Luis M. Ontalba <luis.martinez@tecnativa.com>
# Copyright 2018 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import fields, models
class Aeat349MapLines(models.Model):
_name = 'aeat.349.map.line'
_description = 'Aeat 349 Map Line'
_rec_name = 'operation_key'
_sql_constraints = [
('unique_operation_key', 'UNIQUE(operation_key)',
"There's already another record with the same operation key"),
]
def _selection_operation_key(self):
return self.env['account.move.line'].fields_get(
allfields=['l10n_es_aeat_349_operation_key'],
)['l10n_es_aeat_349_operation_key']['selection']
physical_product = fields.Boolean(string='Involves physical product')
tax_tmpl_ids = fields.One2many(
comodel_name='account.tax.template',
inverse_name='aeat_349_map_line',
string="Taxes",
)
operation_key = fields.Selection(
selection=_selection_operation_key,
required=True,
)
|
dmishin/log-zoom
|
refs/heads/master
|
log_transform.py
|
1
|
#!/usr/bin/env python
from PIL import Image
from math import *
import os
from image_distort import transform_image, compose, scale_tfm, translate_tfm
def logpolar_transform(image_size, center, out_width=None, out_height=None, alpha0 = 0):
swidth, sheight = image_size
if center is None:
center = (swidth/2, sheight/2)
if out_width is None:
#Automatically determine output width
#Should be enough for a mostly lossless transform
out_width = (swidth+sheight)*2
x0,y0 = center
min_log = log(0.5)
max_log = log(max(x0, swidth-x0)**2 + max(y0,sheight-y0)**2)*0.5
#Determine appropriate height of the output image, requiring that the whole original
# image fits into it and the highest zoom level is a single pixel.
if out_height is None:
out_height = int(out_width / (2*pi) * (max_log-min_log))
out_size = (out_width, out_height)
out_scale = 2*pi/out_width
def expz(xf,yf):
ey = exp(yf)
return cos(xf)*ey, sin(xf)*ey
# Note: tfm_func1 is not used below; the transform that gets returned is
# tfm_func, defined further down.
tfm_func1 = compose(
translate_tfm(x0, y0),
expz,
translate_tfm( alpha0, max_log ),
scale_tfm( out_scale, -out_scale)
)
def tfm_func(x,y):
xf = x*out_scale + alpha0
yf = max_log-y*out_scale #Put highest resolution at the top
ey = exp(yf)
yfs = sin(xf)*ey
xfs = cos(xf)*ey
return xfs + x0, yfs + y0
return (out_width, out_height), tfm_func
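# Minimal usage sketch (assumes `img` is a PIL image already loaded elsewhere;
# the variable names are illustrative):
#
#   out_size, tfm = logpolar_transform(img.size, center=None)
#   result = transform_image(img, tfm, out_size, mesh_step=8)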
def main():
from optparse import OptionParser
parser = OptionParser(usage = "%prog [options] INPUT_IMAGE OUTPUT_IMAGE\n"
"Log-Polar image transform. Generated image always have RGBA format")
parser.add_option("-c", "--center", dest="center",
help="Center point position, x:y", metavar="X:Y")
parser.add_option("-A", "--angle", dest="angle", type=float, default=0.0,
help="Angle, corresponding left side of the transformed image, in graduses. 0 is horizontal, left to right.", metavar="ANGLE")
parser.add_option("-w", "--width", dest="width", type=int,
help="Width of the output image. Default is auto-detect, based on the source inmage dimensions (the size is usually quite big)", metavar="PIXELS")
parser.add_option("-H", "--height", dest="height", type=int,
help="Height of the output image. Default is auto-detect, based on width", metavar="PIXELS")
parser.add_option("", "--mesh-step", dest="mesh_step", type=int, default=8,
help="Step of the output mesh. Default is 8", metavar="PIXELS")
parser.add_option("", "--mercator2ortho", dest="mercator2ortho",
help="Treat source image as a piece of the map in Mercator projection. Map in converted to orthogonal projection regarding the point in the center of the map.", metavar="CENTER_LAT:LNG_WIDTH")
(options, args) = parser.parse_args()
if len(args) < 1:
parser.error("No input file specified")
input = args[0]
if len(args) >= 2:
output = args[1]
else:
output = None
if options.mercator2ortho:
if options.center:
parser.error("Center not supported in mercator map pieces. It is always at the center of the image")
try:
lat_center_s, lng_extent_s = options.mercator2ortho.split(":",2)
mercator2ortho_options = {
"center_lat": float(lat_center_s)/180*pi,
"lng_extent": float(lng_extent_s)/180*pi
}
except Exception as err:
parser.error("Error parsing mercator projection options: {0}".format(err))
else:
mercator2ortho_options = None
if options.center is None:
center = None
else:
center = tuple(map(int, options.center.split(":",2)))
img = Image.open(input)
#Image conversions
# Source image can be one of:
# - RGB
# - RGBA - has alpha
# - I - may have alpha
# - L
# - 1
# Target image:
# always have alpha.
if img.mode != "RGBA":
img = img.convert("RGBA")
if mercator2ortho_options:
from mercator2ortho import mercator2ortho
out_ortho_size, merc2ortho_tfm, _ = mercator2ortho(img.size,
mercator2ortho_options["center_lat"],
mercator2ortho_options["lng_extent"],
max(img.size)
)
out_size, ortho2log_tfm = logpolar_transform(out_ortho_size,
center=scale_tfm(0.5)(*out_ortho_size),
out_width = options.width,
alpha0 = options.angle/180*pi)
#Create composite transform: first convert Mercator map to orthogonal projection, then apply log-transform to it.
transform = compose(
merc2ortho_tfm,
ortho2log_tfm )
else:
out_size, transform = logpolar_transform(img.size,
center=center,
out_width = options.width,
alpha0 = options.angle/180*pi)
img = transform_image(img, transform, out_size, mesh_step=options.mesh_step)
if output:
img.save(output)
else:
img.show()
if __name__=="__main__":
main()
|
jiriprochazka/lnst
|
refs/heads/master
|
recipes/switchdev/l3-002-vlan-interface.py
|
1
|
"""
Copyright 2016 Mellanox Technologies. All rights reserved.
Licensed under the GNU General Public License, version 2 as
published by the Free Software Foundation; see COPYING for details.
"""
__author__ = """
eladr@mellanox.com (Elad Raz)
"""
from lnst.Controller.Task import ctl
from TestLib import TestLib
from time import sleep
def test_ip(major, minor, prefix=[24,64]):
return ["192.168.10%d.%d%s" % (major, minor,
"/" + str(prefix[0]) if len(prefix) > 0 else ""),
"2002:%d::%d%s" % (major, minor,
"/" + str(prefix[1]) if len(prefix) > 1 else "")]
def ipv4(test_ip):
return test_ip[0]
def ipv6(test_ip):
return test_ip[1]
def do_task(ctl, hosts, ifaces, aliases):
m1, m2, sw = hosts
m1_if1, m2_if1, sw_if1, sw_if2 = ifaces
m1_if1.reset()
m2_if1.reset()
m1_if1_10 = m1.create_vlan(m1_if1, 10, ip=test_ip(1,1))
m2_if1_10 = m2.create_vlan(m2_if1, 10, ip=test_ip(2,1))
sw_if1_10 = sw.create_vlan(sw_if1, 10, ip=test_ip(1,2))
sw_if2_10 = sw.create_vlan(sw_if2, 10, ip=test_ip(2,2))
m1_if1_10.add_nhs_route(ipv4(test_ip(2,0)), [ipv4(test_ip(1,2,[]))])
m2_if1_10.add_nhs_route(ipv4(test_ip(1,0)), [ipv4(test_ip(2,2,[]))])
m1_if1.add_nhs_route(ipv6(test_ip(2,0)), [ipv6(test_ip(1,2,[]))], ipv6=True)
m2_if1.add_nhs_route(ipv6(test_ip(1,0)), [ipv6(test_ip(2,2,[]))], ipv6=True)
sleep(30)
tl = TestLib(ctl, aliases)
tl.ping_simple(m1_if1_10, m2_if1_10)
tl.netperf_tcp(m1_if1_10, m2_if1_10)
tl.netperf_udp(m1_if1_10, m2_if1_10)
do_task(ctl, [ctl.get_host("machine1"),
ctl.get_host("machine2"),
ctl.get_host("switch")],
[ctl.get_host("machine1").get_interface("if1"),
ctl.get_host("machine2").get_interface("if1"),
ctl.get_host("switch").get_interface("if1"),
ctl.get_host("switch").get_interface("if2")],
ctl.get_aliases())
|
Captain-Coder/tribler
|
refs/heads/devel
|
Tribler/Test/Community/popularity/test_community.py
|
1
|
import random
from twisted.internet.defer import inlineCallbacks
from Tribler.Core.Utilities.random_utils import random_infohash, random_string, random_utf8_string
from Tribler.Test.Core.base_test import MockObject
from Tribler.community.popularity import constants
from Tribler.community.popularity.community import PopularityCommunity, MSG_TORRENT_HEALTH_RESPONSE, \
MSG_CHANNEL_HEALTH_RESPONSE, ERROR_UNKNOWN_PEER, ERROR_NO_CONTENT, \
ERROR_UNKNOWN_RESPONSE
from Tribler.community.popularity.constants import SEARCH_TORRENT_REQUEST, MSG_TORRENT_INFO_RESPONSE, MSG_SUBSCRIPTION
from Tribler.community.popularity.payload import SearchResponseItemPayload, TorrentInfoResponsePayload, \
TorrentHealthPayload, ContentSubscription
from Tribler.community.popularity.repository import TYPE_TORRENT_HEALTH
from Tribler.community.popularity.request import ContentRequest
from Tribler.pyipv8.ipv8.test.base import TestBase
from Tribler.pyipv8.ipv8.test.mocking.ipv8 import MockIPv8
from Tribler.Test.tools import trial_timeout
class TestPopularityCommunityBase(TestBase):
NUM_NODES = 2
def setUp(self):
super(TestPopularityCommunityBase, self).setUp()
self.initialize(PopularityCommunity, self.NUM_NODES)
def create_node(self, *args, **kwargs):
def load_random_torrents(limit):
return [
['\xfdC\xf9+V\x11A\xe7QG\xfb\xb1*6\xef\xa5\xaeu\xc2\xe0',
random.randint(200, 250), random.randint(1, 10), 1525704192.166107] for _ in range(limit)
]
torrent_db = MockObject()
torrent_db.getTorrent = lambda *args, **kwargs: None
torrent_db.updateTorrent = lambda *args, **kwargs: None
torrent_db.getRecentlyCheckedTorrents = load_random_torrents
channel_db = MockObject()
return MockIPv8(u"curve25519", PopularityCommunity, torrent_db=torrent_db, channel_db=channel_db)
class MockRepository(object):
def __init__(self):
super(MockRepository, self).__init__()
self.sample_torrents = []
self.setup_torrents()
def setup_torrents(self):
for _ in range(10):
infohash = random_infohash()
name = random_utf8_string()
length = random.randint(1000, 9999)
num_files = random.randint(1, 10)
category_list = ['video', 'audio']
creation_date = random.randint(1000000, 111111111)
seeders = random.randint(10, 200)
leechers = random.randint(5, 1000)
cid = random_string(size=20)
self.sample_torrents.append([infohash, name, length, num_files, category_list, creation_date,
seeders, leechers, cid])
def search_torrent(self, _):
sample_items = []
for torrent in self.sample_torrents:
sample_items.append(SearchResponseItemPayload(*torrent))
return sample_items
def search_channels(self, _):
return []
def has_torrent(self, _):
return False
def cleanup(self):
pass
def update_from_search_results(self, results):
pass
def get_torrent(self, _):
torrent = self.sample_torrents[0]
db_torrent = {'name': torrent[1],
'length': torrent[2],
'creation_date': torrent[5],
'num_files': torrent[3],
'comment': ''}
return db_torrent
def get_top_torrents(self):
return self.sample_torrents
def update_from_torrent_search_results(self, search_results):
pass
class TestPopularityCommunity(TestPopularityCommunityBase):
__testing__ = False
NUM_NODES = 2
@inlineCallbacks
def test_subscribe_peers(self):
"""
Tests that subscribing to peers populates the publishers and subscribers lists.
"""
self.nodes[1].overlay.send_torrent_info_response = lambda infohash, peer: None
yield self.introduce_nodes()
self.nodes[0].overlay.subscribe_peers()
yield self.deliver_messages()
# Node 0 should have a publisher added
self.assertGreater(len(self.nodes[0].overlay.publishers), 0, "Publisher expected")
# Node 1 should have a subscriber added
self.assertGreater(len(self.nodes[1].overlay.subscribers), 0, "Subscriber expected")
@inlineCallbacks
def test_subscribe_unsubscribe_individual_peers(self):
"""
Tests subscribing/unsubscribing an individual peer.
"""
self.nodes[1].overlay.send_torrent_info_response = lambda infohash, peer: None
self.nodes[1].overlay.publish_latest_torrents = lambda *args, **kwargs: None
yield self.introduce_nodes()
self.nodes[0].overlay.subscribe(self.nodes[1].my_peer, subscribe=True)
yield self.deliver_messages()
self.assertEqual(len(self.nodes[0].overlay.publishers), 1, "Expected one publisher")
self.assertEqual(len(self.nodes[1].overlay.subscribers), 1, "Expected one subscriber")
self.nodes[0].overlay.subscribe(self.nodes[1].my_peer, subscribe=False)
yield self.deliver_messages()
self.assertEqual(len(self.nodes[0].overlay.publishers), 0, "Expected no publisher")
self.assertEqual(len(self.nodes[1].overlay.subscribers), 0, "Expected no subscriber")
def test_unsubscribe_multiple_peers(self):
"""
Tests unsubscribing multiple peers works as expected.
"""
def send_popular_content_subscribe(my_peer, _, subscribe):
if not subscribe:
my_peer.unsubscribe_called += 1
self.nodes[0].overlay.subscribe = lambda peer, subscribe: \
send_popular_content_subscribe(self.nodes[0], peer, subscribe)
# Add some peers
num_peers = 10
default_peers = [self.create_node() for _ in range(num_peers)]
self.nodes[0].overlay.get_peers = lambda: default_peers
self.assertEqual(len(self.nodes[0].overlay.get_peers()), num_peers)
# Add some publishers
for peer in default_peers:
self.nodes[0].overlay.publishers.add(peer)
self.assertEqual(len(self.nodes[0].overlay.publishers), num_peers)
# Unsubscribe all the peers
self.nodes[0].unsubscribe_called = 0
self.nodes[0].overlay.unsubscribe_peers()
# Check if unsubscription was successful
self.assertEqual(self.nodes[0].unsubscribe_called, num_peers)
self.assertEqual(len(self.nodes[0].overlay.publishers), 0)
def test_refresh_peers(self):
"""
Tests if refresh_peer_list() updates the publishers and subscribers list
"""
default_peers = [self.create_node() for _ in range(10)]
for peer in default_peers:
self.nodes[0].overlay.publishers.add(peer)
self.nodes[0].overlay.subscribers.add(peer)
self.nodes[0].overlay.get_peers = lambda: default_peers
self.assertEqual(len(self.nodes[0].overlay.get_peers()), 10)
# Remove half of the peers and refresh peer list
default_peers = default_peers[:5]
self.nodes[0].overlay.refresh_peer_list()
# List of publishers and subscribers should be updated
self.assertEqual(len(self.nodes[0].overlay.get_peers()), 5)
self.assertEqual(len(self.nodes[0].overlay.subscribers), 5)
self.assertEqual(len(self.nodes[0].overlay.publishers), 5)
@trial_timeout(6)
@inlineCallbacks
def test_start(self):
"""
Tests starting of the community. Peer should start subscribing to other connected peers.
"""
self.nodes[1].overlay.send_torrent_info_response = lambda infohash, peer: None
def fake_refresh_peer_list(peer):
peer.called_refresh_peer_list = True
def fake_publish_next_content(peer):
peer.called_publish_next_content = True
self.nodes[0].called_refresh_peer_list = False
self.nodes[0].called_publish_next_content = False
self.nodes[0].overlay.refresh_peer_list = lambda: fake_refresh_peer_list(self.nodes[0])
self.nodes[0].overlay.publish_next_content = lambda: fake_publish_next_content(self.nodes[0])
yield self.introduce_nodes()
self.nodes[0].overlay.start()
yield self.sleep(constants.PUBLISH_INTERVAL)
# Node 0 should have a publisher added
self.assertEqual(len(self.nodes[0].overlay.publishers), 1, "Expected one publisher")
# Node 1 should have a subscriber added
self.assertEqual(len(self.nodes[1].overlay.subscribers), 1, "Expected one subscriber")
self.assertTrue(self.nodes[0].called_refresh_peer_list)
self.assertTrue(self.nodes[0].called_publish_next_content)
@inlineCallbacks
def test_content_publishing(self):
"""
Tests publishing next available content.
:return:
"""
def on_torrent_health_response(peer, source_address, data):
peer.torrent_health_response_received = True
self.nodes[0].torrent_health_response_received = False
self.nodes[0].overlay.decode_map[chr(MSG_TORRENT_HEALTH_RESPONSE)] = lambda source_address, data: \
on_torrent_health_response(self.nodes[0], source_address, data)
yield self.introduce_nodes()
self.nodes[0].overlay.subscribe_peers()
yield self.deliver_messages()
# Add something to queue
health_info = ('a' * 20, random.randint(1, 100), random.randint(1, 10), random.randint(1, 111111))
self.nodes[1].overlay.queue_content(TYPE_TORRENT_HEALTH, health_info)
self.nodes[1].overlay.publish_next_content()
yield self.deliver_messages()
self.assertTrue(self.nodes[0].torrent_health_response_received, "Expected to receive torrent response")
@inlineCallbacks
def test_publish_no_content(self):
"""
Tests publishing next content if no content is available.
"""
original_logger = self.nodes[0].overlay.logger
self.nodes[0].overlay.logger.debug = lambda *args, **kw: self.fake_logger_error(self.nodes[0], *args)
# Assume a subscriber exists
self.nodes[0].overlay.subscribers = [self.create_node()]
# No content
self.nodes[0].overlay.content_repository.pop_content = lambda: (None, None)
# Try publishing the next available content
self.nodes[0].no_content = False
self.nodes[0].overlay.publish_next_content()
yield self.deliver_messages()
# Expect no content found to be logged
self.assertTrue(self.nodes[0].no_content)
# Restore logger
self.nodes[0].overlay.logger = original_logger
@inlineCallbacks
def test_send_torrent_health_response(self):
"""
Tests sending torrent health response.
"""
original_logger = self.nodes[0].overlay.logger
self.nodes[0].overlay.logger.debug = lambda *args, **kw: self.fake_logger_error(self.nodes[0], *args)
self.nodes[0].overlay.create_message_packet = lambda _type, _payload: \
self.fake_create_message_packet(self.nodes[0], _type, _payload)
self.nodes[0].overlay.broadcast_message = lambda packet, peer: \
self.fake_broadcast_message(self.nodes[0], packet, peer)
# Two default peers
default_peers = [self.create_node() for _ in range(2)]
# Assuming only one is connected
self.nodes[0].overlay.get_peers = lambda: default_peers[:1]
# Case1: Try to send subscribe response to non-connected peer
self.nodes[0].unknown_peer_found = False
self.nodes[0].logger_error_called = False
payload = MockObject()
self.nodes[0].overlay.send_torrent_health_response(payload, peer=default_peers[1])
yield self.deliver_messages()
# Expected unknown peer error log
self.assertTrue(self.nodes[0].logger_error_called)
self.assertTrue(self.nodes[0].unknown_peer_found)
# Case2: Try to send response to the connected peer
self.nodes[0].broadcast_called = False
self.nodes[0].broadcast_packet_type = None
self.nodes[0].overlay.send_torrent_health_response(payload, peer=default_peers[0])
yield self.deliver_messages()
# Expect message to be sent
self.assertTrue(self.nodes[0].packet_created, "Create packet failed")
self.assertEqual(self.nodes[0].packet_type, MSG_TORRENT_HEALTH_RESPONSE, "Unexpected payload type found")
self.assertTrue(self.nodes[0].broadcast_called, "Should send a message to the peer")
self.assertEqual(self.nodes[0].receiver, default_peers[0], "Intended receiver is different")
# Restore logger
self.nodes[0].overlay.logger = original_logger
@inlineCallbacks
def test_send_channel_health_response(self):
"""
Tests sending channel health response.
"""
original_logger = self.nodes[0].overlay.logger
self.nodes[0].overlay.logger.debug = lambda *args, **kw: self.fake_logger_error(self.nodes[0], *args)
self.nodes[0].overlay.create_message_packet = lambda _type, _payload: \
self.fake_create_message_packet(self.nodes[0], _type, _payload)
self.nodes[0].overlay.broadcast_message = lambda packet, peer: \
self.fake_broadcast_message(self.nodes[0], packet, peer)
# Two default peers
default_peers = [self.create_node() for _ in range(2)]
# Assuming only one is connected
self.nodes[0].overlay.get_peers = lambda: default_peers[:1]
# Case1: Try to send response to non-connected peer
self.nodes[0].unknown_peer_found = False
self.nodes[0].logger_error_called = False
payload = MockObject()
self.nodes[0].overlay.send_channel_health_response(payload, peer=default_peers[1])
yield self.deliver_messages()
# Expected unknown peer error log
self.assertTrue(self.nodes[0].logger_error_called)
self.assertTrue(self.nodes[0].unknown_peer_found)
# Case2: Try to send response to the connected peer
self.nodes[0].broadcast_called = False
self.nodes[0].broadcast_packet_type = None
self.nodes[0].overlay.send_channel_health_response(payload, peer=default_peers[0])
yield self.deliver_messages()
# Expect message to be sent
self.assertTrue(self.nodes[0].packet_created, "Create packet failed")
self.assertEqual(self.nodes[0].packet_type, MSG_CHANNEL_HEALTH_RESPONSE, "Unexpected payload type found")
self.assertTrue(self.nodes[0].broadcast_called, "Should send a message to the peer")
self.assertEqual(self.nodes[0].receiver, default_peers[0], "Intended receiver is different")
# Restore logger
self.nodes[0].overlay.logger = original_logger
@inlineCallbacks
def test_send_torrent_info_request_response(self):
""" Test if torrent info request response works as expected. """
self.nodes[1].called_send_torrent_info_response = False
original_send_torrent_info_response = self.nodes[1].overlay.send_torrent_info_response
def send_torrent_info_response(node, infohash, peer):
node.called_infohash = infohash
node.called_peer = peer
node.called_send_torrent_info_response = True
self.nodes[1].overlay.send_torrent_info_response = lambda infohash, peer: \
send_torrent_info_response(self.nodes[1], infohash, peer)
yield self.introduce_nodes()
self.nodes[0].overlay.subscribe_peers()
yield self.deliver_messages()
infohash = 'a'*20
self.nodes[0].overlay.send_torrent_info_request(infohash, self.nodes[1].my_peer)
yield self.deliver_messages()
self.assertTrue(self.nodes[1].called_send_torrent_info_response)
self.nodes[1].overlay.send_torrent_info_response = original_send_torrent_info_response
@inlineCallbacks
def test_send_content_info_request_response(self):
""" Test if content info request response works as expected """
self.nodes[0].overlay.content_repository = MockRepository()
self.nodes[1].overlay.content_repository = MockRepository()
self.nodes[1].overlay.publish_latest_torrents = lambda *args, **kwargs: None
self.nodes[1].called_send_content_info_response = False
def send_content_info_response(node, peer, content_type):
node.called_send_content_info_response = True
node.called_peer = peer
node.called_content_type = content_type
self.nodes[1].overlay.send_content_info_response = lambda peer, identifier, content_type, _: \
send_content_info_response(self.nodes[1], peer, content_type)
yield self.introduce_nodes()
self.nodes[0].overlay.subscribe_peers()
yield self.deliver_messages()
content_type = SEARCH_TORRENT_REQUEST
request_list = ['ubuntu']
self.nodes[0].overlay.send_content_info_request(content_type, request_list, peer=self.nodes[1].my_peer)
yield self.deliver_messages()
self.assertTrue(self.nodes[1].called_send_content_info_response)
@inlineCallbacks
def test_on_torrent_health_response_from_unknown_peer(self):
"""
Tests receiving torrent health response from unknown peer
"""
original_logger = self.nodes[0].overlay.logger
self.nodes[0].overlay.logger.error = lambda *args, **kw: self.fake_logger_error(self.nodes[0], *args)
infohash = 'a' * 20
num_seeders = 10
num_leechers = 5
timestamp = 123123123
payload = TorrentHealthPayload(infohash, num_seeders, num_leechers, timestamp)
source_address = ('1.1.1.1', 1024)
data = self.nodes[0].overlay.create_message_packet(MSG_TORRENT_HEALTH_RESPONSE, payload)
self.nodes[0].unknown_response = False
self.nodes[0].overlay.on_torrent_health_response(source_address, data)
yield self.deliver_messages()
self.assertTrue(self.nodes[0].unknown_response)
# Restore logger
self.nodes[0].overlay.logger = original_logger
@inlineCallbacks
def test_on_torrent_health_response(self):
"""
Tests receiving torrent health response from a known publisher peer.
"""
def fake_update_torrent(peer):
peer.called_update_torrent = True
self.nodes[0].overlay.content_repository = MockRepository()
self.nodes[0].overlay.content_repository.update_torrent_health = lambda payload, peer_trust: \
fake_update_torrent(self.nodes[0])
infohash = 'a' * 20
num_seeders = 10
num_leechers = 5
timestamp = 123123123
payload = TorrentHealthPayload(infohash, num_seeders, num_leechers, timestamp)
data = self.nodes[1].overlay.create_message_packet(MSG_TORRENT_HEALTH_RESPONSE, payload)
yield self.introduce_nodes()
# Add node 1 in publisher list of node 0
self.nodes[0].overlay.publishers.add(self.nodes[1].my_peer)
self.nodes[0].overlay.on_torrent_health_response(self.nodes[1].my_peer.address, data)
yield self.deliver_messages()
self.assertTrue(self.nodes[0].called_update_torrent)
@inlineCallbacks
def test_on_torrent_info_response(self):
"""
Tests receiving torrent info response from a known publisher peer.
"""
def fake_update_torrent_info(peer):
peer.called_update_torrent = True
self.nodes[0].overlay.content_repository = MockRepository()
self.nodes[0].overlay.content_repository.update_torrent_info = lambda payload: \
fake_update_torrent_info(self.nodes[0])
infohash = 'a' * 20
name = "ubuntu"
length = 100
creation_date = 123123123
num_files = 33
comment = ''
payload = TorrentInfoResponsePayload(infohash, name, length, creation_date, num_files, comment)
data = self.nodes[1].overlay.create_message_packet(MSG_TORRENT_INFO_RESPONSE, payload)
yield self.introduce_nodes()
# Add node 1 in publisher list of node 0
self.nodes[0].overlay.publishers.add(self.nodes[1].my_peer)
self.nodes[0].overlay.on_torrent_info_response(self.nodes[1].my_peer.address, data)
yield self.deliver_messages()
self.assertTrue(self.nodes[0].called_update_torrent)
@inlineCallbacks
def test_on_torrent_info_response_from_unknown_peer(self):
"""
Tests receiving torrent info response from an unknown peer.
"""
def fake_update_torrent_info(peer):
peer.called_update_torrent = True
self.nodes[0].overlay.content_repository = MockRepository()
self.nodes[0].overlay.content_repository.update_torrent_info = lambda payload: \
fake_update_torrent_info(self.nodes[0])
infohash = 'a' * 20
name = "ubuntu"
length = 100
creation_date = 123123123
num_files = 33
comment = ''
payload = TorrentInfoResponsePayload(infohash, name, length, creation_date, num_files, comment)
data = self.nodes[1].overlay.create_message_packet(MSG_TORRENT_INFO_RESPONSE, payload)
yield self.introduce_nodes()
self.nodes[0].called_update_torrent = False
self.nodes[0].overlay.on_torrent_info_response(self.nodes[1].my_peer.address, data)
yield self.deliver_messages()
self.assertFalse(self.nodes[0].called_update_torrent)
@inlineCallbacks
def test_on_subscription_status(self):
"""
Tests receiving subscription status.
"""
subscribe = True
identifier = 123123123
payload = ContentSubscription(identifier, subscribe)
data = self.nodes[1].overlay.create_message_packet(MSG_SUBSCRIPTION, payload)
# Set the cache request
self.nodes[0].overlay.request_cache.pop = lambda prefix, identifer: MockObject()
self.nodes[0].overlay.request_cache.has = lambda prefix, identifer: True
yield self.introduce_nodes()
self.assertEqual(len(self.nodes[0].overlay.publishers), 0)
self.nodes[0].overlay.on_subscription_status(self.nodes[1].my_peer.address, data)
yield self.deliver_messages()
self.assertEqual(len(self.nodes[0].overlay.publishers), 1)
@inlineCallbacks
def test_on_subscription_status_no_cache(self):
"""
Tests receiving subscription status when request is not available in cache.
"""
subscribe = True
identifier = 123123123
payload = ContentSubscription(identifier, subscribe)
data = self.nodes[1].overlay.create_message_packet(MSG_SUBSCRIPTION, payload)
# Assume the cache request is not present
self.nodes[0].overlay.request_cache.has = lambda prefix, identifer: False
yield self.introduce_nodes()
self.assertEqual(len(self.nodes[0].overlay.publishers), 0)
self.nodes[0].overlay.on_subscription_status(self.nodes[1].my_peer.address, data)
yield self.deliver_messages()
self.assertEqual(len(self.nodes[0].overlay.publishers), 0)
@inlineCallbacks
def test_on_subscription_status_with_unsubscribe(self):
"""
Tests receiving subscription status with unsubscribe status.
"""
yield self.introduce_nodes()
self.nodes[0].overlay.publishers.add(self.nodes[1].my_peer)
self.assertEqual(len(self.nodes[0].overlay.publishers), 1)
# Set the cache request
self.nodes[0].overlay.request_cache.pop = lambda prefix, identifer: MockObject()
self.nodes[0].overlay.request_cache.has = lambda prefix, identifer: True
subscribe = False
identifier = 123123123
payload = ContentSubscription(identifier, subscribe)
data = self.nodes[1].overlay.create_message_packet(MSG_SUBSCRIPTION, payload)
self.nodes[0].overlay.on_subscription_status(self.nodes[1].my_peer.address, data)
yield self.deliver_messages()
self.assertEqual(len(self.nodes[0].overlay.publishers), 0)
@inlineCallbacks
def test_search_request_response(self):
self.nodes[0].overlay.content_repository = MockRepository()
self.nodes[1].overlay.content_repository = MockRepository()
self.nodes[1].overlay.publish_latest_torrents = lambda *args, **kwargs: None
def fake_process_torrent_search_response(peer):
peer.called_process_torrent_search_response = True
self.nodes[0].overlay.process_torrent_search_response = lambda query, payload: \
fake_process_torrent_search_response(self.nodes[0])
yield self.introduce_nodes()
self.nodes[0].overlay.subscribe_peers()
yield self.deliver_messages()
# Create a search request
query = "ubuntu"
self.nodes[0].overlay.send_torrent_search_request(query)
yield self.deliver_messages()
self.assertTrue(self.nodes[0].called_process_torrent_search_response)
@inlineCallbacks
def test_process_search_response(self):
self.nodes[0].overlay.content_repository = MockRepository()
self.nodes[1].overlay.content_repository = MockRepository()
self.nodes[1].overlay.publish_latest_torrents = lambda *args, **kwargs: None
def fake_notify(peer, result_dict):
peer.called_search_result_notify = True
self.assertEqual(result_dict['keywords'], 'ubuntu')
self.assertGreater(len(result_dict['results']), 1)
self.nodes[0].overlay.tribler_session = MockObject()
self.nodes[0].overlay.tribler_session.notifier = MockObject()
self.nodes[0].overlay.tribler_session.notifier.notify = lambda signal1, signal2, _, result_dict: \
fake_notify(self.nodes[0], result_dict)
yield self.introduce_nodes()
self.nodes[0].overlay.subscribe_peers()
yield self.deliver_messages()
# Create a search request
query = "ubuntu"
self.nodes[0].called_search_result_notify = False
self.nodes[0].overlay.send_torrent_search_request(query)
yield self.deliver_messages()
self.assertTrue(self.nodes[0].called_search_result_notify)
@inlineCallbacks
def test_send_content_info_request(self):
self.nodes[0].overlay.content_repository = MockRepository()
self.nodes[1].overlay.content_repository = MockRepository()
self.nodes[1].overlay.publish_latest_torrents = lambda *args, **kwargs: None
self.nodes[0].received_response = False
self.nodes[0].received_query = None
def process_torrent_search_response(node, query):
node.received_response = True
node.received_query = query
self.nodes[0].overlay.process_torrent_search_response = lambda query, data: \
process_torrent_search_response(self.nodes[0], query)
yield self.introduce_nodes()
self.nodes[0].overlay.subscribe_peers()
yield self.deliver_messages()
content_type = SEARCH_TORRENT_REQUEST
request_list = ["ubuntu"]
self.nodes[0].overlay.send_content_info_request(content_type, request_list, limit=5, peer=None)
yield self.deliver_messages()
self.assertTrue(self.nodes[0].received_response)
self.assertEqual(self.nodes[0].received_query, request_list)
@inlineCallbacks
def test_send_torrent_info_response(self):
self.nodes[1].overlay.publish_latest_torrents = lambda *args, **kwargs: None
self.nodes[0].overlay.content_repository = MockRepository()
self.nodes[1].overlay.content_repository = MockRepository()
self.nodes[0].called_on_torrent_info_response = False
def on_torrent_info_response(node):
node.called_on_torrent_info_response = True
self.nodes[0].overlay.decode_map[chr(MSG_TORRENT_INFO_RESPONSE)] = lambda _source_address, _data: \
on_torrent_info_response(self.nodes[0])
yield self.introduce_nodes()
self.nodes[0].overlay.subscribe_peers()
yield self.deliver_messages()
infohash = 'a'*20
self.nodes[1].overlay.send_torrent_info_response(infohash, self.nodes[0].my_peer)
yield self.deliver_messages()
self.assertTrue(self.nodes[0].called_on_torrent_info_response)
@inlineCallbacks
def test_search_request_timeout(self):
"""
Test whether the callback is called with an empty list when the search request times out
"""
ContentRequest.CONTENT_TIMEOUT = 0.1
self.nodes[0].overlay.content_repository = MockRepository()
self.nodes[1].overlay.content_repository = MockRepository()
self.nodes[1].overlay.publish_latest_torrents = lambda *args, **kwargs: None
yield self.introduce_nodes()
self.nodes[0].overlay.subscribe_peers()
yield self.deliver_messages()
# Make sure that the other node does not respond to our search query
self.nodes[1].overlay.send_content_info_response = lambda *_, **__: None
def on_results(results):
self.assertIsInstance(results, list)
self.assertFalse(results)
content_type = SEARCH_TORRENT_REQUEST
deferred = self.nodes[0].overlay.send_content_info_request(content_type, ["ubuntu"], limit=5, peer=None)
yield deferred.addCallback(on_results)
def fake_logger_error(self, my_peer, *args):
if ERROR_UNKNOWN_PEER in args[0]:
my_peer.unknown_peer_found = True
if ERROR_NO_CONTENT in args[0]:
my_peer.no_content = True
if ERROR_UNKNOWN_RESPONSE in args[0]:
my_peer.unknown_response = True
my_peer.logger_error_called = True
def fake_create_message_packet(self, my_peer, _type, _payload):
my_peer.packet_created = True
my_peer.packet_type = _type
def fake_broadcast_message(self, my_peer, _, peer):
my_peer.broadcast_called = True
my_peer.receiver = peer
|
zhanghui9700/eonboard
|
refs/heads/master
|
eoncloud_web/cloud/utils/__init__.py
|
12133432
| |
lmorchard/django
|
refs/heads/master
|
tests/field_defaults/__init__.py
|
12133432
| |
plilja/project-euler
|
refs/heads/master
|
problem_51/__init__.py
|
12133432
| |
liavkoren/djangoDev
|
refs/heads/master
|
tests/model_forms/__init__.py
|
12133432
| |
Alberto-Beralix/Beralix
|
refs/heads/master
|
i386-squashfs-root/usr/lib/python2.6/dist-packages/chardet/euctwfreq.py
|
2
|
../../../../share/pyshared/chardet/euctwfreq.py
|
androidarmv6/android_external_chromium_org
|
refs/heads/cm-11.0
|
tools/telemetry/telemetry/core/gpu_device.py
|
120
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class GPUDevice(object):
"""Provides information about an individual GPU device.
On platforms which support them, the vendor_id and device_id are
PCI IDs. On other platforms, the vendor_string and device_string
are platform-dependent strings.
"""
_VENDOR_ID_MAP = {
0x1002: 'ATI',
0x8086: 'Intel',
0x10de: 'Nvidia',
}
def __init__(self, vendor_id, device_id, vendor_string, device_string):
self._vendor_id = vendor_id
self._device_id = device_id
self._vendor_string = vendor_string
self._device_string = device_string
def __str__(self):
vendor = 'VENDOR = 0x%x' % self._vendor_id
vendor_string = self._vendor_string
if not vendor_string and self._vendor_id in self._VENDOR_ID_MAP:
vendor_string = self._VENDOR_ID_MAP[self._vendor_id]
if vendor_string:
vendor += ' (%s)' % vendor_string
device = 'DEVICE = 0x%x' % self._device_id
if self._device_string:
device += ' (%s)' % self._device_string
return '%s, %s' % (vendor, device)
@classmethod
def FromDict(cls, attrs):
"""Constructs a GPUDevice from a dictionary. Requires the
following attributes to be present in the dictionary:
vendor_id
device_id
vendor_string
device_string
Raises an exception if any attributes are missing.
"""
return cls(attrs['vendor_id'], attrs['device_id'],
attrs['vendor_string'], attrs['device_string'])
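# Illustrative example (the IDs below are made up for demonstration):
#
#   device = GPUDevice.FromDict({
#       'vendor_id': 0x10de,
#       'device_id': 0x0df8,
#       'vendor_string': '',
#       'device_string': '',
#   })
#   print device  # VENDOR = 0x10de (Nvidia), DEVICE = 0xdf8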
@property
def vendor_id(self):
"""The GPU vendor's PCI ID as a number, or 0 if not available.
Most desktop machines supply this information rather than the
vendor and device strings."""
return self._vendor_id
@property
def device_id(self):
"""The GPU device's PCI ID as a number, or 0 if not available.
Most desktop machines supply this information rather than the
vendor and device strings."""
return self._device_id
@property
def vendor_string(self):
"""The GPU vendor's name as a string, or the empty string if not
available.
Most mobile devices supply this information rather than the PCI
IDs."""
return self._vendor_string
@property
def device_string(self):
"""The GPU device's name as a string, or the empty string if not
available.
Most mobile devices supply this information rather than the PCI
IDs."""
return self._device_string
|
m-sanders/wagtail
|
refs/heads/master
|
wagtail/utils/apps.py
|
59
|
from importlib import import_module
from django.apps import apps
from django.utils.module_loading import module_has_submodule
def get_app_modules():
"""
Generator function that yields the module for each installed app.
Yields tuples of (app_name, module).
"""
for app in apps.get_app_configs():
yield app.name, app.module
def get_app_submodules(submodule_name):
"""
Searches each app module for the specified submodule.
Yields tuples of (app_name, module).
"""
for name, module in get_app_modules():
if module_has_submodule(module, submodule_name):
yield name, import_module('%s.%s' % (name, submodule_name))
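# Usage sketch (the submodule name 'wagtail_hooks' is just an example):
#
#   for app_name, hooks_module in get_app_submodules('wagtail_hooks'):
#       pass  # each app's wagtail_hooks module is now imported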
|
maciekcc/tensorflow
|
refs/heads/master
|
tensorflow/contrib/remote_fused_graph/pylib/python/ops/__init__.py
|
189
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Remote fused graph ops python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
levyx/spark-sql-perf
|
refs/heads/master
|
dev/merge_pr.py
|
9
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Apache.
# usage: ./apache-pr-merge.py (see config env vars below)
#
# This utility assumes you already have a local Spark git folder and that you
# have added remotes corresponding to both (i) the github apache Spark
# mirror and (ii) the apache git repo.
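#
# A minimal invocation sketch (all environment variable values below are
# illustrative placeholders, not real credentials):
#
#   PR_REMOTE_NAME=origin PUSH_REMOTE_NAME=origin \
#   JIRA_USERNAME=jdoe JIRA_PASSWORD=... GITHUB_OAUTH_KEY=... \
#   python dev/merge_pr.py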
import json
import os
import re
import subprocess
import sys
import urllib2
try:
import jira.client
JIRA_IMPORTED = True
except ImportError:
JIRA_IMPORTED = False
# Location of your Spark git development area
SPARK_HOME = os.environ.get("SPARK_SQL_PERF_HOME", os.getcwd())
# Remote name which points to the GitHub site
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "origin")
# Remote name which points to Apache git
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "origin")
# ASF JIRA username
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", "")
# ASF JIRA password
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", "")
# OAuth key used for issuing requests against the GitHub API. If this is not defined, then requests
# will be unauthenticated. You should only need to configure this if you find yourself regularly
# exceeding your IP's unauthenticated request rate limit. You can create an OAuth key at
# https://github.com/settings/tokens. This script only requires the "public_repo" scope.
GITHUB_OAUTH_KEY = os.environ.get("GITHUB_OAUTH_KEY")
GITHUB_BASE = "https://github.com/databricks/spark-sql-perf/pull"
GITHUB_API_BASE = "https://api.github.com/repos/databricks/spark-sql-perf"
JIRA_BASE = "https://issues.apache.org/jira/browse"
JIRA_API_BASE = "https://issues.apache.org/jira"
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
def get_json(url):
try:
request = urllib2.Request(url)
if GITHUB_OAUTH_KEY:
request.add_header('Authorization', 'token %s' % GITHUB_OAUTH_KEY)
return json.load(urllib2.urlopen(request))
except urllib2.HTTPError as e:
if "X-RateLimit-Remaining" in e.headers and e.headers["X-RateLimit-Remaining"] == '0':
print "Exceeded the GitHub API rate limit; see the instructions in " + \
"dev/merge_spark_pr.py to configure an OAuth token for making authenticated " + \
"GitHub requests."
else:
print "Unable to fetch URL, exiting: %s" % url
sys.exit(-1)
def fail(msg):
print msg
clean_up()
sys.exit(-1)
def run_cmd(cmd):
print cmd
if isinstance(cmd, list):
return subprocess.check_output(cmd)
else:
return subprocess.check_output(cmd.split(" "))
def continue_maybe(prompt):
result = raw_input("\n%s (y/n): " % prompt)
if result.lower() != "y":
fail("Okay, exiting")
def clean_up():
print "Restoring head pointer to %s" % original_head
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
print "Deleting local branch %s" % branch
run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref, title, body, pr_repo_desc):
pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
run_cmd("git checkout %s" % target_branch_name)
had_conflicts = False
try:
run_cmd(['git', 'merge', pr_branch_name, '--squash'])
except Exception as e:
msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
continue_maybe(msg)
had_conflicts = True
commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%an <%ae>']).split("\n")
distinct_authors = sorted(set(commit_authors),
key=lambda x: commit_authors.count(x), reverse=True)
primary_author = raw_input(
"Enter primary author in the format of \"name <email>\" [%s]: " %
distinct_authors[0])
if primary_author == "":
primary_author = distinct_authors[0]
commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%h [%an] %s']).split("\n\n")
merge_message_flags = []
merge_message_flags += ["-m", title]
if body is not None:
# We remove @ symbols from the body to avoid triggering e-mails
# to people every time someone creates a public fork of Spark.
merge_message_flags += ["-m", body.replace("@", "")]
authors = "\n".join(["Author: %s" % a for a in distinct_authors])
merge_message_flags += ["-m", authors]
if had_conflicts:
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
committer_name, committer_email)
merge_message_flags += ["-m", message]
# The string "Closes #%s" string is required for GitHub to correctly close the PR
merge_message_flags += ["-m", "Closes #%s from %s." % (pr_num, pr_repo_desc)]
run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)
continue_maybe("Merge complete (local ref %s). Push to %s?" % (
target_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
clean_up()
print("Pull request #%s merged!" % pr_num)
print("Merge hash: %s" % merge_hash)
return merge_hash
def cherry_pick(pr_num, merge_hash, default_branch):
pick_ref = raw_input("Enter a branch name [%s]: " % default_branch)
if pick_ref == "":
pick_ref = default_branch
pick_branch_name = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num, pick_ref.upper())
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name))
run_cmd("git checkout %s" % pick_branch_name)
try:
run_cmd("git cherry-pick -sx %s" % merge_hash)
except Exception as e:
msg = "Error cherry-picking: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and finish the cherry-pick. Finished?"
continue_maybe(msg)
continue_maybe("Pick complete (local ref %s). Push to %s?" % (
pick_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
clean_up()
print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
print("Pick hash: %s" % pick_hash)
return pick_ref
def fix_version_from_branch(branch, versions):
# Note: Assumes this is a sorted (newest->oldest) list of un-released versions
if branch == "master":
return versions[0]
else:
branch_ver = branch.replace("branch-", "")
return filter(lambda x: x.name.startswith(branch_ver), versions)[-1]
def resolve_jira_issue(merge_branches, comment, default_jira_id=""):
asf_jira = jira.client.JIRA({'server': JIRA_API_BASE},
basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
jira_id = raw_input("Enter a JIRA id [%s]: " % default_jira_id)
if jira_id == "":
jira_id = default_jira_id
try:
issue = asf_jira.issue(jira_id)
except Exception as e:
fail("ASF JIRA could not find %s\n%s" % (jira_id, e))
cur_status = issue.fields.status.name
cur_summary = issue.fields.summary
cur_assignee = issue.fields.assignee
if cur_assignee is None:
cur_assignee = "NOT ASSIGNED!!!"
else:
cur_assignee = cur_assignee.displayName
if cur_status == "Resolved" or cur_status == "Closed":
fail("JIRA issue %s already has status '%s'" % (jira_id, cur_status))
print ("=== JIRA %s ===" % jira_id)
print ("summary\t\t%s\nassignee\t%s\nstatus\t\t%s\nurl\t\t%s/%s\n" % (
cur_summary, cur_assignee, cur_status, JIRA_BASE, jira_id))
versions = asf_jira.project_versions("SPARK")
versions = sorted(versions, key=lambda x: x.name, reverse=True)
versions = filter(lambda x: x.raw['released'] is False, versions)
# Consider only x.y.z versions
versions = filter(lambda x: re.match('\d+\.\d+\.\d+', x.name), versions)
default_fix_versions = map(lambda x: fix_version_from_branch(x, versions).name, merge_branches)
for v in default_fix_versions:
# Handles the case where we have forked a release branch but not yet made the release.
# In this case, if the PR is committed to the master branch and the release branch, we
# only consider the release branch to be the fix version. E.g. it is not valid to have
# both 1.1.0 and 1.0.0 as fix versions.
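        # e.g. with unreleased versions [1.1.0, 1.0.0] and merge_branches
        # ['master', 'branch-1.0'], the defaults start as ['1.1.0', '1.0.0']
        # and 1.1.0 is dropped, leaving 1.0.0 as the only fix version.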
(major, minor, patch) = v.split(".")
if patch == "0":
previous = "%s.%s.%s" % (major, int(minor) - 1, 0)
if previous in default_fix_versions:
default_fix_versions = filter(lambda x: x != v, default_fix_versions)
default_fix_versions = ",".join(default_fix_versions)
fix_versions = raw_input("Enter comma-separated fix version(s) [%s]: " % default_fix_versions)
if fix_versions == "":
fix_versions = default_fix_versions
fix_versions = fix_versions.replace(" ", "").split(",")
def get_version_json(version_str):
return filter(lambda v: v.name == version_str, versions)[0].raw
jira_fix_versions = map(lambda v: get_version_json(v), fix_versions)
resolve = filter(lambda a: a['name'] == "Resolve Issue", asf_jira.transitions(jira_id))[0]
resolution = filter(lambda r: r.raw['name'] == "Fixed", asf_jira.resolutions())[0]
asf_jira.transition_issue(
jira_id, resolve["id"], fixVersions = jira_fix_versions,
comment = comment, resolution = {'id': resolution.raw['id']})
print "Successfully resolved %s with fixVersions=%s!" % (jira_id, fix_versions)
def resolve_jira_issues(title, merge_branches, comment):
jira_ids = re.findall("SPARK-[0-9]{4,5}", title)
if len(jira_ids) == 0:
resolve_jira_issue(merge_branches, comment)
for jira_id in jira_ids:
resolve_jira_issue(merge_branches, comment, jira_id)
def standardize_jira_ref(text):
"""
Standardize the [SPARK-XXXXX] [MODULE] prefix
Converts "[SPARK-XXX][mllib] Issue", "[MLLib] SPARK-XXX. Issue" or "SPARK XXX [MLLIB]: Issue" to "[SPARK-XXX] [MLLIB] Issue"
>>> standardize_jira_ref("[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful")
'[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful'
>>> standardize_jira_ref("[SPARK-4123][Project Infra][WIP]: Show new dependencies added in pull requests")
'[SPARK-4123] [PROJECT INFRA] [WIP] Show new dependencies added in pull requests'
>>> standardize_jira_ref("[MLlib] Spark 5954: Top by key")
'[SPARK-5954] [MLLIB] Top by key'
>>> standardize_jira_ref("[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl")
'[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl'
>>> standardize_jira_ref("SPARK-1094 Support MiMa for reporting binary compatibility accross versions.")
'[SPARK-1094] Support MiMa for reporting binary compatibility accross versions.'
>>> standardize_jira_ref("[WIP] [SPARK-1146] Vagrant support for Spark")
'[SPARK-1146] [WIP] Vagrant support for Spark'
>>> standardize_jira_ref("SPARK-1032. If Yarn app fails before registering, app master stays aroun...")
'[SPARK-1032] If Yarn app fails before registering, app master stays aroun...'
>>> standardize_jira_ref("[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.")
'[SPARK-6250] [SPARK-6146] [SPARK-5911] [SQL] Types are now reserved words in DDL parser.'
>>> standardize_jira_ref("Additional information for users building from source code")
'Additional information for users building from source code'
"""
jira_refs = []
components = []
# If the string is compliant, no need to process any further
if (re.search(r'^\[SPARK-[0-9]{3,6}\] (\[[A-Z0-9_\s,]+\] )+\S+', text)):
return text
# Extract JIRA ref(s):
pattern = re.compile(r'(SPARK[-\s]*[0-9]{3,6})+', re.IGNORECASE)
for ref in pattern.findall(text):
# Add brackets, replace spaces with a dash, & convert to uppercase
jira_refs.append('[' + re.sub(r'\s+', '-', ref.upper()) + ']')
text = text.replace(ref, '')
# Extract spark component(s):
# Look for alphanumeric chars, spaces, dashes, periods, and/or commas
pattern = re.compile(r'(\[[\w\s,-\.]+\])', re.IGNORECASE)
for component in pattern.findall(text):
components.append(component.upper())
text = text.replace(component, '')
# Cleanup any remaining symbols:
pattern = re.compile(r'^\W+(.*)', re.IGNORECASE)
if (pattern.search(text) is not None):
text = pattern.search(text).groups()[0]
# Assemble full text (JIRA ref(s), module(s), remaining text)
clean_text = ' '.join(jira_refs).strip() + " " + ' '.join(components).strip() + " " + text.strip()
# Replace multiple spaces with a single space, e.g. if no jira refs and/or components were included
clean_text = re.sub(r'\s+', ' ', clean_text.strip())
return clean_text
def main():
global original_head
os.chdir(SPARK_HOME)
original_head = run_cmd("git rev-parse HEAD")[:8]
branches = get_json("%s/branches" % GITHUB_API_BASE)
#branch_names = filter(lambda x: x.startswith("branch-"), [x['name'] for x in branches])
# Assumes branch names can be sorted lexicographically
latest_branch = "master"
pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
pr_events = get_json("%s/issues/%s/events" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
# Decide whether to use the modified title or not
modified_title = standardize_jira_ref(pr["title"])
if modified_title != pr["title"]:
print "I've re-written the title as follows to match the standard format:"
print "Original: %s" % pr["title"]
print "Modified: %s" % modified_title
result = raw_input("Would you like to use the modified title? (y/n): ")
if result.lower() == "y":
title = modified_title
print "Using modified title:"
else:
title = pr["title"]
print "Using original title:"
print title
else:
title = pr["title"]
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
# Merged pull requests don't appear as merged in the GitHub API;
# Instead, they're closed by asfgit.
merge_commits = \
[e for e in pr_events if e["actor"]["login"] == "asfgit" and e["event"] == "closed"]
if merge_commits:
merge_hash = merge_commits[0]["commit_id"]
message = get_json("%s/commits/%s" % (GITHUB_API_BASE, merge_hash))["commit"]["message"]
print "Pull request %s has already been merged, assuming you want to backport" % pr_num
commit_is_downloaded = run_cmd(['git', 'rev-parse', '--quiet', '--verify',
"%s^{commit}" % merge_hash]).strip() != ""
if not commit_is_downloaded:
fail("Couldn't find any merge commit for #%s, you may need to update HEAD." % pr_num)
print "Found commit %s:\n%s" % (merge_hash, message)
cherry_pick(pr_num, merge_hash, latest_branch)
sys.exit(0)
if not bool(pr["mergeable"]):
msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
"Continue? (experts only!)"
continue_maybe(msg)
print ("\n=== Pull Request #%s ===" % pr_num)
print ("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref, title, body, pr_repo_desc)
pick_prompt = "Would you like to pick %s into another branch?" % merge_hash
while raw_input("\n%s (y/n): " % pick_prompt).lower() == "y":
merged_refs = merged_refs + [cherry_pick(pr_num, merge_hash, latest_branch)]
if JIRA_IMPORTED:
if JIRA_USERNAME and JIRA_PASSWORD:
continue_maybe("Would you like to update an associated JIRA?")
jira_comment = "Issue resolved by pull request %s\n[%s/%s]" % (pr_num, GITHUB_BASE, pr_num)
resolve_jira_issues(title, merged_refs, jira_comment)
else:
print "JIRA_USERNAME and JIRA_PASSWORD not set"
print "Exiting without trying to close the associated JIRA."
else:
print "Could not find jira-python library. Run 'sudo pip install jira' to install."
print "Exiting without trying to close the associated JIRA."
if __name__ == "__main__":
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
exit(-1)
main()
|
freedesktop-unofficial-mirror/gstreamer__gst-python
|
refs/heads/master
|
old_examples/tagsetter.py
|
4
|
#!/usr/bin/env python
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# gst-python
# Copyright (C) 2009 Stefan Kost <ensonic@user.sf.net>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
import sys
import gobject
gobject.threads_init()
import pygst
pygst.require('0.10')
import gst
mainloop = gobject.MainLoop()
def on_eos(bus, msg):
mainloop.quit()
def main(args):
"Tagsetter test, test result with:"
"gst-launch -t playbin uri=file://$PWD/test.avi"
# create a new bin to hold the elements
bin = gst.parse_launch('audiotestsrc num-buffers=100 ! ' +
'lame ! ' +
'avimux name=mux ! ' +
'filesink location=test.avi')
mux = bin.get_by_name('mux')
bus = bin.get_bus()
bus.add_signal_watch()
bus.connect('message::eos', on_eos)
# prepare
bin.set_state(gst.STATE_READY)
# send tags
l = gst.TagList()
l[gst.TAG_ARTIST] = "Unknown Genius"
l[gst.TAG_TITLE] = "Unnamed Artwork"
mux.merge_tags(l, gst.TAG_MERGE_APPEND)
# start playing
bin.set_state(gst.STATE_PLAYING)
try:
mainloop.run()
except KeyboardInterrupt:
pass
# stop the bin
bin.set_state(gst.STATE_NULL)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
edmundgentle/schoolscript
|
refs/heads/master
|
SchoolScript/bin/Debug/pythonlib/Lib/fnmatch.py
|
3
|
"""Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import os
import posixpath
import re
import functools
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
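# Quick usage sketch (behavior follows the docstrings below; note that
# fnmatch() case-normalizes per the host OS, so the first call is True on
# Windows-style case-insensitive systems but False on POSIX):
#
#   fnmatch('Foo.TXT', '*.txt')                 # OS-dependent case handling
#   fnmatchcase('foo.txt', '*.TXT')             # always False: exact case
#   filter(['a.py', 'b.txt', 'c.py'], '*.py')   # ['a.py', 'c.py']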
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = os.path.normcase(name)
pat = os.path.normcase(pat)
return fnmatchcase(name, pat)
@functools.lru_cache(maxsize=250)
def _compile_pattern(pat, is_bytes=False):
if is_bytes:
pat_str = str(pat, 'ISO-8859-1')
res_str = translate(pat_str)
res = bytes(res_str, 'ISO-8859-1')
else:
res = translate(pat)
return re.compile(res).match
def filter(names, pat):
"""Return the subset of the list NAMES that match PAT."""
result = []
pat = os.path.normcase(pat)
match = _compile_pattern(pat, isinstance(pat, bytes))
if os.path is posixpath:
# normcase on posix is NOP. Optimize it away from the loop.
for name in names:
if match(name):
result.append(name)
else:
for name in names:
if match(os.path.normcase(name)):
result.append(name)
return result
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
match = _compile_pattern(pat, isinstance(pat, bytes))
return match(name) is not None
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i+1
if c == '*':
res = res + '.*'
elif c == '?':
res = res + '.'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j+1
if j < n and pat[j] == ']':
j = j+1
while j < n and pat[j] != ']':
j = j+1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\','\\\\')
i = j+1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return res + '\Z(?ms)'
|
Sweetgrassbuffalo/ReactionSweeGrass-v2
|
refs/heads/master
|
.meteor/local/dev_bundle/python/Lib/test/curses_tests.py
|
202
|
#!/usr/bin/env python
#
# $Id: ncurses.py 36559 2004-07-18 05:56:09Z tim_one $
#
# Interactive test suite for the curses module.
# This script displays various things and the user should verify whether
# they display correctly.
#
import curses
from curses import textpad
def test_textpad(stdscr, insert_mode=False):
ncols, nlines = 8, 3
uly, ulx = 3, 2
if insert_mode:
mode = 'insert mode'
else:
mode = 'overwrite mode'
stdscr.addstr(uly-3, ulx, "Use Ctrl-G to end editing (%s)." % mode)
stdscr.addstr(uly-2, ulx, "Be sure to try typing in the lower-right corner.")
win = curses.newwin(nlines, ncols, uly, ulx)
textpad.rectangle(stdscr, uly-1, ulx-1, uly + nlines, ulx + ncols)
stdscr.refresh()
box = textpad.Textbox(win, insert_mode)
contents = box.edit()
stdscr.addstr(uly+ncols+2, 0, "Text entered in the box\n")
stdscr.addstr(repr(contents))
stdscr.addstr('\n')
stdscr.addstr('Press any key')
stdscr.getch()
for i in range(3):
stdscr.move(uly+ncols+2 + i, 0)
stdscr.clrtoeol()
def main(stdscr):
stdscr.clear()
test_textpad(stdscr, False)
test_textpad(stdscr, True)
if __name__ == '__main__':
curses.wrapper(main)
|
slozier/ironpython2
|
refs/heads/master
|
Src/StdLib/Lib/mailcap.py
|
64
|
"""Mailcap file handling. See RFC 1524."""
import os
__all__ = ["getcaps","findmatch"]
# Part 1: top-level interface.
def getcaps():
"""Return a dictionary containing the mailcap database.
The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain')
to a list of dictionaries corresponding to mailcap entries. The list
collects all the entries for that MIME type from all available mailcap
files. Each dictionary contains key-value pairs for that MIME type,
where the viewing command is stored with the key "view".
"""
caps = {}
for mailcap in listmailcapfiles():
try:
fp = open(mailcap, 'r')
except IOError:
continue
with fp:
morecaps = readmailcapfile(fp)
for key, value in morecaps.iteritems():
if not key in caps:
caps[key] = value
else:
caps[key] = caps[key] + value
return caps
def listmailcapfiles():
"""Return a list of all mailcap files found on the system."""
# XXX Actually, this is Unix-specific
if 'MAILCAPS' in os.environ:
str = os.environ['MAILCAPS']
mailcaps = str.split(':')
else:
if 'HOME' in os.environ:
home = os.environ['HOME']
else:
# Don't bother with getpwuid()
home = '.' # Last resort
mailcaps = [home + '/.mailcap', '/etc/mailcap',
'/usr/etc/mailcap', '/usr/local/etc/mailcap']
return mailcaps
# Part 2: the parser.
def readmailcapfile(fp):
"""Read a mailcap file and return a dictionary keyed by MIME type.
Each MIME type is mapped to an entry consisting of a list of
dictionaries; the list will contain more than one such dictionary
if a given MIME type appears more than once in the mailcap file.
Each dictionary contains key-value pairs for that MIME type, where
the viewing command is stored with the key "view".
"""
caps = {}
while 1:
line = fp.readline()
if not line: break
# Ignore comments and blank lines
if line[0] == '#' or line.strip() == '':
continue
nextline = line
# Join continuation lines
while nextline[-2:] == '\\\n':
nextline = fp.readline()
if not nextline: nextline = '\n'
line = line[:-2] + nextline
# Parse the line
key, fields = parseline(line)
if not (key and fields):
continue
# Normalize the key
types = key.split('/')
for j in range(len(types)):
types[j] = types[j].strip()
key = '/'.join(types).lower()
# Update the database
if key in caps:
caps[key].append(fields)
else:
caps[key] = [fields]
return caps
def parseline(line):
"""Parse one entry in a mailcap file and return a dictionary.
The viewing command is stored as the value with the key "view",
and the rest of the fields produce key-value pairs in the dict.
"""
fields = []
i, n = 0, len(line)
while i < n:
field, i = parsefield(line, i, n)
fields.append(field)
i = i+1 # Skip semicolon
if len(fields) < 2:
return None, None
key, view, rest = fields[0], fields[1], fields[2:]
fields = {'view': view}
for field in rest:
i = field.find('=')
if i < 0:
fkey = field
fvalue = ""
else:
fkey = field[:i].strip()
fvalue = field[i+1:].strip()
if fkey in fields:
# Ignore it
pass
else:
fields[fkey] = fvalue
return key, fields
def parsefield(line, i, n):
"""Separate one key-value pair in a mailcap entry."""
start = i
while i < n:
c = line[i]
if c == ';':
break
elif c == '\\':
i = i+2
else:
i = i+1
return line[start:i].strip(), i
# Part 3: using the database.
def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=[]):
"""Find a match for a mailcap entry.
Return a tuple containing the command line, and the mailcap entry
used; (None, None) if no match is found. This may invoke the
'test' command of several matching entries before deciding which
entry to use.
"""
entries = lookup(caps, MIMEtype, key)
# XXX This code should somehow check for the needsterminal flag.
for e in entries:
if 'test' in e:
test = subst(e['test'], filename, plist)
if test and os.system(test) != 0:
continue
command = subst(e[key], MIMEtype, filename, plist)
return command, e
return None, None
def lookup(caps, MIMEtype, key=None):
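    # Collect entries registered for the exact MIME type plus the wildcard
    # 'maintype/*' fallback; when 'key' is given, keep only entries that
    # define that field (e.g. 'view').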
entries = []
if MIMEtype in caps:
entries = entries + caps[MIMEtype]
MIMEtypes = MIMEtype.split('/')
MIMEtype = MIMEtypes[0] + '/*'
if MIMEtype in caps:
entries = entries + caps[MIMEtype]
if key is not None:
entries = filter(lambda e, key=key: key in e, entries)
return entries
def subst(field, MIMEtype, filename, plist=[]):
# XXX Actually, this is Unix-specific
res = ''
i, n = 0, len(field)
while i < n:
c = field[i]; i = i+1
if c != '%':
if c == '\\':
c = field[i:i+1]; i = i+1
res = res + c
else:
c = field[i]; i = i+1
if c == '%':
res = res + c
elif c == 's':
res = res + filename
elif c == 't':
res = res + MIMEtype
elif c == '{':
start = i
while i < n and field[i] != '}':
i = i+1
name = field[start:i]
i = i+1
res = res + findparam(name, plist)
# XXX To do:
# %n == number of parts if type is multipart/*
# %F == list of alternating type and filename for parts
else:
res = res + '%' + c
return res
def findparam(name, plist):
name = name.lower() + '='
n = len(name)
for p in plist:
if p[:n].lower() == name:
return p[n:]
return ''
# Part 4: test program.
def test():
import sys
caps = getcaps()
if not sys.argv[1:]:
show(caps)
return
for i in range(1, len(sys.argv), 2):
args = sys.argv[i:i+2]
if len(args) < 2:
print "usage: mailcap [MIMEtype file] ..."
return
MIMEtype = args[0]
file = args[1]
command, e = findmatch(caps, MIMEtype, 'view', file)
if not command:
print "No viewer found for", type
else:
print "Executing:", command
sts = os.system(command)
if sts:
print "Exit status:", sts
def show(caps):
print "Mailcap files:"
for fn in listmailcapfiles(): print "\t" + fn
print
if not caps: caps = getcaps()
print "Mailcap entries:"
print
ckeys = caps.keys()
ckeys.sort()
for type in ckeys:
print type
entries = caps[type]
for e in entries:
keys = e.keys()
keys.sort()
for k in keys:
print " %-15s" % k, e[k]
print
if __name__ == '__main__':
test()
|
CivicTechTO/open-cabinet
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/contrib/sites/management.py
|
467
|
"""
Creates the default Site object.
"""
from django.apps import apps
from django.conf import settings
from django.core.management.color import no_style
from django.db import DEFAULT_DB_ALIAS, connections, router
def create_default_site(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, **kwargs):
try:
Site = apps.get_model('sites', 'Site')
except LookupError:
return
if not router.allow_migrate_model(using, Site):
return
if not Site.objects.using(using).exists():
# The default settings set SITE_ID = 1, and some tests in Django's test
# suite rely on this value. However, if database sequences are reused
# (e.g. in the test suite after flush/syncdb), it isn't guaranteed that
# the next id will be 1, so we coerce it. See #15573 and #16353. This
# can also crop up outside of tests - see #15346.
if verbosity >= 2:
print("Creating example.com Site object")
Site(pk=getattr(settings, 'SITE_ID', 1), domain="example.com", name="example.com").save(using=using)
# We set an explicit pk instead of relying on auto-incrementation,
# so we need to reset the database sequence. See #17415.
sequence_sql = connections[using].ops.sequence_reset_sql(no_style(), [Site])
if sequence_sql:
if verbosity >= 2:
print("Resetting sequence")
with connections[using].cursor() as cursor:
for command in sequence_sql:
cursor.execute(command)
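# Sketch of how this function is typically wired up: Django connects it to the
# post_migrate signal from the sites app's AppConfig.ready(). Shown here only
# as an illustration, not as part of this module:
#
#   from django.db.models.signals import post_migrate
#   post_migrate.connect(create_default_site, sender=apps.get_app_config('sites'))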
|
basicer/git
|
refs/heads/master
|
git_remote_helpers/__init__.py
|
66
|
#!/usr/bin/env python
"""Support library package for git remote helpers.
Git remote helpers are helper commands that interface with a non-git
repository to provide automatic import of non-git history into a Git
repository.
This package provides the support library needed by these helpers.
The following modules are included:
- git.git - Interaction with Git repositories
- util - General utility functionality use by the other modules in
this package, and also used directly by the helpers.
"""
|
malaonline/Server
|
refs/heads/master
|
server/app/tests.py
|
1
|
import os
import json
import itertools
import datetime
from django.conf import settings
from django.contrib.auth.models import User, Group, Permission
from django.contrib.auth import authenticate
from django.core.urlresolvers import reverse
from django.test import Client, TestCase, SimpleTestCase
from django.test.client import BOUNDARY, MULTIPART_CONTENT, encode_multipart
from django.utils import timezone
from rest_framework.authtoken.models import Token
from app.models import Parent, Teacher, Checkcode, Profile, TimeSlot, Order, \
WeeklyTimeSlot, AuditRecord, Coupon, School, Region, Subject, Grade, \
Ability, Lecturer, LiveClass, ClassRoom
from app.utils.algorithm import Tree, Node, verify_sig
from app.utils.types import parseInt, parse_date, parse_date_next
import app.utils.klx_api as klx
from app.tasks import send_push
app_path = os.path.abspath(os.path.dirname(__file__))
# Create your tests here.
class TestApi(TestCase):
def setUp(self):
        # Make sure unit tests never send real SMS messages
self.assertTrue(settings.FAKE_SMS_SERVER)
def tearDown(self):
pass
def test_token_key(self):
        # Verify that a token can be created successfully
user = User.objects.get(username="parent0")
token = Token.objects.create(user=user)
self.assertTrue(isinstance(token.key, str))
def test_concrete_timeslots(self):
hours = 2
weekly_time_slots = list(WeeklyTimeSlot.objects.filter(
weekday=1, start=datetime.time(8, 0)))
teacher = Teacher.objects.all()[0]
timeslots = Order.objects.concrete_timeslots(
hours, weekly_time_slots, teacher)
self.assertEqual(len(timeslots), 1)
ts = timeslots[0]
self.assertEqual(timezone.localtime(ts['start']).hour, 8)
def test_sms_login(self):
phone = '0001'
code = '1111'
client = Client()
sms_url = reverse('sms')
# parent login or register via sms
parent_group, _new = Group.objects.get_or_create(name="家长")
self.assertIsNotNone(parent_group)
# (1) default content_type
# send
data = {'action': "send", 'phone': phone}
response = client.post(sms_url, data=data)
self.assertEqual(response.status_code, 200)
json_ret = json.loads(response.content.decode())
self.assertTrue(json_ret["sent"])
# verify
data = {'action': "verify", 'phone': phone, 'code': code}
response = client.post(sms_url, data=data)
self.assertEqual(response.status_code, 200)
json_ret = json.loads(response.content.decode())
self.assertTrue(json_ret["verified"])
token = json_ret.get("token")
self.assertTrue(isinstance(token, str) and token != '')
        # (2) json content_type
# send
content_type = "application/json"
data = {'action': "send", 'phone': phone}
data = json.dumps(data)
response = client.post(sms_url, data=data, content_type=content_type)
self.assertEqual(response.status_code, 200)
json_ret = json.loads(response.content.decode())
self.assertTrue(json_ret["sent"])
# verify
data = {'action': "verify", 'phone': phone, 'code': code}
data = json.dumps(data)
response = client.post(sms_url, data=data, content_type=content_type)
self.assertEqual(response.status_code, 200)
json_ret = json.loads(response.content.decode())
self.assertTrue(json_ret["verified"])
token = json_ret.get("token")
self.assertTrue(isinstance(token, str) and token != '')
def test_teacher_list(self):
client = Client()
url = "/api/v1/teachers"
response = client.get(url)
self.assertEqual(response.status_code, 200)
url = "/api/v1/teachers?grade=4&subject=3&tags=2+6"
response = client.get(url)
self.assertEqual(response.status_code, 200)
url = "/api/v1/teachers?grade=4&subject=3&tags=2+6&school=1"
response = client.get(url)
self.assertEqual(response.status_code, 200)
def test_teacher_detail(self):
client = Client()
url = "/api/v1/teachers"
response = client.get(url)
response_content = json.loads(response.content.decode())
        if not response_content['results']:
            print(response_content)
pk = json.loads(response.content.decode())['results'][0]['id']
url = "/api/v1/teachers/%d" % pk
response = client.get(url)
self.assertEqual(response.status_code, 200)
def test_tag_list(self):
client = Client()
url = "/api/v1/tags"
response = client.get(url)
self.assertEqual(response.status_code, 200)
def test_grade_list(self):
client = Client()
url = "/api/v1/grades"
response = client.get(url)
self.assertEqual(response.status_code, 200)
def test_memberservice_list(self):
client = Client()
url = "/api/v1/memberservices"
response = client.get(url)
self.assertEqual(response.status_code, 200)
def test_weeklytimeslot_list(self):
client = Client()
url = "/api/v1/weeklytimeslots"
response = client.get(url)
self.assertEqual(response.status_code, 200)
def test_policy(self):
client = Client()
url = "/api/v1/policy"
response = client.get(url)
self.assertEqual(response.status_code, 200)
def test_teacher_weekly_time_slot(self):
client = Client()
username = "parent0"
password = "123123"
client.login(username=username, password=password)
url = "/api/v1/teachers/1/weeklytimeslots?school_id=1"
response = client.get(url)
self.assertEqual(response.status_code, 200)
def test_get_token_key(self):
client = Client()
request_url = "/api/v1/token-auth"
username = "parent1"
password = "123123"
user = authenticate(username=username, password=password)
self.assertNotEqual(user, None)
parent_user = User.objects.get(username=username)
self.assertEqual(parent_user.is_active, 1)
response = client.post(request_url, {"username": username,
"password": password})
self.assertEqual(response.status_code, 200)
client2 = Client()
response2 = client2.post(request_url, {"username": username,
"password": password})
self.assertEqual(response.content, response2.content)
def test_modify_student_name(self):
username = "parent1"
password = "123123"
user = User.objects.get(username=username)
parent = user.parent
client = Client()
client.login(username=username, password=password)
request_url = "/api/v1/parents/%d" % (parent.pk,)
json_data = json.dumps({"student_name": "StudentNewName"})
response = client.patch(request_url, content_type="application/json",
data=json_data)
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertEqual(json_ret["done"], "true")
parent_after = Parent.objects.get(user=user)
self.assertEqual(parent_after.student_name, "StudentNewName")
parent_after.student_name = ""
parent_after.save()
response = client.patch(request_url, content_type="application/json",
data=json_data)
self.assertEqual(200, response.status_code)
self.assertEqual(response.content.decode(), '{"done":"true"}')
json_ret = json.loads(response.content.decode())
self.assertEqual(json_ret["done"], "true")
request_url = "/api/v1/parents/%d" % (parent.pk,)
school_name = '洛阳一中'
json_data = json.dumps({"student_school_name": school_name})
response = client.patch(request_url, content_type="application/json",
data=json_data)
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertEqual(json_ret["done"], "true")
parent_after = Parent.objects.get(user=user)
self.assertEqual(parent_after.student_school_name, school_name)
def test_modify_user_avatar(self):
username = "parent1"
password = "123123"
user = User.objects.get(username=username)
change_profile_perm = Permission.objects.get(name='Can change profile')
user.user_permissions.add(change_profile_perm)
user.save()
client = Client()
client.login(username=username, password=password)
request_url = "/api/v1/profiles/%d" % (user.profile.pk,)
img_name = 'img0' # NOTE: seq is 0 not 1, seq of the user 'parent1'
img_path = os.path.join(
app_path, 'migrations', 'avatars', img_name + '.jpg')
# print(img_path)
img_fd = open(img_path, 'rb')
data = {'avatar': img_fd}
encoded_data = encode_multipart(BOUNDARY, data)
response = client.patch(request_url, content_type=MULTIPART_CONTENT,
data=encoded_data)
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertEqual(json_ret["done"], "true")
profile_after = Profile.objects.get(user=user)
# print(profile_after.avatar_url())
self.assertTrue(profile_after.avatar.url.find(img_name) >= 0)
def test_modify_avatar_by_teacher(self):
# Teacher role not allowed to modify avatar.
username = "test1"
password = "123123"
user = User.objects.get(username=username)
change_profile_perm = Permission.objects.get(name='Can change profile')
user.user_permissions.add(change_profile_perm)
user.save()
client = Client()
client.login(username=username, password=password)
request_url = "/api/v1/profiles/%d" % (user.profile.pk,)
img_name = 'img0' # NOTE: seq is 0 not 1, seq of the user 'parent1'
img_path = os.path.join(
app_path, 'migrations', 'avatars', img_name + '.jpg')
# print(img_path)
img_fd = open(img_path, 'rb')
data = {'avatar': img_fd}
encoded_data = encode_multipart(BOUNDARY, data)
response = client.patch(request_url, content_type=MULTIPART_CONTENT,
data=encoded_data)
self.assertEqual(409, response.status_code)
def test_concrete_time_slots(self):
client = Client()
url = ("/api/v1/concrete/timeslots" +
"?hours=100&weekly_time_slots=3+8+18&teacher=1")
response = client.get(url)
self.assertEqual(response.status_code, 200)
def test_send_push(self):
send_push('Hello')
def test_coupons(self):
done = False
for i in range(5):
client = Client()
username = "parent%d" % i
password = "123123"
client.login(username=username, password=password)
request_url = "/api/v1/coupons"
response = client.get(request_url, content_type='application/json')
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
for coupon in json_ret['results']:
if coupon['used']:
done = True
if coupon['expired_at'] <= timezone.now().timestamp():
done = True
request_url = "/api/v1/coupons?only_valid=true"
response = client.get(request_url, content_type='application/json')
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
for coupon in json_ret['results']:
self.assertFalse(coupon['used'])
self.assertTrue(
coupon['expired_at'] > timezone.now().timestamp())
if done:
break
self.assertTrue(done)
def test_create_order(self):
client = Client()
username = "parent2"
password = "123123"
client.login(username=username, password=password)
subject = Subject.objects.get(pk=1)
grade = Grade.objects.get(pk=2)
ability = Ability.objects.get(subject=subject, grade=grade)
teacher_id = 2
teacher = Teacher.objects.get(pk=teacher_id)
# make sure the sample teacher is published
teacher.published = True
teacher.save()
school = School.objects.get(pk=1)
hours = 14
prices_set = teacher.level.priceconfig_set.filter(
deleted=False,
school=school,
grade=grade,
min_hours__lte=hours,
).order_by('-min_hours')
price_obj = prices_set.first()
price = price_obj.price
coupon = Coupon.objects.get(pk=2)
        # keep the original expiration time
org_expired_at = coupon.expired_at
coupon.used = False
coupon.save()
request_url = "/api/v1/orders"
json_data = json.dumps({
'teacher': teacher.id,
'school': 1,
'grade': grade.id,
'subject': subject.id,
'coupon': coupon.id,
'hours': hours,
'weekly_time_slots': [3, 8],
})
        # make the coupon's minimum course-hours requirement unsatisfied
coupon.mini_course_count = hours + 5
coupon.mini_total_price = 0
coupon.save()
response = client.post(request_url, content_type="application/json",
data=json_data,)
        # coupon validation should fail
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertFalse(json_ret['ok'])
self.assertEqual(-2, json_ret['code'])
self.assertFalse(coupon.used)
        # make the coupon's minimum order price requirement unsatisfied
coupon.mini_course_count = 0
coupon.mini_total_price = price * hours + 500
coupon.save()
response = client.post(request_url, content_type="application/json",
data=json_data, )
        # coupon validation should fail
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertFalse(json_ret['ok'])
self.assertEqual(-2, json_ret['code'])
self.assertFalse(coupon.used)
        # make the coupon expired, i.e. outside its valid time window
coupon.mini_course_count = 0
coupon.mini_total_price = 0
coupon.expired_at = timezone.now() - datetime.timedelta(minutes=1)
coupon.save()
response = client.post(request_url, content_type="application/json",
data=json_data, )
        # coupon validation should fail
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertFalse(json_ret['ok'])
self.assertEqual(-2, json_ret['code'])
self.assertFalse(coupon.used)
        # now make all coupon restrictions satisfied
coupon.mini_total_price = 0
coupon.mini_course_count = 0
coupon.expired_at = org_expired_at
coupon.save()
response = client.post(request_url, content_type="application/json",
data=json_data, )
        # coupon validation should succeed
self.assertEqual(201, response.status_code)
coupon = Coupon.objects.get(pk=2)
self.assertTrue(coupon.used)
pk = json.loads(response.content.decode())['id']
request_url = "/api/v1/orders/%d" % pk
response = client.get(request_url, content_type='application/json')
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertEqual(json_ret['status'], 'u')
# Test create charge object
json_data = json.dumps({
'action': 'pay', 'channel': 'alipay',
})
response = client.patch(request_url, content_type="application/json",
data=json_data)
self.assertEqual(200, response.status_code)
data = json.loads(response.content.decode())
charge_id = data['id']
json_data = json.dumps({
"id": "evt_ugB6x3K43D16wXCcqbplWAJo",
"created": 1440407501,
"livemode": False,
"type": "charge.succeeded",
"data": {
"object": {
"id": charge_id,
"object": "charge",
"created": 1440407501,
"livemode": True,
"paid": True,
"refunded": False,
"app": "app_urj1WLzvzfTK0OuL",
"channel": "upacp",
"order_no": "123456789",
"client_ip": "127.0.0.1",
"amount": 100,
"amount_settle": 0,
"currency": "cny",
"subject": "Your Subject",
"body": "Your Body",
"extra": {
},
"time_paid": 1440407501,
"time_expire": 1440407501,
"time_settle": None,
"transaction_no": "1224524301201505066067849274",
"refunds": {
"object": "list",
"url": "/v1/charges/ch_Xsr7u35O3m1Ged2ODmi4Lw/refunds",
"has_more": False,
"data": [
]
},
"amount_refunded": 0,
"failure_code": None,
"failure_msg": None,
"metadata": {
},
"credential": {
},
"description": None
}
},
"object": "event",
"pending_webhooks": 0,
"request": "iar_qH4y1KbTy5eLGm1uHSTS00s"
})
request_url = '/api/v1/charge_succeeded'
response = client.post(
request_url, content_type="application/json", data=json_data)
self.assertEqual(200, response.status_code)
order = Order.objects.get(pk=pk)
self.assertEqual(order.status, 'p')
# validate time slots of the order
self.assertTrue(order.is_timeslot_allocated())
request_url = "/api/v1/timeslots"
response = client.get(request_url)
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertEqual(len(order.timeslots()), json_ret['count'])
request_url = "/api/v1/subject/1/record"
response = client.get(request_url)
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertTrue(json_ret['evaluated'])
# Available to oneself
request_url = "/api/v1/teachers/" + str(teacher_id) + "/weeklytimeslots?school_id=1"
response = client.get(request_url)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content.decode())
for value in data.values():
for d in value:
self.assertTrue(d['available'])
# Concrete time slot
hours = 6
weekly_time_slots = list(
WeeklyTimeSlot.objects.filter(pk__in=[3, 8, 20]))
teacher = Teacher.objects.get(pk=teacher_id)
timeslots = Order.objects.concrete_timeslots(
hours, weekly_time_slots, teacher)
self.assertEqual(len(timeslots), 3)
wts = weekly_time_slots[2]
for ts in timeslots:
self.assertEqual(
timezone.localtime(ts['start']).isoweekday(), wts.weekday)
self.assertEqual(
timezone.localtime(ts['start']).time(), wts.start)
# Available time for other parent
client = Client()
username = "parent1"
password = "123123"
client.login(username=username, password=password)
request_url = "/api/v1/teachers/" + str(teacher_id) + "/weeklytimeslots?school_id=1"
response = client.get(request_url)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content.decode())
for value in data.values():
for d in value:
if d['id'] in [3, 8]:
self.assertFalse(d['available'])
else:
self.assertTrue(d['available'])
# Available time for other parent for different school
request_url = "/api/v1/teachers/" + str(teacher_id) + "/weeklytimeslots?school_id=2"
response = client.get(request_url)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content.decode())
for value in data.values():
for d in value:
if d['id'] in [3, 4, 8, 9]:
self.assertFalse(d['available'])
else:
self.assertTrue(d['available'])
# Timeslot in order creation has been occupied
client = Client()
username = "parent0"
password = "123123"
client.login(username=username, password=password)
request_url = "/api/v1/orders"
json_data = json.dumps({
'teacher': teacher_id, 'school': 1, 'grade': 2, 'subject': 1,
'coupon': None, 'hours': 14, 'weekly_time_slots': [3, 6],
})
response = client.post(request_url, content_type="application/json",
data=json_data, )
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertFalse(json_ret['ok'])
self.assertEqual(-1, json_ret['code'])
def test_create_live_class_order(self):
# login as superuser
client = Client()
client.login(username='test', password='mala-test')
# create classroom
response = client.get(reverse("staff:create_room"))
self.assertEqual(response.status_code, 200)
data = {'school': 1, 'name': 'test_room', 'capacity': 20}
response = client.post(reverse("staff:create_room"), data=data)
self.assertEqual(response.status_code, 200)
# create live course and class
if Lecturer.objects.exists():
lecturer = Lecturer.objects.first()
else:
lecturer = Lecturer(name='何芳')
lecturer.save()
response = client.get(reverse("staff:live_course"))
self.assertEqual(response.status_code, 200)
data = {"course_no": "1002", "name": "新概念英语",
"period_desc": "每周六 08:00-10:00;每周日 10:20-12:20",
"grade_desc": "小学四-六年级", "subject": 2, "fee": "480",
"description": "blah blah blah", "lecturer": lecturer.id,
"class_rooms": [{"id": ClassRoom.objects.first().id,
"assistant": Teacher.objects.first().id}],
"course_times": [{"start": 1477699200, "end": 1477706400},
{"start": 1477794000, "end": 1477801200}]}
response = client.post(reverse("staff:live_course"),
data={"data": json.dumps(data)})
self.assertEqual(response.status_code, 200)
# test live class list and instance
client = Client()
url = "/api/v1/liveclasses"
response = client.get(url)
self.assertEqual(response.status_code, 200)
url = "/api/v1/liveclasses?school=1"
response = client.get(url)
self.assertEqual(response.status_code, 200)
url = "/api/v1/liveclasses/1"
response = client.get(url)
self.assertEqual(response.status_code, 200)
# create order for live course
client = Client()
username = "parent2"
password = "123123"
client.login(username=username, password=password)
request_url = "/api/v1/orders"
json_data = json.dumps({
'live_class': LiveClass.objects.first().id
})
response = client.post(request_url, content_type="application/json",
data=json_data, )
self.assertEqual(201, response.status_code)
pk = json.loads(response.content.decode())['id']
request_url = "/api/v1/orders/%d" % pk
response = client.get(request_url, content_type='application/json')
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertEqual(json_ret['status'], 'u')
order = Order.objects.get(pk=int(pk))
ans = Order.objects.get_order_timeslots(order, False)
self.assertNotEqual(len(ans), 0)
def test_cancel_order(self):
client = Client()
username = "parent2"
password = "123123"
client.login(username=username, password=password)
parent = Parent.objects.get(user__username=username)
teacher = Teacher.objects.order_by('?').first()
school = School.objects.order_by('?').first()
ability = Ability.objects.filter(grade_id=2).first()
teacher.abilities.add(ability)
grade = ability.grade
subject = teacher.subject()
hours = 2
order = Order.objects.create(
parent=parent, teacher=teacher, school=school,
grade=grade, subject=subject, hours=hours, coupon=None)
order.save()
request_url = "/api/v1/orders/%s" % order.id
response = client.delete(request_url)
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertTrue(json_ret['ok'])
canceled_order = Order.objects.get(id=order.id)
self.assertEqual(canceled_order.status, Order.CANCELED)
# test with coupon
coupon = Coupon.objects.filter(parent=parent, used=False).order_by('?').first()
if coupon is None:
return
order = Order.objects.create(
parent=parent, teacher=teacher, school=school,
grade=grade, subject=subject, hours=hours, coupon=coupon)
order.save()
used_coupon = Coupon.objects.get(id=coupon.id)
self.assertTrue(used_coupon.used)
request_url = "/api/v1/orders/%s" % order.id
response = client.delete(request_url)
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertTrue(json_ret['ok'])
canceled_order = Order.objects.get(id=order.id)
self.assertEqual(canceled_order.status, Order.CANCELED)
canceled_coupon = Coupon.objects.get(id=coupon.id)
self.assertFalse(canceled_coupon.used)
def test_subject_record(self):
client = Client()
username = "parent1"
password = "123123"
client.login(username=username, password=password)
request_url = "/api/v1/subject/2/record"
response = client.get(request_url)
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertFalse(json_ret['evaluated'])
def test_create_comment(self):
username = "parent0"
password = "123123"
user = User.objects.get(username=username)
parent = user.parent
order = parent.order_set.all()[0]
timeslot = order.timeslot_set.filter(deleted=False)[0]
client = Client()
client.login(username=username, password=password)
request_url = "/api/v1/comments"
json_data = json.dumps({
'timeslot': timeslot.pk, 'score': 5, 'content': 'Good.'})
response = client.post(request_url, content_type="application/json",
data=json_data)
self.assertEqual(201, response.status_code)
pk = json.loads(response.content.decode())['id']
request_url = "/api/v1/comments/%d" % pk
response = client.get(request_url, content_type='application/json')
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertEqual(json_ret['score'], 5)
self.assertEqual(json_ret['content'], 'Good.')
        # Create a comment for a timeslot of an order that does not belong to the current user
user2 = User.objects.get(username='parent4')
parent2 = user2.parent
order2 = parent2.order_set.all()[0]
timeslot2 = order2.timeslot_set.filter(deleted=False)[0]
request_url = "/api/v1/comments"
json_data = json.dumps({
'timeslot': timeslot2.pk, 'score': 5, 'content': 'Good.'})
response = client.post(request_url, content_type="application/json",
data=json_data)
self.assertEqual(400, response.status_code)
def test_timeslots_second(self):
timeslots = TimeSlot.objects.filter(start__second__gt=0)
self.assertEqual(len(timeslots), 0)
timeslots = TimeSlot.objects.filter(end__second__gt=0)
self.assertEqual(len(timeslots), 0)
def test_orders_timeslots(self):
def weekly_2_mins(weekly):
return ((weekly.weekday - 1) * 24 * 60 + weekly.start.hour * 60 +
weekly.start.minute, (weekly.weekday - 1) * 24 * 60 +
weekly.end.hour * 60 + weekly.end.minute)
orders = Order.objects.filter(status='p')
for order in orders:
timeslots = order.timeslot_set.filter(deleted=False)
weekly_time_slots = order.weekly_time_slots.all()
mins = [weekly_2_mins(x) for x in weekly_time_slots]
for timeslot in timeslots:
timeslot.start = timezone.localtime(timeslot.start)
timeslot.end = timezone.localtime(timeslot.end)
cur_min = (
timeslot.start.weekday() * 24 * 60 +
timeslot.start.hour * 60 + timeslot.start.minute,
timeslot.end.weekday() * 24 * 60 +
timeslot.end.hour * 60 + timeslot.end.minute)
self.assertIn(cur_min, mins)
def test_teacher_timeslot(self):
teachers = Teacher.objects.all()
for teacher in teachers:
orders = teacher.order_set.filter(status='p')
timeslots = list(
itertools.chain(
*(order.timeslot_set.filter(deleted=False)
for order in orders)))
timeslots.sort(key=lambda x: (x.start, x.end))
for i, ts in enumerate(timeslots):
if i == 0:
continue
pre_ts = timeslots[i - 1]
self.assertLessEqual(pre_ts.end, ts.start)
if pre_ts.order.school != ts.order.school:
self.assertLessEqual(
pre_ts.end + ts.TRAFFIC_TIME, ts.start)
def test_get_timeslots(self):
username = "parent0"
password = "123123"
client = Client()
client.login(username=username, password=password)
request_url = "/api/v1/timeslots"
response = client.get(request_url, content_type='application/json')
self.assertEqual(200, response.status_code)
json_ret = json.loads(response.content.decode())
self.assertIn('is_expired', json_ret['results'][0])
def test_get_passed_timeslots(self):
username = "parent0"
password = "123123"
client = Client()
client.login(username=username, password=password)
request_url = "/api/v1/timeslots?for_review=true"
response = client.get(request_url, content_type='application/json')
self.assertEqual(200, response.status_code)
def test_audit_record(self):
teacher = Teacher.objects.all()[0]
teacher.status = Teacher.TO_CHOOSE
teacher.set_status(teacher.user, teacher.TO_INTERVIEW)
#print(AuditRecord.objects.all())
def test_teacher_profile(self):
teachers = Teacher.objects.all()
for teacher in teachers:
user = teacher.user
profile = user.profile
self.assertIsNotNone(profile)
def test_kuailexue_study_report(self):
username = "parent1"
password = "123123"
client = Client()
client.login(username=username, password=password)
params = klx.klx_build_params({'uid': '12345678'}, False)
klx.klx_sign_params(params)
self.assertTrue(klx.klx_verify_sign(params))
request_url = "/api/v1/study_report"
response = client.get(request_url, content_type='application/json')
self.assertEqual(200, response.status_code)
math_id = Subject.objects.get(name='数学').id
request_url = "/api/v1/study_report/%s"%(math_id)
response = client.get(request_url, content_type='application/json')
self.assertEqual(200, response.status_code)
def test_schools(self):
client = Client()
request_url = "/api/v1/schools"
region = Region.objects.filter(opened=True).first()
response = client.get(request_url + '?region='+str(region.id), content_type='application/json')
self.assertEqual(200, response.status_code)
teacher = Teacher.objects.first()
response = client.get(request_url + '?teacher='+str(teacher.id), content_type='application/json')
self.assertEqual(200, response.status_code)
def test_teacher_school_prices(self):
client = Client()
teacher = Teacher.objects.first()
school = teacher.schools.first() or School.objects.first()
url = reverse('teacher_school_prices', kwargs={
'teacher_id': teacher.id, 'school_id': school.id})
response = client.get(url)
self.assertEqual(200, response.status_code)
def test_pad_login(self):
client = Client()
parent = Parent.objects.first()
phone = parent.user.profile.phone
url = reverse('pad_login')
response = client.get(url + '?phone=%s' % phone)
self.assertEqual(403, response.status_code)
response = client.post(url, data={'phone': phone})
self.assertEqual(200, response.status_code)
class TestModels(TestCase):
def setUp(self):
self.assertTrue(settings.FAKE_SMS_SERVER)
def tearDown(self):
pass
def test_get_save_account(self):
new_teacher = Teacher.new_teacher("12345")
new_teacher.teacher.safe_get_account()
def test_new_teacher(self):
new_teacher = Teacher.new_teacher("12345")
self.assertTrue(isinstance(new_teacher, User))
def test_sms_verify(self):
phone = "18922405996"
send_result, sms_code = Checkcode.generate(phone)
self.assertTrue(Checkcode.verify(phone, sms_code)[0])
self.assertFalse(Checkcode.verify(phone, "error_code")[0])
def test_other_region(self):
"""
        Check that the region spelled "其它" has been removed from the database
        (only the "其他" spelling should remain)
"""
Region.objects.get(name="其他")
with self.assertRaises(Region.DoesNotExist):
Region.objects.get(name="其它")
def test_school_price(self):
school = School.objects.all().first()
school.priceconfig_set.clear()
school.init_prices()
self.assertTrue(school.priceconfig_set.count() > 0)
class TestAlgorithm(SimpleTestCase):
def test_tree_insert(self):
tree = Tree()
tree.root = Node("a")
tree.insert_val("a", "b", "c")
tree.insert_val("b", "d", "e")
self.assertEqual(tree.get_val("d").val, "d")
self.assertEqual(tree.get_path("d"), ["a", "b", "d"])
self.assertEqual(tree.get_path("e"), ["a", "b", "e"])
self.assertEqual(tree.get_path("c"), ["a", "c"])
self.assertEqual(tree.get_path("b"), ["a", "b"])
def test_parse_int(self):
self.assertTrue(parseInt(None) == 'NaN')
self.assertTrue(parseInt('') == 'NaN')
self.assertTrue(parseInt(123) == 123)
self.assertTrue(parseInt(-123) == -123)
self.assertTrue(parseInt('123') == 123)
self.assertTrue(parseInt('-123') == -123)
self.assertTrue(parseInt('123asd') == 123)
self.assertTrue(parseInt('-123asd') == -123)
self.assertTrue(parseInt(234.234) == 234)
self.assertTrue(parseInt(-234.234) == -234)
self.assertTrue(parseInt('234.234') == 234)
self.assertTrue(parseInt('-234.234') == -234)
self.assertTrue(parseInt('asd') == 'NaN')
self.assertTrue(parseInt('-asd') == 'NaN')
def test_parse_date(self):
self.assertEqual(parse_date('2016-06-1', False), datetime.datetime(2016,6,1))
self.assertEqual(parse_date('2016-06-1'), timezone.make_aware(datetime.datetime(2016,6,1)))
self.assertEqual(parse_date('2016-06-18', False), datetime.datetime(2016,6,18))
self.assertEqual(parse_date('2016-06-18', True), timezone.make_aware(datetime.datetime(2016,6,18)))
self.assertEqual(parse_date('2016-12-08', False), datetime.datetime(2016,12,8))
self.assertEqual(parse_date('2016-12-08'), timezone.make_aware(datetime.datetime(2016,12,8)))
self.assertEqual(parse_date('2016-12-08 4', False), datetime.datetime(2016,12,8,4))
self.assertEqual(parse_date('2016-12-08 4', True), timezone.make_aware(datetime.datetime(2016,12,8,4)))
self.assertEqual(parse_date('2016-12-08 23', False), datetime.datetime(2016,12,8,23))
self.assertEqual(parse_date('2016-12-08 23'), timezone.make_aware(datetime.datetime(2016,12,8,23)))
self.assertEqual(parse_date('2016-12-08 05:24', False), datetime.datetime(2016,12,8,5,24))
self.assertEqual(parse_date('2016-12-08 05:24', True), timezone.make_aware(datetime.datetime(2016,12,8,5,24)))
self.assertEqual(parse_date('2016-12-08 23:09:25', False), datetime.datetime(2016,12,8,23,9,25))
self.assertEqual(parse_date('2016-12-08 23:09:25'), timezone.make_aware(datetime.datetime(2016,12,8,23,9,25)))
self.assertEqual(parse_date_next('2016-06-1', False), datetime.datetime(2016,6,2))
self.assertEqual(parse_date_next('2016-06-18', False), datetime.datetime(2016,6,19))
self.assertEqual(parse_date_next('2016-12-31', False), datetime.datetime(2017,1,1))
self.assertEqual(parse_date_next('2016-12-08 4', False), datetime.datetime(2016,12,8,5))
self.assertEqual(parse_date_next('2016-12-08 23', False), datetime.datetime(2016,12,9,0))
self.assertEqual(parse_date_next('2016-12-08 05:24', False), datetime.datetime(2016,12,8,5,25))
self.assertEqual(parse_date_next('2016-12-08 05:59', False), datetime.datetime(2016,12,8,6,0))
self.assertEqual(parse_date_next('2016-12-08 23:09:25', False), datetime.datetime(2016,12,8,23,9,26))
self.assertEqual(parse_date_next('2016-12-08 23:09:59', False), datetime.datetime(2016,12,8,23,10,0))
def test_verify_sig(self):
sig = (
b'PcU0SMJhbPObiIVinNnalZOjI02koWozxLrxa3WQW3rK/n7I+EuVGuXvh' +
b'sq2MIfUaNiHZDgRFYybGtKr1uuFzEXjA4PwmnDHfWgwRPdjgseoU0eke6' +
b'ZqGpklBRVTbF6PUy6/vAqur4xb7h1wpdrteUpCPafzDmVPsQLicdojJ/T' +
b'F9ACjQW8gTNiS6tE9gL5hxy0RJ3/okRJo6dz2pvJBWkjCrgp/r98z/LQi' +
b'jA1o//atZrH63+DcL/GwEOgaymqbodzusXF+g6WMJ/GTJgjdPRHvpO9UA' +
b'AUKkOQqvwthJvsXIH/L1xqvy+tFpo2J0Ptwg85bowKoyy1qC5ak3sqWqw' +
b'==')
data = ('{"id":"evt_04qN8cXQvIhssduhS4hpqd9p","created":1427555016,' +
'"livemode":false,"type":"account.summary.available","data"' +
':{"object":{"acct_id":"acct_0eHSiDyzv9G09ejT","object":"ac' +
'count_daily_summary","acct_display_name":"xx公司","created' +
'":1425139260,"summary_from":1425052800,"summary_to":142513' +
'9199,"charges_amount":1000,"charges_count":100}},"object":' +
'"event","pending_webhooks":2,"request":null,"scope":"acct_' +
'1234567890123456","acct_id":"acct_1234567890123456"}')
data = data.encode('utf-8')
pubkey = b'''-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzs8SiPoFQT9K0lWa6WSx
0d6UnA391KM2aFwijY0AK7r+MiAe07ivenopzFL3dqIRhQjuP7d30V85kWydN5UZ
cm/tZgm4K+8wttb988hOrzSjtPOMghHK+bnDwE8FIB+ZbHAZCEVhNfE6i9kLGbHH
Q617+mxUTJ3yEZG9CIgke475o2Blxy4UMsRYjo2gl5aanzmOmoZcbiC/R5hXSQUH
XV9/VzA7U//DIm8Xn7rerd1n8+KWCg4hrIIu/A0FKm8zyS4QwAwQO2wdzGB0h15t
uFLhjVz1W5ZPXjmCRLzTUoAvH12C6YFStvS5kjPcA66P1nSKk5o3koSxOumOs0iC
EQIDAQAB
-----END PUBLIC KEY-----'''
self.assertTrue(verify_sig(data, sig, pubkey))
|
stuaxo/mnd
|
refs/heads/master
|
tests/match_test.py
|
1
|
import collections
import unittest
from mnd.match import arg_match
# we will test arg matching against instances of this Msg type
Msg = collections.namedtuple("Msg", "note type")
class ArgMatchTest(unittest.TestCase):
def test_arg(self):
self.assertTrue(arg_match(1, 1))
def test_args(self):
self.assertTrue(arg_match([1], [1]))
def test_subarg(self):
note1 = Msg(note=1, type="note_on")
self.assertTrue(arg_match(dict(note=1), note1))
|
CMPS242-fsgh/project
|
refs/heads/master
|
classifier.py
|
1
|
import numpy as np
import scipy
import scipy.optimize
from scipy.special import expit
class NaiveBayes:
def __init__(self):
self._prior = None
self._mat = None
def fit(self, X, y):
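        # Multinomial Naive Bayes with Laplace (add-one) smoothing.
        # y is treated as a 1 x n_samples row vector of 0/1 labels, so y*X
        # sums the feature counts of the positive class and (1-y)*X those of
        # the negative class; the +1 and +vocabulary-size terms below are the
        # smoothing.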
y = np.matrix(y)
#print type(y)
#y = y.T
p1 = y*X
p2 = (1-y)*X
p = np.vstack([
np.log(p1+1) - np.log(p1.sum() + p1.shape[1]),
np.log(p2+1) - np.log(p2.sum() + p2.shape[1])])
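        # empirical class priors P(y=1) and P(y=0), stored in log space below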
pri = np.matrix([[float(y.sum())/y.shape[1]], [1 - float(y.sum())/y.shape[1] ]])
self._prior = np.log(pri)
self._mat = p
#print self._prior, self._mat
return p, pri
def predict(self, mat):
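        # unnormalized log-posterior for each class; predict class 1 when its
        # score is larger, mapping sign(+/-) onto 1/0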
logp = self._mat*mat.T + self._prior
ans = (np.sign(logp[0] - logp[1]) + 1)/2
return ans.A1
    def validate(self, mat, real_y):
        predict_y = self.predict(mat)
        return (predict_y == real_y).sum()
class LogisticRegression:
def __init__(self, lbd = 1.):
self._w = None
self._lbd = lbd
def _intercept_dot(self, w, X):
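        # z = X.w + b, with the bias b stored as the last entry of w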
#z = scipy.dot(X, w[:-1])
#print X.shape, w[:-1].shape
z = X*w[:-1]
#z = safe_sparse_dot(X, w[:-1])
#print X*w[:-1]
z = z + w[-1]
#z = X * w[:-1] + w[-1]
return z
def fit(self, X, y):
n_features = X.shape[1]
n_data = X.shape[0]
#self._w = np.ones(n_features + 1)
mask = (y == 1)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
#print y_bin
def _loss(w):
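            # penalized logistic loss: logaddexp(0, -z) = log(1 + exp(-z)) is
            # -log(sigmoid(z)), so this is the binary cross-entropy plus an
            # L2 (ridge) penalty of lambda/2 * ||w||^2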
z = self._intercept_dot(w, X)
#loss = scipy.dot(y, scipy.log(sigmoid(z))) + scipy.dot(1-y, scipy.log(1-sigmoid(z)))
#print (y * scipy.logaddexp(0., -z))
#print scipy.log(sigmoid(z))
#print "---"
loss = scipy.sum(y * scipy.logaddexp(0., -z) + (1-y)*scipy.logaddexp(0., z))
return loss+self._lbd/2*scipy.dot(w,w)
def _grad(w):
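            # gradient of the loss above: X^T (sigmoid(z) - y) for the weights
            # and sum(sigmoid(z) - y) for the bias, each plus its ridge term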
z = self._intercept_dot(w, X)
z = expit(z)
grad = np.empty_like(w)
#z = expit(y_bin * z)
#z0 = (z - 1) * y_bin
#print X.T.toarray()
#print (z-y)
#print X.T*(z-y)
#print '---'
grad[:n_features] = X.T * (z - y) + self._lbd*w[:-1]
grad[-1] = scipy.sum(z - y) + self._lbd*w[-1]
return grad
        def _hess(w):
            # Incomplete Hessian helper, apparently adapted from scikit-learn's
            # logistic regression internals: fit_intercept, ret, s and
            # dd_intercept are never defined here, and the optimizer call below
            # only uses the gradient, so this function is never invoked.
            h = np.empty_like(w)
            z = self._intercept_dot(w, X)
            z = expit(z)
            d = z * (1 - z)  # sigma(z) * (1 - sigma(z)), the diagonal weights
            R = scipy.sparse.dia_matrix((d, 0), shape=(n_data, n_data))
            h[:n_features] = scipy.dot(X.T, X*R)
            #ret[:n_features] += alpha * s[:n_features]
            # For the fit intercept case.
            if fit_intercept:
                ret[:n_features] += s[-1] * dd_intercept
                ret[-1] = dd_intercept.dot(s[:n_features])
                ret[-1] += d.sum() * s[-1]
            return ret
#hessian_value = numpy.dot(X.T, X * S[:, numpy.newaxis])
#opt = scipy.optimize.minimize(_loss, scipy.ones(n_features + 1), method='Powell', jac=_grad)
#print opt['x']
opt = scipy.optimize.minimize(_loss, scipy.ones(n_features + 1), method='Newton-CG', jac=_grad)
#print opt['x']
#print opt
#print X.shape, np.hstack([X, np.ones(n_data)]).shape
self._w = opt['x']
def predict1(self, x):
z = self._intercept_dot(self._w, x)
return expit(z), 1-expit(z)
def predict(self, X):
Z = expit(self._intercept_dot(self._w, X))
mask = Z > 0.5
r = scipy.zeros(X.shape[0])
r[mask] = 1
return r
    def validate(self, X, y):
        mask = self.predict(X) > 0.5
        Z = scipy.zeros(y.shape)
        Z[mask] = 1
        return (Z == y).sum()
if __name__ == '__main__':
import loader
from sklearn.feature_extraction.text import HashingVectorizer
d = loader.DataLoader()
g = d.alldata()
def iter_data(n, y, cat):
c = 0
for business in g:
if c % 1000 == 0:
print c, '/', n
if c<n:
if cat.decode('utf-8') in business.categories:
y[c] = 1
else:
y[c] = 0
yield "".join(business.reviews)
else:
return
c += 1
# f = open('data/yelp.csv')
# def iter_data(n, y, cat):
# c = 0
# for line in f:
# if c % 1000 == 0:
# print c, '/', n
# if c < n:
# b_id, categories, review = line.split('\t')
# categories = categories.split(',')
# if cat in categories:
# y[c] = 1
# else:
# y[c] = 0
# yield review
# else:
# return
# c += 1
import feature
from sklearn.feature_extraction.text import CountVectorizer
#v = feature.CountFeature()
v = HashingVectorizer(stop_words='english', non_negative=True, norm=None)
#v = CountVectorizer()
#v._validate_vocabulary()
if True:
n = 40
y = np.zeros(n)
producer = d.binary_producer(n, y, 'Restaurants')
mat = v.transform(producer())
        print 'data read', mat.shape, y.shape
nt = 10
yt = np.zeros(nt)
mt = v.transform(iter_data(nt, yt, 'Restaurants'))
#print yt
else:
mat = v.transform([
"Chinese Beijing Chinese",
"Chinese Chinese Shanghai",
"Chinese Macao",
"Tokyo Japan Chinese"
])
y = scipy.array([1,1,1,0])
n = 4
mt = v.transform([
"Chinese Chinese Chinese Tokyo Japan"
])
yt = scipy.array(np.ones(1))
nt = 1
print mat.shape, mt.shape
print 'our code',
mm = NaiveBayes()
    mm.fit(mat, y)
print float(mm.validate(mt, yt))/nt
from sklearn.naive_bayes import MultinomialNB
model = MultinomialNB()
clf = model.fit(mat, y)
print 'model trained'
s = model.score(mt, yt)
print s
#print mat
from sklearn.linear_model import LogisticRegression as LR
m = LR()
m.fit(mat, y)
print m.predict(mt)
|
princeofdarkness76/thefuck
|
refs/heads/master
|
tests/rules/test_fix_file.py
|
12
|
import pytest
import os
from thefuck.rules.fix_file import match, get_new_command
from tests.utils import Command
# (script, file, line, col (or None), stdout, stderr)
tests = (
('gcc a.c', 'a.c', 3, 1, '',
"""
a.c: In function 'main':
a.c:3:1: error: expected expression before '}' token
}
^
"""),
('clang a.c', 'a.c', 3, 1, '',
"""
a.c:3:1: error: expected expression
}
^
"""),
('perl a.pl', 'a.pl', 3, None, '',
"""
syntax error at a.pl line 3, at EOF
Execution of a.pl aborted due to compilation errors.
"""),
('perl a.pl', 'a.pl', 2, None, '',
"""
Search pattern not terminated at a.pl line 2.
"""),
('sh a.sh', 'a.sh', 2, None, '',
"""
a.sh: line 2: foo: command not found
"""),
('zsh a.sh', 'a.sh', 2, None, '',
"""
a.sh:2: command not found: foo
"""),
('bash a.sh', 'a.sh', 2, None, '',
"""
a.sh: line 2: foo: command not found
"""),
('rustc a.rs', 'a.rs', 2, 5, '',
"""
a.rs:2:5: 2:6 error: unexpected token: `+`
a.rs:2 +
^
"""),
('cargo build', 'src/lib.rs', 3, 5, '',
"""
Compiling test v0.1.0 (file:///tmp/fix-error/test)
src/lib.rs:3:5: 3:6 error: unexpected token: `+`
src/lib.rs:3 +
^
Could not compile `test`.
To learn more, run the command again with --verbose.
"""),
('python a.py', 'a.py', 2, None, '',
"""
File "a.py", line 2
+
^
SyntaxError: invalid syntax
"""),
('python a.py', 'a.py', 8, None, '',
"""
Traceback (most recent call last):
File "a.py", line 8, in <module>
match("foo")
File "a.py", line 5, in match
m = re.search(None, command)
File "/usr/lib/python3.4/re.py", line 170, in search
return _compile(pattern, flags).search(string)
File "/usr/lib/python3.4/re.py", line 293, in _compile
raise TypeError("first argument must be string or compiled pattern")
TypeError: first argument must be string or compiled pattern
"""),
('ruby a.rb', 'a.rb', 3, None, '',
"""
a.rb:3: syntax error, unexpected keyword_end
"""),
('lua a.lua', 'a.lua', 2, None, '',
"""
lua: a.lua:2: unexpected symbol near '+'
"""),
('fish a.sh', '/tmp/fix-error/a.sh', 2, None, '',
"""
fish: Unknown command 'foo'
/tmp/fix-error/a.sh (line 2): foo
^
"""),
('./a', './a', 2, None, '',
"""
awk: ./a:2: BEGIN { print "Hello, world!" + }
awk: ./a:2: ^ syntax error
"""),
('llc a.ll', 'a.ll', 1, 2, '',
"""
llc: a.ll:1:2: error: expected top-level entity
+
^
"""),
('go build a.go', 'a.go', 1, 2, '',
"""
can't load package:
a.go:1:2: expected 'package', found '+'
"""),
('make', 'Makefile', 2, None, '',
"""
bidule
make: bidule: Command not found
Makefile:2: recipe for target 'target' failed
make: *** [target] Error 127
"""),
('git st', '/home/martin/.config/git/config', 1, None, '',
"""
fatal: bad config file line 1 in /home/martin/.config/git/config
"""),
('node fuck.js asdf qwer', '/Users/pablo/Workspace/barebones/fuck.js', '2', 5, '',
"""
/Users/pablo/Workspace/barebones/fuck.js:2
conole.log(arg); // this should read console.log(arg);
^
ReferenceError: conole is not defined
at /Users/pablo/Workspace/barebones/fuck.js:2:5
at Array.forEach (native)
at Object.<anonymous> (/Users/pablo/Workspace/barebones/fuck.js:1:85)
at Module._compile (module.js:460:26)
at Object.Module._extensions..js (module.js:478:10)
at Module.load (module.js:355:32)
at Function.Module._load (module.js:310:12)
at Function.Module.runMain (module.js:501:10)
at startup (node.js:129:16)
at node.js:814:3
"""),
('pep8', './tests/rules/test_systemctl.py', 17, 80,
"""
./tests/rules/test_systemctl.py:17:80: E501 line too long (93 > 79 characters)
./tests/rules/test_systemctl.py:18:80: E501 line too long (103 > 79 characters)
./tests/rules/test_whois.py:20:80: E501 line too long (89 > 79 characters)
./tests/rules/test_whois.py:22:80: E501 line too long (83 > 79 characters)
""", ''),
('py.test', '/home/thefuck/tests/rules/test_fix_file.py', 218, None,
"""
monkeypatch = <_pytest.monkeypatch.monkeypatch object at 0x7fdb76a25b38>
test = ('fish a.sh', '/tmp/fix-error/a.sh', 2, None, '', "\\nfish: Unknown command 'foo'\\n/tmp/fix-error/a.sh (line 2): foo\\n ^\\n")
@pytest.mark.parametrize('test', tests)
@pytest.mark.usefixtures('no_memoize')
def test_get_new_command(monkeypatch, test):
> mocker.patch('os.path.isfile', return_value=True)
E NameError: name 'mocker' is not defined
/home/thefuck/tests/rules/test_fix_file.py:218: NameError
""", ''),
)
@pytest.mark.parametrize('test', tests)
@pytest.mark.usefixtures('no_memoize')
def test_match(mocker, monkeypatch, test):
mocker.patch('os.path.isfile', return_value=True)
monkeypatch.setenv('EDITOR', 'dummy_editor')
assert match(Command(stdout=test[4], stderr=test[5]))
@pytest.mark.parametrize('test', tests)
@pytest.mark.usefixtures('no_memoize')
def test_no_editor(mocker, monkeypatch, test):
mocker.patch('os.path.isfile', return_value=True)
if 'EDITOR' in os.environ:
monkeypatch.delenv('EDITOR')
assert not match(Command(stdout=test[4], stderr=test[5]))
@pytest.mark.parametrize('test', tests)
@pytest.mark.usefixtures('no_memoize')
def test_not_file(mocker, monkeypatch, test):
mocker.patch('os.path.isfile', return_value=False)
monkeypatch.setenv('EDITOR', 'dummy_editor')
assert not match(Command(stdout=test[4], stderr=test[5]))
@pytest.mark.parametrize('test', tests)
@pytest.mark.usefixtures('no_memoize')
def test_get_new_command(mocker, monkeypatch, test):
mocker.patch('os.path.isfile', return_value=True)
monkeypatch.setenv('EDITOR', 'dummy_editor')
cmd = Command(script=test[0], stdout=test[4], stderr=test[5])
    assert (get_new_command(cmd) ==
            'dummy_editor {} +{} && {}'.format(test[1], test[2], test[0]))
@pytest.mark.parametrize('test', tests)
@pytest.mark.usefixtures('no_memoize')
def test_get_new_command_with_settings(mocker, monkeypatch, test, settings):
mocker.patch('os.path.isfile', return_value=True)
monkeypatch.setenv('EDITOR', 'dummy_editor')
cmd = Command(script=test[0], stdout=test[4], stderr=test[5])
settings.fixcolcmd = '{editor} {file} +{line}:{col}'
if test[3]:
assert (get_new_command(cmd) ==
'dummy_editor {} +{}:{} && {}'.format(test[1], test[2], test[3], test[0]))
else:
assert (get_new_command(cmd) ==
'dummy_editor {} +{} && {}'.format(test[1], test[2], test[0]))
|
percy-g2/Novathor_xperia_u8500
|
refs/heads/master
|
6.2.A.1.100/external/webkit/LayoutTests/http/tests/websocket/tests/hybi/too-long-payload_wsh.py
|
11
|
import struct
import time
from mod_pywebsocket import common
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
length = 0x8000000000000000
# pywebsocket refuses to send a frame with too long payload.
# Thus, we need to build a frame manually.
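    # RFC 6455 frame layout: byte 0 carries the FIN bit and the opcode,
    # byte 1 is 127 to signal a 64-bit extended payload length, and the
    # length itself follows as a big-endian unsigned 64-bit integer ('!Q').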
header = chr(0x80 | common.OPCODE_TEXT) # 0x80 is for "fin" bit.
header += chr(127)
header += struct.pack('!Q', length)
request.connection.write(header)
# Send data indefinitely to simulate a real (broken) server sending a big frame.
# A client should ignore these bytes and abort the connection.
while True:
request.connection.write('X' * 4096)
time.sleep(1)
|
mrquim/repository.mrquim
|
refs/heads/master
|
repo/script.module.youtube.dl/lib/youtube_dl/extractor/vzaar.py
|
30
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
)
class VzaarIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:www|view)\.)?vzaar\.com/(?:videos/)?(?P<id>\d+)'
_TESTS = [{
'url': 'https://vzaar.com/videos/1152805',
'md5': 'bde5ddfeb104a6c56a93a06b04901dbf',
'info_dict': {
'id': '1152805',
'ext': 'mp4',
'title': 'sample video (public)',
},
}, {
'url': 'https://view.vzaar.com/27272/player',
'md5': '3b50012ac9bbce7f445550d54e0508f2',
'info_dict': {
'id': '27272',
'ext': 'mp3',
'title': 'MP3',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
video_data = self._download_json(
'http://view.vzaar.com/v2/%s/video' % video_id, video_id)
source_url = video_data['sourceUrl']
info = {
'id': video_id,
'title': video_data['videoTitle'],
'url': source_url,
'thumbnail': self._proto_relative_url(video_data.get('poster')),
'duration': float_or_none(video_data.get('videoDuration')),
}
if 'audio' in source_url:
info.update({
'vcodec': 'none',
'ext': 'mp3',
})
else:
info.update({
'width': int_or_none(video_data.get('width')),
'height': int_or_none(video_data.get('height')),
'ext': 'mp4',
})
return info
|
kinoshitajona/c9addmember
|
refs/heads/master
|
c9addmember/invite/apps.py
|
1
|
from __future__ import unicode_literals
from django.apps import AppConfig
class InviteConfig(AppConfig):
name = 'invite'
|
palazzem/google-blacklist-me
|
refs/heads/master
|
scraper/settings.py
|
1
|
# Scrapy settings for scraper project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'scraper'
SPIDER_MODULES = ['scraper.spiders']
NEWSPIDER_MODULE = 'scraper.spiders'
ITEM_PIPELINES = {
'scraper.pipelines.DuplicatesPipeline': 100
}
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scraper (+http://www.yourdomain.com)'
|
channeng/tictactoe
|
refs/heads/master
|
script.py
|
1
|
r1 = [0,0,0]
r2 = [0,0,0]
r3 = [0,0,0]
board = [r1,r2,r3]
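# 0 = empty cell, 1 = player 1's mark, 2 = player 2's mark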
# PRINTING BOARD & TURNS
def printBoard():
print "\n"
for i in board:
for n in i:
print n,
print "\n"
def Pturn(n):
print "Player " + str(n) + " :"
prow = int(raw_input("Which row? (1/2/3) \n > ")) -1
while (prow +1) <= 0 or (prow+1) > 3:
print "Please re-enter row:"
prow = int(raw_input("Which row? (1/2/3) \n > ")) -1
pcol = int(raw_input("Which col? (1/2/3) \n > ")) -1
while (pcol+1) <= 0 or (pcol+1) > 3:
print "Please re-enter col:"
pcol = int(raw_input("Which col? (1/2/3) \n > ")) -1
if board[prow][pcol] == 0:
board[prow][pcol] = n
printBoard()
# return rowCrit()
else:
printBoard()
print "Sorry, it's taken. Try again!"
Pturn(n)
# return False
def P2turn():
print "Player 2 :"
p2row = int(raw_input("Which row? (1/2/3) \n > ")) -1
p2col = int(raw_input("Which col? (1/2/3) \n > ")) -1
if board[p2row][p2col] == 0:
board[p2row][p2col] = 2
printBoard()
# return rowCrit()
else:
printBoard()
print "Sorry, it's taken. Try again!"
P2turn()
# return False
# GAME OVER CRITERIA
def endCrit():
    # the board is full when no cell is still 0
    for i in board:
        for n in i:
            if n == 0:
                return False
    return True
def nrowmatch(nrow):
    y = 1
    for v in nrow:
        y = y*v
    # the product of a row is 1 only for three 1s and 8 only for three 2s
    if y == 1 or y == 8:
        return True
    else:
        return False
#def nrowmatch(nrow):
# for i in range(len(nrow) - 1):
# if nrow[i] == 0 or nrow[i] != nrow[i+1]:
# return False
# return True
def rowCrit():
    for v in board:
        if nrowmatch(v):
            return True
    return False
def ncolmatch(ncol):
    y = 1
    for i in range(3):
        y = y*board[i][ncol]
    if y == 1 or y == 8:
        return True
    else:
        return False
def colCrit():
    for ncol in range(len(board[0])):
        if ncolmatch(ncol):
            return True
    return False
def leftdiagCrit():
    y = 1
    for i in range(3):
        y = y*board[i][i]
    if y == 1 or y == 8:
        return True
    else:
        return False
def rightdiagCrit():
    y = 1
    for i in range(3):
        y = y*board[i][2-i]
    if y == 1 or y == 8:
        return True
    else:
        return False
def winCrit():
return rightdiagCrit() or leftdiagCrit() or colCrit() or rowCrit()
#GAME RUNS
printBoard()
while True:
    Pturn(1)
    if winCrit():
        print "GAME OVER!!! P1 wins"
        break
    if endCrit():
        print "GAME OVER!!! It's a draw..."
        break
    Pturn(2)
    if winCrit():
        print "GAME OVER!!! P2 wins"
        break
    if endCrit():
        print "GAME OVER!!! It's a draw..."
        break
#win = ""
#while True:
# if P1turn():
# win = "p1"
# break
# if P2turn():
# win = "p2"
# break
#print "GAME OVER!!! " + win + " wins"
#SOME CHANGE SOMEWHERE
|
steppenwolf-sro/callback-schedule
|
refs/heads/master
|
callback_request/migrations/0002_auto_20161116_1940.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-16 19:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('callback_schedule', '0002_auto_20161116_1927'),
('callback_request', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='callbackrequest',
name='managers',
),
migrations.RemoveField(
model_name='callentry',
name='succeeded',
),
migrations.AddField(
model_name='callbackrequest',
name='phones',
field=models.ManyToManyField(to='callback_schedule.CallbackManagerPhone'),
),
migrations.AddField(
model_name='callentry',
name='state',
field=models.CharField(choices=[('processing', 'Processing'), ('success', 'Success'), ('failed', 'Failed')], default='processing', max_length=32),
),
]
|
jwren/intellij-community
|
refs/heads/master
|
python/testData/copyPaste/multiLine/IndentMulti32.after.py
|
996
|
class C:
def foo(self):
x = 1
y = 2
y = 2
|
GunoH/intellij-community
|
refs/heads/master
|
python/testData/copyPaste/multiLine/IndentMulti41.after.py
|
996
|
class C:
def foo(self):
x = 1
y = 2
y = 2
|