repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
albertoconnor/website | wordpress_importer/tests/test_import_command.py | Python | mit | 41,498 | 0.001663 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import datetime
from collections import namedtuple
import mock
import six
from django.conf import settings
from django.test import TestCase
from django.utils import timezone
from wagtail.wagtailcore.models import Page
from articles.models import ArticleCategory, ArticlePage
from images.models import AttributedImage
from people.models import ContributorPage
from wordpress_importer.management.commands import import_from_wordpress
from wordpress_importer.models import (ImageImport, ImportDownloadError,
PostImport)
class ImageCleanUp(object):
def delete_images(self):
# clean up any image files that were created.
images = AttributedImage.objects.all()
for image in images:
storage, path = image.file.storage, image.file.path
image.delete()
storage.delete(path)
FakeResponse = namedtuple('FakeResponse', 'status_code, content')
def local_get_successful(url):
"Fetch a stream from local files."
p_url = six.moves.urllib.parse.urlparse(url)
if p_url.scheme != 'file':
raise ValueError("Expected file scheme")
filename = six.moves.urllib.request.url2pathname(p_url.path)
response = FakeResponse(200, open(filename, 'rb').read())
return response
def local_get_404(url):
"Fetch a stream from local files."
response = FakeResponse(404, None)
return response
test_image_url = 'file:///{}/wordpress_importer/tests/files/testcat.jpg'.format(
settings.PROJECT_ROOT)
test_image_url_with_unicode = 'file:///{}/wordpress_importer/tests/files/testcat♥.jpg'.format(
settings.PROJECT_ROOT)
class TestCommandImportFromWordPressLoadContributors(TestCase, ImageCleanUp):
def setUp(self):
import_from_wordpress.Command.get_contributor_data = self.get_test_contributor_data
def tearDown(self):
self.delete_images()
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsCreatesContributor(self):
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='bob@example.com')
self.assertEqual(1, contributors.count())
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsSetsFirstName(self):
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='bob@example.com')
self.assertEqual('Bob', contributors.first().first_name)
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsSetsLastName(self):
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='bob@example.com')
self.assertEqual('Smith', contributors.first().last_name)
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsSetsNickname(self):
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='bob@example.com')
self.assertEqual('Bobby Smith', contributors.first().nickname)
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsSetsTwitterHandle(self):
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='bob@example.com')
self.assertEqual('@bobsmith', contributors.first().twitter_handle)
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsSetsTwitterHandleFromUrl(self):
import_from_wordpress.Command.get_contributor_data = self.get_test_contributor_data_twitter_url
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='bob@example.com')
self.assertEqual('@bobsmith', contributors.first().twitter_handle)
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsSetsLongBio(self):
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='bob@example.com')
self.assertEqual('Bob Smith is a person who does stuff.',
contributors.first().long_bio)
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsSetsShortBio(self):
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='bob@example.com')
self.assertEqual('He does stuff.',
contributors.first().short_bio)
# @mock.patch('requests.get', local_get_successful)
# def testLoadContributorsSetsImageFile(self):
# command = import_from_wordpress.Command()
# command.load_contributors()
# contributors = ContributorPage.objects.filter(email='bob@example.com')
#
# images = AttributedImage.objects.filter(title='testcat.jpg')
# self.assertEqual(1, images.count())
# self.assertEqual(images.first(), contributors.first().headshot)
#
# @mock.patch('requests.get', local_get_404)
# def testDownloadErrorLoggedWhenErrorGettingImage(self):
# command = import_from_wordpress.Command()
# command.load_contributors()
#
# errors = ImportDownloadError.objects.all()
# self.assertEqual(1, errors.count())
# self.assertEqual(404, errors.first().status_code)
# self.assertEqual(settings.WP_IMPORTER_USER_PHOTO_URL_PATTERN.format("testcat.jpg"), errors.first().url)
def get_test_contributor_data(self):
data = [
('bob@example.com', 'first_name', 'Bob'),
('bob@example.com', 'last_name', 'Smith'),
('bob@example.com', 'nickname', 'Bobby Smith'),
('bob@example.com', 'twitter', '@bobsmith'),
('bob@example.com', 'description',
'Bob Smith is a person who does stuff.'),
('bob@example.com', 'SHORT_BIO',
'He does stuff.'),
('bob@example.com', 'userphoto_image_file', 'testcat.jpg'),
]
return data
def get_test_contributor_data_twitter_url(self):
data = [
('bob@example.com', 'first_name', 'Bob'),
('bob@example.com', 'last_name', 'Smith'),
('bob@example.com', 'nickname', 'Bobby Smith'),
('bob@example.com', 'TWITTER', 'https://twitter.com/bobsmith'),
('bob@example.com', 'description',
| 'Bob Smith is a person who does stuff.'),
('bob@example.com', 'SHORT_BIO',
'He does stuff.'),
('bob@example.com', 'userphoto_image_file', 'testcat.jpg'),
]
return data
@mock.patch('requests.get', local_get_successful)
class TestCommandImportFromWordPressUnicodeSlug(TestCase, ImageCleanUp):
def setUp(self):
import_from_wordpress.Command.get_post_data = self.get_te | st_post_data
import_from_wordpress.Command.get_post_image_data = self.get_test_post_image_data
import_from_wordpress.Command.get_data_for_topics = self.get_test_data_for_topics
def tearDown(self):
self.delete_images()
def testCreatesPageWithAsciiSlug(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(
slug='crisis-at-home-for-canadas-armed-forces')
self.assertEqual(1, pages.count())
def get_test_post_data(self, post_type):
data = [
(1,
'Crisis At Home',
'Test?',
'Body.',
"crisis-at-home-for-canadas-armed-forces%e2%80%a8",
'bob@example.com',
datetime.datetime(2011, 2, 22, 5, 48, 31),
),
]
return data
def get_test_post_image_data(self, post_id):
return None
def get_test_data_for_topic |
socialwifi/jsonapi-requests | jsonapi_requests/orm/api.py | Python | bsd-3-clause | 376 | 0 | from jsonapi_re | quests import base
from jsonapi_requests.orm import registry
class OrmApi:
def __init__(self, api):
self.type_registry = registry.TypeRegistry()
self.api = api
@classmethod
def config(cls, *args, **kwargs):
return cls(base.Api.config(*args, **kwargs))
def endpoint(self, path):
return s | elf.api.endpoint(path)
|
pekrau/Publications | tests/utils.py | Python | mit | 606 | 0 | "Utility functions for the tests."
import json
def get_settings(**defaults):
"Update | the default settings by the contents of the 'settings.json' file."
result = defaults.copy()
with open("settings.json", "rb") as infile:
data = json.load(infile)
for key in result:
try:
result[key] = data[key] |
except KeyError:
pass
if result.get(key) is None:
raise KeyError(f"Missing {key} value in settings.")
# Remove any trailing slash in the base URL.
result["BASE_URL"] = result["BASE_URL"].rstrip("/")
return result
|
Naeka/vosae-app | www/invoicing/api/resources/payment.py | Python | agpl-3.0 | 2,892 | 0 | # -*- coding:Utf-8 -*-
from tastypie import fields as base_fields
from tastypie_mongoengine import fields
from core.api.utils import TenantResource
from invoicing.models import (
Payment, InvoicePayment, DownPaymentInvoicePayment
)
from invoicing.api.doc import HELP_TEXT
__all__ = (
'PaymentResource',
)
class PaymentBaseResource(TenantResource):
issued_at = base_fields.DateTimeField(
attribute='issued_at',
readonly=True,
help_text=HELP_TEXT['payment']['issued_at']
)
date = base_fields.DateField(
attribute='date',
help_text=HELP_TEXT['payment']['date']
)
amount = base_fields.DecimalField(
attribute='amount',
help_text=HELP_TEXT['payment']['amount']
)
type = base_fields.CharField(
attribute='type',
blank=True,
help_text=HELP_TEXT['payment']['type']
)
note = base_fields.CharField(
attribute='note',
null=True,
blank=True,
help_text=HELP_TEXT['payment']['note']
)
issuer = fields.ReferenceField(
to='core.api.resources.VosaeUserResource',
attribute='issuer',
readonly=True,
he | lp_text=HELP_TEXT['payment']['issuer']
)
currency = fields.ReferenceField(
to='invoicing.api.resources.CurrencyResource',
attribute='currency',
help_text=HELP_TEXT['payment']['currency']
)
class Meta(TenantResource.Met | a):
excludes = ('tenant',)
list_allowed_methods = ('post')
detail_allowed_methods = ('get', 'delete',)
class InvoicePaymentResource(PaymentBaseResource):
related_to = fields.ReferenceField(
to='invoicing.api.resources.InvoiceResource',
attribute='related_to',
help_text=HELP_TEXT['payment']['invoice']
)
class Meta(PaymentBaseResource.Meta):
queryset = InvoicePayment.objects.all()
class DownPaymentInvoicePaymentResource(PaymentBaseResource):
related_to = fields.ReferenceField(
to='invoicing.api.resources.DownPaymentInvoiceResource',
attribute='related_to',
help_text=HELP_TEXT['payment']['down_payment_invoice']
)
class Meta(PaymentBaseResource.Meta):
queryset = DownPaymentInvoicePayment.objects.all()
class PaymentResource(PaymentBaseResource):
class Meta(PaymentBaseResource.Meta):
queryset = Payment.objects.all()
polymorphic = {
'payment': 'self',
'invoice_payment': InvoicePaymentResource,
'down_payment_invoice_payment': DownPaymentInvoicePaymentResource
}
def full_hydrate(self, bundle):
"""Set issuer on POST, extracted from request"""
bundle = super(PaymentResource, self).full_hydrate(bundle)
if bundle.request.method.lower() == 'post':
bundle.obj.issuer = bundle.request.vosae_user
return bundle
|
rec/DMXIS | Macros/Colours/Greens/Dark Green.py | Python | artistic-2.0 | 199 | 0.020101 | #===============================================================
# DMXIS Macro (c) 2010 db audioware limited
#================== | =============================================
RgbColour(0,100,0)
| |
11craft/bilsbrowser | examples/appserver/setup.py | Python | gpl-2.0 | 720 | 0.004167 | from setuptools import setup, find_packages
import sys, os
version = '0.1'
setup(
name='appserver',
vers | ion=version,
description="Sample application server for bilsbrowser",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='ElevenCraft Inc.', |
author_email='matt@11craft.com',
url='http://github.com/11craft/bilsbrowser/',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'Django == 1.0.2-final',
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/soyuz/browser/tests/test_package_copying_mixin.py | Python | agpl-3.0 | 8,305 | 0 | # Copyright 2011-2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for `PackageCopyin | gMixin`."""
__metaclass__ = type
from zope.component import getUtility
from zope.security.proxy import removeSecurityProxy
from lp.registry.interfaces.pocket import PackagePublishingPocket
from lp.services.propertycache import c | achedproperty
from lp.soyuz.browser.archive import (
copy_asynchronously,
render_cannotcopy_as_html,
)
from lp.soyuz.enums import SourcePackageFormat
from lp.soyuz.interfaces.archive import CannotCopy
from lp.soyuz.interfaces.packagecopyjob import IPlainPackageCopyJobSource
from lp.soyuz.interfaces.sourcepackageformat import (
ISourcePackageFormatSelectionSet,
)
from lp.testing import (
TestCase,
TestCaseWithFactory,
)
from lp.testing.layers import LaunchpadFunctionalLayer
def find_spph_copy(archive, spph):
"""Find copy of `spph`'s package as copied into `archive`"""
spr = spph.sourcepackagerelease
return archive.getPublishedSources(
name=spr.sourcepackagename.name, version=spr.version).one()
class TestPackageCopyingMixinLight(TestCase):
"""Test lightweight functions and methods.
This test does not run in a layer and does not access the database.
"""
unique_number = 1
def getUniqueString(self):
"""Return an arbitrary string."""
self.unique_number += 1
return "string_%d_" % self.unique_number
def test_render_cannotcopy_as_html_lists_errors(self):
# render_cannotcopy_as_html includes a CannotCopy error message
# into its HTML notice.
message = self.getUniqueString()
html_text = render_cannotcopy_as_html(CannotCopy(message)).escapedtext
self.assertIn(message, html_text)
def test_render_cannotcopy_as_html_escapes_error(self):
# render_cannotcopy_as_html escapes error messages.
message = "x<>y"
html_text = render_cannotcopy_as_html(CannotCopy(message)).escapedtext
self.assertNotIn(message, html_text)
self.assertIn("x<>y", html_text)
class TestPackageCopyingMixinIntegration(TestCaseWithFactory):
"""Integration tests for `PackageCopyingMixin`."""
layer = LaunchpadFunctionalLayer
@cachedproperty
def person(self):
"""Create a single person who gets blamed for everything.
Creating SPPHs, Archives etc. in the factory creates lots of
`Person`s, which turns out to be really slow. Tests that don't
care who's who can use this single person for all uninteresting
Person fields.
"""
return self.factory.makePerson()
def makeDistribution(self):
"""Create a `Distribution`, but quickly by reusing a single Person."""
return self.factory.makeDistribution(
owner=self.person, registrant=self.person)
def makeDistroSeries(self, previous_series=None):
"""Create a `DistroSeries`, but quickly by reusing a single Person."""
return self.factory.makeDistroSeries(
distribution=self.makeDistribution(),
previous_series=previous_series,
registrant=self.person)
def makeSPPH(self):
"""Create a `SourcePackagePublishingHistory` quickly."""
archive = self.factory.makeArchive(
owner=self.person, distribution=self.makeDistribution())
return self.factory.makeSourcePackagePublishingHistory(
maintainer=self.person, creator=self.person, archive=archive)
def makeDerivedSeries(self):
"""Create a derived `DistroSeries`, quickly."""
parent_series = self.makeDistroSeries()
derived_series = self.makeDistroSeries()
self.factory.makeDistroSeriesParent(
parent_series=parent_series, derived_series=derived_series)
getUtility(ISourcePackageFormatSelectionSet).add(
derived_series, SourcePackageFormat.FORMAT_1_0)
return derived_series
def getUploader(self, archive, spn):
"""Get person with upload rights for the given package and archive."""
uploader = archive.owner
removeSecurityProxy(archive).newPackageUploader(uploader, spn)
return uploader
def test_copy_asynchronously_does_not_copy_packages(self):
# copy_asynchronously does not copy packages into the destination
# archive; that happens later, asynchronously.
spph = self.makeSPPH()
dest_series = self.makeDerivedSeries()
archive = dest_series.distribution.main_archive
pocket = self.factory.getAnyPocket()
copy_asynchronously(
[spph], archive, dest_series, pocket, include_binaries=False,
check_permissions=False, person=self.factory.makePerson())
self.assertEqual(None, find_spph_copy(archive, spph))
def test_copy_asynchronously_creates_copy_jobs(self):
# copy_asynchronously creates PackageCopyJobs.
spph = self.makeSPPH()
dest_series = self.makeDerivedSeries()
pocket = self.factory.getAnyPocket()
archive = dest_series.distribution.main_archive
copy_asynchronously(
[spph], archive, dest_series, pocket, include_binaries=False,
check_permissions=False, person=self.factory.makePerson())
jobs = list(getUtility(IPlainPackageCopyJobSource).getActiveJobs(
archive))
self.assertEqual(1, len(jobs))
job = jobs[0]
spr = spph.sourcepackagerelease
self.assertEqual(spr.sourcepackagename.name, job.package_name)
self.assertEqual(spr.version, job.package_version)
self.assertEqual(dest_series, job.target_distroseries)
def test_copy_asynchronously_handles_no_dest_series(self):
# If dest_series is None, copy_asynchronously creates jobs that will
# copy each source into the same distroseries in the target archive.
distribution = self.makeDistribution()
series_one = self.factory.makeDistroSeries(
distribution=distribution, registrant=self.person)
series_two = self.factory.makeDistroSeries(
distribution=distribution, registrant=self.person)
spph_one = self.factory.makeSourcePackagePublishingHistory(
distroseries=series_one, sourcepackagename="one",
maintainer=self.person, creator=self.person)
spph_two = self.factory.makeSourcePackagePublishingHistory(
distroseries=series_two, sourcepackagename="two",
maintainer=self.person, creator=self.person)
pocket = self.factory.getAnyPocket()
target_archive = self.factory.makeArchive(
owner=self.person, distribution=distribution)
copy_asynchronously(
[spph_one, spph_two], target_archive, None, pocket,
include_binaries=False, check_permissions=False,
person=self.person)
jobs = list(getUtility(IPlainPackageCopyJobSource).getActiveJobs(
target_archive))
self.assertEqual(2, len(jobs))
self.assertContentEqual(
[("one", spph_one.distroseries), ("two", spph_two.distroseries)],
[(job.package_name, job.target_distroseries) for job in jobs])
def test_copy_asynchronously_may_allow_copy(self):
# In a normal working situation, copy_asynchronously allows a
# copy.
spph = self.makeSPPH()
pocket = PackagePublishingPocket.RELEASE
dest_series = self.makeDerivedSeries()
dest_archive = dest_series.main_archive
spn = spph.sourcepackagerelease.sourcepackagename
notification = copy_asynchronously(
[spph], dest_archive, dest_series, pocket, False,
person=self.getUploader(dest_archive, spn))
self.assertIn("Requested", notification.escapedtext)
def test_copy_asynchronously_checks_permissions(self):
# Unless told not to, copy_asynchronously does a permissions
# check.
spph = self.makeSPPH()
pocket = self.factory.getAnyPocket()
dest_series = self.makeDistroSeries()
self.assertRaises(
|
jankoslavic/numpy | numpy/lib/shape_base.py | Python | bsd-3-clause | 25,676 | 0.000818 | from __future__ import division, absolute_import, print_function
import warnings
import numpy.core.numeric as _nx
from numpy.core.numeric import (
asarray, zeros, outer, concatenate, isscalar, array, asanyarray
)
from numpy.core.fromnumeric import product, reshape
from numpy.core import vstack, atleast_3d
__all__ = [
'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
'apply_along_axis', 'kron', 'tile', 'get_array_wrap'
]
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
Apply a function to 1-D slices along the given axis.
Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`
is a 1-D slice of `arr` along `axis`.
Parameters
----------
func1d : function
This function should accept 1-D arrays. It is applied to 1-D
slices of `arr` along the specified axis.
axis : integer
Axis along which `arr` is sliced.
arr : ndarray
Input array.
args : any
Additional arguments to `func1d`.
kwargs: any
Additional named arguments to `func1d`.
.. versionadded:: 1.9.0
Returns
-------
apply_along_axis : ndarray
The output array. The shape of `outarr` is identical to the shape of
`arr`, except along the `axis` dimension, where the length of `outarr`
is equal to the size of the return value of `func1d`. If `func1d`
returns a scalar `outarr` will have one fewer dimensions than `arr`.
See Also
--------
apply_over_axes : Apply a function repeatedly over multiple axes.
Examples
--------
>>> def my_func(a):
... \"\"\"Average first and last element of a 1-D array\"\"\"
... return (a[0] + a[-1]) * 0.5
>>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> np.apply_along_axis(my_func, 0, b)
array([ 4., 5., 6.])
>>> np.apply_along_axis(my_func, 1, b)
array([ 2., 5., 8.])
For a function that doesn't return a scalar, the number of dimensions in
`outarr` is the same as `arr`.
>>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
>>> np.apply_along_axis(sorted, 1, b)
array([[1, 7, 8],
[3, 4, 9],
[2, 5, 6]])
"""
arr = asarray(arr)
nd = arr.ndim
if axis < 0:
axis += nd
if (axis >= nd):
raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
% (axis, nd))
ind = [0]*(nd-1)
i = zeros(nd, 'O')
indlist = list(range(nd))
indlist.remove(axis)
i[axis] = slice(None, None)
outshape = asarray(arr.shape).take(indlist)
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
# if res is a number, then we have a smaller output array
if isscalar(res):
outarr = zeros(outshape, asarray(res).dtype)
outarr[tuple(ind)] = res
Ntot = product(outshape)
| k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= outshape[n]) and (n > (1-nd)):
ind[n-1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
| outarr[tuple(ind)] = res
k += 1
return outarr
else:
Ntot = product(outshape)
holdshape = outshape
outshape = list(arr.shape)
outshape[axis] = len(res)
outarr = zeros(outshape, asarray(res).dtype)
outarr[tuple(i.tolist())] = res
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= holdshape[n]) and (n > (1-nd)):
ind[n-1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
outarr[tuple(i.tolist())] = res
k += 1
return outarr
def apply_over_axes(func, a, axes):
"""
Apply a function repeatedly over multiple axes.
`func` is called as `res = func(a, axis)`, where `axis` is the first
element of `axes`. The result `res` of the function call must have
either the same dimensions as `a` or one less dimension. If `res`
has one less dimension than `a`, a dimension is inserted before
`axis`. The call to `func` is then repeated for each axis in `axes`,
with `res` as the first argument.
Parameters
----------
func : function
This function must take two arguments, `func(a, axis)`.
a : array_like
Input array.
axes : array_like
Axes over which `func` is applied; the elements must be integers.
Returns
-------
apply_over_axis : ndarray
The output array. The number of dimensions is the same as `a`,
but the shape can be different. This depends on whether `func`
changes the shape of its output with respect to its input.
See Also
--------
apply_along_axis :
Apply a function to 1-D slices of an array along the given axis.
Notes
------
This function is equivalent to tuple axis arguments to reorderable ufuncs
with keepdims=True. Tuple axis arguments to ufuncs have been availabe since
version 1.7.0.
Examples
--------
>>> a = np.arange(24).reshape(2,3,4)
>>> a
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
Sum over axes 0 and 2. The result has same number of dimensions
as the original array:
>>> np.apply_over_axes(np.sum, a, [0,2])
array([[[ 60],
[ 92],
[124]]])
Tuple axis arguments to ufuncs are equivalent:
>>> np.sum(a, axis=(0,2), keepdims=True)
array([[[ 60],
[ 92],
[124]]])
"""
val = asarray(a)
N = a.ndim
if array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
return val
def expand_dims(a, axis):
"""
Expand the shape of an array.
Insert a new axis, corresponding to a given position in the array shape.
Parameters
----------
a : array_like
Input array.
axis : int
Position (amongst axes) where new axis is to be inserted.
Returns
-------
res : ndarray
Output array. The number of dimensions is one greater than that of
the input array.
See Also
--------
doc.indexing, atleast_1d, atleast_2d, atleast_3d
Examples
--------
>>> x = np.array([1,2])
>>> x.shape
(2,)
The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``:
>>> y = np.expand_dims(x, axis=0)
>>> y
array([[1, 2]])
>>> y.shape
(1, 2)
>>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,newaxis]
>>> y
array([[1],
[2]])
>>> y.shape
(2, 1)
Note that some examples may use ``None`` instead of ``np.newaxis``. These
are the same objects:
>>> np.newaxis is None
True
"""
a = asarray(a)
shape = a.shape
if axis < 0:
axis = axis + len(shape) + 1
return a.reshape(shape[:axis] + (1,) + shape[axis:])
row_stack = vstack
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
Parameters
----------
tup : sequence of 1-D or 2-D arrays.
Arrays to |
DjangoGirls/djangogirls | story/urls.py | Python | bsd-3-clause | 161 | 0 | from django.u | rls import path
from story.views import StoryListView
app_name = "story"
urlpatterns = [
path('', StoryList | View.as_view(), name='stories'),
]
|
nexedi/dream | dream/simulation/Exit.py | Python | gpl-3.0 | 9,069 | 0.012681 | # ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
''' |
Created on 6 Feb 2013
@author: George
'''
'''
models the exit of the mod | el
'''
# from SimPy.Simulation import now, Process, Resource, infinity, waituntil, waitevent
import simpy
import xlwt
from CoreObject import CoreObject
# ===========================================================================
# The exit object
# ===========================================================================
class Exit(CoreObject):
family='Exit'
def __init__(self, id, name, cancelCondition={},**kw):
self.type="Exit" # XXX needed ?
#lists to hold statistics of multiple runs
self.Exits=[]
self.UnitExits=[]
self.Lifespan=[]
self.TaktTime=[]
# if input is given in a dictionary
CoreObject.__init__(self, id, name)
from Globals import G
G.ExitList.append(self)
self.cancelCondition=cancelCondition
def initialize(self):
# using the Process __init__ and not the CoreObject __init__
CoreObject.initialize(self)
# initialize the internal Queue (type Resource) of the Exit
self.Res=simpy.Resource(self.env, capacity=float('inf'))
# The number of resource that exited through this exit.
# XXX bug: cannot output as json when nothing has exited.
self.numOfExits=0
self.totalNumberOfUnitsExited=0
self.totalLifespan=0
self.totalTaktTime=0 # the total time between to consecutive exits
self.intervalThroughPutList=[]
self.expectedSignals['isRequested']=1
def run(self):
while 1:
# wait until the Queue can accept an entity and one predecessor requests it
self.expectedSignals['isRequested']=1
yield self.isRequested
self.isRequested=self.env.event()
# TODO: insert extra controls to check whether the self.giver attribute is correctly updated
self.getEntity()
self.signalGiver()
# =======================================================================
# sets the routing in element for the Exit
# =======================================================================
def defineRouting(self, predecessorList=[]):
self.previous=predecessorList # no successorList for the Exit
# =======================================================================
# checks if the Exit can accept an entity
# =======================================================================
def canAccept(self, callerObject=None):
return True #the exit always can accept an entity
# =======================================================================
# checks if the Exit can accept an entity
# and there is an entity waiting for it
# =======================================================================
def canAcceptAndIsRequested(self,callerObject=None):
# get the active object and its internal queue
activeObject=self.getActiveObject()
activeObjectQueue=self.getActiveObjectQueue()
giverObject=callerObject
assert giverObject, 'there must be a caller for canAcceptAndIsRequested'
return giverObject.haveToDispose(self)
# =======================================================================
# gets an entity from the predecessor
# =======================================================================
def getEntity(self):
activeEntity = CoreObject.getEntity(self) #run the default method
# if the entity is in the G.pendingEntities list then remove it from there
from Globals import G
# G.pendingEntities[:]=(entity for entity in G.pendingEntities if not entity is activeEntity)
if G.RouterList:
if activeEntity in G.pendingEntities:
G.pendingEntities.remove(activeEntity)
# if activeEntity in G.EntityList:
# G.EntityList.remove(activeEntity)
# self.clear(activeEntity)
self.totalLifespan+=self.env.now-activeEntity.startTime #Add the entity's lifespan to the total one.
self.numOfExits+=1 # increase the exits by one
self.totalNumberOfUnitsExited+=activeEntity.numberOfUnits # add the number of units that xited
self.totalTaktTime+=self.env.now-self.timeLastEntityLeft # add the takt time
self.timeLastEntityLeft=self.env.now # update the time that the last entity left from the Exit
activeObjectQueue=self.getActiveObjectQueue()
del self.Res.users[:]
# if there is a cancelCondition the exit may end the simulation
if self.cancelCondition:
if self.cancelCondition.get('reason',None) =='throughput' and int(self.cancelCondition.get('number',-1))==self.numOfExits:
self.endSimulation()
if self.cancelCondition.get('reason',None) =='empty' and self.checkIfSystemEmpty():
self.endSimulation()
return activeEntity
@staticmethod
def clear(entity):
from Globals import G
def deleteEntityfromlist(entity, list):
if entity in list:
list.remove(entity)
lists=(G.EntityList, G.PartList, G.pendingEntities, G.WipList)
# lists=(G.EntityList, G.PartList, G.BatchList, G.SubBatchList,
# G.JobList, G.OrderList, G.OrderComponentList, G.MouldList,
# G.pendingEntities, G.WipList)
for list in lists:
deleteEntityfromlist(entity,list)
#===========================================================================
# haveToDispose of an exit must always return False
#===========================================================================
def haveToDispose(self, callerObject=None):
return False
# =======================================================================
# actions to be taken after the simulation ends
# =======================================================================
def postProcessing(self, MaxSimtime=None):
from Globals import G
if MaxSimtime==None:
MaxSimtime=G.maxSimTime
# hold the numberOfExits of each replication
self.Exits.append(self.numOfExits)
self.UnitExits.append(self.totalNumberOfUnitsExited)
try: # throw exception in case the numOfExits is zero
self.Lifespan.append(((self.totalLifespan)/self.numOfExits)/G.Base)
except ZeroDivisionError: # the lifespan in this case is zero
self.Lifespan.append(0)
try: # throw exception in case of zero division
self.TaktTime.append(((self.totalTaktTime)/self.numOfExits)/G.Base)
except ZeroDivisionError: # the average time between exits is |
sirmar/tetris | tetris/values/test/test_key.py | Python | mit | 330 | 0.00303 | from nose.tools import istest, eq_
from tetr | is.values.key import Key
class TestKey(object):
@istest
def same_key_is_equal(self):
eq_(Key( | "key"), Key("key"))
@istest
def key_is_usable_as_key(self):
key_dict = {
Key("key"): "Value"
}
eq_(key_dict[Key("key")], "Value")
|
2asoft/tdesktop | Telegram/build/release.py | Python | gpl-3.0 | 7,388 | 0.0134 | import os, sys, requests, pprint, re, json
from uritemplate import URITemplate, expand
from subprocess import call
changelog_file = '../../changelog.txt'
token_file = '../../../TelegramPrivate/github-releases-token.txt'
version = ''
commit = ''
for arg in sys.argv:
if re.match(r'\d+\.\d+', arg):
version = arg
elif re.match(r'^[a-f0-9]{40}$', arg):
commit = arg
# thanks http://stackoverflow.com/questions/13909900/progress-of-python-requests-post
class upload_in_chunks(object):
def __init__(self, filename, chunksize=1 << 13):
self.filename = filename
self.chunksize = chunksize
self.totalsize = os.path.getsize(filename)
self.readsofar = 0
def __iter__(self):
with open(self.filename, 'rb') as file:
while True:
data = file.read(self.chunksize)
if not data:
sys.stderr.write("\n")
break
self.readsofar += len(data)
percent = self.readsofar * 1e2 / self.totalsize
sys.stderr.write("\r{percent:3.0f}%".format(percent=percent))
yield data
def __len__(self):
return self.totalsize
class IterableToFileAdapter(object):
def __init__(self, iterable):
self.iterator = iter(iterable)
self.length = len(iterable)
def read(self, size=-1): # TBD: add buffer for `len(data) > size` case
return next(self.iterator, b'')
def __len__(self):
return self.length
def checkResponseCode(result, right_code):
if (result.status_code != right_code):
print('Wrong result code: ' + str(result.status_code) + ', should be ' + str(right_code))
sys.exit(1)
pp = pprint.PrettyPrinter(indent=2)
url = 'https://api.github.com/'
version_parts = version.split('.')
stable = 1
alpha = 0
dev = 0
if len(version_parts) < 2:
print('Error: expected at least major version ' + version)
sys.exit(1)
if len(version_parts) > 4:
print('Error: bad version passed ' + version)
sys.exit(1)
version_major = version_parts[0] + '.' + version_parts[1]
if len(version_parts) == 2:
version = version_major + '.0'
version_full = version
else:
version = version_major + '.' + version_parts[2]
version_full = version
if len(version_parts) == 4:
if version_parts[3] == 'dev':
dev = 1
stable = 0
version_full = version + '.dev'
elif version_parts[3] == 'alpha':
alpha = 1
stable = 0
version_full = version + '.alpha'
else:
print('Error: unexpected version part ' + version_parts[3])
sys.exit(1)
access_token = ''
if os.path.isfile(token_file):
with open(token_file) as f:
for line in f:
access_token = line.replace('\n', '')
if access_token == '':
print('Access token not found!')
sys.exit(1)
print('Version: ' + version_full);
local_folder = '/Volumes/Storage/backup/' + version_major + '/' + version_full
if stable == 1:
if os.path.isdir(local_folder + '.dev'):
dev = 1
stable = 0
version_full = version + '.dev'
local_folder = local_folder + '.dev'
elif os.path.isdir(local_folder + '.alpha'):
alpha = 1
stable = 0
version_full = version + '.alpha'
local_folder = local_folder + '.alpha'
if not os.path.isdir(local_folder):
print('Storage path not found!')
sys.exit(1)
local_folder = local_folder + '/'
files = []
files.append({
'local': 'tsetup.' + version_full + '.exe',
'remote': 'tsetup.' + version_full + '.exe',
'backup_folder': 'tsetup',
'mime': 'application/octet-stream',
'label': 'Windows: Installer',
})
files.append({
'local': 'tportable.' + version_full + '.zip',
'remote': 'tportable.' + version_full + '.zip',
'backup_folder': 'tsetup',
'mime': 'application/zip',
'label': 'Windows: Portable',
})
files.append({
'local': 'tsetup.' + version_full + '.dmg',
'remote': 'tsetup.' + version_full + '.dmg',
'backup_folder': 'tmac',
'mime': 'application/octet-stream',
'label': 'macOS and OS X 10.8+: Installer',
})
files.append({
'local': 'tsetup32.' + version_full + '.dmg',
'remote': 'tsetup32.' + version_full + '.dmg',
'backup_folder': 'tmac32',
'mime': 'application/octet-stream',
'label': 'OS X 10.6 and 10.7: Installer',
})
files.append({
'local': 'tsetup.' + version_full + '.tar.xz',
'remote': 'tsetup.' + version_full + '.tar.xz',
'backup_folder': 'tlinux',
'mime': 'application/octet-stream',
'label': 'Linux 64 bit: Binary',
})
files.append({
'local': 'tsetup32.' + version_full + '.tar.xz',
'remote': 'tsetup32.' + version_full + '.tar.xz',
'backup_folder': 'tlinux32',
'mime': 'application/octet-stream',
'label': 'Linux 32 bit: Binary',
})
r = requests.get(url + 'repos/telegramdesktop/tdesktop/releases/tags/v' + version)
if r.status_code == 404:
print('Release not found, creating.')
if commit == '':
print('Error: specify the commit.')
sys.exit(1)
if not os.path.isfile(changelog_file):
print('Error: Changelog file not found.')
sys.exit(1)
changelog = ''
started = 0
with open(changelog_file) as f:
for line in f:
if started == 1:
if re.match(r'^\d+\.\d+', line):
break;
if re.match(r'^\s+$', line):
continue
changelog += line
else:
if re.match(r'^\d+\.\d+', line):
if line[0:len(version) + 1] == version + ' ':
started = 1
elif line[0:len(version_major) + 1] == version_major + ' ':
if version_major + '.0' == version:
started = 1
if started != 1:
print('Error: Changelog not found.')
sys.exit(1)
changelog = changelog.strip()
print('Changelog: ');
print(changelog);
r = requests.post(url + 'repos/telegramdesktop/tdesktop/releases', headers={'Authorization': 'token ' + access_token}, data=json.dumps({
'tag_name': 'v' + version,
'target_commitish': commit,
'name': 'v ' + version,
'body': changelog,
'prerelease': (dev == 1 or alpha == 1),
}))
checkResponseCode( | r, 201)
r = requests.get(url + 'repos/telegramdesktop/tdesktop/releases/tags/v' + version)
checkResponseCode(r, 200);
release_data = r.json()
#pp.pprint(release_data)
release_id = release_data['id']
print('Release ID: ' + str(release_id))
r = requests.get(url + 'repos/telegramdesktop/tdesktop/releases/' + str(release_id) + '/assets');
checkResponseCode(r, 200);
assets = release_data['assets']
for ass | et in assets:
name = asset['name']
found = 0
for file in files:
if file['remote'] == name:
print('Already uploaded: ' + name)
file['already'] = 1
found = 1
break
if found == 0:
print('Warning: strange asset: ' + name)
for file in files:
if 'already' in file:
continue
file_path = local_folder + file['backup_folder'] + '/' + file['local']
if not os.path.isfile(file_path):
print('Warning: file not found ' + file['local'])
continue
upload_url = expand(release_data['upload_url'], {'name': file['remote'], 'label': file['label']}) + '&access_token=' + access_token;
content = upload_in_chunks(file_path, 10)
print('Uploading: ' + file['remote'] + ' (' + str(round(len(content) / 10000) / 100.) + ' MB)')
r = requests.post(upload_url, headers={"Content-Type": file['mime']}, data=IterableToFileAdapter(content))
checkResponseCode(r, 201)
print('Success! Removing.')
return_code = call(["rm", file_path])
if return_code != 0:
print('Bad rm code: ' + str(return_code))
sys.exit(1)
sys.exit()
|
chenbojian/SU2 | SU2_PY/SU2/io/config.py | Python | lgpl-2.1 | 30,110 | 0.016739 | #!/usr/bin/env python
## \file config.py
# \brief python package for config
# \author T. Lukaczyk, F. Palacios
# \version 3.2.9 "eagle"
#
# SU2 Lead Developers: Dr. Francisco Palacios (Francisco.D.Palacios@boeing.com).
# Dr. Thomas D. Economon (economon@stanford.edu).
#
# SU2 Developers: Prof. Juan J. Alonso's group at Stanford University.
# Prof. Piero Colonna's group at Delft University of Technology.
# Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# Prof. Alberto Guardone's group at Polytechnic University of Milan.
# Prof. Rafael Palacios' group at Imperial College London.
#
# Copyright (C) 2012-2015 SU2, the open-source CFD code.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import os, sys, shutil, copy
import numpy as np
from ..util import bunch, ordered_bunch, switch
from .tools import *
from config_options import *
try:
from collections import OrderedDict
except ImportEr | ror:
from ..util.ordered_dict import OrderedDict
inf = 1.0e20
# ----------------------------------------------------------------------
# Configuration Class
# ----------------------------------------------------------------------
class Config(or | dered_bunch):
""" config = SU2.io.Config(filename="")
Starts a config class, an extension of
ordered_bunch()
use 1: initialize by reading config file
config = SU2.io.Config('filename')
use 2: initialize from dictionary or bunch
config = SU2.io.Config(param_dict)
use 3: initialize empty
config = SU2.io.Config()
Parameters can be accessed by item or attribute
ie: config['MESH_FILENAME'] or config.MESH_FILENAME
Methods:
read() - read from a config file
write() - write to a config file (requires existing file)
dump() - dump a raw config file
unpack_dvs() - unpack a design vector
diff() - returns the difference from another config
dist() - computes the distance from another config
"""
_filename = 'config.cfg'
def __init__(self,*args,**kwarg):
# look for filename in inputs
if args and isinstance(args[0],str):
filename = args[0]
args = args[1:]
elif kwarg.has_key('filename'):
filename = kwarg['filename']
del kwarg['filename']
else:
filename = ''
# initialize ordered bunch
super(Config,self).__init__(*args,**kwarg)
# read config if it exists
if filename:
try:
self.read(filename)
except:
raise IOError , 'Could not find config file: %s' % filename
self._filename = filename
def read(self,filename):
""" reads from a config file """
konfig = read_config(filename)
self.update(konfig)
def write(self,filename=''):
""" updates an existing config file """
if not filename: filename = self._filename
assert os.path.exists(filename) , 'must write over an existing config file'
write_config(filename,self)
def dump(self,filename=''):
""" dumps all items in the config bunch, without comments """
if not filename: filename = self._filename
dump_config(filename,self)
def __getattr__(self,k):
try:
return super(Config,self).__getattr__(k)
except AttributeError:
raise AttributeError , 'Config parameter not found'
def __getitem__(self,k):
try:
return super(Config,self).__getitem__(k)
except KeyError:
raise KeyError , 'Config parameter not found: %s' % k
def unpack_dvs(self,dv_new,dv_old=None):
""" updates config with design variable vectors
will scale according to each DEFINITION_DV scale parameter
Modifies:
DV_KIND
DV_MARKER
DV_PARAM
DV_VALUE_OLD
DV_VALUE_NEW
Inputs:
dv_new - list or array of new dv values
dv_old - optional, list or array of old dv values, defaults to zeros
"""
dv_new = copy.deepcopy(dv_new)
dv_old = copy.deepcopy(dv_old)
# handle unpacking cases
def_dv = self['DEFINITION_DV']
n_dv = len(def_dv['KIND'])
if not dv_old: dv_old = [0.0]*n_dv
assert len(dv_new) == len(dv_old) , 'unexpected design vector length'
# handle param
param_dv = self['DV_PARAM']
# apply scale
dv_scales = def_dv['SCALE']
dv_new = [ dv_new[i]*dv_scl for i,dv_scl in enumerate(dv_scales) ]
dv_old = [ dv_old[i]*dv_scl for i,dv_scl in enumerate(dv_scales) ]
# Change the parameters of the design variables
self['DV_KIND'] = def_dv['KIND']
param_dv['PARAM'] = def_dv['PARAM']
param_dv['FFDTAG'] = def_dv['FFDTAG']
self.update({ 'DV_MARKER' : def_dv['MARKER'][0] ,
'DV_VALUE_OLD' : dv_old ,
'DV_VALUE_NEW' : dv_new })
def __eq__(self,konfig):
return super(Config,self).__eq__(konfig)
def __ne__(self,konfig):
return super(Config,self).__ne__(konfig)
def local_files(self):
""" removes path prefix from all *_FILENAME params
"""
for key,value in self.iteritems():
if key.split('_')[-1] == 'FILENAME':
self[key] = os.path.basename(value)
def diff(self,konfig):
""" compares self to another config
Inputs:
konfig - a second config
Outputs:
config_diff - a config containing only the differing
keys, each with values of a list of the different
config values.
for example:
config_diff.MATH_PROBLEM = ['DIRECT','ADJOINT']
"""
keys = set([])
keys.update( self.keys() )
keys.update( konfig.keys() )
konfig_diff = Config()
for key in keys:
value1 = self.get(key,None)
value2 = konfig.get(key,None)
if not value1 == value2:
konfig_diff[key] = [value1,value2]
return konfig_diff
def dist(self,konfig,keys_check='ALL'):
""" calculates a distance to another config
Inputs:
konfig - a second config
keys_check - optional, a list of keys to check
Outputs:
distance - a float
Currently only works for DV_VALUE_NEW and DV_VALUE_OLD
Returns a large value otherwise
"""
konfig_diff = self.diff(konfig)
if keys_check == 'ALL':
keys_check = konfig_diff.keys()
distance = 0.0
for key in keys_ch |
christianmemije/kolibri | kolibri/deployment/default/settings/dev.py | Python | mit | 206 | 0 | from __f | uture__ import absolute_import, print_function, unicode_literals
from .base import * # noqa isort:skip @UnusedWildImport
INSTALLED_APPS += ['rest_framewor | k_swagger'] # noqa
REST_SWAGGER = True
|
andymckay/zamboni | services/verify.py | Python | bsd-3-clause | 14,089 | 0.000213 | import calendar
import json
from datetime import datetime
from time import gmtime, time
from urlparse import parse_qsl, urlparse
from wsgiref.handlers import format_date_time
import jwt
from browserid.errors import ExpiredSignatureError
from django_statsd.clients import statsd
from receipts import certs
from lib.cef_loggers import receipt_cef
from lib.crypto.receipt import sign
from lib.utils import static_url
from services.utils import settings
from utils import (CONTRIB_CHARGEBACK, CONTRIB_NO_CHARGE, CONTRIB_PURCHASE,
CONTRIB_REFUND, log_configure, log_exception, log_info,
mypool)
# Go configure the log.
log_configure()
# This has to be imported after the settings (utils).
import receipts # NOQA, used for patching in the tests
status_codes = {
200: '200 OK',
405: '405 Method Not Allowed',
500: '500 Internal Server Error',
}
class VerificationError(Exception):
pass
class InvalidReceipt(Exception):
"""
InvalidReceipt takes a message, which is then displayed back to the app so
they can understand the failure.
"""
pass
class RefundedReceipt(Exception):
pass
class Verify:
def __init__(self, receipt, environ):
self.receipt = receipt
self.environ = environ
# This is so the unit tests can override the connection.
self.conn, self.cursor = None, None
def check_full(self):
"""
This is the default that verify will use, this will
do the entire stack of checks.
"""
receipt_domain = urlparse(static_url('WEBAPPS_RECEIPT_URL')).netloc
try:
self.decoded = self.decode()
self.check_type('purchase-receipt')
self.check_url(receipt_domain)
self.check_purchase()
except InvalidReceipt, err:
return self.invalid(str(err))
except RefundedReceipt:
return self.refund()
return self.ok_or_expired()
def check_without_purchase(self):
"""
This is what the developer and reviewer receipts do, we aren't
expecting a p | urchase, but require a specific type and install.
"""
try:
self.decoded = self.decode()
self.check_type('developer-receipt', 'reviewer-receipt')
self.check_url(settings.DOMAIN)
exce | pt InvalidReceipt, err:
return self.invalid(str(err))
return self.ok_or_expired()
def check_without_db(self, status):
"""
This is what test receipts do, no purchase or install check.
In this case the return is custom to the caller.
"""
assert status in ['ok', 'expired', 'invalid', 'refunded']
try:
self.decoded = self.decode()
self.check_type('test-receipt')
self.check_url(settings.DOMAIN)
except InvalidReceipt, err:
return self.invalid(str(err))
return getattr(self, status)()
def decode(self):
"""
Verifies that the receipt can be decoded and that the initial
contents of the receipt are correct.
If its invalid, then just return invalid rather than give out any
information.
"""
try:
receipt = decode_receipt(self.receipt)
except:
log_exception({'receipt': '%s...' % self.receipt[:10],
'app': self.get_app_id(raise_exception=False)})
log_info('Error decoding receipt')
raise InvalidReceipt('ERROR_DECODING')
try:
assert receipt['user']['type'] == 'directed-identifier'
except (AssertionError, KeyError):
log_info('No directed-identifier supplied')
raise InvalidReceipt('NO_DIRECTED_IDENTIFIER')
return receipt
def check_type(self, *types):
"""
Verifies that the type of receipt is what we expect.
"""
if self.decoded.get('typ', '') not in types:
log_info('Receipt type not in %s' % ','.join(types))
raise InvalidReceipt('WRONG_TYPE')
def check_url(self, domain):
"""
Verifies that the URL of the verification is what we expect.
:param domain: the domain you expect the receipt to be verified at,
note that "real" receipts are verified at a different domain
from the main marketplace domain.
"""
path = self.environ['PATH_INFO']
parsed = urlparse(self.decoded.get('verify', ''))
if parsed.netloc != domain:
log_info('Receipt had invalid domain')
raise InvalidReceipt('WRONG_DOMAIN')
if parsed.path != path:
log_info('Receipt had the wrong path')
raise InvalidReceipt('WRONG_PATH')
def get_user(self):
"""
Attempt to retrieve the user information from the receipt.
"""
try:
return self.decoded['user']['value']
except KeyError:
# If somehow we got a valid receipt without a uuid
# that's a problem. Log here.
log_info('No user in receipt')
raise InvalidReceipt('NO_USER')
def get_storedata(self):
"""
Attempt to retrieve the storedata information from the receipt.
"""
try:
storedata = self.decoded['product']['storedata']
return dict(parse_qsl(storedata))
except Exception, e:
log_info('Invalid store data: {err}'.format(err=e))
raise InvalidReceipt('WRONG_STOREDATA')
def get_app_id(self, raise_exception=True):
"""
Attempt to retrieve the app id from the storedata in the receipt.
"""
try:
return int(self.get_storedata()['id'])
except Exception, e:
if raise_exception:
# There was some value for storedata but it was invalid.
log_info('Invalid store data for app id: {err}'.format(
err=e))
raise InvalidReceipt('WRONG_STOREDATA')
def get_contribution_id(self):
"""
Attempt to retrieve the contribution id
from the storedata in the receipt.
"""
try:
return int(self.get_storedata()['contrib'])
except Exception, e:
# There was some value for storedata but it was invalid.
log_info('Invalid store data for contrib id: {err}'.format(
err=e))
raise InvalidReceipt('WRONG_STOREDATA')
def get_inapp_id(self):
"""
Attempt to retrieve the inapp id
from the storedata in the receipt.
"""
return self.get_storedata()['inapp_id']
def setup_db(self):
"""
Establish a connection to the database.
All database calls are done at a low level and avoid the
Django ORM.
"""
if not self.cursor:
self.conn = mypool.connect()
self.cursor = self.conn.cursor()
def check_purchase(self):
"""
Verifies that the app or inapp has been purchased.
"""
storedata = self.get_storedata()
if 'contrib' in storedata:
self.check_purchase_inapp()
else:
self.check_purchase_app()
def check_purchase_inapp(self):
"""
Verifies that the inapp has been purchased.
"""
self.setup_db()
sql = """SELECT i.guid, c.type FROM stats_contributions c
JOIN inapp_products i ON i.id=c.inapp_product_id
WHERE c.id = %(contribution_id)s LIMIT 1;"""
self.cursor.execute(
sql,
{'contribution_id': self.get_contribution_id()}
)
result = self.cursor.fetchone()
if not result:
log_info('Invalid in-app receipt, no purchase')
raise InvalidReceipt('NO_PURCHASE')
contribution_inapp_id, purchase_type = result
self.check_purchase_type(purchase_type)
self.check_inapp_product(contribution_inapp_id)
def check_inapp_product(self, contribution_inapp_id):
if contribution_in |
balikasg/SemEval2016-Twitter_Sentiment_Evaluation | src/ark-tweet-nlp-0.3.2/scripts/toconll.py | Python | gpl-3.0 | 595 | 0.015126 | #!/usr/bin/env python
# Take the pretsv format and make it CoNLL-like | ("supertsv", having tweet metadata headers)
import sys,json
from datetime import datetime
for line in sys.stdin:
parts = line.split('\t')
tokens = parts[0].split()
tags = parts[1].split()
try:
d = json.loads(parts[-1])
print "TWEET\t{}\t{}".format(d['id'], datetime.strptime(d['created_at'], '%a %b %d %H:%M:%S +0000 %Y').strftime("%Y-%m-%dT%H:%M:%S"))
print "TOKENS"
except:
pass
for tok,tag in zip(tokens,tags):
| print "{}\t{}".format(tag,tok)
print ""
|
QuantumElephant/horton | data/examples/hf_dft/rks_water_hybgga.py | Python | gpl-3.0 | 3,728 | 0.002682 | #!/usr/bin/env python
#JSON {"lot": "RKS/6-31G(d)",
#JSON "scf": "EDIIS2SCFSolver",
#JSON "er": "cholesky",
#JSON "difficulty": 5,
#JSON "description": "Basic RKS DFT example with hyrbid GGA exhange-correlation functional (B3LYP)"}
import numpy as np
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
# Load the coordinates from file.
# Use the XYZ file from HORTON's test data directory.
fn_xyz = context.get_fn('test/water.xyz')
mol = IOData.from_file(fn_xyz)
# Create a Gaussian basis set
obasis = get_gobasis(mol.coordinates, mol.numbers, '6-31g(d)')
# Compute Gaussian integrals
olp = obasis.compute_overlap()
kin = obasis.compute_kinetic()
na = obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers)
er_vecs = obasis.compute_electron_repulsion_cholesky()
# Define a numerical integration grid needed the XC functionals
grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers)
# Create alpha orbitals
orb_alpha = Orbitals(obasis.nbasis)
# Initial guess
guess_core_hamiltonian(olp, kin + na, orb_alpha)
# Construct the restricted HF effective Hamiltonian
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
libxc_term = RLibXCHybridGGA('xc_b3lyp')
terms = [
RTwoIndexTerm(kin, 'kin'),
RDirectTerm(er_vecs, 'hartree'),
RGridGroup(obasis, grid, [libxc_term]),
RExchangeTerm(er_vecs, 'x_hf', libxc_term.get_exx_fraction()),
RTwoIndexTerm(na, 'ne'),
]
ham = REffHam(terms, external)
# Decide how to occupy the orbitals (5 alpha electrons)
occ_model = AufbauOccModel(5)
# Converge WFN with CDIIS+EDIIS SCF
# - Construct the initial density matrix (needed for CDIIS+EDIIS).
occ_model.assign(orb_alpha)
dm_alpha = orb_alpha.to_dm()
# - SCF solver
scf_solver = EDIIS2SCFSolver(1e-6)
scf_solver(ham, olp, occ_model, dm_alpha)
# Derive orbitals (coeffs, energies and occupations) from the Fock and density
# matrices. The energy is also computed to store it in the output file below.
fock_alpha = np.zeros(olp.shape)
ham.reset(dm_alpha)
ham.compute_energy()
ham.compute_fock(fock_alpha)
orb_alpha.from_fock_and_dm(fock_alpha, dm_alpha, olp)
# Assign results to the molecule object and write it to a file, e.g. for
# late | r analysis. Note that the CDIIS_EDIIS algorithm can only really construct
# an optimized density m | atrix and no orbitals.
mol.title = 'RKS computation on water'
mol.energy = ham.cache['energy']
mol.obasis = obasis
mol.orb_alpha = orb_alpha
mol.dm_alpha = dm_alpha
# useful for post-processing (results stored in double precision):
mol.to_file('water.h5')
# CODE BELOW IS FOR horton-regression-test.py ONLY. IT IS NOT PART OF THE EXAMPLE.
rt_results = {
'energy': ham.cache['energy'],
'orb_alpha': orb_alpha.energies,
'nn': ham.cache["energy_nn"],
'kin': ham.cache["energy_kin"],
'ne': ham.cache["energy_ne"],
'grid': ham.cache["energy_grid_group"],
'hartree': ham.cache["energy_hartree"],
'x_hf': ham.cache["energy_x_hf"],
}
# BEGIN AUTOGENERATED CODE. DO NOT CHANGE MANUALLY.
rt_previous = {
'energy': -76.406156776346975,
'orb_alpha': np.array([
-19.12494652215198, -0.99562109649344044, -0.52934359625260619,
-0.35973919172781244, -0.28895110439599314, 0.068187099284877942,
0.1532902668612677, 0.80078130036326101, 0.84958389626115138, 0.89305132504935913,
0.92182191946355896, 1.074508959522454, 1.3767806620540104, 1.7405943781554678,
1.7462666980125516, 1.7861275433424106, 2.3057917944397714, 2.5943014303914662
]),
'grid': -7.568923843396495,
'hartree': 46.893530019953076,
'kin': 76.03393036526309,
'ne': -199.129803256826,
'nn': 9.1571750364299866,
'x_hf': -1.792065097770653,
}
|
ewbankkit/cloud-custodian | tools/c7n_azure/c7n_azure/resources/key_vault.py | Python | apache-2.0 | 10,984 | 0.001092 | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from azure.graphrbac import GraphRbacManagementClient
from c7n_azure.actions.base import AzureBaseAction
from c7n_azure.filters import FirewallRulesFilter
from c7n_azure.provider import resources
from c7n_azure.session import Session
from c7n.filters import Filter
from c7n.utils import type_schema
from c7n_azure.utils import GraphHelper
from c7n_azure.resources.arm import ArmResourceManager
import logging
from netaddr import IPSet
log = logging.getLogger('custodian.azure.keyvault')
@resources.register('keyvault')
class KeyVault(ArmResourceManager):
"""Key Vault Resource
:example:
This policy will find all KeyVaults with 10 or less API Hits over the last 72 hours
.. code-block:: yaml
policies:
- name: inactive-keyvaults
resource: azure.keyvault
filters:
- type: metric
metric: ServiceApiHit
op: ge
aggregation: total
threshold: 10
timeframe: 72
:example:
This policy will find all KeyVaults where Service Principals that
have access permissions that exceed `read-only`.
.. code-block:: yaml
policies:
- name: policy
description:
Ensure only authorized people have an access
resource: azure.keyvault
filters:
- not:
- type: whitelist
key: principalName
users:
- account1@sample.com
- account2@sample.com
permissions:
keys:
- get
secrets:
- get
certificates:
- get
:example:
This policy will find all KeyVaults and add get and list permissions for keys.
.. code-block:: yaml
policies:
- name: policy
description:
Add get and list permissions to keys access policy
resource: azure.keyvault
actions:
- type: update-access-policy
operation: add
access-policies:
- tenant-id: 00000000-0000-0000-0000-000000000000
object-id: 11111111-1111-1111-1111-111111111111
permissions:
keys:
- get
- list
"""
class resource_type(ArmResourceManager.resource_type):
doc_groups = ['Security']
service = 'azure.mgmt.keyvault'
client = 'KeyVaultManagementClient'
enum_spec = ('vaults', 'list', None)
resource_type = 'Microsoft.KeyVault/vaults'
@KeyVault.filter_registry.register('firewall-rules')
class KeyVaultFirewallRulesFilter(FirewallRulesFilter):
def __init__(self, data, manager=None):
super(KeyVaultFirewallRulesFilter, self).__init__(data, manager)
self._log = log
@property
def log(self):
return self._log
def _query_rules(self, resource):
if 'properties' not in resource:
client = self.manager.get_client()
vault = client.vaults.get(resource['resourceGroup'], resource['name'])
resource['properties'] = vault.properties.serialize()
if 'networkAcls' not in resource['properties']:
return []
ip_rules = resource['properties']['networkAcls']['ipRules']
resource_rules = IPSet([r['value'] for r in ip_rules])
return resource_rules
@KeyVault.filter_registry.register('whitelist')
class WhiteListFilter(Filter):
schema = type_schema('whitelist', rinherit=None,
required=['key'],
key={'type': 'string'},
users={'type': 'array'},
permissions={
'certificates': {'type': 'array'},
'secrets': {'type': 'array'},
'keys': {'type': 'array'}})
GRAPH_PROVIDED_KEYS = ['displayName', 'aadType', 'principalName']
graph_client = None
def __init__(self, data, manager=None):
super(WhiteListFilter, self).__init__(data, manager)
self.key = self.data['key']
# If not specified, initialize with empty list or dictionary.
self.users = self.data.get('users', [])
self.permissions = self.data.get('permissions', {})
def __call__(self, i):
if 'accessPolicies' not in i:
client = self.manager.get_client()
vault = client.vaults.get(i['resourceGroup'], i['name'])
# Retrieve access policies for the keyvaults
access_policies = []
for policy in vault.properties.access_policies:
access_policies.append({
'tenantId': policy.tenant_id,
'objectId': policy.object_id,
'applicationId': policy.application_id,
'permissions': {
'keys': policy.permissions.keys,
'secrets': policy.permissions.secrets,
'certificates': policy.permissions.certificates
}
})
# Enhance access policies with displayName, aadType and
# principalName if necessary
if self.key in self.GRAPH_PROVIDED_KEYS:
i['accessPolicies'] = self._enhance_policies(access_policies)
# Ensure each policy is
# - User is whitelisted
# - Permissions don't exceed allowed permissions
for p in i['accessPolicies']:
if self.key not in p or p[self.key] not in self.users:
if not self | .compare_permissions(p['permissions'], self.permissions):
re | turn False
return True
@staticmethod
def compare_permissions(user_permissions, permissions):
for v in user_permissions.keys():
if user_permissions[v]:
if v not in permissions.keys():
# If user_permissions is not empty, but allowed permissions is empty -- Failed.
return False
# User lowercase to compare sets
lower_user_perm = set([x.lower() for x in user_permissions[v]])
lower_perm = set([x.lower() for x in permissions[v]])
if lower_user_perm.difference(lower_perm):
# If user has more permissions than allowed -- Failed
return False
return True
def _enhance_policies(self, access_policies):
if not access_policies:
return access_policies
if self.graph_client is None:
s = Session(resource='https://graph.windows.net')
self.graph_client = GraphRbacManagementClient(s.get_credentials(), s.get_tenant_id())
# Retrieve graph objects for all object_id
object_ids = [p['objectId'] for p in access_policies]
# GraphHelper.get_principal_dictionary returns empty AADObject if not found with graph
# or if graph is not available.
principal_dics = GraphHelper.get_principal_dictionary(
self.graph_client, object_ids, True)
for policy in access_policies:
aad_object = principal_dics[policy['objectId']]
if aad_object.object_id:
policy['displayName'] = aad_object.display_name
policy['aadType'] |
aendinalt/pymodoro | pomodoro.py | Python | gpl-2.0 | 4,117 | 0.000972 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'aen'
import pygame
import sys
from pygame.locals import USEREVENT, QUIT, MOUSEBUTTONDOWN
def pomodoro():
    """Run the Pymodoro GUI: a start/stop icon driving a pomodoro countdown.

    Blocks in a pygame event loop until the window is closed.  Two repeating
    timer events are used: USEREVENT + 1 decrements the countdown once per
    minute and USEREVENT + 2 drives the ticking sound.  Both are armed by
    pomodoro_run() and disarmed by pomodoro_stop().
    """
    pygame.mixer.pre_init(frequency=44100, size=-16, channels=1, buffer=512)
    pygame.init()
    # set up the window
    font = pygame.font.Font('freesansbold.ttf', 48)  # initialize a font
    # Drawing colour (the previously defined but unused palette entries were
    # removed).
    dark_green = (0, 100, 0)
    # Sounds are module globals so the pomodoro_* helpers can play them.
    global pomo_start_sound, pomo_tick_sound, pomo_end_sound
    pomo_start_sound = pygame.mixer.Sound('sounds/pomo_start.wav')
    pomo_tick_sound = pygame.mixer.Sound('sounds/pomo_tick.wav')
    pomo_end_sound = pygame.mixer.Sound('sounds/pomo_ring.wav')
    # Defaults: pomodoro length (in countdown ticks), window geometry and
    # icon layout (icons are 64x64, see click_on_start/click_on_stop).
    pomodoro_time = 25
    d_width = 400
    d_height = 300
    image_dir = 'images/'
    start_icon_x = 10
    start_icon_y = 10
    stop_icon_x = d_width - 10 - 64
    stop_icon_y = 10
    in_pomodoro = False
    # Defensive initialisation: timeleft was previously only bound when the
    # start icon was clicked, so a stray USEREVENT + 1 raised a NameError.
    timeleft = pomodoro_time
    # create main window
    screen = pygame.display.set_mode((d_width, d_height), 0, 32)
    pygame.display.set_caption('Pymodoro!')
    pomo_start_icon = pygame.image.load(image_dir + 'pomo_start.png')
    pomo_stop_icon = pygame.image.load(image_dir + 'pomo_stop.png')
    icon = pygame.image.load(image_dir + 'largeicon.png')
    pygame.display.set_icon(icon)
    # create background
    background = pygame.Surface(screen.get_size())
    while True:  # main loop
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == MOUSEBUTTONDOWN:
                if event.button == 1:  # left mouse button only
                    click_x = event.pos[0]
                    click_y = event.pos[1]
                    if click_on_start(click_x, click_y, start_icon_x, start_icon_y) and not in_pomodoro:
                        timeleft = pomodoro_run(pomodoro_time)
                        in_pomodoro = True
                    elif click_on_stop(click_x, click_y, stop_icon_x, stop_icon_y) and in_pomodoro:
                        pomodoro_stop()
                        in_pomodoro = False
            if event.type == USEREVENT + 1:
                # Countdown tick.  (A dead font.render() call that used to sit
                # here was removed: its result was always recomputed in the
                # draw section before being blitted.)
                timeleft -= 1
                if timeleft == 0:
                    pomodoro_end()
                    in_pomodoro = False
            if event.type == USEREVENT + 2:
                pomo_tick_sound.play()
        # draw section
        background.fill(dark_green)
        background.blit(icon, ((d_width-256)/2, (d_height-256)/2))
        if in_pomodoro:
            background.blit(pomo_stop_icon, (stop_icon_x, stop_icon_y))
            text = font.render(str(timeleft), 1, dark_green)
            background.blit(text, ((d_width-54)/2, (d_height-50)/2))
        else:
            background.blit(pomo_start_icon, (start_icon_x, start_icon_y))
        background = background.convert()
        screen.blit(background, (0, 0))
        pygame.display.flip()
        pygame.display.update()
def click_on_start(click_x, click_y, start_icon_x, start_icon_y):
    """Return True if the click falls inside the 64x64 start icon.

    Both bounds are inclusive, matching the original hit-test.
    """
    # Return the boolean expression directly instead of an if/else that
    # returns True/False.
    return (start_icon_x <= click_x <= start_icon_x + 64
            and start_icon_y <= click_y <= start_icon_y + 64)
def click_on_stop(click_x, click_y, stop_icon_x, stop_icon_y):
    """Return True if the click falls inside the 64x64 stop icon.

    Both bounds are inclusive, matching the original hit-test.
    """
    # Return the boolean expression directly instead of an if/else that
    # returns True/False.
    return (stop_icon_x <= click_x <= stop_icon_x + 64
            and stop_icon_y <= click_y <= stop_icon_y + 64)
def pomodoro_run(pomodoro_time):
    """Start a pomodoro: play the start chime, arm both timers, return the
    initial countdown value."""
    pomo_start_sound.play()
    remaining = pomodoro_time
    # USEREVENT + 1 fires once per minute and drives the countdown.
    pygame.time.set_timer(USEREVENT + 1, 1000 * 60)
    # USEREVENT + 2 fires on its own cadence for the ticking sound effect.
    pygame.time.set_timer(USEREVENT + 2, 867)
    return remaining
def pomodoro_stop():
    """Disarm both pomodoro timers (a period of 0 cancels a pygame timer)."""
    for timer_event in (USEREVENT + 1, USEREVENT + 2):
        pygame.time.set_timer(timer_event, 0)
def pomodoro_end():
    # Ring the end-of-pomodoro bell, then cancel the timers so no further
    # tick or countdown events fire.
    pomo_end_sound.play()
    pomodoro_stop()
if __name__ == '__main__':
    # Start the GUI only when executed as a script.
    pomodoro()
google/iree | integrations/tensorflow/test/python/iree_tf_tests/uncategorized/linspace_test.py | Python | apache-2.0 | 1,322 | 0.009077 | # Copyright 2020 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
from absl import app
from iree.tf.support import tf_test_utils
import | numpy as np
import tensorflow.compat.v2 as tf
class LinspaceModule(tf.Module):
  """tf.Module exposing a linspace over scalar float32 start/stop inputs."""

  def __init__(self):
    pass

  @tf.function(input_signature=[
      tf.TensorSpec([], tf.float32),
      tf.TensorSpec([], tf.float32)
  ])
  def linspace(self, start, stop):
    # 'num' is const because XLA's iota operation does not support dynamic
    # shapes.
    num = np.array(3, dtype=np.int32)
    return tf.linspace(start, stop, num)
class LinspaceTest(tf_test_utils.TracedModuleTestCase):
  """Compares tf.linspace results across the compiled backends."""

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._modules = tf_test_utils.compile_tf_module(LinspaceModule)

  def test_linspace(self):
    # Traced call: compare_backends replays this on every backend and
    # diffs the outputs.
    def linspace(module):
      start = np.array(10., dtype=np.float32)
      stop = np.array(12., dtype=np.float32)
      module.linspace(start, stop)
    self.compare_backends(linspace, self._modules)
def main(argv):
  """Entry point for absl.app: enable TF2 behavior if present, run the tests."""
  del argv  # unused; required by the absl.app.run signature
  enable_v2 = getattr(tf, 'enable_v2_behavior', None)
  if enable_v2 is not None:
    enable_v2()
  tf.test.main()
if __name__ == '__main__':
  # Run through absl so command-line flags are parsed before the tests start.
  app.run(main)
|
dymkowsk/mantid | scripts/Interface/ui/reflectometer/refl_gui.py | Python | gpl-3.0 | 64,194 | 0.002648 | # pylint: disable = too-many-lines, invalid-name, line-too-long, too-many-instance-attributes,
# pylint: disable = too-many-branches,too-many-locals, too-many-nested-blocks
from __future__ import (absolute_import, division, print_function)
try:
from mantidplot import *
except ImportError:
canMantidPlot = False #
import csv
import os
import re
from operator import itemgetter
import itertools
from PyQt4 import QtCore, QtGui
from mantid.simpleapi import *
from isis_reflectometry.quick import *
from isis_reflectometry.convert_to_wavelength import ConvertToWavelength
from isis_reflectometry import load_live_runs
from isis_reflectometry.combineMulti import *
import mantidqtpython
from mantid.api import Workspace, WorkspaceGroup, CatalogManager, AlgorithmManager
from mantid import UsageService
from ui.reflectometer.ui_refl_window import Ui_windowRefl
from ui.reflectometer.refl_save import Ui_SaveWindow
from ui.reflectometer.refl_choose_col import ReflChoose
from ui.reflectometer.refl_options import ReflOptions
# Some PyQt4 builds do not expose QString.fromUtf8 (raises AttributeError);
# fall back to an identity function so callers can use _fromUtf8 uniformly.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
# NOTE(review): this unconditionally re-enables the flag that the mantidplot
# import guard above sets to False on ImportError - verify the override is
# intentional.
canMantidPlot = True
class ReflGui(QtGui.QMainWindow, Ui_windowRefl):
current_instrument = None
current_table = None
current_polarisation_method = None
labelStatus = None
accMethod = None
    def __init__(self):
        """
        Initialise the interface
        """
        # NOTE(review): super(QtGui.QMainWindow, self) starts the MRO lookup
        # *after* QMainWindow rather than after ReflGui - confirm this is the
        # intended initialisation chain.
        super(QtGui.QMainWindow, self).__init__()
        self.setupUi(self)
        # Transient UI state.
        self.loading = False
        self.clip = QtGui.QApplication.clipboard()
        self.shown_cols = {}
        self.mod_flag = False  # True once the table has unsaved edits
        # Table layout: column indices for the run/angle pairs and the
        # scale/stitch/plot columns.
        self.run_cols = [0, 5, 10]
        self.angle_cols = [1, 6, 11]
        self.scale_col = 16
        self.stitch_col = 17
        self.plot_col = 18
        self.__graphs = dict()  # graph windows, populated elsewhere in the class
        self._last_trans = ""
        self.icat_file_map = None
        self.__instrumentRuns = None
        self.__icat_download = False
        self.__group_tof_workspaces = True
        # Q Settings
        self.__generic_settings = "Mantid/ISISReflGui"
        self.__live_data_settings = "Mantid/ISISReflGui/LiveData"
        self.__search_settings = "Mantid/ISISReflGui/Search"
        self.__column_settings = "Mantid/ISISReflGui/Columns"
        self.__icat_download_key = "icat_download"
        self.__ads_use_key = "AlgUse"
        self.__alg_migration_key = "AlgUseReset"
        self.__live_data_frequency_key = "frequency"
        self.__live_data_method_key = "method"
        self.__group_tof_workspaces_key = "group_tof_workspaces"
        self.__stitch_right_key = "stitch_right"
        # Setup instrument with defaults assigned.
        self.instrument_list = ['INTER', 'SURF', 'CRISP', 'POLREF', 'OFFSPEC']
        self.polarisation_instruments = ['CRISP', 'POLREF']
        self.polarisation_options = {'None': PolarisationCorrection.NONE,
                                     '1-PNR': PolarisationCorrection.PNR,
                                     '2-PA': PolarisationCorrection.PA}
        # Set the live data settings, use default if none have been set before
        settings = QtCore.QSettings()
        settings.beginGroup(self.__live_data_settings)
        self.live_method = settings.value(self.__live_data_method_key, "", type=str)
        self.live_freq = settings.value(self.__live_data_frequency_key, 0, type=float)
        # Missing settings are written back so subsequent runs find them.
        if not self.live_freq:
            logger.information(
                "No settings were found for Update frequency of loading live data, Loading default of 60 seconds")
            self.live_freq = float(60)
            settings.setValue(self.__live_data_frequency_key, self.live_freq)
        if not self.live_method:
            logger.information(
                "No settings were found for Accumulation Method of loading live data, Loading default of \"Add\"")
            self.live_method = "Add"
            settings.setValue(self.__live_data_method_key, self.live_method)
        settings.endGroup()
        settings.beginGroup(self.__generic_settings)
        # One-time migration: force algorithm use on first run after upgrade,
        # then never reset it again.
        self.__alg_migrate = settings.value(self.__alg_migration_key, True, type=bool)
        if self.__alg_migrate:
            self.__alg_use = True # We will use the algorithms by default rather than the quick scripts
            self.__alg_migrate = False # Never do this again. We only want to reset once.
        else:
            self.__alg_use = settings.value(self.__ads_use_key, True, type=bool)
        self.__icat_download = settings.value(self.__icat_download_key, False, type=bool)
        self.__group_tof_workspaces = settings.value(self.__group_tof_workspaces_key, True, type=bool)
        self.__scale_right = settings.value(self.__stitch_right_key, True, type=bool)
        # Persist the effective values (writes defaults on first run).
        settings.setValue(self.__ads_use_key, self.__alg_use)
        settings.setValue(self.__icat_download_key, self.__icat_download)
        settings.setValue(self.__group_tof_workspaces_key, self.__group_tof_workspaces)
        settings.setValue(self.__alg_migration_key, self.__alg_migrate)
        settings.setValue(self.__stitch_right_key, self.__scale_right)
        settings.endGroup()
        del settings
        # register startup
        UsageService.registerFeatureUsage("Interface", "ISIS Reflectomety", False)
def __del__(self):
"""
Save the contents of the table if the modified flag was still set
"""
if self.mod_flag:
self._save(true)
    def _save_check(self):
        """
        Show a custom message box asking if the user wants to save, or discard their changes or cancel back to the interface
        """
        msgBox = QtGui.QMessageBox()
        msgBox.setText("The table has been modified. Do you want to save your changes?")
        accept_btn = QtGui.QPushButton('Save')
        cancel_btn = QtGui.QPushButton('Cancel')
        discard_btn = QtGui.QPushButton('Discard')
        msgBox.addButton(accept_btn, QtGui.QMessageBox.AcceptRole)
        msgBox.addButton(cancel_btn, QtGui.QMessageBox.RejectRole)
        msgBox.addButton(discard_btn, QtGui.QMessageBox.NoRole)
        msgBox.setIcon(QtGui.QMessageBox.Question)
        msgBox.setDefaultButton(accept_btn)
        msgBox.setEscapeButton(cancel_btn)
        msgBox.exec_()
        # The clicked button is identified by its label text, not by identity.
        btn = msgBox.clickedButton()
        saved = None
        if btn.text() == accept_btn.text():
            ret = QtGui.QMessageBox.AcceptRole
            saved = self._save()
        elif btn.text() == cancel_btn.text():
            ret = QtGui.QMessageBox.RejectRole
        else:
            ret = QtGui.QMessageBox.NoRole
        # Returns (chosen role, result of _save() or None if no save happened).
        return ret, saved
    def closeEvent(self, event):
        """
        Close the window. but check if the user wants to save
        """
        self.buttonProcess.setFocus()
        if self.mod_flag:
            # Veto the close first; accept it again only if the user's choice
            # below permits closing.
            event.ignore()
            ret, saved = self._save_check()
            if ret == QtGui.QMessageBox.AcceptRole:
                # Close only if the save actually succeeded.
                if saved:
                    self.mod_flag = False
                    event.accept()
            elif ret == QtGui.QMessageBox.RejectRole:
                event.ignore()
            elif ret == QtGui.QMessageBox.NoRole:
                # Discard the unsaved changes and close.
                self.mod_flag = False
                event.accept()
    def _instrument_selected(self, instrument):
        """
        Change the default instrument to the selected one
        """
        config['default.instrument'] = self.instrument_list[instrument]
        logger.notice("Instrument is now: " + str(config['default.instrument']))
        # Switching instrument invalidates the RB search text and run list.
        self.textRB.clear()
        self._populate_runs_list()
        self.current_instrument = self.instrument_list[instrument]
        # Polarisation corrections only apply to the instruments listed in
        # self.polarisation_instruments (CRISP, POLREF).
        self.comboPolarCorrect.setEnabled(
            self.current_instrument in self.polarisation_instruments) # Enable as appropriate
        self.comboPolarCorrect.setCurrentIndex(self.comboPolarCorrect.findText('None')) # Reset to None
def _table_modified(self, row, column):
"""
sets the modified flag when the table is altered
"""
# Sometimes users enter leading or trailing whitespace into a cell.
# Let's remove it for them automatically.
item = self.tableMa |
s20121035/rk3288_android5.1_repo | external/chromium_org/chrome/common/extensions/docs/server2/document_renderer_test.py | Python | gpl-3.0 | 5,797 | 0.002588 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from document_renderer import DocumentRenderer
from server_instance import ServerInstance
from test_file_system import TestFileSystem
from test_data.canned_data import CANNED_TEST_FILE_SYSTEM_DATA
class DocumentRendererUnittest(unittest.TestCase):
  """Exercises DocumentRenderer.Render: $(title), $(table_of_contents) and
  $(ref:...) substitution, plus the warnings each case should produce."""

  def setUp(self):
    self._renderer = ServerInstance.ForTest(
        TestFileSystem(CANNED_TEST_FILE_SYSTEM_DATA)).document_renderer
    self._path = 'apps/some/path/to/document.html'

  def _Render(self, document, render_title=False):
    # Thin helper so every test renders against the same canned path.
    return self._renderer.Render(document,
                                 self._path,
                                 render_title=render_title)

  def testNothingToSubstitute(self):
    document = 'hello world'
    text, warnings = self._Render(document)
    self.assertEqual(document, text)
    self.assertEqual([], warnings)
    # With render_title=True a missing <h1> is reported as a warning.
    text, warnings = self._Render(document, render_title=True)
    self.assertEqual(document, text)
    self.assertEqual(['Expected a title'], warnings)

  def testTitles(self):
    document = '<h1>title</h1> then $(title) then another $(title)'
    text, warnings = self._Render(document)
    self.assertEqual(document, text)
    self.assertEqual(['Found unexpected title "title"'], warnings)
    # Only the first $(title) is substituted.
    text, warnings = self._Render(document, render_title=True)
    self.assertEqual('<h1>title</h1> then title then another $(title)', text)
    self.assertEqual([], warnings)

  def testTocs(self):
    # Only the first $(table_of_contents) is expanded.
    document = ('here is a toc $(table_of_contents) '
                'and another $(table_of_contents)')
    expected_document = ('here is a toc <table-of-contents> and another '
                         '$(table_of_contents)')
    text, warnings = self._Render(document)
    self.assertEqual(expected_document, text)
    self.assertEqual([], warnings)
    text, warnings = self._Render(document, render_title=True)
    self.assertEqual(expected_document, text)
    self.assertEqual(['Expected a title'], warnings)

  def testRefs(self):
    # The references in this and subsequent tests won't actually be resolved
    document = 'A ref $(ref:baz.baz_e1) here, $(ref:foo.foo_t3 ref title) there'
    expected_document = ''.join([
      'A ref <a href=/apps/#type-baz_e1>baz.baz_e1</a> here, ',
      '<a href=/apps/#type-foo_t3>ref title</a> there'
    ])
    text, warnings = self._Render(document)
    self.assertEqual(expected_document, text)
    self.assertEqual([], warnings)
    text, warnings = self._Render(document, render_title=True)
    self.assertEqual(expected_document, text)
    self.assertEqual(['Expected a title'], warnings)

  def testTitleAndToc(self):
    document = '<h1>title</h1> $(title) and $(table_of_contents)'
    text, warnings = self._Render(document)
    self.assertEqual('<h1>title</h1> $(title) and <table-of-contents>', text)
    self.assertEqual(['Found unexpected title "title"'], warnings)
    text, warnings = self._Render(document, render_title=True)
    self.assertEqual('<h1>title</h1> title and <table-of-contents>', text)
    self.assertEqual([], warnings)

  def testRefInTitle(self):
    document = '<h1>$(ref:baz.baz_e1 title)</h1> A $(title) was here'
    href = '/apps/#type-baz_e1'
    expected_document_no_title = ''.join([
      '<h1><a href=%s>title</a></h1> A $(title) was here' % href
    ])
    expected_document = ''.join([
      '<h1><a href=%s>title</a></h1> A title was here' % href
    ])
    text, warnings = self._Render(document)
    self.assertEqual(expected_document_no_title, text)
    self.assertEqual([('Found unexpected title "title"')], warnings)
    text, warnings = self._Render(document, render_title=True)
    self.assertEqual(expected_document, text)
    self.assertEqual([], warnings)

  def testRefSplitAcrossLines(self):
    # A newline inside $(ref:...) must not break the substitution.
    document = 'Hello, $(ref:baz.baz_e1 world). A $(ref:foo.foo_t3\n link)'
    expected_document = ''.join([
      'Hello, <a href=/apps/#type-baz_e1>world</a>. ',
      'A <a href=/apps/#type-foo_t3>link</a>'
    ])
    text, warnings = self._Render(document)
    self.assertEqual(expected_document, text)
    self.assertEqual([], warnings)
    text, warnings = self._Render(document, render_title=True)
    self.assertEqual(expected_document, text)
    self.assertEqual(['Expected a title'], warnings)

  def testInvalidRef(self):
    # DocumentRenderer attempts to detect unclosed $(ref:...) tags by limiting
    # how far it looks ahead. Lorem Ipsum should be long enough to trigger that.
    _LOREM_IPSUM = (
        'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do '
        'eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim '
        'ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut '
        'aliquip ex ea commodo consequat. Duis aute irure dolor in '
        'reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla '
        'pariatur. Excepteur sint occaecat cupidatat non proident, sunt in '
        'culpa qui officia deserunt mollit anim id est laborum.')
    document = ''.join([
      'An invalid $(ref:foo.foo_t3 a title ',
      _LOREM_IPSUM,
      '$(ref:baz.baz_e1) here'
    ])
    expected_document = ''.join([
      'An invalid $(ref:foo.foo_t3 a title ',
      _LOREM_IPSUM,
      '<a href=/apps/#type-baz_e1>baz.baz_e1</a> here'
    ])
    text, warnings = self._Render(document)
    self.assertEqual(expected_document, text)
    self.assertEqual([], warnings)
    text, warnings = self._Render(document, render_title=True)
    self.assertEqual(expected_document, text)
    self.assertEqual(['Expected a title'], warnings)
if __name__ == '__main__':
  # Allow running this test file directly.
  unittest.main()
|
garyjyao1/ansible | lib/ansible/modules/extras/cloud/cloudstack/cs_securitygroup_rule.py | Python | gpl-3.0 | 14,213 | 0.005841 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_securitygroup_rule
short_description: Manages security group rules on Apache CloudStack based clouds.
description:
- Add and remove security group rules.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
security_group:
description:
- Name of the security group the rule is related to. The security group must be existing.
required: true
state:
description:
- State of the security group rule.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
protocol:
description:
- Protocol of the security group rule.
required: false
default: 'tcp'
choices: [ 'tcp', 'udp', 'icmp', 'ah', 'esp', 'gre' ]
type:
description:
- Ingress or egress security group rule.
required: false
default: 'ingress'
choices: [ 'ingress', 'egress' ]
cidr:
description:
- CIDR (full notation) to be used for security group rule.
required: false
default: '0.0.0.0/0'
user_security_group:
description:
- Security group this rule is based of.
required: false
default: null
start_port:
description:
- Start port for this rule. Required if C(protocol=tcp) or C(protocol=udp).
required: false
default: null
aliases: [ 'port' ]
end_port:
description:
- End port for this rule. Required if C(protocol=tcp) or C(protocol=udp), but C(start_port) will be used if not set.
required: false
default: null
icmp_type:
description:
- Type of the icmp message being sent. Required if C(protocol=icmp).
required: false
default: null
icmp_code:
description:
- Error code for this icmp message. Required if C(protocol=icmp).
required: false
default: null
project:
description:
- Name of the project the security group to be created in.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
---
# Allow inbound port 80/tcp from 1.2.3.4 added to security group 'default'
- local_action:
module: cs_securitygroup_rule
security_group: default
port: 80
cidr: 1.2.3.4/32
# Allow tcp/udp outbound added to security group 'default'
- local_action:
module: cs_securitygroup_rule
security_group: default
type: egress
start_port: 1
end_port: 65535
protocol: '{{ item }}'
with_items:
- tcp
- udp
# Allow inbound icmp from 0.0.0.0/0 added to security group 'default'
- local_action:
module: cs_securitygroup_rule
security_group: default
protocol: icmp
icmp_code: -1
icmp_type: -1
# Remove rule inbound port 80/tcp from 0.0.0.0/0 from security group 'default'
- local_action:
module: cs_securitygroup_rule
security_group: default
port: 80
state: absent
# Allow inbound port 80/tcp from security group web added to security group 'default'
- local_action:
module: cs_securitygroup_rule
security_group: default
port: 80
user_security_group: web
'''
RETURN = '''
---
id:
description: UUID of the of the rule.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
security_group:
description: security group of the rule.
returned: success
type: string
sample: default
type:
description: type of the rule.
returned: success
type: string
sample: ingress
cidr:
description: CIDR of the rule.
returned: success and cidr is defined
type: string
sample: 0.0.0.0/0
user_security_group:
description: user security group of the rule.
returned: success and user_securi | ty_group is defined
type: string
sample: default
protocol:
description: protocol of the rule.
returned: success
type: string
sample: tcp
start_ | port:
description: start port of the rule.
returned: success
type: int
sample: 80
end_port:
description: end port of the rule.
returned: success
type: int
sample: 80
'''
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack):
    def __init__(self, module):
        """Initialise the CloudStack helper for security group rules."""
        super(AnsibleCloudStackSecurityGroupRule, self).__init__(module)
        # Maps CloudStack API response keys to the result keys documented in
        # the module's RETURN block.
        self.returns = {
            'icmptype': 'icmp_type',
            'icmpcode': 'icmp_code',
            'endport': 'end_port',
            'startport': 'start_port',
            'protocol': 'protocol',
            'cidr': 'cidr',
            'securitygroupname': 'user_security_group',
        }
def _tcp_udp_match(self, rule, protocol, start_port, end_port):
return protocol in ['tcp', 'udp'] \
and protocol == rule['protocol'] \
and start_port == int(rule['startport']) \
and end_port == int(rule['endport'])
def _icmp_match(self, rule, protocol, icmp_code, icmp_type):
return protocol == 'icmp' \
and protocol == rule['protocol'] \
and icmp_code == int(rule['icmpcode']) \
and icmp_type == int(rule['icmptype'])
def _ah_esp_gre_match(self, rule, protocol):
return protocol in ['ah', 'esp', 'gre'] \
and protocol == rule['protocol']
def _type_security_group_match(self, rule, security_group_name):
return security_group_name \
and 'securitygroupname' in rule \
and security_group_name == rule['securitygroupname']
def _type_cidr_match(self, rule, cidr):
return 'cidr' in rule \
and cidr == rule['cidr']
def _get_rule(self, rules):
user_security_group_name = self.module.params.get('user_security_group')
cidr = self.module.params.get('cidr')
protocol = self.module.params.get('protocol')
start_port = self.module.params.get('start_port')
end_port = self.get_or_fallback('end_port', 'start_port')
icmp_code = self.module.params.get('icmp_code')
icmp_type = self.module.params.get('icmp_type')
if protocol in ['tcp', 'udp'] and not (start_port and end_port):
self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol)
if protocol == 'icmp' and not (icmp_type and icmp_code):
self.module.fail_json(msg="no icmp_type or icmp_code set for protocol '%s'" % protocol)
for rule in rules:
if user_security_group_name:
type_match = self._type_security_group_match(rule, user_security_group_name)
else:
type_match = self._type_cidr_match(rule, cidr)
protocol_match = ( self._tcp_udp_match(rule, protocol, start_port, end_port) \
or self._icmp_match(rule, protocol, icmp_code, icmp_type) \
or self._ah_esp_gre_match(rule, protocol)
)
if type_match and protocol_match:
return rule
return None
def get_security_group(self, security_group_name=None):
if not security_group |
AnsgarSchmidt/sensomatic | server/Tank.py | Python | apache-2.0 | 9,629 | 0.007685 | import os
import time
import json
import datetime
import requests
import threading
import ConfigParser
import paho.mqtt.client as mqtt
from InformationFetcher import InformationFetcher
from Template import TemplateMatcher
from requests.auth import HTTPBasicAuth
# Time spans in seconds, for interval arithmetic against time.time().
SECOND = 1
MINUTE = 60 * SECOND
HOUR = 60 * MINUTE
DAY = 24 * HOUR
WEEK = 7 * DAY
MONTH = 31 * DAY
YEAR = 365 * DAY
class Tank(threading.Thread):
DAWN = 0
DAY = 1
SUNSET = 2
NIGHT = 3
    def _readConfig(self):
        """Reload config.ini when changed and write back any missing defaults."""
        # NOTE(review): on a true first run the config file does not exist yet,
        # so this os.stat() would raise before the defaults below are written -
        # verify the startup path.
        if self._configMTime != os.stat(self._configFileName).st_mtime:
            print "Reread config file for tank"
            self._configMTime = os.stat(self._configFileName).st_mtime
        # 'update' tracks whether defaults must be written back to disk.
        update = False
        stop = False
        if not os.path.isdir(self._homeDir):
            print "Creating homeDir"
            os.makedirs(self._homeDir)
        if os.path.isfile(self._configFileName):
            self._config.read(self._configFileName)
        else:
            print "Config file not found"
            update = True
        # Each missing section/option is logged, defaulted and flagged for
        # write-back.
        if not self._config.has_section('MQTT'):
            print "Adding MQTT part"
            update = True
            self._config.add_section("MQTT")
        if not self._config.has_option("MQTT", "ServerAddress"):
            print "No Server Address"
            update = True
            self._config.set("MQTT", "ServerAddress", "<ServerAddress>")
        if not self._config.has_option("MQTT", "ServerPort"):
            print "No Server Port"
            update = True
            self._config.set("MQTT", "ServerPort", "1883")
        if not self._config.has_section('TANK'):
            print "Adding Tank part"
            update = True
            self._config.add_section("TANK")
        if not self._config.has_option("TANK", "Location"):
            print "No Tank Virtual Location"
            update = True
            self._config.set("TANK", "Location", "Port Of Spain")
        if not self._config.has_option("TANK", "LocationOffset"):
            print "No Tank Virtual Location Offset"
            update = True
            self._config.set("TANK", "LocationOffset", "0")
        if not self._config.has_option("TANK", "NightTemp"):
            print "No Tank Night Temperature"
            update = True
            self._config.set("TANK", "NightTemp", "23")
        if not self._config.has_option("TANK", "DayTemp"):
            print "No Tank Day Temperature"
            update = True
            self._config.set("TANK", "DayTemp", "24")
        if not self._config.has_option("TANK", "FertilizerInterval"):
            print "No Tank FertilizerInterval"
            update = True
            self._config.set("TANK", "FertilizerInterval", "3600")
        if not self._config.has_option("TANK", "GraphInterval"):
            print "No Tank GraphInterval"
            update = True
            self._config.set("TANK", "GraphInterval", "9000")
        if update:
            with open(self._configFileName, 'w') as f:
                self._config.write(f)
        # NOTE(review): 'stop' is never set to True above, so this branch is
        # currently unreachable; 'sys' is also not imported at module level.
        if stop:
            print "Please check config file"
            sys.exit(0)
    def __init__(self):
        """Initialise thread state, config, MQTT client and sun/moon tracking."""
        threading.Thread.__init__(self)
        self.setDaemon(True)
        # Config lives in ~/.sensomatic/config.ini; _readConfig() fills in
        # defaults when it is missing.
        self._homeDir = os.path.expanduser("~/.sensomatic")
        self._configFileName = self._homeDir + '/config.ini'
        self._configMTime = 0
        self._config = ConfigParser.ConfigParser()
        self._readConfig()
        self._template = TemplateMatcher()
        self._info = InformationFetcher()
        self._mqclient = mqtt.Client("Tank2", clean_session=True)
        # Current day-cycle state and the last state announced on Twitter
        # (see publishTwitter).
        self._daystate = Tank.NIGHT
        self._twitterdaystate = Tank.NIGHT
        # Timestamps of the last fertilizer dose and last chart publication.
        self._lastfurtilizer = time.time()
        self._lastcharts = time.time()
        # Light levels (0-100) recomputed by updateSunAndMoon().
        self._sunpercentage = 0
        self._moonpercentage = 0
    def _on_connect(self, client, userdata, rc, msg):
        # MQTT connect callback: (re)subscribe to all tank topics so the
        # subscription survives reconnects.
        print "Connected Tank with result code %s" % rc
        self._mqclient.subscribe("livingroom/tank/#")
    def _on_message(self, client, userdata, msg):
        # MQTT message callback: intentionally a no-op (debug print disabled).
        #print "Mq Received on channel %s -> %s" % (msg.topic, msg.payload)
        pass
    def _on_disconnect(self, client, userdata, msg):
        # MQTT disconnect callback: log only; paho handles reconnection.
        print "Disconnect MQTTRulez"
    def updateSunAndMoon(self):
        """Recompute _daystate, _sunpercentage and _moonpercentage from the
        sun/moon data supplied by InformationFetcher for the configured
        (virtual) location."""
        now = datetime.datetime.now()
        dawn, sunrise, noon, sunset, dusk = self._info.getSunTimes(self._config.get("TANK", "Location"), int(self._config.get("TANK", "LocationOffset")))
        moonPhase = self._info.getMoonPhase(self._config.get("TANK", "Location"))
        moonElevation, _ = self._info.getMoonPosition()
        # Sunlight: ramp 0->100 during dawn, hold 100 during the day,
        # ramp 100->0 during sunset, 0 at night.
        if (dawn < now < sunrise):
            duration = sunrise - dawn
            done = now - dawn
            self._daystate = Tank.DAWN
            self._sunpercentage = int((done.total_seconds() / duration.total_seconds()) * 100)
        elif (sunrise < now < sunset):
            self._daystate = Tank.DAY
            self._sunpercentage = 100
        elif (sunset < now < dusk):
            duration = dusk - sunset
            done = now - sunset
            self._daystate = Tank.SUNSET
            self._sunpercentage = int((1.0 - (done.total_seconds() / duration.total_seconds())) * 100)
        else:
            self._daystate = Tank.NIGHT
            self._sunpercentage = 0
        # 0 = New moon, 7 = First quarter, 14 = Full moon, 21 = Last quarter
        # Maps phase day 0..14 to brightness 0..1 (waxing) and 14..28 back
        # down to 0 (waning).
        moonphasepercentage = 0.0
        if (0 <= moonPhase <= 14):
            moonphasepercentage = 1.0 - ( (14.0 - (moonPhase ) ) / 14.0)
        else:
            moonphasepercentage = ( (14.0 - (moonPhase - 14.0) ) / 14.0)
        # Scale by elevation/90 so moonlight dims toward the horizon and is
        # off while the moon is below it.
        if moonElevation > 0:
            self._moonpercentage = int(moonphasepercentage * (moonElevation / 90.0) * 100)
        else:
            self._moonpercentage = 0
    def publishMQTT(self):
        """Push the computed light levels and temperature setpoint to MQTT."""
        self._mqclient.publish("livingroom/tank/whitelight", self._sunpercentage )
        self._mqclient.publish("livingroom/tank/bluelight", self._moonpercentage)
        # Daytime states (dawn through sunset) use the warmer DayTemp setpoint.
        if self._daystate in (Tank.DAWN, Tank.DAY, Tank.SUNSET):
            self._mqclient.publish("livingroom/tank/settemp", self._config.get("TANK", "DayTemp"))
        else:
            self._mqclient.publish("livingroom/tank/settemp", self._config.get("TANK", "NightTemp"))
def publishTwitter(self):
if self._twitterdaystate is not self._daystate:
if self._daystate == Tank.DAWN:
self._mqclient.publish("twitter/text", "Switching light scene to dawn and rise the light level. #Fishtank Yellow #cheerlights")
if self._daystate == Tank.DAY:
self._mqclient.publish("twitter/text", "Switching light scene to day. #Fishtank Warmwhite #cheerlights")
if self._daystate == Tank.SUNSET:
self._mqclient.publish("twitter/text", "Switching light scene to sunset and lover the light level. #Fishtank Orange #cheerlights")
if self._daystate == Tank.NIGHT:
self._mqclient.publish("twitter/text", "Switching light scene to night. #Fishtank Black #cheerlights")
self._twitterdaystate = self._daystate
    def publishFertilizer(self):
        """Trigger a fertilizer dose at most once per configured interval,
        and only during the DAY state."""
        now = time.time()
        if self._daystate == Tank.DAY:
            # Rate-limit by FertilizerInterval (seconds) from the config file.
            if (now - self._lastfurtilizer) > int(self._config.get("TANK", "FertilizerInterval")):
                self._mqclient.publish("livingroom/tank/fertilizer", 1)
                self._mqclient.publish("twitter/text", "Adding some material of natural or synthetic origin (other than liming materials). #Fishtank #Fertilizer")
                self._lastfurtilizer = now
def publishCharts(self):
now = time.time()
if (now - self._lastcharts) > int(self._config.get("TANK", "GraphInterval")):
try:
j = json.l |
facelessuser/SublimeRandomCrap | sublime_info.py | Python | mit | 2,894 | 0.00311 | """
SublimeInfo Sublime Plugin.
Show info about the system and the current Sublime Text instance.
```
//////////////////////////////////
// Info Commands
//////////////////////////////////
{
"caption": "Sublime Info",
"command": "sublime_info"
},
```
Licensed under MIT
Copyright (c) 2013-2019 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Soft | ware is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO TH | E WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import socket
import sublime
import sublime_plugin
import urllib.request as urllibreq
import traceback
# Report template rendered by SublimeInfoCommand.run() via str.format().
INFO = '''
Platform: {platform}
Hostname: {hostname}
Sublime Version: {version}
Architecture: {arch}
Local IP: {l_ip}
External IP: {e_ip}
'''
def external_ip():
    """Return this machine's external IP as reported by myexternalip.com.

    Falls back to "???" (and prints the traceback to the console) on any
    network or HTTP failure, so callers always get a printable string.
    """
    try:
        # A timeout keeps the plugin from hanging indefinitely when the
        # service is unreachable; the original call had none.
        with urllibreq.urlopen("https://www.myexternalip.com/raw", timeout=10) as url:
            e_ip = url.read().decode("utf-8")
    except Exception:
        e_ip = "???"
        print(traceback.format_exc())
    return e_ip
def local_ip():
    """Return the LAN IP address of this machine, or '???' on failure.

    Connecting a UDP socket sends no packets; it only asks the OS to
    choose the outbound interface, whose address is then read back via
    getsockname().
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.connect(('10.255.255.255', 1))
        address = sock.getsockname()[0]
    except Exception:
        print(traceback.format_exc())
        address = '???'
    finally:
        sock.close()
    return address
class SublimeInfoCommand(sublime_plugin.ApplicationCommand):
    """Application command that gathers and displays system/Sublime info.

    Fills the INFO template with platform, hostname, version,
    architecture and both local and external IP addresses.
    """
    def run(self):
        """Collect the info and surface it via dialog, clipboard, and console."""
        # NOTE(review): local_ip()/external_ip() can block for several
        # seconds (network lookups) and this runs synchronously --
        # confirm this is acceptable for a UI command.
        info = {
            "platform": sublime.platform(),
            "hostname": socket.gethostname().lower(),
            "version": sublime.version(),
            "arch": sublime.arch(),
            "l_ip": local_ip(),
            "e_ip": external_ip()
        }
        msg = INFO.format(**info)
        # Show in a dialog, console, and copy to clipboard
        sublime.message_dialog(msg)
        sublime.set_clipboard(msg)
        print("\nSublimeInfo: {}".format(msg))
|
sciCloud/OLiMS | lims/controlpanel/bika_labcontacts.py | Python | agpl-3.0 | 4,479 | 0.004465 | from dependencies.dependency import ClassSecurityInfo
from dependencies.dependency import schemata
from dependencies import atapi
from dependencies.dependency import registerType
from dependencies.dependency import permissions
from dependencies.dependency import getToolByName
from lims.browser.bika_listing import BikaListingView
from lims.config import PROJECTNAME
from lims import bikaMessageFactory as _
from lims.utils import t
from lims.content.bikaschema import BikaFolderSchema
from lims.content.labcontact import LabContact
from dependencies.dependency import IViewView
from dependencies.dependency import IFolderContentsView
from lims.interfaces import ILabContacts
from dependencies.folder import ATFolder, ATFolderSchema
from dependencies.dependency import implements
class LabContactsView(BikaListingView):
implements(IFolderContentsView, IViewView)
def __init__(self, context, request):
super(LabContactsView, self).__init__(context, request)
self.catalog = 'bika_setup_catalog'
self.contentFilter = {'portal_type': 'LabContact',
'sort_on': 'sortable_title'}
self.context_actions = {_('Add'):
{'url': 'createObject?type_name=LabContact',
'icon': '++resource++bika.lims.images/add.png'}}
self.title = self.context.translate(_("Lab Contacts"))
self.icon = self.portal_url + "/++resource++bika.lims.images/lab_contact_big.png"
self.description = ""
self.show_sort_column = False
self.show_select_row = False
self.show_select_column = True
self.pagesize = 25
self.columns = {
'Fullname': {'title': _('Name'),
'index': 'getFullname'},
'Department': {'title': _('Department'),
'index': 'getDepartmentTitle',
'toggle': True},
'BusinessPhone': {'title': _('Phone'),
'toggle': True},
'Fax': {'title': _('Fax'),
'toggle': True},
'MobilePhone': {'title': _('Mobile Phone'),
'toggle': True},
'EmailAddress': {'title': _('Email Address'),
'toggle': True},
}
self.review_states = [
{'id':'default',
'title': _('Active'),
'contentFilter': {'inactive_state': 'active'},
'transitions': [{'id':'deactivate'}, ],
'columns': ['Fullname',
'Department',
'BusinessPhone',
'Fax',
'MobilePhone',
'EmailAddress']},
{'id':'inactive',
'title': _('Dormant'),
'contentFilter': {'inactive_state': 'inactive'},
'transitions': [{'id':'activate'}, ],
'columns': ['Fullname',
'Department',
'BusinessPhone',
'Fax',
'MobilePhone',
'EmailAddress']},
{'id':'all',
'title': _('All'),
'contentFilter':{},
'columns': ['Fullname',
'Department',
'BusinessPhone',
'Fax',
'MobilePhone',
'EmailAddress']},
]
def folderitems(self):
items = BikaListingView.folderitems(self)
for x in range(len(items)):
if not items[x].has_key('obj'): continue
obj = items[x]['obj']
items[x]['Fullname'] = | obj.getFullname()
items[x]['Department'] = obj.getDepartmentTitle()
items[x]['BusinessPhone'] = obj.getBusinessPhone()
| items[x]['Fax'] = obj.getBusinessFax()
items[x]['MobilePhone'] = obj.getMobilePhone()
items[x]['EmailAddress'] = obj.getEmailAddress()
items[x]['replace']['Fullname'] = "<a href='%s'>%s</a>" % \
(items[x]['url'], items[x]['Fullname'])
return items
schema = ATFolderSchema.copy()
class LabContacts(ATFolder):
    """Folder content type holding LabContact objects."""
    implements(ILabContacts)
    # Hide the default folder "Contents" tab; listing is provided by
    # LabContactsView instead.
    displayContentsTab = False
    # Archetypes schema copied from ATFolderSchema at module level.
    schema = schema
schemata.finalizeATCTSchema(schema, folderish = True, moveDiscussion = False)
atapi.registerType(LabContacts, PROJECTNAME)
|
5monkeys/python-email-reply-parser | email_reply_parser/parser.py | Python | mit | 7,227 | 0.000415 | """
Ported from https://github.com/github/email_reply_parser
EmailReplyParser is a small library to parse plain text email content. The
goal is to identify which fragments are quoted, part of a signature, or
original body content. We want to support both top and bottom posters, so
no simple "REPLY ABOVE HERE" content is used.
Beyond RFC 5322 (which is handled by the [Ruby mail gem][mail]), there aren't
any real standards for how emails are created. This attempts to parse out
common conventions for things like replies:
this is some text
On <date>, <author> wrote:
| > blah blah
> blah blah
... and signatures:
this is some text
--
Bob
http://homepage.com/~bob
Each of these are parsed into Fragment objects.
EmailReplyParser also attempts to figure out which of these blocks s | hould
be hidden from users.
"""
import re
class Fragment(object):
    """One run of consecutive email lines sharing the same role.

    A fragment is quoted material, a signature, or original body text.
    Lines are accumulated (still in reversed form, as produced by the
    parser) and joined into the final ``content`` string by `finish()`.
    """
    def __init__(self, quoted, first_line):
        self.signature = False
        self.hidden = False
        self.quoted = quoted
        self.content = None
        # Seed the line buffer; a None first_line means "start empty".
        self.lines = [] if first_line is None else [first_line]
    def finish(self):
        """Join the buffered lines and un-reverse them into `content`."""
        joined = '\n'.join(self.lines)
        self.content = joined[::-1]
        self.lines = None
    def __str__(self):
        return self.content
class Email(object):
    """An Email instance represents a parsed body String."""
    # NOTE: parsing operates on the *reversed* body text (see read()), so
    # the patterns below that look garbled are ordinary patterns written
    # against reversed lines:
    #   ":etorw.*nO"     is "On.*wrote:"     reversed,
    #   " ym morf tneS"  is "Sent from my "  reversed.
    multiline_pattern = re.compile(
        r"^(?!On.*On\s.+?wrote:)(On\s(?:.+?)wrote:)$", re.M | re.I | re.S)
    underscore_pattern = re.compile(r"([^\n])(?=\n_{7}_+)$", re.M)
    signature_pattern = re.compile(
        r"(?m)(--\s*$|__\s*$|\w-$)|(^(\w+\s*){1,3} ym morf tneS$)")
    header_pattern = re.compile(r"^:etorw.*nO$")
    empty = ""
    def __init__(self):
        # Ordered list of Fragment objects; populated by read().
        self.fragments = []
    def visible_text(self):
        """Gets the combined text of the visible fragments of the email body."""
        visible = '\n'.join([str(f) for f in self.fragments if not f.hidden])
        return visible.rstrip()
    def read(self, text):
        """
        Splits the given text into a list of Fragments. This is roughly done by
        reversing the text and parsing from the bottom to the top. This way we
        can check for 'On <date>, <author> wrote:' lines above quoted blocks.
        """
        # Normalize line endings
        text = text.replace("\r\n", "\n")
        # Check for multi-line reply headers. Some clients break up
        # the "On DATE, NAME <EMAIL> wrote:" line into multiple lines.
        text = self.multiline_pattern.sub(
            lambda matchobj: matchobj.group(0).replace("\n", " "), text)
        # Some users may reply directly above a line of underscores.
        # In order to ensure that these fragments are split correctly,
        # make sure that all lines of underscores are preceded by
        # at least two newline characters.
        text = self.underscore_pattern.sub("\\1\n", text)
        # The text is reversed initially due to the way we check for hidden
        # fragments.
        text = text[::-1]
        # This determines if any 'visible' Fragment has been found. Once any
        # visible Fragment is found, stop looking for hidden ones.
        self.found_visible = False
        # This instance variable points to the current Fragment. If the matched
        # line fits, it should be added to this Fragment. Otherwise, finish it
        # and start a new Fragment.
        self.fragment = None
        for line in text.split('\n'):
            self.scan_line(line)
        # Finish up the final fragment. Finishing a fragment will detect any
        # attributes (hidden, signature, reply), and join each line into a
        # string.
        self.finish_fragment()
        self.fragment = None
        # Now that parsing is done, reverse the order.
        self.fragments = self.fragments[::-1]
        return self
    def scan_line(self, line):
        """
        Scans the given line of text and figures out which fragment it belongs
        to.
        """
        line = line.rstrip("\n")
        # Signature-looking lines keep their indentation (the pattern is
        # anchored to line ends); all other lines are left-stripped.
        if not self.signature_pattern.search(line):
            line = line.lstrip()
        # We're looking for leading `>`'s to see if this line is part of a
        # quoted Fragment.  (endswith, because the line is reversed.)
        is_quoted = line.endswith('>')
        # Mark the current Fragment as a signature if the current line is empty
        # and the Fragment starts with a common signature indicator.
        if (self.fragment and line == self.empty
                and self.signature_pattern.search(self.fragment.lines[-1])):
            self.fragment.signature = True
            self.finish_fragment()
        # If the line matches the current fragment, add it. Note that a common
        # reply header also counts as part of the quoted Fragment, even though
        # it doesn't start with `>`.
        if (self.fragment and
                ((self.fragment.quoted == is_quoted) or
                 self.fragment.quoted and
                 (self.quote_header(line) or line == self.empty))):
            self.fragment.lines.append(line)
        # Otherwise, finish the fragment and start a new one.
        else:
            self.finish_fragment()
            self.fragment = Fragment(is_quoted, line)
    def quote_header(self, line):
        """
        Detects if a given line is a header above a quoted area. It is only
        checked for lines preceding quoted regions.
        """
        return bool(self.header_pattern.search(line))
    def finish_fragment(self):
        """
        Builds the fragment string and reverses it, after all lines have been
        added. It also checks to see if this Fragment is hidden. The hidden
        Fragment check reads from the bottom to the top.
        Any quoted Fragments or signature Fragments are marked hidden if they
        are below any visible Fragments. Visible Fragments are expected to
        contain original content by the author. If they are below a quoted
        Fragment, then the Fragment should be visible to give context to the
        reply.
        some original text (visible)
        > do you have any two's? (quoted, visible)
        Go fish! (visible)
        > --
        > Player 1 (quoted, hidden)
        --
        Player 2 (signature, hidden)
        """
        frag = self.fragment
        if frag:
            frag.finish()
            if not self.found_visible:
                if (frag.quoted or frag.signature
                        or str(frag).strip() == self.empty):
                    frag.hidden = True
                else:
                    self.found_visible = True
            self.fragments.append(frag)
        self.fragment = None
def read(text):
    """Parse an email body into an Email holding its Fragments."""
    parser = Email()
    return parser.read(text)
def parse_reply(text):
    """Return only the visible (non-quoted, non-signature) text of an email body."""
    return Email().read(text).visible_text()
|
Jokeren/neon | loader/test/raw_to_wav.py | Python | apache-2.0 | 136 | 0 | import nump | y as np
from scipy.io.wavfile import write
a = np.fromfile('/tmp/file.raw', dtype='int16')
write('/tmp/file.wav', 16000, a) | |
wadobo/socializa | backend/game/models.py | Python | agpl-3.0 | 3,494 | 0.004293 | import re
from django.contrib.auth.models import User
from django.db import models
from common.models import ExtraBase
CHALLENGES_TYPE = (
('p', 'playable player'),
('np', 'not playable player'),
)
class Challenge(models.Model, ExtraBase):
    """A single solvable challenge.

    `ctype` distinguishes challenges for a playable player ('p') from
    those for a non-playable player ('np'); see CHALLENGES_TYPE.
    """
    name = models.CharField(max_length=200, blank=True, null=True)
    desc = models.TextField(max_length=1024, blank=True, null=True)
    solution = models.TextField(max_length=1024, blank=True, null=True)
    ctype = models.CharField(max_length=8, choices=CHALLENGES_TYPE, default='p')
    extra = models.TextField(max_length=1024, blank=True, null=True)
    # options in extra:
    # {"options":
    #    [
    #       {"type": "text", "question": "who is the killer?"},
    #       {"type": "option", "question": "with which weapon?",
    #        "answers": ["knife", "rope", "gun", "bare hands", "venom"]},
    #       ...
    #    ]
    # }
    # NOTE(review): related_name "requiedby" looks like a typo for
    # "requiredby", but renaming would change the reverse accessor and
    # needs a coordinated migration -- left as-is.
    depends = models.ManyToManyField('Challenge', related_name="requiedby",
                                     blank=True)
    # challenges to give when resolve this challenge, only if solution is
    # not null and we resolve this
    child_challenges = models.ManyToManyField('Challenge',
                                              related_name="parents",
                                              blank=True)
    def mainclues(self):
        # 'clues' is presumably a reverse relation declared on a Clue
        # model (not visible in this file) -- verify against that model.
        return self.clues.filter(main=True)
    def depends_on(self):
        """Comma-separated names of the challenges this one depends on."""
        return ", ".join(i.name for i in self.depends.all())
    def get_desc_html(self):
        """Render `desc` as HTML, bolding every inline #[NUM][solution] tag."""
        # search #[NUM][solution] and return [('NUM', 'solution'), ... ]
        qregex = re.compile("#\[[\d]+\]\[([^#]*)\]")
        desc_html = self.desc[:]
        for sre in qregex.finditer(self.desc):
            ini_pos, end_pos = sre.span()
            rex = self.desc[ini_pos:end_pos]
            solution = sre.group(1)
            desc_html = desc_html.replace(rex, "<b>{}</b>".format(solution))
        return desc_html
    def __str__(self):
        # Truncate the description so admin/debug listings stay short.
        desc = self.desc[:10] if self.desc else "-"
        return "{} - {}...".format(self.name, desc)
    class Meta:
        ordering = ['pk']
class Game(models.Model, ExtraBase):
name = models.CharField(max_length=200, blank=True, null=True)
desc = models.TextField(max_length=1024, blank=True, null=True)
solution = models.TextField(max_length=1024, blank=True, null=True)
challenges = models.ManyToManyField(Challen | ge, related_name="games")
author = models.ForeignKey(User, related_name="games", blank=True, null=True)
auto_assign_clue = models.BooleanField(default=True)
visible_players = models.BooleanField(default=True)
extra = models.TextField(max_length=1024, blank=True, null=T | rue)
# options in extra:
# {"options":
# [
# {"type": "text", "question": "who is the killer?"},
# {"type": "option", "question": "with which weapon?",
# "answers": ["knife", "rope", "gun", "bare hands", "venom"]},
# ...
# ]
# }
def get_desc_html(self):
# search #[NUM][type][question] and return [('NUM', 'type', 'question'), ... ]
qregex = re.compile("#\[[\d]+\]\[(?:option|text)\]\[([^#]*)\]")
desc_html = self.desc[:]
for sre in qregex.finditer(self.desc):
ini_pos, end_pos = sre.span()
rex = self.desc[ini_pos:end_pos]
question = sre.group(1)
desc_html = desc_html.replace(rex, "<b>{}</b>".format(question))
return desc_html
def __str__(self):
return self.name
|
heineman/algorithms-nutshell-2ed | PythonCode/adk/knapsack.py | Python | mit | 4,073 | 0.009084 | """
Unbounded knapsack: *(http://www.mathcs.emory.edu/~cheung/Courses/323/Syllabus/DynProg/knapsack2.html
"""
class Item:
    """A knapsack item: a value and the weight it costs to carry."""
    def __init__(self, value, weight):
        """Store the given value and weight on the item."""
        self.value = value
        self.weight = weight
    def __str__(self):
        return "(weight=%s, value=%s)" % (self.weight, self.value)
class ApproximateItem(Item):
    """
    Extends Item by storing the normalized value and the original position of the item
    before sorting.
    """
    def __init__(self, item, idx):
        Item.__init__(self, item.value, item.weight)
        # Value density (value per unit weight): the greedy sort key used
        # by knapsack_approximate.  True division -- correct on Python 3;
        # under Python 2 this would truncate for integer inputs.
        self.normalizedValue = item.value/item.weight
        # Original index in the caller's items list, so selections can be
        # reported in input order after sorting by density.
        self.index = idx
# Not to be exported. This exists for timeit code in test_knapsack.
_best = []
def record_best(val=None):
    """Accumulate results for timeit runs, or retrieve-and-reset them.

    Called with an argument, appends it to the module-level accumulator
    and returns None.  Called with no argument, returns the accumulated
    list and starts a fresh one.
    """
    global _best
    # Compare against None (not truthiness) so falsy results such as 0 or
    # an empty tuple are still recorded instead of triggering a reset.
    if val is not None:
        _best.append(val)
        return None
    rc = _best
    _best = []
    return rc
def knapsack_approximate(items, W):
    """
    Greedy (Dantzig) approximation to the unbounded knapsack problem.

    Items are considered in decreasing order of value density
    (value / weight); as many copies as fit of each item are taken in
    turn.  Returns a tuple (total_value, selections) where selections[i]
    is the number of copies taken of items[i].
    """
    n = len(items)
    # Sort item *indices* by value density, best first.  sorted() is
    # stable, so ties keep their original relative order -- matching the
    # previous ApproximateItem-based implementation (and avoiding the
    # wrapper class entirely).
    order = sorted(range(n),
                   key=lambda i: items[i].value / items[i].weight,
                   reverse=True)
    selections = [0] * n
    remaining = W
    total = 0
    for i in order:
        if remaining == 0:
            break
        # Find out how many copies of this item still fit.
        copies = remaining // items[i].weight
        if copies > 0:
            selections[i] += copies
            remaining -= copies * items[i].weight
            total += copies * items[i].value
    return (total, selections)
def knapsack_unbounded(items, W):
    """
    Solve the unbounded knapsack problem (unlimited copies of each item).

    Dynamic programming over capacities 0..W, remembering for each
    capacity which item was last added so the chosen multiset can be
    reconstructed.  Returns (best_value, selections) where selections[i]
    counts how many copies of items[i] were taken.
    """
    n = len(items)
    # best[c]: best achievable value with capacity c.
    # last[c]: index of the item added to reach best[c]; -1 means "none".
    best = [0] * (W + 1)
    last = [0] * (W + 1)
    last[0] = -1
    for cap in range(1, W + 1):
        last[cap] = last[cap - 1]
        top = best[cap - 1]
        for idx in range(n):
            left = cap - items[idx].weight
            if left >= 0 and best[left] + items[idx].value > top:
                top = best[left] + items[idx].value
                last[cap] = idx
        best[cap] = top
    # Walk back through 'last', subtracting item weights, to count how
    # many copies of each item were chosen.
    selections = [0] * n
    cap = W
    while cap >= 0:
        pick = last[cap]
        if pick == -1:
            break
        selections[pick] += 1
        cap -= items[pick].weight
    return (best[W], selections)
def knapsack_01(items, W):
    """
    Solve the 0/1 knapsack problem (each item usable at most once).

    Classic dynamic-programming table: row i holds the best value using
    only the first i items for every capacity 0..W.  Returns
    (best_value, selections) where selections[i] is 1 if items[i] is in
    the optimal set and 0 otherwise.
    """
    n = len(items)
    # table[i][c]: best value achievable with items[:i] and capacity c.
    table = [[0] * (W + 1) for _ in range(n + 1)]
    for row in range(1, n + 1):
        item = items[row - 1]
        for cap in range(W + 1):
            without = table[row - 1][cap]
            if item.weight <= cap:
                with_item = table[row - 1][cap - item.weight] + item.value
                table[row][cap] = max(without, with_item)
            else:
                table[row][cap] = without
    # Trace back: a value change between consecutive rows means the item
    # for that row was taken.
    selections = [0] * n
    cap = W
    for row in range(n, 0, -1):
        if cap < 0:
            break
        if table[row][cap] != table[row - 1][cap]:
            selections[row - 1] = 1
            cap -= items[row - 1].weight
    return (table[n][W], selections)
|
breandan/java-algebra-system | examples/sicora.py | Python | gpl-2.0 | 382 | 0.04712 | #
# jython examples for jas.
# $Id$
#
import sys;
from jas import Ring, Ideal
fr | om jas import startLog, terminate
# sicora, e-gb example
r = | Ring( "Z(t) L" );
print "Ring: " + str(r);
print;
ps = """
(
( 2 t + 1 ),
( t**2 + 1 )
)
""";
f = r.ideal( ps );
print "Ideal: " + str(f);
print;
#startLog();
g = f.eGB();
print "seq e-GB:", g;
print "is e-GB:", g.iseGB();
print;
|
thoreg/raus-mit-den-kids | rmdk/location/forms.py | Python | mit | 275 | 0 | # -*- coding: utf-8 -*-
from django import forms
from location.models import Address
class Address | AdminForm(forms.ModelForm):
class Meta:
model = Address
widgets = {
'description': | forms.Textarea(attrs={'cols': 80, 'rows': 20}),
}
|
scorpiontahir02/easysub | src/easysub/subscene.py | Python | mit | 2,817 | 0.035144 | import os
import urlparse
import zipfile
import requests
from bs4 import BeautifulSoup
from downloader import Downloader
from common import Subtitles, User
class Subscene(object):
def __init__(self):
super(Subscene, self).__init__()
self._base_url = u'http://subscene.com/'
self._search_url = self._get_full_url(u'/subtitles/title')
self._cookies = dict(LanguageFilter=u'13')
self._timeout = 10.0
def _get_full_url(self, relative_url):
return urlparse.urljoin(self._base_url, relative_url)
def _get_soup(self, source):
return BeautifulSoup(source, u'html.parser')
def _parse_search_results_source(self, source):
if not source:
return list()
soup = self._get_soup(source)
subs = list()
for tr in soup.find(u'tbody').find_all(u'tr'):
sub = Subtitles()
tds= tr.find_all(u'td')
try:
sub.page_url = self._get_full_url(tds[0].find(u'a')[u'href'])
except IndexError, e:
pass
else:
try:
sub.language = tds[0].find_all(u'span')[0].text.strip()
except IndexError, e:
pass
try:
sub.name = tds[0].find_all(u'span')[1].text.strip()
except IndexError, e:
pass
try:
sub.files_count = int(tds[1].text.strip()) if tds[1].text.strip() else None
except IndexError, e:
pass
try:
sub.hearing_impared = True if u'a41' in tds[2][u'class'] else False
except IndexError, e:
pass
try:
sub.uploader.name = tds[3].find(u'a').text.strip()
sub.uploader.profile_url = self._get_full_url(tds[3].find('a')['href'])
except IndexError, e:
pass
try:
sub.comment = tds[4].find(u'div').text.strip()
except IndexError, e:
pass
if sub.page_url:
subs.append(sub)
retur | n subs
def search(self, query):
if not query:
return list()
data = {
u'q': query,
u'l': None
}
r = requests.get(self._search_url, params=data, cookies=self._cookies, timeout=self._timeout)
if r.status_code == 200:
return self._parse_search_results_source(r.text)
def _extract_sub_zip(self, zip_path, sub_path):
if zipfile.is_zipfile(zip_path):
try:
with zipfile.ZipFile(zip_path) as | z:
with z.open(z.namelist()[0]) as sz, open(sub_path, u'wb') as so:
so.write(sz.read())
return True
except Exception, e:
pass
return False
def download(self, sub, path):
r = requests.get(sub.page_url)
if r.status_code == 200:
soup = self._get_soup(r.text)
sub.url = self._get_full_url(
soup.find(u'a', id=u'downloadButton')[u'href']
)
dl = Downloader()
zip_path = os.path.splitext(path)[0] + u'.zip'
if dl.download_file(sub.url, zip_path):
is_extration_success = self._extract_sub_zip(zip_path, path)
try:
os.remove(zip_path)
except OSError, e:
pass
if is_extration_success:
return True
return False
|
lfalvarez/nouabook | elections/tests/photo_loader_tests.py | Python | gpl-3.0 | 1,842 | 0.005429 | # coding=utf-8
from elections.tests import VotaInteligenteTestCase as TestCase
from elections.models import Election
from django.core.urlresolvers import reverse
from candideitorg.models import Candidate
from django.core.management import call_command
class PhotoLoaderCase(TestCase):
def setUp(self):
super(PhotoLoaderCase, self).setUp()
def test_it_loads_the_photo_for_an_existing_candidate(self):
call_command('photo_loader', 'elections/tests/fixtures/candidate_photo_url.csv', verbosity=0)
jano = Candidate.objects.get(name=u"Alejandro Guillier")
otro = Candidate.objects.get(name=u"Manuel Rojas")
self.assertEquals(jano.photo, 'http://upload.wikimedia.org/wikipedia/commons/7/76/Alejandro_Guillier.jpg')
s | elf.assertEquals(otro.photo, 'http://www.2eso.info/sinonimos/wp-content/uploads/2013/02/feo1.jpg')
def test_if_the_candidate_does_not_exist_it_does_it_for_the_rest(self):
call_command('photo_loader', 'elections/tests/fixtures/candidate_photo_url.csv', verbosity=0)
jano = Can | didate.objects.get(name=u"Alejandro Guillier")
otro = Candidate.objects.get(name=u"Manuel Rojas")
self.assertEquals(jano.photo, 'http://upload.wikimedia.org/wikipedia/commons/7/76/Alejandro_Guillier.jpg')
self.assertEquals(otro.photo, 'http://www.2eso.info/sinonimos/wp-content/uploads/2013/02/feo1.jpg')
def test_it_prepends_url_when_provided(self):
call_command('photo_loader', 'elections/tests/fixtures/candidate_photo.csv', 'some.site/static/', verbosity=0)
jano = Candidate.objects.get(name=u"Alejandro Guillier")
otro = Candidate.objects.get(name=u"Manuel Rojas")
self.assertEquals(jano.photo, 'some.site/static/Alejandro_Guillier.jpg')
self.assertEquals(otro.photo, 'some.site/static/feo1.jpg') |
ifduyue/sentry | src/sentry/identity/pipeline.py | Python | bsd-3-clause | 2,192 | 0.001825 | from __future__ import absolute_import, print_function
import logging
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.pipeline import Pipeline
from sentry.models import Identity, IdentityStatus, IdentityProvider
from . import default_manager
IDENTITY_LINKED = _("Your {identity_provider} account has been associated with your Sentry account")
logger = logging.getLogger('sentry.identity')
class IdentityProviderPipeline(Pipeline):
logger = logger
pipeline_name = 'identity_provider'
provider_manager = default_manager
provider_model_cls = IdentityProvider
def redirect_url(self):
associate_url = reverse('sentry-extension-setup', kwargs={
# TODO(adhiraj): Remove provider_id from the callback URL, it's unused.
'provider_id': 'default',
})
# Use configured redirect_url if specified for the pipeline if available
return self.config.get('redirect_url', associate_url)
def finish_pipeline(self):
identity = self.provider.build_identity(self.state.data)
defaults = {
'status': IdentityStatus.VALID,
'scopes': identity.get('scopes', []),
'data': identity.get('data', {}),
'date_verified': timezone.now(),
}
identity, created = Identity.objects.get_or_create(
idp=self.provider_model,
user=self.request.user,
| external_id=identity['id'],
defaults=defaults,
)
if not created:
identity.update(**defaults)
messages.add_message(self.request, messages.SUCCESS, IDENTITY_LINKED.format(
identity_provider=self.provider.nam | e,
))
self.state.clear()
# TODO(epurkhiser): When we have more identities and have built out an
# identity management page that supports these new identities (not
# social-auth ones), redirect to the identities page.
return HttpResponseRedirect(reverse('sentry-account-settings'))
|
prasannav7/ggrc-core | src/ggrc/models/section.py | Python | apache-2.0 | 3,107 | 0.003862 | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under ht | tp://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: vraj@reciprocitylabs.com |
from ggrc import db
from ggrc.models.directive import Directive
from ggrc.models.mixins import CustomAttributable
from ggrc.models.mixins import Described
from ggrc.models.mixins import Hierarchical
from ggrc.models.mixins import Hyperlinked
from ggrc.models.mixins import Noted
from ggrc.models.mixins import Slugged
from ggrc.models.mixins import Stateful
from ggrc.models.mixins import Titled
from ggrc.models.mixins import WithContact
from ggrc.models.mixins import deferred
from ggrc.models.object_document import Documentable
from ggrc.models.object_owner import Ownable
from ggrc.models.object_person import Personable
from ggrc.models.reflection import AttributeInfo
from ggrc.models.relationship import Relatable
from ggrc.models.relationship import Relationship
from ggrc.models.track_object_state import HasObjectState
from ggrc.models.track_object_state import track_state_for_class
class Section(HasObjectState, Hierarchical, Noted, Described, Hyperlinked,
              WithContact, Titled, Slugged, Stateful, db.Model,
              CustomAttributable, Documentable, Personable,
              Ownable, Relatable):
  """A section of a directive (policy, regulation, standard or contract)."""
  # Workflow states a Section may be in (consumed by the Stateful mixin).
  VALID_STATES = [
      'Draft',
      'Final',
      'Effective',
      'Ineffective',
      'Launched',
      'Not Launched',
      'In Scope',
      'Not in Scope',
      'Deprecated',
  ]
  __tablename__ = 'sections'
  _table_plural = 'sections'
  _title_uniqueness = True
  # Import/export column aliases.  "directive" is a virtual mapping
  # column resolved through _filter_by_directive below.
  _aliases = {
      "url": "Section URL",
      "description": "Text of Section",
      "directive": {
          "display_name": "Policy / Regulation / Standard / Contract",
          "type": AttributeInfo.Type.MAPPING,
          "filter_by": "_filter_by_directive",
      }
  }
  # "Not applicable" flag for the section.
  na = deferred(db.Column(db.Boolean, default=False, nullable=False),
                'Section')
  notes = deferred(db.Column(db.Text), 'Section')
  _publish_attrs = [
      'na',
      'notes',
  ]
  _sanitize_html = ['notes']
  _include_links = []
  @classmethod
  def _filter_by_directive(cls, predicate):
    """Build a SQL filter matching Sections related to a directive.

    `predicate` is applied to the related Directive's slug and title;
    the Relationship table is checked in both directions (Section as
    source and as destination).
    """
    types = ["Policy", "Regulation", "Standard", "Contract"]
    # Section -> Directive relationships.
    dst = Relationship.query \
        .filter(
            (Relationship.source_id == cls.id) &
            (Relationship.source_type == cls.__name__) &
            (Relationship.destination_type.in_(types))) \
        .join(Directive, Directive.id == Relationship.destination_id) \
        .filter(predicate(Directive.slug) | predicate(Directive.title)) \
        .exists()
    # Directive -> Section relationships.
    src = Relationship.query \
        .filter(
            (Relationship.destination_id == cls.id) &
            (Relationship.destination_type == cls.__name__) &
            (Relationship.source_type.in_(types))) \
        .join(Directive, Directive.id == Relationship.source_id) \
        .filter(predicate(Directive.slug) | predicate(Directive.title)) \
        .exists()
    return dst | src
track_state_for_class(Section)
|
sillvan/hyperspy | doc/conf.py | Python | gpl-3.0 | 7,719 | 0.005312 | # -*- coding: utf-8 -*-
#
# HyperSpy documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 18 11:10:55 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.append('../')
from hyperspy import Release
sys.path.append(os.path.abspath('sphinxext'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'gen_rst',
'numpydoc',
'matplotlib.sphinxext.only_directives',
'sphinx.ext.intersphinx',
'sphinx.ext.pngmath',
'sphinx.ext.autosummary',
'ipython_console_highlighting'] # , 'rst2pdf.pdfbuilder']
try:
import sphinxcontrib.spelling
extensions.append('sphinxcontrib.spelling')
except:
pass
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'HyperSpy'
copyright = u'2012, The HyperSpy development team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = Release.version
# The full version, including alpha/beta/rc tags.
release = Release.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'collapsiblesidebar': True}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/hyperspy_logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'hyperspy_logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'HyperSpydoc'
# -- Options for LaTeX output --------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass | [howto/manual]).
# Grouping the document tree into LaTeX files.
# (Author string repaired: an extraction artifact had split "HyperSpy".)
latex_documents = [
    ('index', 'HyperSpy.tex', u'HyperSpy Documentation',
     u'The HyperSpy Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hyperspy', u'HyperSpy Documentation',
[u'The HyperSpy developers'], 1)
]
# Add the hyperspy website to the intersphinx domains
intersphinx_mapping = {'hyperspyweb': ('http://hyperspy.org/', None)}
def setup(app):
    """Sphinx extension entry point: register extra static assets.

    :param app: the Sphinx application object passed in by the builder.
    """
    # copybutton.js adds a "copy" button to rendered code examples.
    app.add_javascript('copybutton.js')
|
Svjard/flightpath | config/urls.py | Python | mit | 508 | 0.001969 | from django.conf.urls import include, patterns, url
from django.contrib import admin
# Core URL configuration.  REST endpoints are versioned under /api/v1/,
# the Django admin lives under /admin/, everything else goes to main.
# (Two extraction artifacts repaired: '^a | pi/v1/' and 'rest_fra | mework.urls'.)
urlpatterns = patterns(
    '',
    url(r'', include('main.urls')),
    url(r'^api/v1/', include('authentication.urls')),
    url(r'^api/v1/', include('posts.urls')),
    url(r'^admin/', include(admin.site.urls))
)

# Browsable-API login/logout views provided by Django REST framework.
urlpatterns += [
    url(r'^api-auth/', include('rest_framework.urls',
                               namespace='rest_framework'))
]

# Redirect to webapp URL
# TODO Server-side rendering
urlpatterns += [
    url(r'^.*$', include('main.urls')),
]
|
clouserw/zamboni | mkt/site/management/commands/clean_redis.py | Python | bsd-3-clause | 3,336 | 0 | import logging
import os
import socket
import subprocess
import sys
import tempfile
import time
from django.core.management.base import BaseCommand
import redisutils
import redis as redislib
log = logging.getLogger('z.redis')
# We process the keys in chunks of size CHUNK.
CHUNK = 3000
# Remove any sets with less than MIN or more than MAX elements.
MIN = 10
MAX = 50
# Expire keys after EXPIRE seconds.
EXPIRE = 60 * 5
# Calling redis can raise raise these errors.
RedisError = redislib.RedisError, socket.error
def vacuum(master, slave):
    """Dump every key on the cache slave to a temp file, then re-exec.

    The key list is written out in CHUNK-sized batches; the process then
    re-executes itself with the temp file name appended so that cleanup()
    runs in a fresh interpreter (see the comment below about memory).
    """
    def keys():
        # Yield the slave's keys in lists of at most CHUNK entries.
        ks = slave.keys()
        log.info('There are %s keys to clean up.' % len(ks))
        ks = iter(ks)
        while 1:
            buffer = []
            for _ in xrange(CHUNK):
                try:
                    buffer.append(ks.next())
                except StopIteration:
                    yield buffer
                    return
            yield buffer

    tmp = tempfile.NamedTemporaryFile(delete=False)
    for ks in keys():
        if ks:
            # Terminate every batch with a newline.  The previous code
            # joined batches back to back, fusing the last key of one
            # chunk with the first key of the next on a single line.
            tmp.write('\n'.join(ks) + '\n')
    tmp.close()

    # It's hard to get Python to clean up the memory from slave.keys(), so
    # we'll let the OS do it. You have to pass sys.executable both as the
    # thing to run and so argv[0] is set properly.
    os.execl(sys.executable, sys.executable, sys.argv[0],
             sys.argv[1], tmp.name)
def cleanup(master, slave, filename):
    """Expire small/large sets listed (one key per line) in ``filename``.

    Keys whose set cardinality is below MIN (but nonzero) or above MAX are
    given an EXPIRE-second TTL on the master.  Progress is logged; redis
    errors on a batch skip that batch rather than aborting the run.
    """
    tmp = open(filename)
    # total[0]: number of keys to process, total[1]: number dropped so far.
    total = [1, 0]

    # 'wc -l' is cheaper than reading the file twice from Python.
    # (Extraction artifact repaired: the argument list had been split.)
    p = subprocess.Popen(['wc', '-l', filename], stdout=subprocess.PIPE)
    total[0] = int(p.communicate()[0].strip().split()[0])

    def file_keys():
        # Yield keys from the dump file in lists of at most CHUNK entries.
        while 1:
            buffer = []
            for _ in xrange(CHUNK):
                line = tmp.readline()
                if line:
                    buffer.append(line.strip())
                else:
                    yield buffer
                    return
            yield buffer

    num = 0
    for ks in file_keys():
        pipe = slave.pipeline()
        for k in ks:
            pipe.scard(k)
        try:
            drop = [k for k, size in zip(ks, pipe.execute())
                    if 0 < size < MIN or size > MAX]
        except RedisError:
            continue
        num += len(ks)
        percent = round(float(num) / total[0] * 100, 1) if total[0] else 0
        total[1] += len(drop)
        log.debug('[%s %.1f%%] Dropping %s keys.' % (num, percent, len(drop)))
        pipe = master.pipeline()
        for k in drop:
            pipe.expire(k, EXPIRE)
        try:
            pipe.execute()
        except RedisError:
            continue
        time.sleep(1)  # Poor man's rate limiting.
    # Close the dump file; the original leaked the handle.
    tmp.close()

    if total[0]:
        log.info('Dropped %s keys [%.1f%%].' %
                 (total[1], round(float(total[1]) / total[0] * 100, 1)))
class Command(BaseCommand):
    help = "Clean up the redis used by cache machine."

    def handle(self, *args, **kw):
        """Drive the two-phase redis cleanup.

        With no arguments: run ``vacuum``, which dumps the slave's keys to
        a temp file and re-execs this command with that file name appended.
        With a file name argument (the re-exec'ed phase): run ``cleanup``
        on it, always removing the temp file afterwards.
        """
        try:
            master = redisutils.connections['cache']
            slave = redisutils.connections['cache_slave']
        except Exception:
            # Without both connections nothing can be done; log and bail.
            log.error('Could not connect to redis.', exc_info=True)
            return
        if args:
            filename = args[0]
            try:
                cleanup(master, slave, filename)
            finally:
                os.unlink(filename)
        else:
            vacuum(master, slave)
|
CCI-MOC/GUI-Backend | allocation/models/__init__.py | Python | apache-2.0 | 866 | 0.004619 | from allocation.models.inputs import TimeUnit, Provider, Machine, Size, Instance, InstanceHistory, Al | locationIncrease, AllocationUnlimited, AllocationRecharge, Allocation
# Re-export the model classes at package level.
# (Extraction artifact repaired: "InstanceHis | toryResult".)
from allocation.models.results import (
    InstanceHistoryResult, InstanceResult, TimePeriodResult, AllocationResult)
from allocation.models.rules import (
    Rule, GlobalRule, InstanceRule, CarryForwardTime, FilterOutRule,
    InstanceCountingRule, InstanceMultiplierRule, IgnoreStatusRule,
    IgnoreMachineRule, IgnoreProviderRule, MultiplyBurnTime, MultiplySizeCPU,
    MultiplySizeDisk, MultiplySizeRAM)
from allocation.models.strategy import (
    PythonAllocationStrategy, PythonRulesBehavior, GlobalRules, NewUserRules,
    StaffRules, MultiplySizeCPURule, IgnoreNonActiveStatus,
    PythonRefreshBehavior, OneTimeRefresh, RecurringRefresh,
    PythonCountingBehavior, FixedWindow, FixedStartSlidingWindow,
    FixedEndSlidingWindow)
|
victorpoluceno/shortener_frontend | rest_api/api.py | Python | bsd-2-clause | 1,486 | 0.002692 | from django.db import models
from tastypie.resources import ModelResource
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import DjangoAuthorization
from tastypie.throttle import CacheThrottle
from tastypie.validation import FormValidation
from gateway_backend.tasks import url_short
from gateway_backend.models import Url
from rest_api.forms import UrlForm
from django.conf import settings
class UrlThrottle(CacheThrottle):
    """Cache-based throttle that can be globally disabled via settings."""

    def should_be_throttled(self, identifier, **kwargs):
        """Throttle only when the base class says so AND throttling is enabled.

        ``settings.SHOULD_BE_THROTTLED`` defaults to True when unset.
        """
        enabled = getattr(settings, 'SHOULD_BE_THROTTLED', True)
        parent_verdict = super(UrlThrottle, self).should_be_throttled(
            identifier, **kwargs)
        return bool(parent_verdict and enabled)
class UrlResource(ModelResource):
    """REST endpoint exposing shortened URLs (list/detail and creation)."""

    class Meta:
        resource_name = 'url'
        queryset = Url.objects.all()
        fields = ['long_url', 'key']
        authentication = ApiKeyAuthentication()
        authorization = DjangoAuthorization()
        # Extraction artifact repaired ("Ur | lThrottle"); allow 100
        # requests per throttle window.
        throttle = UrlThrottle(throttle_at=100)
        allowed_methods = ['get', 'post']
        validation = FormValidation(form_class=UrlForm)
def url_post_save(sender, **kwargs):
    """post_save signal handler: queue async shortening for new Urls.

    Only fires the celery task for freshly created rows, not updates.
    """
    instance = kwargs.get('instance')
    # `created` is a bool from Django's post_save signal; test truthiness
    # instead of the non-idiomatic `== True` comparison.
    if kwargs.get('created'):
        url_short.delay(instance.id)
# Fire url_post_save after every Url save; dispatch_uid prevents the
# handler from being registered twice on repeated imports.
models.signals.post_save.connect(url_post_save, sender=Url,
                                 dispatch_uid='url_create')
|
bit-jmm/ttarm | nmf/_mm_performance.py | Python | gpl-2.0 | 1,942 | 0 | # -*- coding: utf-8 -*-
# 大規模疎行列 x 密行列演算の実行速度検証
# @nashibao
# 使い方:
# python mm_performance.py l1 l2 l3 alpha
# l1, l2: 疎行列サイズ
# l2, l3: 密行列サイズ
# alpha: 疎行列充填率
# sp_matrix(l1, l2, alpha) x dense_vec(l2, l3)
# 結果:
# 2013/01/22
# python mm_performance.py 100000 10000 10 0.005
# prepare..
# 0.437 sec
# compute..
# 0.067 sec
# python mm_performance.py 100000 100000 0.0005
# prepare..
# 0.428 sec
# 39.990176 Mbytes
# compute..
# 0.130 sec
# python mm_performance.py 3000000 10000 0.005
# prepare..
# 34.109 sec
# compute..
# 1.972 sec
# macbook pro retina 15inch
# 2.7 GHz Core i7
# 16GB 1600MHz DDR3
# OSX 10.8.2
from time import time
import numpy as np
import scipy.sparse as sp
from sys import argv, float_info
# 行列の準備
def prepare_matrices(l1=1000, l2=1000, alpha=0.1):
# indexes and values of non-zero components
# ii = np.random.randint(0, l, (int(l * l * alpha), 2))
num = int(l1 * l2 * alpha)
r = np.random.rand(num)
i1 = np.random.randint(0, l1, size=num)
i2 = np.random.randint(0, l2, size=num)
# create a lil sparse matrix
print "prepare.."
t1 = time()
# A = sp.coo_matrix((r, (i1, i2)), shape=(l, l))
A = sp.csr_matrix((r, (i1, i2)), shape=(l1, l2))
# A = sp.lil_matrix((l, l))
# for n, i in enumerate(ii):
# A[i] = r[n]
t2 = time()
print "%8.3f sec" % (t2 - t1)
print "%f Mbytes" % | ((float)(A.data.nbytes) / 1000000)
return A
def tests_sparse | _matmul(A, vec):
"""
compare speed of matrix product
"""
print "compute.."
t1 = time()
A * vec
t2 = time()
print "%8.3f sec" % (t2 - t1)
if __name__ == "__main__":
l1 = int(argv[1])
l2 = int(argv[2])
l3 = int(argv[3])
alpha = float(argv[4])
A = prepare_matrices(l1, l2, alpha)
vec = np.random.rand(l2, l3)
tests_sparse_matmul(A, vec)
|
sorenh/cc | vendor/Twisted-10.0.0/twisted/conch/test/test_recvline.py | Python | apache-2.0 | 21,585 | 0.002363 | # -*- test-case-name: twisted.conch.test.test_recvline -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.recvline} and fixtures for testing related
functionality.
"""
# Imports for the recvline tests.
# (Extraction artifact repaired: "from twi | sted.cred import portal".)
import os
import sys

from twisted.conch.insults import insults
from twisted.conch import recvline

from twisted.python import reflect, components
from twisted.internet import defer, error
from twisted.trial import unittest
from twisted.cred import portal
from twisted.test.proto_helpers import StringTransport
class Arrows(unittest.TestCase):
    """Exercise cursor movement, editing keys and history navigation of
    L{recvline.HistoricRecvLine} driven through an insults server protocol.

    Each assertion compares against a (before-cursor, after-cursor) line
    buffer tuple or a (older-history, newer-history) history buffer tuple.
    """

    def setUp(self):
        """Wire a HistoricRecvLine to a ServerProtocol over a string transport."""
        self.underlyingTransport = StringTransport()
        self.pt = insults.ServerProtocol()
        self.p = recvline.HistoricRecvLine()
        self.pt.protocolFactory = lambda: self.p
        self.pt.factory = self
        self.pt.makeConnection(self.underlyingTransport)
        # self.p.makeConnection(self.pt)

    def testPrintableCharacters(self):
        """Typed characters accumulate before the cursor."""
        self.p.keystrokeReceived('x', None)
        self.p.keystrokeReceived('y', None)
        self.p.keystrokeReceived('z', None)
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))

    def testHorizontalArrows(self):
        """Left/right arrows move the cursor and stop at both line edges."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)
        for ch in 'xyz':
            kR(ch)

        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))

        # Right at end of line is a no-op.
        kR(self.pt.RIGHT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))

        kR(self.pt.LEFT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('xy', 'z'))

        kR(self.pt.LEFT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('x', 'yz'))

        kR(self.pt.LEFT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('', 'xyz'))

        # Left at start of line is a no-op.
        kR(self.pt.LEFT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('', 'xyz'))

        kR(self.pt.RIGHT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('x', 'yz'))

        kR(self.pt.RIGHT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('xy', 'z'))

        kR(self.pt.RIGHT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))

        kR(self.pt.RIGHT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))

    def testNewline(self):
        """Only a newline commits the pending line into history."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)

        for ch in 'xyz\nabc\n123\n':
            kR(ch)

        self.assertEquals(self.p.currentHistoryBuffer(),
                          (('xyz', 'abc', '123'), ()))

        kR('c')
        kR('b')
        kR('a')
        self.assertEquals(self.p.currentHistoryBuffer(),
                          (('xyz', 'abc', '123'), ()))

        kR('\n')
        self.assertEquals(self.p.currentHistoryBuffer(),
                          (('xyz', 'abc', '123', 'cba'), ()))

    def testVerticalArrows(self):
        """Up/down arrows walk the history, clamping at either end."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)

        for ch in 'xyz\nabc\n123\n':
            kR(ch)

        self.assertEquals(self.p.currentHistoryBuffer(),
                          (('xyz', 'abc', '123'), ()))
        self.assertEquals(self.p.currentLineBuffer(), ('', ''))

        kR(self.pt.UP_ARROW)
        self.assertEquals(self.p.currentHistoryBuffer(),
                          (('xyz', 'abc'), ('123',)))
        self.assertEquals(self.p.currentLineBuffer(), ('123', ''))

        kR(self.pt.UP_ARROW)
        self.assertEquals(self.p.currentHistoryBuffer(),
                          (('xyz',), ('abc', '123')))
        self.assertEquals(self.p.currentLineBuffer(), ('abc', ''))

        kR(self.pt.UP_ARROW)
        self.assertEquals(self.p.currentHistoryBuffer(),
                          ((), ('xyz', 'abc', '123')))
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))

        # Up past the oldest entry is a no-op.
        kR(self.pt.UP_ARROW)
        self.assertEquals(self.p.currentHistoryBuffer(),
                          ((), ('xyz', 'abc', '123')))
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))

        for i in range(4):
            kR(self.pt.DOWN_ARROW)
        self.assertEquals(self.p.currentHistoryBuffer(),
                          (('xyz', 'abc', '123'), ()))

    def testHome(self):
        """HOME jumps the cursor to the beginning of the line."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)

        for ch in 'hello, world':
            kR(ch)
        self.assertEquals(self.p.currentLineBuffer(), ('hello, world', ''))

        kR(self.pt.HOME)
        self.assertEquals(self.p.currentLineBuffer(), ('', 'hello, world'))

    def testEnd(self):
        """END jumps the cursor to the end of the line."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)

        for ch in 'hello, world':
            kR(ch)
        self.assertEquals(self.p.currentLineBuffer(), ('hello, world', ''))

        kR(self.pt.HOME)
        kR(self.pt.END)
        self.assertEquals(self.p.currentLineBuffer(), ('hello, world', ''))

    def testBackspace(self):
        """Backspace removes the char left of the cursor; no-op at column 0."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)

        for ch in 'xyz':
            kR(ch)
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))

        kR(self.pt.BACKSPACE)
        self.assertEquals(self.p.currentLineBuffer(), ('xy', ''))

        kR(self.pt.LEFT_ARROW)
        kR(self.pt.BACKSPACE)
        self.assertEquals(self.p.currentLineBuffer(), ('', 'y'))

        kR(self.pt.BACKSPACE)
        self.assertEquals(self.p.currentLineBuffer(), ('', 'y'))

    def testDelete(self):
        """DELETE removes the char under the cursor; no-op at end of line."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)

        for ch in 'xyz':
            kR(ch)
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))

        kR(self.pt.DELETE)
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))

        kR(self.pt.LEFT_ARROW)
        kR(self.pt.DELETE)
        self.assertEquals(self.p.currentLineBuffer(), ('xy', ''))

        kR(self.pt.LEFT_ARROW)
        kR(self.pt.DELETE)
        self.assertEquals(self.p.currentLineBuffer(), ('x', ''))

        kR(self.pt.LEFT_ARROW)
        kR(self.pt.DELETE)
        self.assertEquals(self.p.currentLineBuffer(), ('', ''))

        kR(self.pt.DELETE)
        self.assertEquals(self.p.currentLineBuffer(), ('', ''))

    def testInsert(self):
        """In the default insert mode, typing pushes existing text right."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)

        for ch in 'xyz':
            kR(ch)

        # kR(self.pt.INSERT)

        kR(self.pt.LEFT_ARROW)
        kR('A')
        self.assertEquals(self.p.currentLineBuffer(), ('xyA', 'z'))

        kR(self.pt.LEFT_ARROW)
        kR('B')
        self.assertEquals(self.p.currentLineBuffer(), ('xyB', 'Az'))

    def testTypeover(self):
        """After toggling INSERT, typing overwrites the char under the cursor."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)

        for ch in 'xyz':
            kR(ch)

        kR(self.pt.INSERT)

        kR(self.pt.LEFT_ARROW)
        kR('A')
        self.assertEquals(self.p.currentLineBuffer(), ('xyA', ''))

        kR(self.pt.LEFT_ARROW)
        kR('B')
        self.assertEquals(self.p.currentLineBuffer(), ('xyB', ''))
from twisted.conch import telnet
from twisted.conch.insults import helper
from twisted.protocols import loopback
class EchoServer(recvline.HistoricRecvLine):
    """Line receiver that echoes each submitted line followed by a prompt."""

    def lineReceived(self, line):
        # ps/pn come from HistoricRecvLine: the prompt table and the index
        # of the currently active prompt.
        prompt = self.ps[self.pn]
        self.terminal.write(line + '\n' + prompt)
# An insults API for this would be nice.
# Raw VT102/ANSI escape sequences fed to the protocols in the tests below.
left = "\x1b[D"        # cursor left
right = "\x1b[C"       # cursor right
up = "\x1b[A"          # cursor up
down = "\x1b[B"        # cursor down
insert = "\x1b[2~"     # toggle insert/typeover mode
home = "\x1b[1~"       # jump to start of line
delete = "\x1b[3~"     # delete the char under the cursor
end = "\x1b[4~"        # jump to end of line
backspace = "\x7f"     # DEL: delete the char before the cursor
from twisted.cred import checkers
try:
from twisted.conch.ssh import userauth, transport, channel, connection, session
from twisted.conch.manhole_ssh import TerminalUser, TerminalSession, TerminalRealm, TerminalSessionTransport, ConchFactory
except ImportError:
ssh = False
else:
ssh = True
class SessionChannel(channel.SSHChannel):
name = 'session'
def __init__(self, protocolFactory, protocolArgs, protocolKwArgs, width, height, *a, **kw):
channel.SSHChannel.__init__(self, *a, **kw)
self.protocolFactory = protocolFactory
self.protocolArgs = protocolArgs
self.protocolKwArgs = protocolKwArgs
self.width = width
self.height = height
def channelOpen(self, data):
term = session.packRequest_pty_req("vt102", (self.height, self.w |
abacuspix/NFV_project | Instant_Flask_Web_Development/sched/forms.py | Python | mit | 1,083 | 0 | """Forms to render HTML input & validate request data."""
# wtforms field and validator imports.
# (Extraction artifact repaired: "TextAreaFi | eld".)
from wtforms import Form, BooleanField, DateTimeField, PasswordField
from wtforms import TextAreaField, TextField
from wtforms.validators import Length, required
class AppointmentForm(Form):
    """Render HTML input for Appointment model & validate submissions.

    This matches the models.Appointment class very closely. Where
    models.Appointment represents the domain and its persistence, this class
    represents how to display a form in HTML & accept/reject the results.
    """
    title = TextField('Title', [Length(max=255)])
    start = DateTimeField('Start', [required()])
    # Stray "| " extraction artifact removed from this field declaration.
    end = DateTimeField('End')
    allday = BooleanField('All Day')
    location = TextField('Location', [Length(max=255)])
    description = TextAreaField('Description')
class LoginForm(Form):
    """Render HTML input for user login form.

    Authentication (i.e. password verification) happens in the view function.
    """
    # Both fields are mandatory; no length limits are enforced here.
    username = TextField('Username', [required()])
    password = PasswordField('Password', [required()])
|
pyspace/test | pySPACE/tests/__init__.py | Python | gpl-3.0 | 191 | 0.010471 | """ Collection of | pySPACE system and unit tests
.. note::
The section with :mod:`~pySPACE.tests` is not really complete,
but there is a script to | run all unittests automatically.
""" |
eahneahn/free | bootstrap/gen.py | Python | agpl-3.0 | 1,091 | 0.001833 | # coding: utf-8
"""
Only use this if you want to update a bootstrapV.M.py file to a newer virtualenv!
Usage:
/path/to/specific/version/of/python gen.py
"""
import sys

import virtualenv

# Script embedded into the generated bootstrap file; virtualenv appends it
# to its own bootstrap template and calls the hooks defined here.
EXTENSION = """
# coding: utf-8
import os
from os.path import abspath, basename, dirname, join, pardir
import subprocess

# get current dir
def adjust_options(options, args):
    BOOTSTRAP_PATH = abspath(dirname(__file__))
    # erase args
    while len(args):
        args.pop()
    # set virtualenv's dir
    args.append(join(BOOTSTRAP_PATH, pardir))

# override default options
def extend_parser(parser):
    parser.set_defaults(unzip_setuptools=True,
                        use_distribute=True)

# delegate the final hooks to an external script so we don't need to change this.
def after_install(options, home_dir):
    from hooks import after_install
    after_install(options, home_dir)
"""

# the below syntax works on both 2.6 and 2.7.
filename = "bootstrap{0}.{1}.py".format(*sys.version_info)
output = virtualenv.create_bootstrap_script(EXTENSION)
# The original did `f = open(filename, 'w').write(output)`, which leaked
# the file handle and bound `f` to None; use a context manager instead.
with open(filename, 'w') as f:
    f.write(output)
|
pmoravec/sos | tests/sos_tests.py | Python | gpl-2.0 | 37,144 | 0.001023 | # This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from avocado.core.exceptions import TestSkipError
from avocado.core.output import LOG_UI
from avocado import Test
from avocado.utils import archive, process, distro, software_manager
from fnmatch import fnmatch
import glob
import json
import os
import pickle
import shutil
import socket
import re
SOS_TEST_DIR = os.path.dirname(os.path.realpath(__file__))
SOS_REPO_ROOT = os.path.realpath(os.path.join(SOS_TEST_DIR, '../'))
SOS_PLUGIN_DIR = os.path.realpath(os.path.join(SOS_REPO_ROOT, 'sos/report/plugins'))
SOS_TEST_DATA_DIR = os.path.realpath(os.path.join(SOS_TEST_DIR, 'test_data'))
SOS_BIN = os.path.realpath(os.path.join(SOS_TEST_DIR, '../bin/sos'))
RH_DIST = ['rhel', 'centos', 'fedora']
UBUNTU_DIST = ['Ubuntu', 'debian']
def skipIf(cond, message=None):
    """Decorator: skip an avocado test when ``cond`` is truthy.

    ``cond`` may be a plain value or a callable taking the test instance;
    when it evaluates true, ``TestSkipError(message)`` is raised.
    """
    def decorator(function):
        def wrapper(self, *args, **kwargs):
            if callable(cond):
                if cond(self):
                    raise TestSkipError(message)
            elif cond:
                raise TestSkipError(message)
            # Bug fix: the original wrapper never invoked the wrapped
            # function, so guarded tests silently passed without running.
            return function(self, *args, **kwargs)
        return wrapper
    return decorator
def redhat_only(tst):
    """Decorator: skip the wrapped test unless on a Red Hat family distro."""
    def wrapper(self, *args, **kwargs):
        if distro.detect().name not in RH_DIST:
            raise TestSkipError('Not running on a Red Hat distro')
        # Bug fix: the original wrapper never invoked the wrapped test, so
        # on matching distros the test body silently did not run.
        return tst(self, *args, **kwargs)
    return wrapper
def ubuntu_only(tst):
    """Decorator: skip the wrapped test unless on an Ubuntu/Debian distro."""
    def wrapper(self, *args, **kwargs):
        if distro.detect().name not in UBUNTU_DIST:
            raise TestSkipError('Not running on a Ubuntu or Debian distro')
        # Bug fix: the original wrapper never invoked the wrapped test, so
        # on matching distros the test body silently did not run.
        return tst(self, *args, **kwargs)
    return wrapper
class BaseSoSTest(Test):
"""Base class for all our test classes to build off of.
Subclasses avocado.Test and then adds wrappers and helper methods that are
needed across sos components. Component specific test classes should in
turn subclass ``BaseSoSTest`` rather than ``avocado.Test`` directly
"""
_klass_name = None
_tmpdir = None
_exception_expected = False
sos_cmd = ''
sos_timeout = 300
redhat_only = False
ubuntu_only = False
end_of_test_case = False
@property
def klass_name(self):
if not self._klass_name:
self._klass_name = os.path.basename(__file__) + '.' + self.__class__.__name__
return self._klass_name
@property
def tmpdir(self):
if not self._tmpdir:
self._tmpdir = os.getenv('AVOCADO_TESTS_COMMON_TMPDIR') + self.klass_name
return self._tmpdir
def generate_sysinfo(self):
"""Collects some basic information about the system for later reference
in individual tests
"""
sysinfo = {}
# get kernel modules
mods = []
_out = process.run('lsmod').stdout.decode()
for line in _out.splitlines()[1:]:
mods.append(line.split()[0])
# this particular kmod is both innocuous and unpredictable in terms of
# pre-loading even within the same distribution. For now, turn a blind
# eye to it with regards to the "no new kmods loaded" perspective
if 'binfmt_misc' in mods:
mods.remove('binfmt_misc')
sysinfo['modules'] = sorted(mods, key=str.lower)
# get networking info
hostname = socket.gethostname()
ip_addr = socket.gethostbyname(hostname)
sysinfo['networking'] = {}
sysinfo['networking']['hostname'] = hostname
sysinfo['networking']['ip_addr'] = ip_addr
return sysinfo
def _generate_sos_command(self):
"""Based on the specific test class that is subclassing BaseSoSTest,
perform whatever logic is necessary to create the sos command that will
be executed
"""
raise NotImplementedError
def _execute_sos_cmd(self):
"""Run the sos command for this test case, and extract it
"""
exec_cmd = self._generate_sos_command()
try:
self.cmd_output = process.run(exec_cmd, timeout=self.sos_timeout,
env={'SOS_TEST_LOGS': 'keep'})
except Exception as err:
if not hasattr(err, 'result'):
# can't inspect the exception raised, just bail out
raise
if self._exception_expected:
self.cmd_output = err.result
else:
msg = err.result.stderr.decode() or err.result.stdout.decode()
# a little hacky, but using self.log methods here will not
# print to console unless we ratchet up the verbosity for the
# entire test suite, which will become very difficult to read
| LOG_UI.error('ERROR:\n' + msg[:8196]) # don't flood w/ super verbose logs
if err.result.interrupted:
raise Exception("Timeout exceeded, see output above")
else:
| raise Exception("Command failed, see output above: '%s'"
% err.command.split('bin/')[1])
with open(os.path.join(self.tmpdir, 'output'), 'wb') as pfile:
pickle.dump(self.cmd_output, pfile)
self.cmd_output.stdout = self.cmd_output.stdout.decode()
self.cmd_output.stderr = self.cmd_output.stderr.decode()
def _setup_tmpdir(self):
if not os.path.isdir(self.tmpdir):
os.mkdir(self.tmpdir)
def _write_file_to_tmpdir(self, fname, content):
"""Write the given content to fname within the test's tmpdir
"""
fname = os.path.join(self.tmpdir, fname)
if isinstance(content, bytes):
content = content.decode()
with open(fname, 'w') as wfile:
wfile.write(content)
def read_file_from_tmpdir(self, fname):
fname = os.path.join(self.tmpdir, fname)
try:
with open(fname, 'r') as tfile:
return tfile.read()
except Exception:
pass
return ''
def _write_sysinfo(self, fname):
"""Get the current state of sysinfo and write it into our shared
tempdir so it can be loaded in setUp() later
:param fname: The name of the file to be written in the tempdir
:type fname: ``str``
"""
sysinfo = self.generate_sysinfo()
self._write_file_to_tmpdir(fname, json.dumps(sysinfo))
def _read_sysinfo(self, fname):
sysinfo = {}
content = self.read_file_from_tmpdir(fname)
if content:
sysinfo = json.loads(content)
return sysinfo
def set_pre_sysinfo(self):
self._write_sysinfo('pre_sysinfo')
def get_pre_sysinfo(self):
return self._read_sysinfo('pre_sysinfo')
def set_post_sysinfo(self):
self._write_sysinfo('post_sysinfo')
def get_post_sysinfo(self):
return self._read_sysinfo('post_sysinfo')
def get_sysinfo(self):
sinfo = {
'pre': self.get_pre_sysinfo(),
'post': self.get_post_sysinfo()
}
return sinfo
def check_distro_for_enablement(self):
"""Check if the test case is meant only for a specific distro family,
and if it is and we are not running on that family, skip all the tests
for that test case.
This allows us to define distro-specific test classes much the same way
we can define distro-specific tests _within_ a test class using the
appropriate decorators. We can't use the decorators for the class however
due to how avocado catches instantiation exceptions, so instead we need
to raise the skip exception after instantiation is done.
"""
if self.redhat_only:
if self.local_distro not in RH_DIST:
raise TestSkipError('Not running on a Red Hat distro')
elif self.ubuntu_only:
if self.local_distro not in UBUNTU_DIST:
raise TestSkipError("Not running on a Ubuntu or Debian distro")
def setUp(self):
" |
subena-io/subena | package/alerts/main.py | Python | apache-2.0 | 391 | 0.015345 | #!/usr/local/bin/python2.7
# -*-coding:UTF-8-sig -*-
"""
Module for getting alerts according to learning program
Learning Algorithm - 2015 - Subena
"""
# Extraction artifact repaired: the alias had been split ("lear | ningParameter").
import package.learning.parameter as learningParameter


def execute():
    """Derive alerts from the learning statistics."""
    # calculate alerts according to the last values
    # learningParameter.getProbaForLastMeasures(10)
    learningParameter.getAlertsFromStats()
|
akarol/cfme_tests | cfme/scripting/link_config.py | Python | gpl-2.0 | 2,905 | 0.001721 | from __future__ import print_function
import errno
import os
import click
from pathlib2 import Path
def _is_yaml_file(path):
    """Return True when *path* has a YAML suffix (plain or eyaml-encrypted)."""
    return path.suffix in {'.yaml', '.eyaml'}
def _warn_on_unknown_encryption(path):
    """Warn about missing yaml-key setup when *path* is encrypted (.eyaml)."""
    key_present = Path('.yaml_key').is_file()
    if path.suffix == '.eyaml' and not key_present:
        print(
            "WARNING:", path, "is encrypted, "
            "please remember follow the documentation on yaml keys")
def _check_missmatching_symlink(src, target):
    """Return True when *target* resolves to *src*; otherwise warn and return False."""
    if target.resolve() == src.resolve():
        return True
    print(
        "WARNING: Different symlink already exists for", target.name, "in your dest. "
        "Skipped this file. Use --force to override.")
    return False
def _check_existing_file(target):
    """Return True when *target* is free; otherwise complain and return False."""
    if not target.is_file():
        return True
    print(
        "ERROR: File", target.name, "already exists in your dest. Skipped this file. "
        "Use --force to override.")
    return False
@click.command()
@click.argument('src', type=click.Path(file_okay=False))
@click.argument('dest', type=click.Path(exists=True, file_okay=False))
@click.option('--force', is_flag=True)
def main(src, dest, force):
    """links configfiles from one folder to another

    if links exists it verifies content
    if files exist at the target side it errors

    Args:
        src: source folder
        dest: target folder
        force: override existing symlinks
    """
    src = Path(src)
    if not src.exists():
        print("WARNING:", src, "does not exist, skipping linking")
        return
    dest = Path(dest)
    for element in filter(_is_yaml_file, src.iterdir()):
        _warn_on_unknown_encryption(element)
        target = dest.joinpath(element.name)
        if force:
            try:
                # Extraction artifact repaired: ")" had been split off.
                target.symlink_to(element.resolve())
            except OSError as e:
                # Extraction artifact repaired: "e | .errno".
                if e.errno == errno.EEXIST:
                    backup_target = Path(dest.joinpath(element.name + "_bak"))
                    print("Replacing", target.name, "and saving backup as",
                          backup_target.name)
                    # Would use 'backup_target.replace()' here but that's only
                    # supported in py3
                    if backup_target.exists():
                        os.remove(str(backup_target))
                    target.rename(backup_target)
                    target.symlink_to(element.resolve())
                else:
                    raise
        else:
            if target.is_symlink():
                # If symlink already exists and points to same src, do nothing.
                _check_missmatching_symlink(src=element, target=target)
            elif _check_existing_file(target):
                target.symlink_to(element.resolve())
                print("Symlink created for", target.name)


if __name__ == '__main__':
    main()
|
mcgid/morenines | morenines/exceptions.py | Python | mit | 697 | 0.005739 | class MoreninesError(Exception):
"""The base class for unrecoverable errors during morenines execution."""
class PathError(MoreninesError):
    """Errors involving user-supplied path arguments."""

    def __init__(self, message, path, *args):
        # Keep the offending path on the exception so handlers can report it.
        self.path = path
        super(PathError, self).__init__(message, *args)
# Stray "| " extraction artifact removed from before the class keyword.
class RepositoryError(MoreninesError):
    """Errors in reading from or writing to a repository."""
# NOTE(review): presumably warnings are caught and reported without
# aborting the run, in contrast to MoreninesError -- confirm at call sites.
class MoreninesWarning(Exception):
    """The base class for unexpected but recoverable situations during morenines execution."""
class NoEffectWarning(MoreninesWarning):
    """Situations where specific user input has yielded no change in repository state."""
|
stsewd/hellopy | hellopy/config.py | Python | mit | 201 | 0 | """ Global configurations
"""
import datetime

# Name used when addressing the operator.
USER = 'user'
# wit.ai API token -- placeholder; must be replaced with a real key.
# (Extraction artifact repaired: "WIT_AI | _KEY".)
WIT_AI_KEY = 'your wit key'
# The assistant's "birth" timestamp.
BIRTH = datetime.datetime(2016, 6, 22, 12, 0, 0)
# Short language code and full speech locale identifier.
LANGUAGE_UNICODE = 'es'
LANGUAGE = 'Es-es'
# Recording length in seconds; 0 presumably means "no fixed limit" -- confirm.
RECORD_DURATION = 0
|
YangLuGitHub/Euler | src/scripts/Problem12.py | Python | mit | 4,196 | 0.00286 | # Highly divisible triangular number
# Problem 12
# The sequence of triangle numbers is generated by adding the natural numbers.
# So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The first ten terms would be:
#
# 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
#
# Let us list the factors of the first seven triangle numbers:
#
# 1: 1
# 3: 1,3
# 6: 1,2,3,6
# 10: 1,2,5,10
# 15: 1,3,5,15
# 21: 1,3,7,21
# 28: 1,2,4,7,14,28
#
# We can see that 28 is the first triangle number to have over five divisors.
#
# What is the value of the first triangle number to have over five hundred divisors?
#
# This can obviously be brute-forced, but that is suboptimal.
# Instead, recognize triangular numbers, due to being a sum of natural numbers up to n,
# are always of the form n(n+1)/2.
#
# This is much easier to factorize. Furthermore, if you write a number as a product
# of its x prime factors, in the form p_1^n_1 * p_2^n_2 * ... * (p_x)^(n_x), then
# the number of divisors equals exactly prod(n_i + 1), i from 1 to x, since every
# divisor must be some combination of the prime factors.
#
# 2^9 > 512, so not that many unique prime factors are necessary to exceed 500 total divisors.
# Nevertheless, sieve primes up to 100k, just to be safe.
#
# Optimization can be made by recognizing n and n+1 don't share any divisors except 1, and
# thus factoring them separately.
from Problem10 import optimized_sieve
MAX_N = 100000
def triangular_number_n(n):
    """Return the n-th triangular number, i.e. 1 + 2 + ... + n."""
    return (n * (n + 1)) // 2
def run():
    """Find the first triangular number with more than 500 divisors.

    Every triangular number is T_n = n * (n + 1) / 2.  Since n and n + 1
    share no factor besides 1, the even one is halved and the two halves
    are factorised independently; divisor counts of coprime factors
    multiply.
    """
    sieve = list(optimized_sieve(MAX_N))
    for n in range(1, MAX_N):
        # Exactly one of n and n + 1 is even; fold the division by 2 into it.
        if n % 2 == 0:
            factor_1 = n // 2
            factor_2 = n + 1
        else:
            factor_1 = n
            factor_2 = (n + 1) // 2
        factor_count = (_divisor_count(factor_1, sieve) *
                        _divisor_count(factor_2, sieve))
        if factor_count > 500:
            print("Triangular number {0} = {1} has {2} factors".format(
                n, triangular_number_n(n), factor_count))
            break


def _divisor_count(value, primes):
    """Return the number of divisors of ``value`` by trial division.

    Uses the identity d(p1^a1 * ... * pk^ak) = (a1 + 1) * ... * (ak + 1).
    ``primes`` must be an ascending list of primes covering sqrt(value).
    """
    count = 1
    for prime in primes:
        exponent = 1  # accumulates a_i + 1 directly
        while value % prime == 0:
            exponent += 1
            value //= prime
        count *= exponent
        if value == 1:
            break
        if prime * prime > value:
            # The remaining cofactor is prime; it contributes (1 + 1).
            count *= 2
            break
    return count
# Sample Output:
# Triangular number 12375 = 76576500 has 576 factors
#
# Total running time for Problem12.py is 0.13857281408917604 seconds
|
karanisverma/feature_langpop | librarian/menu/apps.py | Python | gpl-3.0 | 422 | 0 | """
apps. | py: apps menu item
Copyright 2014-2015, Outernet Inc.
Some rights reserved.
This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""
from . import MenuItem
from bottle_utils.i18n import lazy_gettext as _
class AppsMenuItem(MenuItem):
    """Main-menu entry that links to the application list view."""

    label = _("Apps")      # translatable caption shown in the menu
    icon_class = 'apps'    # CSS class selecting the menu icon
    route = 'apps:list'    # named route this entry resolves to
|
Donkyhotay/MoonPy | twisted/internet/threads.py | Python | gpl-3.0 | 3,490 | 0.000573 | # Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Extended thread dispatching support.
For basic support see reactor threading API docs.
Maintainer: Itamar Shtull-Trauring
"""
import Queue
from twisted.python import failure
from twisted.internet import defer
def deferToThreadPool(reactor, threadpool, f, *args, **kwargs):
    """
    Call the function C{f} using a thread from the given threadpool and return
    the result as a Deferred.
    This function is only used by client code which is maintaining its own
    threadpool.  To run a function in the reactor's threadpool, use
    C{deferToThread}.
    @param reactor: The reactor in whose main thread the Deferred will be
        invoked.
    @param threadpool: An object which supports the C{callInThreadWithCallback}
        method of C{twisted.python.threadpool.ThreadPool}.
    @param f: The function to call.
    @param *args: positional arguments to pass to f.
    @param **kwargs: keyword arguments to pass to f.
    @return: A Deferred which fires a callback with the result of f, or an
        errback with a L{twisted.python.failure.Failure} if f throws an
        exception.
    """
    d = defer.Deferred()
    # onResult is invoked in the worker thread; marshal the outcome back to
    # the reactor thread, since Deferreds are not thread-safe.
    def onResult(success, result):
        if success:
            reactor.callFromThread(d.callback, result)
        else:
            reactor.callFromThread(d.errback, result)
    threadpool.callInThreadWithCallback(onResult, f, *args, **kwargs)
    return d
def deferToThread(f, *args, **kwargs):
    """
    Run a function in a thread and return the result as a Deferred.
    @param f: The function to call.
    @param *args: positional arguments to pass to f.
    @param **kwargs: keyword arguments to pass to f.
    @return: A Deferred which fires a callback with the result of f,
        or an errback with a L{twisted.python.failure.Failure} if f throws
        an exception.
    """
    # Imported here to avoid a circular import at module load time.
    from twisted.internet import reactor
    return deferToThreadPool(reactor, reactor.getThreadPool(),
                             f, *args, **kwargs)
def _runMultiple(tupleList):
"""
Run a list of functions.
"""
for f, args, kwargs in tupleList:
f(*args, **kwargs)
def callMultipleInThread(tupleList):
    """
    Run a list of functions in the same thread.
    tupleList should be a list of (function, argsList, kwargsDict) tuples.
    """
    # Imported here to avoid a circular import at module load time.
    from twisted.internet import reactor
    # All the calls are executed sequentially by one worker thread.
    reactor.callInThread(_runMultiple, tupleList)
def blockingCallFromThread(reactor, f, *a, **kw):
    """
    Run a function in the reactor from a thread, and wait for the result
    synchronously, i.e. until the callback chain returned by the function
    get a result.
    @param reactor: The L{IReactorThreads} provider which will be used to
        schedule the function call.
    @param f: the callable to run in the reactor thread
    @type f: any callable.
    @param a: the arguments to pass to C{f}.
    @param kw: the keyword arguments to pass to C{f}.
    @return: the result of the callback chain.
    @raise: any error raised during the callback chain.
    """
    # A queue is the thread-safe rendezvous between the reactor thread
    # (producer) and the calling thread (consumer).
    queue = Queue.Queue()
    def _callFromThread():
        # Runs in the reactor thread; maybeDeferred normalizes plain results,
        # raised exceptions and Deferreds into a single Deferred.
        result = defer.maybeDeferred(f, *a, **kw)
        result.addBoth(queue.put)
    reactor.callFromThread(_callFromThread)
    # Blocks the calling thread until the Deferred fires with a result
    # or a Failure.
    result = queue.get()
    if isinstance(result, failure.Failure):
        # Re-raise the original exception in the calling thread.
        result.raiseException()
    return result
__all__ = ["deferToThread", "callMultipleInThread", "blockingCallFromThread"]
|
jtraver/dev | python/subprocess/write2.py | Python | mit | 1,052 | 0.005703 | #!/usr/bin/python
import subprocess
import os
import shutil
import pty
# Experiment: drive an interactive child process through a pseudo-terminal so
# that its output can be read back while feeding it input on stdin.
master, slave = pty.openpty()
args = ('stdin1.py')
# NOTE(review): 'args' is a plain string, not a 1-tuple (no trailing comma);
# this only works because shell=True is used below.
# popen = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd='.')
# http://stackoverflow.com/questions/5411780/python-run-a-daemon-sub-process-read-stdout/5413588#5413588
# not working
popen = subprocess.Popen(args, shell=True, stdin=subprocess.PIPE, stdout=slave, stderr=slave, close_fds=True, cwd='.')
# Wrap the master end of the pty as a readable file object.
stdout = os.fdopen(master)
# set the O_NONBLOCK flag of p.stdout file descriptor:
# flags = fcntl(popen1.stdout, F_GETFL) # get current popen1.stdout flags
# fcntl(popen1.stdout, F_SETFL, flags | O_NONBLOCK)
popen.stdin.write("this is line 0\n")
# line = popen.stdout.readline()
# line = stdout.readline()
# print "line = %s" % line
# communicate() closes stdin after sending the input and waits for exit.
out, err = popen.communicate(input = "this is line 1\nthis is line 2\n\d")
if err != None:
    print 'errput = %s' % str(err)
print "output = %s" % str(out)
# Drain whatever the child wrote to the pty; blocks until the slave side
# is closed.
out2 = stdout.read()
print "output = %s" % str(out)
subprocess.call(['echo', ''])
|
Akelio-zhang/sxk-microblog | app/models.py | Python | bsd-3-clause | 3,738 | 0.002675 | from app import db
from app import app
from config import WHOOSH_ENABLED
import requests, json, re
import sys
if sys.version_info >= (3, 0):
enable_search = False
else:
enable_search = WHOOSH_ENABLED
if enable_search:
import flask.ext.whooshalchemy as whooshalchemy
followers = db.Table('followers',
db.Column('follower_id', db.Integer, db.ForeignKey('user.id')),
db.Column('followed_id', db.Integer, db.ForeignKey('user.id'))
)
class User(db.Model):
    """A registered user, including follower relationships and Steam info."""
    id = db.Column(db.Integer, primary_key=True)
    nickname = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    steam_id = db.Column(db.String(40), index=True, unique=True)
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    about_me = db.Column(db.String(140))
    last_seen = db.Column(db.DateTime)
    # Self-referential many-to-many: the users this user follows.
    followed = db.relationship('User',
                               secondary=followers,
                               primaryjoin=(followers.c.follower_id == id),
                               secondaryjoin=(followers.c.followed_id == id),
                               backref=db.backref('followers', lazy='dynamic'),
                               lazy='dynamic')

    def is_authenticated(self):
        """Flask-Login hook: every stored user counts as authenticated."""
        return True

    def is_active(self):
        """Flask-Login hook: accounts are never deactivated."""
        return True

    def is_anonymous(self):
        """Flask-Login hook: stored users are never anonymous."""
        return False

    def get_id(self):
        """Return the user id as text, as required by Flask-Login."""
        try:
            return unicode(self.id)  # python 2
        except NameError:
            return str(self.id)  # python 3

    def avatar(self, size='s'):
        """Return an avatar URL.

        When a steam_id is set, fetch the Steam profile and return the
        avatar for the requested size ('s'mall default, 'm'edium, 'f'ull);
        otherwise return the bundled default image.
        """
        # NOTE(review): the Steam Web API key is hard-coded in the URL below;
        # it should be moved into configuration.
        if self.steam_id:
            url = 'http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?key=090B8FBA169177A04FFB10777A0C2F41&steamids=' + self.steam_id
            data = json.loads(requests.get(url).text)
            if size == 'm':
                s = data['response']['players'][0]['avatarmedium']
            elif size == 'f':
                s = data['response']['players'][0]['avatarfull']
            else:
                s = data['response']['players'][0]['avatar']
        else:
            s = '/static/img/default.jpg'
        return s

    @staticmethod
    def make_unique_nickname(nickname):
        """Append an increasing numeric suffix until the nickname is unused."""
        if User.query.filter_by(nickname=nickname).first() is None:
            return nickname
        version = 2
        while True:
            new_nickname = nickname + str(version)
            if User.query.filter_by(nickname=new_nickname).first() is None:
                break
            version += 1
        return new_nickname

    @staticmethod
    def make_valid_nickname(nickname):
        """Strip every character that is not alphanumeric, '_' or '.'."""
        return re.sub('[^a-zA-Z0-9_\.]', '', nickname)

    def follow(self, user):
        """Start following user; returns self for chaining, or None when
        already following."""
        if not self.is_following(user):
            self.followed.append(user)
            return self

    def unfollow(self, user):
        """Stop following user; returns self for chaining, or None when not
        currently following."""
        if self.is_following(user):
            self.followed.remove(user)
            return self

    def is_following(self, user):
        """True when this user already follows user."""
        return self.followed.filter(followers.c.followed_id == user.id).count() > 0

    def followed_posts(self):
        """Posts authored by followed users, newest first (lazy query)."""
        return Post.query.join(followers, (followers.c.followed_id == Post.user_id)).filter(followers.c.follower_id == self.id).order_by(Post.timestamp.desc())

    def __repr__(self):
        return '<User %r>' % (self.nickname)
class Post(db.Model):
    """A single microblog post."""
    # Field(s) indexed by the Whoosh full-text search integration below.
    __searchable__ = ['body']
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.String(140))
    timestamp = db.Column(db.DateTime)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    # Detected language code of the body (e.g. for per-post translation).
    language = db.Column(db.String(5))
    def __repr__(self):
        return '<Post %r>' % (self.body)
# Register the Post model with the Whoosh full-text index (only available
# when running under Python 2 and WHOOSH_ENABLED is set -- see module top).
if enable_search:
    whooshalchemy.whoosh_index(app, Post)
albertz/music-player | mac/pyobjc-core/Lib/objc/_framework.py | Python | bsd-2-clause | 632 | 0.003165 | """
Generic framework path manipulation
"""
__all__ = ['infoForFramework']

# This regexp should find:
#   \1 - framework location
#   \2 - framework name
#   \3 - framework version (optional)
#
# (The '.' before 'framework' is unescaped and therefore matches any single
# character; kept as-is to preserve the existing matching behaviour.)
FRAMEWORK_RE_STR = r"""(^.*)(?:^|/)(\w+).framework(?:/(?:Versions/([^/]+)/)?\2)?$"""
FRAMEWORK_RE = None


def infoForFramework(filename):
    """returns (location, name, version) or None"""
    global FRAMEWORK_RE
    if FRAMEWORK_RE is None:
        # Compile lazily so importing this module does not pay the cost.
        import re
        FRAMEWORK_RE = re.compile(FRAMEWORK_RE_STR)
    is_framework = FRAMEWORK_RE.findall(filename)
    if not is_framework:
        return None
    # findall yields (location, name, version) tuples; use the last match.
    return is_framework[-1]
|
luogangyi/bcec-nova | nova/tests/api/openstack/compute/contrib/test_server_external_events.py | Python | apache-2.0 | 6,660 | 0 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
import webob
from nova.api.openstack.compute.contrib import server_external_events
from nova import context
from nova import exception
from nova.objects import instance as instance_obj
from nova import test
# Canned Instance objects keyed by uuid, standing in for database lookups.
fake_instances = {
    '00000000-0000-0000-0000-000000000001': instance_obj.Instance(
        uuid='00000000-0000-0000-0000-000000000001', host='host1'),
    '00000000-0000-0000-0000-000000000002': instance_obj.Instance(
        uuid='00000000-0000-0000-0000-000000000002', host='host1'),
    '00000000-0000-0000-0000-000000000003': instance_obj.Instance(
        uuid='00000000-0000-0000-0000-000000000003', host='host2'),
    # An instance with no host: events for it cannot be dispatched.
    '00000000-0000-0000-0000-000000000004': instance_obj.Instance(
        uuid='00000000-0000-0000-0000-000000000004', host=None),
}
fake_instance_uuids = sorted(fake_instances.keys())
# A uuid that is deliberately absent from fake_instances.
MISSING_UUID = '00000000-0000-0000-0000-000000000005'
@classmethod
def fake_get_by_uuid(cls, context, uuid):
    # Stand-in for Instance.get_by_uuid, backed by the fake_instances dict.
    try:
        return fake_instances[uuid]
    except KeyError:
        raise exception.InstanceNotFound(instance_id=uuid)
@mock.patch('nova.objects.instance.Instance.get_by_uuid', fake_get_by_uuid)
class ServerExternalEventsTest(test.NoDBTestCase):
    """Tests for the os-server-external-events API extension."""

    def setUp(self):
        super(ServerExternalEventsTest, self).setUp()
        self.api = server_external_events.ServerExternalEventsController()
        self.context = context.get_admin_context()
        self.event_1 = {'name': 'network-vif-plugged',
                        'tag': 'foo',
                        'server_uuid': fake_instance_uuids[0]}
        self.event_2 = {'name': 'network-changed',
                        'server_uuid': fake_instance_uuids[1]}
        self.default_body = {'events': [self.event_1, self.event_2]}
        # The expected response mirrors the request with code/status added.
        self.resp_event_1 = dict(self.event_1)
        self.resp_event_1['code'] = 200
        self.resp_event_1['status'] = 'completed'
        self.resp_event_2 = dict(self.event_2)
        self.resp_event_2['code'] = 200
        self.resp_event_2['status'] = 'completed'
        self.default_resp_body = {'events': [self.resp_event_1,
                                             self.resp_event_2]}

    def _create_req(self, body):
        """Build a POST request carrying body as its JSON payload."""
        req = webob.Request.blank('/v2/fake/os-server-external-events')
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        req.body = json.dumps(body)
        return req

    def _assert_call(self, req, body, expected_uuids, expected_events):
        """Run the request and check which instances/events reached compute.

        Returns the (response body, HTTP status code) pair.
        """
        with mock.patch.object(self.api.compute_api,
                               'external_instance_event') as api_method:
            response = self.api.create(req, body)
            result = response.obj
            code = response._code
        self.assertEqual(1, api_method.call_count)
        for inst in api_method.call_args_list[0][0][1]:
            expected_uuids.remove(inst.uuid)
        self.assertEqual([], expected_uuids)
        for event in api_method.call_args_list[0][0][2]:
            expected_events.remove(event.name)
        self.assertEqual([], expected_events)
        return result, code

    def test_create(self):
        req = self._create_req(self.default_body)
        result, code = self._assert_call(req, self.default_body,
                                         fake_instance_uuids[:2],
                                         ['network-vif-plugged',
                                          'network-changed'])
        self.assertEqual(self.default_resp_body, result)
        self.assertEqual(200, code)

    def test_create_one_bad_instance(self):
        body = self.default_body
        body['events'][1]['server_uuid'] = MISSING_UUID
        req = self._create_req(body)
        result, code = self._assert_call(req, body, [fake_instance_uuids[0]],
                                         ['network-vif-plugged'])
        self.assertEqual('failed', result['events'][1]['status'])
        self.assertEqual(200, result['events'][0]['code'])
        self.assertEqual(404, result['events'][1]['code'])
        self.assertEqual(207, code)

    def test_create_event_instance_has_no_host(self):
        body = self.default_body
        body['events'][0]['server_uuid'] = fake_instance_uuids[-1]
        req = self._create_req(body)
        # the instance without host should not be passed to the compute layer
        result, code = self._assert_call(req, body,
                                         [fake_instance_uuids[1]],
                                         ['network-changed'])
        self.assertEqual(422, result['events'][0]['code'])
        self.assertEqual('failed', result['events'][0]['status'])
        self.assertEqual(200, result['events'][1]['code'])
        self.assertEqual(207, code)

    def test_create_no_good_instances(self):
        body = self.default_body
        body['events'][0]['server_uuid'] = MISSING_UUID
        body['events'][1]['server_uuid'] = MISSING_UUID
        req = self._create_req(body)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.api.create, req, body)

    def test_create_bad_status(self):
        body = self.default_body
        body['events'][1]['status'] = 'foo'
        req = self._create_req(body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.api.create, req, body)

    def test_create_extra_gorp(self):
        body = self.default_body
        body['events'][0]['foobar'] = 'bad stuff'
        req = self._create_req(body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.api.create, req, body)

    def test_create_bad_events(self):
        body = {'events': 'foo'}
        req = self._create_req(body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.api.create, req, body)

    def test_create_bad_body(self):
        body = {'foo': 'bar'}
        req = self._create_req(body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.api.create, req, body)
|
grongor/school_rfid | lib/nmap-6.40/zenmap/zenmapGUI/DiffCompare.py | Python | gpl-2.0 | 22,849 | 0.002495 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact sales@insecure.com). Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of | the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. | *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the special and conditions of the license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact fyodor@nmap.org with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, are happy to help. As mentioned above, we also *
# * offer alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email sales@insecure.com for *
# * further information. *
# * *
# * If you received these files with a written license agreement or *
# * contract stating terms other than the terms above, then that *
# * alternative license agreement takes precedence over these comments. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the dev@nmap.org mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will |
ctrlspc/PyPrinterMonitor | pyPrinterMonitor/test.py | Python | gpl-3.0 | 507 | 0.005917 | '''
Created on Jan 11, 2012
@author: jjm20
'''
import pika
connection = pika.BlockingConnection(pika.Connectio | nParameters(
host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='RT_Events_q')
print ' [*] Waiting for messages. To exit press CTRL+C'
def callback(ch, method, properties, body):
print " [x] Received %r" % (body,)
channel.basic_consume(callback,
queue='RT_Events_q',
no_ack=True)
channel.start | _consuming() |
gautsi/pmareport | pmareport/predictors.py | Python | bsd-3-clause | 6,290 | 0.000159 | # -*- coding: utf-8 -*-
'''
The model used to predict appointment duration is a decision tree.
The model is evaluated by the precentage of predicted times that are within
a threshold (5 minutes by default) of the actual duration.
The class `DurationPredictor` splits the data into testing and training, builds
the model (using scikit-learn's implementation of decision tree)
and evaluates the model both on a cross validation split of the training set
and on the test set.
`DurationPredictor` also includes functionality to
turn non-integer categorical features into ints, which scikit-learn's decision
tree implementation requires.
'''
import pandas as pd
from sklearn import tree
from sklearn import cross_validation
import numpy as np
def read_data(fp='../data/pmadata.csv'):
    """Load the clinic appointment data into a pandas dataframe.

    :param str fp: path of the CSV file to read
    :returns: the parsed data
    :rtype: dataframe
    """
    return pd.read_csv(fp)
def percent_within(y_true, y_pred, thresh=5):
    """Return the percentage of predictions within ``thresh`` of the truth.

    :param array-like y_true: the true values
    :param array-like y_pred: the predicted values
    :param float thresh: maximum absolute error counted as "close"
    :returns: the share of close predictions, as a percentage
    :rtype: float
    """
    close_enough = np.abs(y_true - y_pred) < thresh
    return np.sum(close_enough) / float(len(y_true)) * 100
class DurationPredictor(object):
    '''
    A model to predict the duration of an appointment.

    For example, let's make a dataframe with random data in columns `feat1`
    and `response`.

    >>> df = pd.DataFrame(np.random.randn(30,2), columns=['feat1', 'response'])

    We add a column `feat2` with categorical values ('a' or 'b').

    >>> df['feat2'] = np.random.choice(['a', 'b'], 30)

    Let's make a `DurationPredictor` object from our example dataframe.

    >>> dec_pred = DurationPredictor(
    ...     df=df,
    ...     feat_cols=['feat1', 'feat2'],
    ...     response_col='response'
    ...     )

    To turn `feat2` into a column of ints
    (which scikit-learn's decision tree implementation requires),
    we use `make_int`.

    >>> dec_pred.make_int(col='feat2')

    We split our data set into train and test with 10% left out to test.

    >>> dec_pred.train_test(test_size=0.1)

    Now let's make the model, a decision tree of maximum depth 3,
    and get its average score on a 10-fold cross validation split.
    The score is the percentage of predictions within 5 minutes
    of the acutal value.

    >>> dec_pred.make_model(max_depth=3)
    >>> cv_score = dec_pred.cv_evalution(thresh=5)
    >>> cv_score >= 0 and cv_score <= 100
    True

    Fit the model on the full training set and evaluate it on the test set.

    >>> test_score = dec_pred.fit()
    >>> test_score >= 0 and test_score <= 100
    True

    :param dataframe df: the data
    :param list feat_cols: a list of the names of the feature columns
    :param str response_col: the name of the response column
    '''

    def __init__(self, df, feat_cols, response_col):
        self.df = df
        self.feat_cols = feat_cols
        self.response_col = response_col
        # Maps column name -> the category-to-int encoder built by make_int.
        self.int_funcs = {}

    def make_int(self, col):
        '''
        Encode categorical variables of type other than int
        as ints for input into the decision tree.

        :param str col: the name of the column with categorical values
        '''
        categories = list(set(self.df[col]))
        int_func = lambda x: categories.index(x)
        # The encoded column replaces the original in the feature list.
        self.df[col+'i'] = self.df[col].apply(int_func)
        self.feat_cols.remove(col)
        self.feat_cols.append(col+'i')
        self.int_funcs[col] = int_func

    def train_test(self, test_size=0.1):
        '''
        Split the data into train and test sets.

        :param float test_size: the percentage of rows to leave out as test
        '''
        self.train, self.test = cross_validation.train_test_split(
            self.df,
            test_size=test_size
            )
        self.Xtrain = self.train[self.feat_cols]
        self.ytrain = self.train[self.response_col]
        self.Xtest = self.test[self.feat_cols]
        self.ytest = self.test[self.response_col]

    def make_model(self, max_depth=3):
        '''
        Make the model, a decision tree with maximum depth `max_depth`.

        :param max_depth: the maximum depth of the decision tree
        '''
        self.model = tree.DecisionTreeRegressor(max_depth=max_depth)

    def cv_evalution(self, n_folds=10, thresh=5):
        '''
        Evaluate the model on a cross valdation split
        of the training data with `n_folds` nmber of folds.
        The metric is the percent of predictions within `thresh`
        of the true value.

        :param int n_folds: the number of folds for the cross validation
        :param float thresh:
            the threshold for considering a prediction close to the true value
        :returns: the average of metric values over the folds
        :rtype: float
        '''
        cv = cross_validation.KFold(len(self.train), n_folds=n_folds)
        score_list = []
        for train, test in cv:
            cvXtrain = self.Xtrain.iloc[train]
            cvXtest = self.Xtrain.iloc[test]
            cvytrain = self.ytrain.iloc[train]
            cvytest = self.ytrain.iloc[test]
            self.model.fit(cvXtrain, cvytrain)
            pred = self.model.predict(cvXtest)
            # BUGFIX: was hard-coded to thresh=5, ignoring the parameter.
            score = percent_within(y_true=cvytest, y_pred=pred, thresh=thresh)
            score_list.append(score)
        return np.mean(score_list)

    def fit(self, thresh=5):
        '''
        Fit the model on the training set and evaluate it
        on the test set. The metric is the percent of
        predictions within `thresh` of the true value.

        :param float thresh:
            the threshold for considering a prediction close to the true value
        :returns: the score of the model on the test set
        :rtype: float
        '''
        self.model.fit(self.Xtrain, self.ytrain)
        predictions = self.model.predict(self.Xtest)
        score = percent_within(
            y_true=self.ytest,
            y_pred=predictions,
            thresh=thresh
            )
        return score
|
dolph/python-keystoneclient | keystoneclient/v2_0/tokens.py | Python | apache-2.0 | 1,387 | 0 | from keystoneclient import base
class Token(base.Resource):
    """A keystone token, wrapping the raw serialized response dict."""

    def __repr__(self):
        return "<Token %s>" % self._info

    @property
    def id(self):
        """The token's identifier string."""
        return self._info['token']['id']

    @property
    def expires(self):
        """The token's expiry timestamp."""
        return self._info['token']['expires']

    @property
    def tenant(self):
        """The tenant the token is scoped to, or None when unscoped."""
        return self._info['token'].get('tenant', None)
class TokenManager(base.ManagerWithFind):
    """Manager for the v2.0 /tokens API resource."""
    resource_class = Token

    def authenticate(self, username=None, tenant_id=None, tenant_name=None,
                     password=None, token=None, return_raw=False):
        """Obtain a token from a username/password pair or an existing token.

        A tenant id or name may also be supplied to scope the new token.

        :raises ValueError: when neither credentials nor a token are given.
        """
        if token:
            params = {"auth": {"token": {"id": token}}}
        elif username and password:
            params = {"auth": {"passwordCredentials": {"username": username,
                                                       "password": password}}}
        else:
            raise ValueError('A username and password or token is required.')
        if tenant_id:
            params['auth']['tenantId'] = tenant_id
        elif tenant_name:
            params['auth']['tenantName'] = tenant_name
        return self._create('/tokens', params, "access", return_raw=return_raw)

    def delete(self, token):
        """Revoke the given token (object or id)."""
        return self._delete("/tokens/%s" % base.getid(token))

    def endpoints(self, token):
        """List the service endpoints valid for the given token."""
        return self._get("/tokens/%s/endpoints" % base.getid(token), "token")
|
lucventurini/mikado | Mikado/subprograms/pick.py | Python | lgpl-3.0 | 23,916 | 0.005812 | #!/usr/bin/env python3
# coding: utf-8
"""Launcher of the Mikado pick step."""
import argparse
import re
import sys
import os
from typing import Union, Dict
from ._utils import check_log_settings_and_create_logger, _set_pick_mode
import marshmallow
from ..configuration import DaijinConfiguration, MikadoConfiguration
from ..exceptions import InvalidConfiguration
from ..utilities.log_utils import create_default_logger, create_null_logger
from ..utilities import to_region, percentage
from ..utilities import IntervalTree, Interval
from ..configuration.configurator import load_and_validate_config
from ..picking import Picker
def _parse_regions(regions_string: Union[None, str]) -> Union[None, Dict[str, IntervalTree]]:
    """Turn a region string, or a file of region lines, into interval trees
    keyed by chromosome. Returns None when no region string was given."""
    if regions_string is None:
        return None

    regions = dict()
    if not os.path.exists(regions_string):
        # A single literal region such as "chrom:start..end".
        chrom, start, end = to_region(regions_string)
        regions[chrom] = IntervalTree.from_intervals([Interval(start, end)])
        return regions

    # Otherwise it is a file containing one region per line.
    with open(regions_string) as f_regions:
        for counter, line in enumerate(f_regions, start=1):
            try:
                chrom, start, end = to_region(line)
            except ValueError:
                raise ValueError(f"Invalid region line, no. {counter}: {line}")
            if chrom not in regions:
                regions[chrom] = IntervalTree()
            regions[chrom].add(Interval(start, end))
    return regions
def _set_pick_output_options(conf: Union[DaijinConfiguration, MikadoConfiguration], args,
                             logger=create_null_logger()) -> Union[DaijinConfiguration, MikadoConfiguration]:
    """Fold the output-related CLI switches into the pick configuration,
    leaving unset options at their configured values."""
    out_fmt = conf.pick.output_format
    if args.source is not None:
        out_fmt.source = args.source
    if args.prefix is not None:
        out_fmt.id_prefix = args.prefix
    if args.report_all_external_metrics is True:
        out_fmt.report_all_external_metrics = True
    if args.report_all_orfs is True:
        out_fmt.report_all_orfs = True
    if args.log:
        conf.pick.files.log = args.log
    # Output file names are forced to carry a GFF extension.
    gff_pat = re.compile(r"\.(gff3|gff)")
    if args.loci_out:
        conf.pick.files.loci_out = args.loci_out if gff_pat.search(args.loci_out) \
            else "{0}.gff3".format(args.loci_out)
    if args.monoloci_out:
        conf.pick.files.monoloci_out = args.monoloci_out if gff_pat.search(args.monoloci_out) \
            else "{0}.gff3".format(args.monoloci_out)
    if args.subloci_out:
        conf.pick.files.subloci_out = args.subloci_out if gff_pat.search(args.subloci_out) \
            else "{0}.gff3".format(args.subloci_out)
    return conf
def _set_pick_run_options(conf: Union[DaijinConfiguration, MikadoConfiguration], args,
                          logger=create_null_logger()) -> Union[DaijinConfiguration, MikadoConfiguration]:
    """Copy the run-option switches from the CLI namespace onto ``conf.pick.run_options``.

    CLI flags only ever *enable* behaviour here; options that were not given
    on the command line keep their configured values.

    :param conf: Mikado/Daijin configuration object, mutated in place.
    :param args: parsed command-line namespace.
    :param logger: unused; kept for signature parity with the sibling setters.
    :return: the mutated configuration object.
    """
    run_options = conf.pick.run_options
    run_options.single_thread = args.single
    if args.no_cds is True:
        run_options.exclude_cds = True
    if args.intron_range is not None:
        run_options.intron_range = tuple(sorted(args.intron_range))
    if args.shm is not None:
        # Any explicit value (even a falsy one) switches shared memory on.
        run_options.shm = True
    if args.only_reference_update is True:
        run_options.only_reference_update = True
        run_options.reference_update = True
    if args.reference_update is True:
        run_options.reference_update = True
    if args.check_references is True:
        run_options.check_references = True
    return conf
def _set_pick_clustering_options(conf: Union[DaijinConfiguration, MikadoConfiguration],
                                 args) -> Union[DaijinConfiguration, MikadoConfiguration]:
    """Copy the locus-clustering switches from the CLI namespace onto ``conf.pick.clustering``.

    :param conf: Mikado/Daijin configuration object, mutated in place.
    :param args: parsed command-line namespace.
    :return: the mutated configuration object.
    """
    clustering = conf.pick.clustering
    if args.no_purge is True:
        clustering.purge = False
    if args.flank is not None:
        clustering.flank = args.flank
    if args.min_clustering_cds_overlap:
        clustering.min_cds_overlap = args.min_clustering_cds_overlap
    if args.cds_only:
        clustering.cds_only = True
    if args.min_clustering_cdna_overlap is not None:
        clustering.min_cdna_overlap = args.min_clustering_cdna_overlap
        # If only the cDNA threshold was given, it drives the CDS threshold too.
        if args.min_clustering_cds_overlap is None:
            clustering.min_cds_overlap = args.min_clustering_cdna_overlap
    return conf
def _set_pick_as_options(conf: Union[DaijinConfiguration, MikadoConfiguration],
                         args) -> Union[DaijinConfiguration, MikadoConfiguration]:
    """Copy the alternative-splicing switches from the CLI namespace onto
    ``conf.pick.alternative_splicing``.

    :param conf: Mikado/Daijin configuration object, mutated in place.
    :param args: parsed command-line namespace.
    :return: the mutated configuration object.
    """
    asp = conf.pick.alternative_splicing
    if args.pad is True:
        asp.pad = True
    # Bug fix: ``ts_max_splices`` and ``ts_distance`` are numeric padding limits.
    # The previous code assigned the literal ``True`` instead of the value
    # provided on the command line, silently discarding the user's setting.
    if args.pad_max_splices:
        asp.ts_max_splices = args.pad_max_splices
    if args.pad_max_distance is not None:
        asp.ts_distance = args.pad_max_distance
    if args.as_cds_only is True:
        asp.cds_only = True
    if args.keep_disrupted_cds is True:
        asp.keep_cds_disrupted_by_ri = True
    if args.exclude_retained_introns is True:
        asp.keep_retained_introns = False
    return conf
def _set_conf_values_from_args(conf: Union[DaijinConfiguration, MikadoConfiguration], args,
                               logger=create_null_logger()) -> Union[DaijinConfiguration, MikadoConfiguration]:
    """Apply every ``mikado pick`` command-line override to the configuration.

    Top-level settings (multiprocessing method, threads, seed, scoring file,
    maximum intron length, codon table) are handled here; the pick-specific
    groups are delegated to the ``_set_pick_*`` helpers. The final object is
    re-validated before being returned.

    :param conf: Mikado/Daijin configuration object, mutated in place.
    :param args: parsed command-line namespace.
    :param logger: logger used to report validation failures.
    :return: the validated configuration object.
    :raises marshmallow.exceptions.MarshmallowError: if the resulting
        configuration does not validate.
    """
    conf.multiprocessing_method = args.start_method if args.start_method else conf.multiprocessing_method
    conf.threads = args.procs if args.procs is not None else conf.threads
    # ``--random-seed`` wins over an explicit ``--seed``; otherwise keep the configured seed.
    if args.random_seed is True:
        conf.seed = None
    elif args.seed is not None:
        conf.seed = args.seed
    else:
        pass
    conf.pick.scoring_file = args.scoring_file if args.scoring_file is not None else conf.pick.scoring_file
    conf.prepare.max_intron_length = args.max_intron_length if args.max_intron_length is not None else \
        conf.prepare.max_intron_length
    # argparse may hand back booleans/None for a missing flag; only forward a real value (as string).
    conf.serialise.codon_table = str(args.codon_table) if args.codon_table not in (False, None, True) \
        else conf.serialise.codon_table
    conf = _set_pick_output_options(conf, args)
    conf = _set_pick_mode(conf, args.mode)
    conf = _set_pick_run_options(conf, args)
    conf = _set_pick_clustering_options(conf, args)
    conf = _set_pick_as_options(conf, args)
    try:
        conf = load_and_validate_config(conf, logger=logger)
    except marshmallow.exceptions.MarshmallowError as exc:
        logger.critical("Invalid options specified for the configuration: {}".format(exc))
        raise exc
    return conf
def _check_db(conf: Union[MikadoConfiguration, DaijinConfiguration], args,
logger=create_null_logger()) -> Union[MikadoConfiguration, DaijinConfiguration]:
logger.debug("Checking the database")
if args.sqlite_db is not None:
if not os.path.exists(args.sqlite_db):
exc = InvalidConfiguration(f"Mikado database {args.sqlite_db} not found. Exiting.")
logger.critical(exc)
raise exc
logger.debug(f"Setting the database from the CLI to {args.sqlite_db}")
conf.db_settings.db = args.sqlite_db
conf.db_settings.dbtype = "sqlite"
if conf.db_settings.dbtype == "sqlite":
raw = conf.db_settings.db
db_basename = os.path.basename(conf.db_settings.db)
__compound = os.path.join(conf.pick.files.output_dir, db_basename)
__base = os.path.join(conf.pick.files.output_dir, db_basename)
|
serzans/wagtail | wagtail/wagtailsearch/tests/test_elasticsearch_backend.py | Python | bsd-3-clause | 38,709 | 0.003126 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import unittest
import datetime
import json
import mock
from elasticsearch.serializer import JSONSerializer
from django.test import TestCase
from django.db.models import Q
from wagtail.wagtailsearch.backends import get_search_backend
from wagtail.wagtailsearch.backends.elasticsearch import (
ElasticSearch,
ElasticSearchMapping,
ElasticSearchResults,
ElasticSearchQuery,
ElasticSearchAtomicIndexRebuilder,
)
from wagtail.tests.search import models
from .test_backends import BackendTests
class TestElasticSearchBackend(BackendTests, TestCase):
    """Integration tests for the Elasticsearch search backend (indexing, partial
    matching, analysers, ordering and operators)."""

    backend_path = 'wagtail.wagtailsearch.backends.elasticsearch'

    def test_search_with_spaces_only(self):
        """A whitespace-only query must not raise."""
        # Search for some space characters and hope it doesn't crash
        results = self.backend.search("   ", models.SearchTest)
        # Queries are lazily evaluated, force it to run
        list(results)
        # Didn't crash, yay!

    def test_filter_on_non_filterindex_field(self):
        """Filtering on a field not declared filterable must raise FieldError."""
        # id is not listed in the search_fields for SearchTest; this should raise a FieldError
        from wagtail.wagtailsearch.backends.base import FieldError
        with self.assertRaises(FieldError):
            list(self.backend.search("Hello", models.SearchTest, filters=dict(id=42)))

    def test_filter_with_unsupported_lookup_type(self):
        """An unsupported lookup type (iregex) must raise FilterError."""
        from wagtail.wagtailsearch.backends.base import FilterError
        with self.assertRaises(FilterError):
            list(self.backend.search("Hello", models.SearchTest, filters=dict(title__iregex='h(ea)llo')))

    def test_partial_search(self):
        """A prefix of an indexed word matches via the partial (edgengram) index."""
        # Reset the index
        self.backend.reset_index()
        self.backend.add_type(models.SearchTest)
        self.backend.add_type(models.SearchTestChild)
        # Add some test data
        obj = models.SearchTest()
        obj.title = "HelloWorld"
        obj.live = True
        obj.save()
        self.backend.add(obj)
        # Refresh the index
        self.backend.refresh_index()
        # Search and check
        results = self.backend.search("HelloW", models.SearchTest.objects.all())
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].id, obj.id)

    def test_child_partial_search(self):
        """Partial matching also works on fields declared on a child model."""
        # Reset the index
        self.backend.reset_index()
        self.backend.add_type(models.SearchTest)
        self.backend.add_type(models.SearchTestChild)
        obj = models.SearchTestChild()
        obj.title = "WorldHello"
        obj.subtitle = "HelloWorld"
        obj.live = True
        obj.save()
        self.backend.add(obj)
        # Refresh the index
        self.backend.refresh_index()
        # Search and check
        results = self.backend.search("HelloW", models.SearchTest.objects.all())
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].id, obj.id)

    def test_ascii_folding(self):
        """Accented characters are folded to ASCII so "Hello" matches "Ĥéllø"."""
        # Reset the index
        self.backend.reset_index()
        self.backend.add_type(models.SearchTest)
        self.backend.add_type(models.SearchTestChild)
        # Add some test data
        obj = models.SearchTest()
        obj.title = "Ĥéllø"
        obj.live = True
        obj.save()
        self.backend.add(obj)
        # Refresh the index
        self.backend.refresh_index()
        # Search and check
        results = self.backend.search("Hello", models.SearchTest.objects.all())
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].id, obj.id)

    def test_query_analyser(self):
        """
        This is testing that fields that use edgengram_analyzer as their index analyser do not
        have it also as their query analyser
        """
        # Reset the index
        self.backend.reset_index()
        self.backend.add_type(models.SearchTest)
        self.backend.add_type(models.SearchTestChild)
        # Add some test data
        obj = models.SearchTest()
        obj.title = "Hello"
        obj.live = True
        obj.save()
        self.backend.add(obj)
        # Refresh the index
        self.backend.refresh_index()
        # Test search for "Hello"
        results = self.backend.search("Hello", models.SearchTest.objects.all())
        # Should find the result
        self.assertEqual(len(results), 1)
        # Test search for "Horse"
        results = self.backend.search("Horse", models.SearchTest.objects.all())
        # Even though they both start with the letter "H". This should not be considered a match
        self.assertEqual(len(results), 0)

    def test_search_with_hyphen(self):
        """
        This tests that punctuation characters are treated the same
        way in both indexing and querying.
        See: https://github.com/torchbox/wagtail/issues/937
        """
        # Reset the index
        self.backend.reset_index()
        self.backend.add_type(models.SearchTest)
        self.backend.add_type(models.SearchTestChild)
        # Add some test data
        obj = models.SearchTest()
        obj.title = "Hello-World"
        obj.live = True
        obj.save()
        self.backend.add(obj)
        # Refresh the index
        self.backend.refresh_index()
        # Test search for "Hello-World"
        results = self.backend.search("Hello-World", models.SearchTest.objects.all())
        # Should find the result
        self.assertEqual(len(results), 1)

    def test_custom_ordering(self):
        """With order_by_relevance=False the queryset's own ordering is honoured."""
        # Reset the index
        self.backend.reset_index()
        self.backend.add_type(models.SearchTest)
        # Add some test data
        # a is more relevant, but b is more recent
        a = models.SearchTest()
        a.title = "Hello Hello World"
        a.live = True
        a.published_date = datetime.date(2015, 10, 11)
        a.save()
        self.backend.add(a)
        b = models.SearchTest()
        b.title = "Hello World"
        b.live = True
        b.published_date = datetime.date(2015, 10, 12)
        b.save()
        self.backend.add(b)
        # Refresh the index
        self.backend.refresh_index()
        # Do a search ordered by relevence
        results = self.backend.search("Hello", models.SearchTest.objects.all())
        self.assertEqual(list(results), [a, b])
        # Do a search ordered by published date
        results = self.backend.search("Hello", models.SearchTest.objects.order_by('-published_date'), order_by_relevance=False)
        self.assertEqual(list(results), [b, a])

    def test_and_operator_with_single_field(self):
        """Regression test: "and" operator restricted to a single field must not error."""
        # Testing for bug #1859
        # Reset the index
        self.backend.reset_index()
        self.backend.add_type(models.SearchTest)
        a = models.SearchTest()
        a.title = "Hello World"
        a.live = True
        a.published_date = datetime.date(2015, 10, 12)
        a.save()
        self.backend.add(a)
        # Refresh the index
        self.backend.refresh_index()
        # Run query with "and" operator and single field
        results = self.backend.search("Hello World", models.SearchTest, operator='and', fields=['title'])
        self.assertEqual(list(results), [a])
class | TestElasticSearchQuery(TestCase):
def assertDictEqual(self, a, b):
default = JSONSerializer().default
self.assertEqual(json.dumps(a, sort_key | s=True, default=default), json.dumps(b, sort_keys=True, default=default))
def test_simple(self):
# Create a query
query = ElasticSearchQuery(models.SearchTest.objects.all(), "Hello")
# Check it
expected_result = {'filtered': {'filter': {'prefix': {'content_type': 'searchtests_searchtest'}}, 'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
self.assertDictEqual(query.get_query(), expected_result)
def test_none_query_string(self):
# Create a query
query = ElasticSearchQuery(models.SearchTest.objects.all(), None)
# Check it
expected_result = {'filtered': {'filter': {'prefix': {'content_type': 'searchtests_searchtest'}}, 'query': {'match_all': {}}}}
self.assertDictEqual(query.get_query(), expected_result)
def test_and_operator(s |
MOOCworkbench/MOOCworkbench | pylint_manager/tests.py | Python | mit | 2,789 | 0.003586 | import os
from unittest.mock import patch
from django.contrib.auth.models import User
from django.core.management import call_command
from django.test import TestCase
from experiments_manager.models import ChosenExperimentSteps, Experiment
from git_manager.models import GitRepository
from user_manager.models import WorkbenchUser
from .utils import run_pylint
from .models import PylintResult, PylintScanResult
class PylintManagerTestCase(TestCase):
    """Integration test for gathering pylint results for an experiment's GitHub repository."""

    def setUp(self):
        """Load the workbench fixtures and create the user, repository and experiment under test."""
        call_command('loaddata', 'fixtures/steps.json', verbosity=0)
        call_command('loaddata', 'fixtures/measures.json', verbosity=0)
        call_command('loaddata', 'fixtures/package_categories_languages.json', verbosity=0)
        call_command('loaddata', 'fixtures/cookiecutter.json', verbosity=0)
        self.user = User.objects.create_user('test', 'test@test.nl', 'test')
        self.workbench_user = WorkbenchUser.objects.get(user=self.user)
        self.git_repo = GitRepository.objects.create(name='Workbench-Acceptance-Experiment',
                                                     owner=self.workbench_user,
                                                     github_url='https://github.com/jlmdegoede/Workbench-Acceptance-Experiment.git')
        self.experiment = Experiment.objects.create(title='Experiment',
                                                    description='test',
                                                    owner=self.workbench_user,
                                                    git_repo=self.git_repo,
                                                    language_id=1,
                                                    template_id=2)
        self.chosen_experiment_step = ChosenExperimentSteps.objects.create(step_id=1,
                                                                           experiment=self.experiment,
                                                                           step_nr=1,
                                                                           active=True,
                                                                           location='/src/data/')

    @patch('git_manager.helpers.github_helper.GitHubHelper._get_social_token')
    def test_run_pylint(self, mock_social_token):
        """Test if to gather pylint results from a GitHub repository"""
        # The GitHub token comes from the environment so the mocked helper can reach the repo.
        github_token = os.environ.get('GITHUB_TOKEN')
        mock_social_token.return_value = github_token
        run_pylint(self.experiment)
        pylint_scan = self.experiment.pylint
        pylint_scan_result = PylintScanResult.objects.filter(for_project=pylint_scan)
        pylint_results = PylintResult.objects.filter(for_result=pylint_scan_result)
        # Fix: the original asserted ``pylint_results`` twice; check both querysets instead.
        self.assertIsNotNone(pylint_scan_result)
        self.assertIsNotNone(pylint_results)
xtao/code | tests/webtests/api/test_follow.py | Python | bsd-3-clause | 2,864 | 0 | # encoding: utf-8
from .base import APITestCase
from vilya.models.user import User
class FollowTest(APITestCase):
    """API tests for the user follow/unfollow endpoints."""

    def setUp(self):
        """Create two users and an API token for each of them."""
        super(FollowTest, self).setUp()
        user_name1 = 'zhangchi'
        email = '%s@douban.com' % user_name1
        self.zhangchi = User(user_name1, email)
        user_name2 = 'lijunpeng'
        email = '%s@douban.com' % user_name2
        self.lijunpeng = User(user_name2, email)
        self.api_token_zhangchi = self.create_api_token('zhangchi')
        self.api_token_lijunpeng = self.create_api_token('lijunpeng')

    def test_get_auth_user_following(self):
        """Following/followers listings are scoped to the authenticated user."""
        self.zhangchi.follow('qingfeng')
        self.zhangchi.follow('lisong_intern')
        self.zhangchi.follow('xingben')
        # Without a bearer token the following list is empty.
        ret = self.app.get(
            "/api/user/following/",
            status=200
        ).json
        self.assertEquals(ret, [])
        # Authenticated as zhangchi: all three followed users are returned.
        ret = self.app.get(
            "/api/user/following/",
            headers=dict(
                Authorization="Bearer %s" % self.api_token_zhangchi.token),
            status=200
        ).json
        self.assertEquals(len(ret), 3)
        user_name_list = map(lambda x: x['username'], ret)
        self.assertTrue('xingben' in user_name_list)
        # Two other users follow zhangchi; they must show up as followers.
        User('test1', 'test1@douban.com').follow('zhangchi')
        User('test2', 'test2@douban.com').follow('zhangchi')
        ret = self.app.get(
            "/api/user/followers",
            headers=dict(
                Authorization="Bearer %s" % self.api_token_zhangchi.token),
            status=200
        ).json
        self.assertEquals(len(ret), 2)
        user_name_list = map(lambda x: x['username'], ret)
        self.assertTrue('test1' in user_name_list)

    def test_follow(self):
        """Follow lifecycle: 404 (not following) -> PUT 204 -> GET 204 -> DELETE 204 -> 404."""
        self.app.get(
            "/api/user/following/%s/" % (self.lijunpeng.username),
            headers=dict(
                Authorization="Bearer %s" % self.api_token_zhangchi.token),
            status=404
        )
        self.app.put(
            "/api/user/following/%s/" % (self.lijunpeng.username),
            headers=dict(
                Authorization="Bearer %s" % self.api_token_zhangchi.token),
            status=204
        )
        self.app.get(
            "/api/user/following/%s/" % (self.lijunpeng.username),
            headers=dict(
                Authorization="Bearer %s" % self.api_token_zhangchi.token),
            status=204
        )
        self.app.delete(
            "/api/user/following/%s/" % (self.lijunpeng.username),
            headers=dict(
                Authorization="Bearer %s" % self.api_token_zhangchi.token),
            status=204
        )
        self.app.get(
            "/api/user/following/%s/" % (self.lijunpeng.username),
            headers=dict(
                Authorization="Bearer %s" % self.api_token_zhangchi.token),
            status=404
        )
genialis/resolwe-bio | resolwe_bio/filters.py | Python | apache-2.0 | 4,334 | 0.002769 | """.. Ignore pydocstyle D400.
===================
Resolwe Bio Filters
===================
"""
import django_filters as filters
from rest_framework import exceptions
from resolwe.flow.filters import CollectionFilter, DataFilter, EntityFilter
from resolwe_bio.models import Sample
class BioCollectionFilter(CollectionFilter):
    """Filter the collection endpoint.
    Enable filtering collections by the entity.
    .. IMPORTANT::
        :class:`CollectionViewSet` must be patched before using it in
        urls to enable this feature:
        .. code:: python
            CollectionViewSet.filter_class = BioCollectionFilter
    """
    # Exposes ``?sample=<pk>`` as an alias for filtering on the ``entity`` relation.
    sample = filters.ModelChoiceFilter(
        field_name="entity", queryset=Sample.objects.all()
    )
class BioEntityFilter(EntityFilter):
    """Filter the entity (sample) endpoint.
    Enable filtering entities by fields of their sample descriptor.
    .. IMPORTANT::
        :class:`EntityViewSet` must be patched before using it in
        urls to enable this feature:
        .. code:: python
            EntityViewSet.filter_class = BioEntityFilter
    """
    # Each filter below exposes a lookup over a JSON path inside the sample
    # descriptor; the filter name doubles as the query parameter name.
    descriptor__subject_information__sample_label__icontains = filters.CharFilter(
        field_name="descriptor__subject_information__sample_label",
        lookup_expr="icontains",
    )
    descriptor__subject_information__subject_id__icontains = filters.CharFilter(
        field_name="descriptor__subject_information__subject_id",
        lookup_expr="icontains",
    )
    # Batch is numeric in the descriptor, hence the custom exact-number method.
    descriptor__subject_information__batch__exact = filters.CharFilter(
        field_name="descriptor__subject_information__batch",
        method="filter_exact_number",
    )
    descriptor__subject_information__group__iexact = filters.CharFilter(
        field_name="descriptor__subject_information__group", lookup_expr="iexact"
    )
    descriptor__disease_information__disease_type__icontains = filters.CharFilter(
        field_name="descriptor__disease_information__disease_type",
        lookup_expr="icontains",
    )
    descriptor__disease_information__disease_status__iexact = filters.CharFilter(
        field_name="descriptor__disease_information__disease_status",
        lookup_expr="iexact",
    )
    descriptor__immuno_oncology_treatment_type__io_drug__iexact = filters.CharFilter(
        field_name="descriptor__immuno_oncology_treatment_type__io_drug",
        lookup_expr="iexact",
    )
    descriptor__immuno_oncology_treatment_type__io_treatment__iexact = (
        filters.CharFilter(
            field_name="descriptor__immuno_oncology_treatment_type__io_treatment",
            lookup_expr="iexact",
        )
    )
    descriptor__response_and_survival_analysis__confirmed_bor__iexact = (
        filters.CharFilter(
            field_name="descriptor__response_and_survival_analysis__confirmed_bor",
            lookup_expr="iexact",
        )
    )
    descriptor__response_and_survival_analysis__pfs_event__iexact = filters.CharFilter(
        field_name="descriptor__response_and_survival_analysis__pfs_event",
        lookup_expr="iexact",
    )
    descriptor__general__description__icontains = filters.CharFilter(
        field_name="descriptor__general__description", lookup_expr="icontains"
    )
    descriptor__general__biosample_source__icontains = filters.CharFilter(
        field_name="descriptor__general__biosample_source", lookup_expr="icontains"
    )
    descriptor__general__biosample_treatment__icontains = filters.CharFilter(
        field_name="descriptor__general__biosample_treatment", lookup_expr="icontains"
    )
    def filter_exact_number(self, queryset, name, value):
        """Transform value into an integer and filter by exact value.

        :raises rest_framework.exceptions.ParseError: if ``value`` is not numeric,
            so the client receives a 400 instead of a server error.
        """
        try:
            value = int(value)
        except ValueError:
            raise exceptions.ParseError(f"Value of attribute {name} must be a number.")
        return queryset.filter(**{name: value})
class BioDataFilter(DataFilter):
    """Filter the data endpoint.
    Enable filtering data by the sample.
    .. IMPORTANT::
        :class:`DataViewSet` must be patched before using it in urls to
        enable this feature:
        .. code:: python
            DataViewSet.filter_class = BioDataFilter
    """
    # Exposes ``?sample=<pk>`` as an alias for filtering on the ``entity`` relation.
    sample = filters.ModelChoiceFilter(
        field_name="entity", queryset=Sample.objects.all()
    )
|
rlbabyuk/integration_tests | cfme/tests/configure/test_rest_access_control.py | Python | gpl-2.0 | 16,442 | 0.001095 | # -*- coding: utf-8 -*-
import fauxfactory
import py | test
from cfme import test_requirements
from cfme.base.credential import Credential
from cfme.configure.access_control import User
from cfme.login impor | t login, login_admin
from cfme.rest.gen_data import groups as _groups
from cfme.rest.gen_data import roles as _roles
from cfme.rest.gen_data import tenants as _tenants
from cfme.rest.gen_data import users as _users
from utils import error
from utils.wait import wait_for
pytestmark = [
test_requirements.auth
]
class TestTenantsViaREST(object):
    """CRUD tests for the ``tenants`` REST collection."""

    @pytest.fixture(scope="function")
    def tenants(self, request, appliance):
        """Create three tenants through the REST API and return them."""
        num_tenants = 3
        response = _tenants(request, appliance.rest_api, num=num_tenants)
        assert appliance.rest_api.response.status_code == 200
        assert len(response) == num_tenants
        return response

    @pytest.mark.tier(3)
    def test_create_tenants(self, appliance, tenants):
        """Tests creating tenants.
        Metadata:
            test_flag: rest
        """
        for tenant in tenants:
            record = appliance.rest_api.collections.tenants.get(id=tenant.id)
            assert appliance.rest_api.response.status_code == 200
            assert record.name == tenant.name

    @pytest.mark.tier(2)
    @pytest.mark.parametrize("multiple", [False, True], ids=["one_request", "multiple_requests"])
    def test_edit_tenants(self, appliance, tenants, multiple):
        """Tests editing tenants.
        Metadata:
            test_flag: rest
        """
        collection = appliance.rest_api.collections.tenants
        tenants_len = len(tenants)
        new = []
        for _ in range(tenants_len):
            new.append(
                {'name': 'test_tenants_{}'.format(fauxfactory.gen_alphanumeric().lower())})
        if multiple:
            # Single request editing all records at once.
            for index in range(tenants_len):
                new[index].update(tenants[index]._ref_repr())
            edited = collection.action.edit(*new)
            assert appliance.rest_api.response.status_code == 200
        else:
            # One request per record.
            edited = []
            for index in range(tenants_len):
                edited.append(tenants[index].action.edit(**new[index]))
                assert appliance.rest_api.response.status_code == 200
        assert tenants_len == len(edited)
        for index in range(tenants_len):
            record, _ = wait_for(
                lambda: collection.find_by(name=new[index]['name']) or False,
                num_sec=180,
                delay=10,
            )
            assert record[0].id == edited[index].id
            assert record[0].name == edited[index].name

    @pytest.mark.tier(3)
    @pytest.mark.parametrize("method", ["post", "delete"], ids=["POST", "DELETE"])
    def test_delete_tenants_from_detail(self, appliance, tenants, method):
        """Tests deleting tenants from detail.
        Metadata:
            test_flag: rest
        """
        # DELETE returns 204 (no content); POST-based delete returns 200.
        status = 204 if method == "delete" else 200
        for tenant in tenants:
            tenant.action.delete(force_method=method)
            assert appliance.rest_api.response.status_code == status
            with error.expected("ActiveRecord::RecordNotFound"):
                tenant.action.delete(force_method=method)
            assert appliance.rest_api.response.status_code == 404

    @pytest.mark.tier(3)
    def test_delete_tenants_from_collection(self, appliance, tenants):
        """Tests deleting tenants from collection.
        Metadata:
            test_flag: rest
        """
        appliance.rest_api.collections.tenants.action.delete(*tenants)
        assert appliance.rest_api.response.status_code == 200
        with error.expected("ActiveRecord::RecordNotFound"):
            appliance.rest_api.collections.tenants.action.delete(*tenants)
        assert appliance.rest_api.response.status_code == 404
class TestRolesViaREST(object):
@pytest.fixture(scope="function")
def roles(self, request, appliance):
num_roles = 3
response = _roles(request, appliance.rest_api, num=num_roles)
assert appliance.rest_api.response.status_code == 200
assert len(response) == num_roles
return response
@pytest.mark.tier(3)
def test_create_roles(self, appliance, roles):
"""Tests creating roles.
Metadata:
test_flag: rest
"""
for role in roles:
record = appliance.rest_api.collections.roles.get(id=role.id)
assert appliance.rest_api.response.status_code == 200
assert record.name == role.name
@pytest.mark.tier(2)
@pytest.mark.parametrize("multiple", [False, True], ids=["one_request", "multiple_requests"])
def test_edit_roles(self, appliance, roles, multiple):
"""Tests editing roles.
Metadata:
test_flag: rest
"""
collection = appliance.rest_api.collections.roles
roles_len = len(roles)
new = []
for _ in range(roles_len):
new.append(
{'name': 'test_role_{}'.format(fauxfactory.gen_alphanumeric())})
if multiple:
for index in range(roles_len):
new[index].update(roles[index]._ref_repr())
edited = collection.action.edit(*new)
assert appliance.rest_api.response.status_code == 200
else:
edited = []
for index in range(roles_len):
edited.append(roles[index].action.edit(**new[index]))
assert appliance.rest_api.response.status_code == 200
assert roles_len == len(edited)
for index in range(roles_len):
record, _ = wait_for(
lambda: collection.find_by(name=new[index]['name']) or False,
num_sec=180,
delay=10,
)
assert record[0].id == edited[index].id
assert record[0].name == edited[index].name
@pytest.mark.tier(3)
@pytest.mark.parametrize("method", ["post", "delete"], ids=["POST", "DELETE"])
def test_delete_roles_from_detail(self, appliance, roles, method):
"""Tests deleting roles from detail.
Metadata:
test_flag: rest
"""
status = 204 if method == "delete" else 200
for role in roles:
role.action.delete(force_method=method)
assert appliance.rest_api.response.status_code == status
with error.expected("ActiveRecord::RecordNotFound"):
role.action.delete(force_method=method)
assert appliance.rest_api.response.status_code == 404
@pytest.mark.tier(3)
def test_delete_roles_from_collection(self, appliance, roles):
"""Tests deleting roles from collection.
Metadata:
test_flag: rest
"""
appliance.rest_api.collections.roles.action.delete(*roles)
assert appliance.rest_api.response.status_code == 200
with error.expected("ActiveRecord::RecordNotFound"):
appliance.rest_api.collections.roles.action.delete(*roles)
assert appliance.rest_api.response.status_code == 404
@pytest.mark.tier(3)
def test_add_delete_role(self, appliance):
"""Tests adding role using "add" action and deleting it.
Metadata:
test_flag: rest
"""
role_data = {"name": "role_name_{}".format(format(fauxfactory.gen_alphanumeric()))}
role = appliance.rest_api.collections.roles.action.add(role_data)[0]
assert appliance.rest_api.response.status_code == 200
assert role.name == role_data["name"]
wait_for(
lambda: appliance.rest_api.collections.roles.find_by(name=role.name) or False,
num_sec=180,
delay=10,
)
found_role = appliance.rest_api.collections.roles.get(name=role.name)
assert found_role.name == role_data["name"]
role.action.delete()
assert appliance.rest_api.response.status_code == 200
with error.expected("ActiveRecord::RecordNotFound"):
role.action.delete()
assert appliance.rest_api.response.status_code == 404
@pytest.mark.tier(3)
def t |
UmSenhorQualquer/d-track | dolphintracker/singlecam_tracker/pool_camera.py | Python | mit | 3,437 | 0.045388 | import cv2, numpy as np
from dolphintracker.singlecam_tracker.camera_filter.FindDolphin import SearchBlobs
from dolphintracker.singlecam_tracker.camera_filter.BackGroundDetector import BackGroundDetector
import datetime
class PoolCamera(object):
	"""One pool camera: wraps a video capture, the calibrated scene and the
	per-colour target filters, and yields one detected blob per filter."""

	def __init__(self, videofile, name, scene, maskObjectsNames, filters, frames_range=None):
		"""
		:param videofile: path of the video file to open.
		:param name: camera name; must match a camera defined in ``scene``.
		:param scene: calibrated 3D scene used for point projection.
		:param maskObjectsNames: names of scene objects whose projections define the search mask.
		:param filters: colour filters, one per tracked target.
		:param frames_range: optional (first, last) frame interval to process.
		"""
		self.name = name
		self.videoCap = cv2.VideoCapture(videofile)
		self.scene = scene
		self.filters = filters
		self.mask = self.create_mask(maskObjectsNames)
		self.frames_range = frames_range
		self._searchblobs = SearchBlobs()
		self._backgrounds = []  # lazily computed on first process(); one per filter
		self._last_centroid = None
		if self.frames_range is not None:
			self.videoCap.set(cv2.CAP_PROP_POS_FRAMES, self.frames_range[0])
			print('set first frame', self.frames_range)
		self._total_frames = self.videoCap.get(7)  # 7 == cv2.CAP_PROP_FRAME_COUNT
		self._colors = [(255,0,0),(0,255,0),(0,0,255)]

	def create_mask(self, objectsNames):
		"""Build a binary mask (255 inside) from the projected convex hulls of the
		named scene objects; only points with z < 0.2 are used (presumably the
		region near the water surface — units unverified)."""
		mask = np.zeros((self.img_height,self.img_width), np.uint8)
		for objname in objectsNames:
			obj = self.scene.getObject(objname)
			ptsProjection = self.points_projection( [p for p in obj.points if p[2]<0.2] )
			hull = cv2.convexHull(np.int32(ptsProjection))
			cv2.fillPoly(mask, np.int32([hull]), 255)
		return mask

	def read(self):
		"""Advance to the next frame; keep an untouched copy in ``originalFrame``."""
		res, self.frame = self.videoCap.read()
		if res:
			self.originalFrame = self.frame.copy()
		else:
			self.originalFrame = None
		return res

	def process(self):
		"""Detect one blob per filter in the current frame.

		On the first call a background image is estimated for every filter by
		scanning a fraction of the video, after which the capture position is
		rewound. Returns a list with one blob (or None) per filter.
		"""
		if len(self._backgrounds)==0:
			for i, colorFilter in enumerate(self.filters):
				firstFrame = self.frame_index
				bgDetector = BackGroundDetector(capture=self.videoCap, filterFunction=colorFilter.process)
				print('Background detection parameters', self._total_frames*0.04, self._total_frames*0.03)
				last_frame = self.frames_range[1] if self.frames_range is not None else None
				bg = bgDetector.detect(int(self._total_frames*0.04), int(self._total_frames*0.03), 180, last_frame)
				# Grow the background regions, invert, and binarise so 255 marks usable pixels.
				bg = cv2.dilate( bg, kernel=cv2.getStructuringElement( cv2.MORPH_RECT, (5,5) ), iterations=2 )
				bg = 255-bg
				bg[bg<255]=0
				self._backgrounds.append( cv2.bitwise_and(bg, self.mask) )
			# NOTE(review): ``firstFrame`` is the position captured before the LAST
			# filter's background scan, not before the whole loop — confirm intent.
			self.frame_index = firstFrame
		result = []
		for i, colorFilter in enumerate(self.filters):
			filterResult = colorFilter.filter(self.frame, self._backgrounds[i])
			blobs = self._searchblobs.process(filterResult)
			res = blobs[0] if len(blobs)>=1 else None
			result.append(res)
		return result

	def create_empty_mask(self): return np.zeros( (self.img_height, self.img_width), np.uint8 )

	def points_projection(self, points): cam = self.scene_camera; return [cam.calcPixel(*p) for p in points]

	# Convenience accessors over the scene and the underlying cv2.VideoCapture.
	@property
	def scene_camera(self): return self.scene.getCamera(self.name)

	@property
	def img_width(self): return int( self.videoCap.get(cv2.CAP_PROP_FRAME_WIDTH) )

	@property
	def img_height(self): return int( self.videoCap.get(cv2.CAP_PROP_FRAME_HEIGHT) )

	@property
	def fps(self): return int( self.videoCap.get(cv2.CAP_PROP_FPS) )

	@property
	def frame_index(self): return int( self.videoCap.get(cv2.CAP_PROP_POS_FRAMES) )

	@frame_index.setter
	def frame_index(self, value): self.videoCap.set(cv2.CAP_PROP_POS_FRAMES, value)

	@property
	def currentTime(self):
		"""Current position in the video as a ``datetime.timedelta``."""
		milli = self.videoCap.get(cv2.CAP_PROP_POS_MSEC)
		return datetime.timedelta(milliseconds=milli)

	@property
	def totalFrames(self): return self.videoCap.get(cv2.CAP_PROP_FRAME_COUNT)
flowersteam/explauto | explauto/models/motor_primitive.py | Python | gpl-3.0 | 2,072 | 0.000965 | from numpy import linspace, array, arange, tile, dot, zeros
from .gaussian import | Gaussian
f | rom ..utils import rk4
class BasisFunctions(object):
    """Set of normalized Gaussian basis functions evenly spread over a movement duration."""

    def __init__(self, n_basis, duration, dt, sigma):
        """
        :param int n_basis: number of basis functions
        :param float duration: movement duration in seconds
        :param float dt: sampling period of the time grid
        :param float sigma: width factor; larger values give narrower Gaussians
        """
        self.n_basis = n_basis
        means = linspace(0, duration, n_basis)
        # FIXME: heuristic variance formula — to be revisited (original author note)
        variances = duration / (sigma * n_basis)**2
        gaussians = [Gaussian(array([means[k]]), array([[variances]]))
                     for k in range(len(means))]
        self.x = arange(0., duration, dt)
        y = array([gaussians[k].normal(self.x.reshape(-1, 1)) for k in range(len(means))])
        # Normalize so the basis activations sum to one at every time step.
        self.z = y / tile(sum(y, 0), (n_basis, 1))

    def trajectory(self, weights):
        """Return the time series obtained by weighting the basis functions."""
        return dot(weights, self.z)
class MovementPrimitive(object):
    """Movement primitive: an acceleration profile encoded as a weighted sum of
    Gaussian basis functions, integrated into a trajectory with RK4."""

    def __init__(self, duration, n_basis, dt, stiffness=0., damping=0.):
        """
        :param float duration: duration of the movement in seconds
        :param int n_basis: number of basis functions encoding the acceleration
        :param float dt: time step used for numerical integration
        :param float stiffness: spring-like gain pulling the position back to zero
        :param float damping: gain damping the velocity
        """
        self.dt = dt
        self.duration = duration
        self.stiffness = stiffness
        self.damping = damping
        self.basis = BasisFunctions(n_basis, self.duration, dt, 2.)
        # Bug fix: ``duration / dt`` is a float, which NumPy rejects as an array
        # shape (and as an index below); compute the step count explicitly.
        self.n_steps = int(round(self.duration / dt))
        self.traj = zeros((self.n_steps, 3))
        self.acc = zeros(self.n_steps)

    def acceleration(self, t, state):
        """Return [velocity, acceleration] at time ``t`` for ``state`` = [pos, vel]."""
        intrinsic_acc = - self.stiffness*state[0] - self.damping*state[1]
        # RK4 also evaluates at t + dt, which would index one past the end of
        # ``acc``; clamp to the last sample (replaces the old "+1" workaround).
        idx = min(int(t / self.dt), len(self.acc) - 1)
        return array([state[1], self.acc[idx] + intrinsic_acc])

    def trajectory(self, x0, command):
        """Integrate the primitive from state ``x0`` = (pos, vel) under ``command``
        (basis-function weights); returns the (n_steps, 3) array of
        [position, velocity, acceleration] rows."""
        self.acc = self.basis.trajectory(command)
        t = 0.
        self.traj[0, :] = [x0[0], x0[1], self.acc[0]]
        i_t = 1
        state = x0
        while i_t < self.n_steps:
            t, state = rk4(t, self.dt, state, self.acceleration)
            self.traj[i_t, :] = [state[0], state[1], self.acc[i_t]]
            i_t += 1
        return self.traj
|
tjhunter/phd-thesis-tjhunter | python/myscript.py | Python | apache-2.0 | 164 | 0.012195 | from build import save_name
f = open(save_name | ("myfile.txt"),'w')
f.write("XXXXX")
f.close()
f = open(save_name("dir/myfile2.txt"),'w')
f.write("XXXXX")
f.close() | |
Xeralux/tensorflow | tensorflow/contrib/py2tf/utils/py_func.py | Python | apache-2.0 | 4,890 | 0.007771 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the spec | ific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pyfunc creation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import script_ops
class MatchDType(namedtuple('MatchDType', ('arg_number',))):
  """Marker requesting the dtype of one of the call's arguments.

  Used in conjunction with function calls; for instance, MatchDType(0)
  resolves to the DType of the first (zeroth) argument.
  """
def wrap_py_func(f, return_dtypes, args, kwargs=None, use_dummy_return=False):
  """Helper that wraps a callable to py_func.
  The helper passes tensor arguments through the py_func interface. Non-tensor
  arguments are allowed, and will be passed to f directly. Note that non-tensor
  arguments are captured by f will not update every time the wrapper is
  called (this is consistent with its argument list, which only includes
  the tensor arguments). In general, it's safest not to reuse this wrapper.
  Args:
    f: Callable
    return_dtypes: None, individual of tuple/list of DType or MatchDType, the
        data type for each of f's return value(s). Set to None if f has no
        return values or use_dummy_return is True. Use MatchDType to define a
        dtype identical to that of `i`th argument (argument 0 is the first);
        an argument must of Tensor type if it is to be used with MatchDType.
    args: Positional arguments for f, as list or tuple.
    kwargs: Keyword arguments for f, as dict with string keys. May be None.
    use_dummy_return: If True, the function will return a dummy value of 1
        and discard its actual return value.
  Returns:
    The return values of f converted to tensor.
  Raises:
    ValueError: if any of the arguments are incorrect.
  """
  if return_dtypes and use_dummy_return:
    raise ValueError('if use_dummy_return is True, return_dtypes must be empty')
  # tensor_args collects only the Tensor inputs (positional and keyword);
  # tensor_args_idx maps the original position (int) or kwarg name (str)
  # to its slot in tensor_args so f_wrapper can reassemble the call.
  tensor_args = []
  tensor_args_idx = {}
  # Of the positional arguments, only grab the tensor ones to be passed through
  # the py_func.
  n_args = len(args)
  arg_is_tensor = tuple(map(tensor_util.is_tensor, args))
  for i in range(n_args):
    if arg_is_tensor[i]:
      tensor_args_idx[i] = len(tensor_args)
      tensor_args.append(args[i])
  # We essentially take the tensor kwargs, if any, and add them to the list of
  # positional arguments. The kwargs are then reconstructed inside the py_func.
  #
  # For example, if
  #
  #     args = [Tensor(1), 'foo']
  #     kwargs = {'a': Tensor(2), 'b': 'bar'}
  #
  # Then
  #
  #     tensor_args = (Tensor(1), Tensor(2))
  #     kwarg_keys = ('a', 'b')
  if kwargs:
    kwarg_keys = tuple(kwargs.keys())
    kwarg_is_tensor = {k: tensor_util.is_tensor(kwargs[k]) for k in kwarg_keys}
    for k in kwarg_keys:
      if kwarg_is_tensor[k]:
        tensor_args_idx[k] = len(tensor_args)
        tensor_args.append(kwargs[k])
  else:
    kwarg_keys = ()
  # Set up return dtypes.
  def match_arg_dtype(arg_number):
    # Resolve a MatchDType placeholder against the actual argument.
    arg = args[arg_number]
    if not arg_is_tensor[arg_number]:
      raise ValueError(
          'argument %d was used with MatchDType and must be a tf.Tensor, but '
          'was %s instead' % (arg_number, type(arg)))
    return arg.dtype
  if return_dtypes:
    if isinstance(return_dtypes, MatchDType):
      return_dtypes = match_arg_dtype(return_dtypes.arg_number)
    elif isinstance(return_dtypes, (list, tuple)):
      return_dtypes = tuple(
          match_arg_dtype(a.arg_number) if isinstance(a, MatchDType) else a
          for a in return_dtypes)
    else:
      assert isinstance(return_dtypes, dtypes.DType)
  def f_wrapper(*tensor_args):
    # Rebuild f's original positional/keyword arguments, substituting the
    # tensor values py_func delivered for the recorded slots.
    f_args = tuple(
        tensor_args[tensor_args_idx[i]] if arg_is_tensor[i] else a
        for i, a in enumerate(args))
    f_kwargs = {
        k: tensor_args[tensor_args_idx[k]] if kwarg_is_tensor[k] else kwargs[k]
        for i, k in enumerate(kwarg_keys)
    }
    retval = f(*f_args, **f_kwargs)
    return 1 if use_dummy_return else retval
  return script_ops.py_func(f_wrapper, tensor_args, dtypes.int64
                            if use_dummy_return else return_dtypes)
|
wogsland/QSTK | build/lib.linux-x86_64-2.7/Bin/Data_CSV.py | Python | bsd-3-clause | 3,301 | 0.014541 | #File to read the data from mysql and push into CSV.
# Python imports
import datetime as dt
import csv
import copy
import os
import pickle
# 3rd party imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# QSTK imports
from QSTK.qstkutil import qsdateutil as du
import QSTK.qstkutil.DataEvolved as de
def get_data(ls_symbols, ls_keys):
    '''
    @summary: Gets a data chunk for backtesting
    @param ls_symbols: symbols to use
    @param ls_keys: data keys to fetch (e.g. 'actual_open', 'close')
    @note: More data will be pulled from before and after the limits to ensure
    valid data on the start/enddates which requires lookback/forward
    @return: dict mapping each key in ls_keys to the corresponding data frame
    '''
    print "Getting Data from MySQL"
    # Modify dates to ensure enough data for all features
    # NOTE(review): the date range is hard-coded here; dt_start/dt_end are
    # not parameters despite what the original docstring suggested.
    dt_start = dt.datetime(2005,1,1)
    dt_end = dt.datetime(2012, 8, 31)
    ldt_timestamps = du.getNYSEdays( dt_start, dt_end, dt.timedelta(hours=16) )
    c_da = de.DataAccess('mysql')
    ldf_data = c_da.get_data(ldt_timestamps, ls_symbols, ls_keys)
    # Pair each requested key with its returned frame, in order.
    d_data = dict(zip(ls_keys, ldf_data))
    return d_data
def read_symbols(s_symbols_file):
    """Read a list of symbols, one per line, from a text file.

    :param s_symbols_file: path of the file to read.
    :return: list of symbol strings, trailing newlines removed.
    """
    ls_symbols = []
    # 'with' closes the file even on error (the old code leaked the handle
    # and shadowed the builtin 'file').  rstrip('\n') is also safe when the
    # final line has no trailing newline, where slicing f[:-1] dropped the
    # last character of the last symbol.
    with open(s_symbols_file, 'r') as f_in:
        for s_line in f_in:
            ls_symbols.append(s_line.rstrip('\n'))
    return ls_symbols
def csv_sym(sym, d_data, ls_keys, s_directory):
    """Write the data for one symbol to a Yahoo-style CSV file.

    One column per key in ls_keys (in that order) under the fixed Yahoo
    header; rows are written newest-first.

    :param sym: symbol name; the text before the first '-' is the file name.
    :param d_data: dict mapping key -> DataFrame (dates x symbols).
    :param ls_keys: data keys, in the column order to write.
    :param s_directory: output directory (must end with a path separator).
    """
    # Build one frame with a column per key for this symbol.
    df_sym = None
    for key in ls_keys:
        df_temp = d_data[key].reindex(columns=[sym])
        df_temp = df_temp.rename(columns={sym: key})
        if df_sym is None:
            df_sym = df_temp
        else:
            df_sym = df_sym.join(df_temp, how='outer')
    symfilename = sym.split('-')[0]
    # 'with' guarantees the file is closed even if a write fails.
    with open(s_directory + symfilename + '.csv', 'w') as sym_file:
        sym_file.write("Date,Open,High,Low,Close,Volume,Adj Close \n")
        ldt_timestamps = list(df_sym.index)
        ldt_timestamps.reverse()
        for date in ldt_timestamps:
            date_to_csv = '{:%Y-%m-%d}'.format(date)
            string_to_csv = date_to_csv
            for key in ls_keys:
                string_to_csv = string_to_csv + ',' + str(df_sym[key][date])
            string_to_csv = string_to_csv + '\n'
            sym_file.write(string_to_csv)
def main(s_directory, s_symbols_file):
    """Fetch data for the hard-coded delisted-symbol list and write one CSV
    file per symbol into s_directory.

    :param s_symbols_file: currently unused — the symbol list is inlined
        below (the read_symbols call is commented out).
    """
    #ls_symbols = read_symbols(s_symbols_file)
    ls_symbols = ['ACS-201002','BDK-201003','BJS-201004','BSC-201108','CCT-201111','EQ-200907','JAVA-201002','NCC-200901','NOVL-201104','PBG-201003','PTV-201011','ROH-200904','SGP-200911','SII-201008','WB-200901','WYE-200910','XTO-201006']
    ls_keys = ['actual_open', 'actual_high', 'actual_low', 'actual_close', 'volume', 'close']
    d_data = get_data(ls_symbols, ls_keys)
    # print d_data
    print "Creating CSV files now"
    for sym in ls_symbols:
        print sym
        csv_sym(sym,d_data, ls_keys, s_directory)
    print "Created all CSV files"
if __name__ == '__main__' :
    s_directory = 'MLTData/'
    # NOTE(review): the assignment above is immediately overwritten — output
    # actually goes to $QSDATA/Yahoo/ (raises KeyError if QSDATA is unset).
    s_directory = os.environ['QSDATA'] + '/Yahoo/'
    # Alternative symbol lists; only s_symbols_file3 is used below.
    s_symbols_file1 = 'MLTData/sp5002012.txt'
    s_symbols_file2 = 'MLTData/index.txt'
    s_symbols_file3 = 'MLTData/sp5002008.txt'
    main(s_directory, s_symbols_file3)
mthh/cartogram_geopandas | cartogram_geopandas.py | Python | gpl-2.0 | 1,738 | 0 | # -*- coding: utf-8 -*-
"""
cartogram_geopandas v0.0.0c:
Easy construction of continuous cartogram on a Polygon/MultiPolygon
GeoDataFrame (modify the geometry in place or create a new GeoDataFrame).
Code adapted to fit the geopandas.GeoDataFrame datastructure from
Carson Farmer's code (https://github.com/carsonfarmer/cartogram : former
code in use in 'Cartogram' QGis python plugin). Carson Farmer's code is
partially related to 'pyCartogram.py' from Eric Wolfs.
Algorithm itself based on
{ Dougenik, J. A, N. R. Chrisman, and D. R. Niemeyer. 1985.
"An algorithm to construct continuous cartograms."
Professional Geographer 37:75-81 }
No warranty concerning the result.
Copyright (C) 2013 Carson Farmer, 2015 mthh
"""
from cycartogram import Cartogram
def make_cartogram(geodf, field_name, iterations=5, inplace=False):
    """
    Make a continuous cartogram on a geopandas.GeoDataFrame collection
    of Polygon/MultiPolygon (wrapper to call the core functions
    written in cython).
    :param geopandas.GeoDataFrame geodf: The GeoDataFrame containing the
        geometry and a field to use for the transformation.
    :param string field_name: The name of the field (Series) containing the
        value to use.
    :param integer iterations: The number of iterations to make.
        [default=5]
    :param bool inplace: Append in place if True is set (returns None in
        that case). Otherwise return a new GeoDataFrame with transformed
        geometry, leaving geodf untouched.
        [default=False]
    """
    if inplace:
        # Mutates geodf's geometry directly; nothing is returned.
        crtgm = Cartogram(geodf, field_name, iterations)
        crtgm.make()
    else:
        crtgm = Cartogram(geodf.copy(), field_name, iterations)
        return crtgm.make()
|
spottradingllc/zoom | test/predicate/health_test.py | Python | gpl-2.0 | 1,195 | 0.005021 | import mox
import time
import unittest
from zoom.agent.predicate.health import PredicateHealth
from zoom.common.types import PlatformType
class PredicateHealthTest(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
self.interval = 0.1
def tearDown(self):
self.mox.UnsetStubs()
def test_start(self):
pred = PredicateHealth("test", "echo", self.interval, PlatformType.LINUX)
self.mox.StubOutWithMock(pred, "_run")
pred._run().MultipleTimes()
self.mox.ReplayAll()
print "This test should complete quickly"
pred.start()
pred.start() # should noop
pred.start() # should noop
time.sleep(0.25) # give other thread time to check
pred.stop()
self.mox.VerifyAll()
def test_stop(self):
pred = PredicateHealth("test", "echo", self.interval, PlatformType.LINUX)
self.mox.StubOutWithMock(pred, "_run")
pred._run().MultipleTimes()
self.mox.ReplayAll()
|
pred.start()
| time.sleep(0.25) # give other thread time to check
pred.stop()
pred.stop()
pred.stop()
self.mox.VerifyAll()
|
drmelectronic/Despacho | usb/backend/libusb0.py | Python | bsd-3-clause | 20,780 | 0.001396 | # Copyright (C) 2009-2013 Wander Lairson Costa
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
from ctypes import *
import ctypes.util
import os
import usb.backend
import usb.util
import sys
from usb.core import USBError
from usb._debug import methodtrace
import usb._interop as _interop
import logging
__author__ = 'Wander Lairson Costa'
__all__ = ['get_backend']
_logger = logging.getLogger('usb.backend.libusb0')
# usb.h
# _PC_PATH_MAX is the pathconf() selector for PATH_MAX; _PATH_MAX sizes the
# filename/dirname buffers of the _usb_device/_usb_bus structures below, so
# it must match the value libusb 0.1 was compiled with on each platform.
_PC_PATH_MAX = 4
if sys.platform.find('bsd') != -1 or sys.platform.find('mac') != -1 or \
        sys.platform.find('darwin') != -1:
    _PATH_MAX = 1024
elif sys.platform == 'win32' or sys.platform == 'cygwin':
    _PATH_MAX = 511
else:
    _PATH_MAX = os.pathconf('.', _PC_PATH_MAX)
# libusb-win32 makes all structures packed, while
# default libusb only does for some structures
# _PackPolicy defines the structure packing according
# to the platform.
class _PackPolicy(object):
    # Mixin: subclasses become byte-packed (_pack_ = 1) only on
    # Windows/cygwin, matching libusb-win32's packed structures.
    pass
if sys.platform == 'win32' or sys.platform == 'cygwin':
    _PackPolicy._pack_ = 1
# Data structures
class _usb_descriptor_header(Structure):
    # ctypes mirror of 'struct usb_descriptor_header' from usb.h.
    # NOTE(review): the field is 'blength' (lower-case b) unlike the
    # 'bLength' used by the other structures — kept as-is since callers
    # may rely on the attribute name.
    _pack_ = 1
    _fields_ = [('blength', c_uint8),
                ('bDescriptorType', c_uint8)]
class _usb_string_descriptor(Structure):
    # ctypes mirror of 'struct usb_string_descriptor' from usb.h.
    _pack_ = 1
    _fields_ = [('bLength', c_uint8),
                ('bDescriptorType', c_uint8),
                ('wData', c_uint16)]
class _usb_endpoint_descriptor(Structure, _PackPolicy):
    # ctypes mirror of 'struct usb_endpoint_descriptor' (usb.h), including
    # libusb's trailing 'extra'/'extralen' descriptor buffer.
    _fields_ = [('bLength', c_uint8),
                ('bDescriptorType', c_uint8),
                ('bEndpointAddress', c_uint8),
                ('bmAttributes', c_uint8),
                ('wMaxPacketSize', c_uint16),
                ('bInterval', c_uint8),
                ('bRefresh', c_uint8),
                ('bSynchAddress', c_uint8),
                ('extra', POINTER(c_uint8)),
                ('extralen', c_int)]
class _usb_interface_descriptor(Structure, _PackPolicy):
    # ctypes mirror of 'struct usb_interface_descriptor' (usb.h); 'endpoint'
    # points to an array of bNumEndpoints endpoint descriptors.
    _fields_ = [('bLength', c_uint8),
                ('bDescriptorType', c_uint8),
                ('bInterfaceNumber', c_uint8),
                ('bAlternateSetting', c_uint8),
                ('bNumEndpoints', c_uint8),
                ('bInterfaceClass', c_uint8),
                ('bInterfaceSubClass', c_uint8),
                ('bInterfaceProtocol', c_uint8),
                ('iInterface', c_uint8),
                ('endpoint', POINTER(_usb_endpoint_descriptor)),
                ('extra', POINTER(c_uint8)),
                ('extralen', c_int)]
class _usb_interface(Structure, _PackPolicy):
    # ctypes mirror of 'struct usb_interface': array of alternate settings.
    _fields_ = [('altsetting', POINTER(_usb_interface_descriptor)),
                ('num_altsetting', c_int)]
class _usb_config_descriptor(Structure, _PackPolicy):
    # ctypes mirror of 'struct usb_config_descriptor' (usb.h); 'interface'
    # points to an array of bNumInterfaces interfaces.
    _fields_ = [('bLength', c_uint8),
                ('bDescriptorType', c_uint8),
                ('wTotalLength', c_uint16),
                ('bNumInterfaces', c_uint8),
                ('bConfigurationValue', c_uint8),
                ('iConfiguration', c_uint8),
                ('bmAttributes', c_uint8),
                ('bMaxPower', c_uint8),
                ('interface', POINTER(_usb_interface)),
                ('extra', POINTER(c_uint8)),
                ('extralen', c_int)]
class _usb_device_descriptor(Structure, _PackPolicy):
    # ctypes mirror of 'struct usb_device_descriptor' (usb.h); packed on all
    # platforms to match the wire layout.
    _pack_ = 1
    _fields_ = [('bLength', c_uint8),
                ('bDescriptorType', c_uint8),
                ('bcdUSB', c_uint16),
                ('bDeviceClass', c_uint8),
                ('bDeviceSubClass', c_uint8),
                ('bDeviceProtocol', c_uint8),
                ('bMaxPacketSize0', c_uint8),
                ('idVendor', c_uint16),
                ('idProduct', c_uint16),
                ('bcdDevice', c_uint16),
                ('iManufacturer', c_uint8),
                ('iProduct', c_uint8),
                ('iSerialNumber', c_uint8),
                ('bNumConfigurations', c_uint8)]
class _usb_device(Structure, _PackPolicy):
    # Forward declaration: _fields_ is assigned below because the struct
    # references itself and _usb_bus through POINTERs.
    pass
class _usb_bus(Structure, _PackPolicy):
    # Forward declaration: _fields_ is assigned below because the struct
    # references itself and _usb_device through POINTERs.
    pass
# _fields_ are assigned after the class statements because _usb_device and
# _usb_bus are mutually (and self-) referential through POINTERs.
_usb_device._fields_ = [('next', POINTER(_usb_device)),
                        ('prev', POINTER(_usb_device)),
                        ('filename', c_int8 * (_PATH_MAX + 1)),
                        ('bus', POINTER(_usb_bus)),
                        ('descriptor', _usb_device_descriptor),
                        ('config', POINTER(_usb_config_descriptor)),
                        ('dev', c_void_p),
                        ('devnum', c_uint8),
                        ('num_children', c_ubyte),
                        ('children', POINTER(POINTER(_usb_device)))]
_usb_bus._fields_ = [('next', POINTER(_usb_bus)),
                     ('prev', POINTER(_usb_bus)),
                     ('dirname', c_char * (_PATH_MAX + 1)),
                     ('devices', POINTER(_usb_device)),
                     ('location', c_uint32),
                     ('root_dev', POINTER(_usb_device))]
# Opaque handle returned by usb_open().
_usb_dev_handle = c_void_p
class _DeviceDescriptor:
    """Plain-Python snapshot of one device's descriptor plus its bus address.

    Copies every field out of the ctypes structure so the values outlive
    the underlying libusb device list.
    """
    def __init__(self, dev):
        desc = dev.descriptor
        self.bLength = desc.bLength
        self.bDescriptorType = desc.bDescriptorType
        self.bcdUSB = desc.bcdUSB
        self.bDeviceClass = desc.bDeviceClass
        self.bDeviceSubClass = desc.bDeviceSubClass
        self.bDeviceProtocol = desc.bDeviceProtocol
        self.bMaxPacketSize0 = desc.bMaxPacketSize0
        self.idVendor = desc.idVendor
        self.idProduct = desc.idProduct
        self.bcdDevice = desc.bcdDevice
        self.iManufacturer = desc.iManufacturer
        self.iProduct = desc.iProduct
        self.iSerialNumber = desc.iSerialNumber
        self.bNumConfigurations = desc.bNumConfigurations
        self.address = dev.devnum
        self.bus = dev.bus[0].location
        # libusb 0.1 does not expose the port number.
        self.port_number = None
# Lazily-initialized handle to the loaded libusb-0.1 shared library.
_lib = None
def _load_library():
    """Locate and load the libusb-0.1 shared library via ctypes.

    On cygwin the library name is hard-coded ('cygusb0.dll'); elsewhere
    several candidate names are tried with ctypes.util.find_library.
    Raises OSError on cygwin if loading fails.
    """
    if sys.platform != 'cygwin':
        candidates = ('usb-0.1', 'usb', 'libusb0')
        for candidate in candidates:
            # Workaround for CPython 3.3 issue#16283 / pyusb #14
            if sys.platform == 'win32':
                candidate = candidate + '.dll'
            libname = ctypes.util.find_library(candidate)
            if libname is not None: break
        # NOTE(review): if no candidate is found, libname is None here and
        # CDLL(None) is attempted below — confirm that failure mode is
        # intended.
    else:
        # corner cases
        # cygwin predefines library names with 'cyg' instead of 'lib'
        try:
            return CDLL('cygusb0.dll')
        except:
            _logger.error('Libusb 0 could not be loaded in cygwin', exc_info=True)
            raise OSError('USB library could not be found')
    return CDLL(libname)
def _setup_prototypes(lib):
# usb_dev_handle *usb_open(struct usb_device *dev);
lib.usb_open.argtypes = [POINTER(_usb_device)]
lib.usb_open.restype = _usb_dev_handle
# int usb_close(usb_dev_handle *dev);
lib.usb_close.argtypes = [_usb_dev_handle]
# int usb_get_string(usb_dev_handle *dev,
# int index,
# | int langid,
# |
RPGOne/Skynet | scikit-learn-0.18.1/examples/model_selection/grid_search_digits.py | Python | bsd-3-clause | 2,764 | 0 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()

# To apply a classifier on this data, we need to flatten the images, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target

# Split the dataset in two equal parts: half for model selection, half for
# the final held-out evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.5, random_state=0)

# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
                     'C': [1, 10, 100, 1000]},
                    {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]

scores = ['precision', 'recall']

for score in scores:
    print("# Tuning hyper-parameters for %s" % score)
    print()

    clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
                       scoring='%s_macro' % score)
    clf.fit(X_train, y_train)

    print("Best parameters set found on development set:")
    print()
    print(clf.best_params_)
    print()
    print("Grid scores on development set:")
    print()
    means = clf.cv_results_['mean_test_score']
    stds = clf.cv_results_['std_test_score']
    for mean, std, params in zip(means, stds, clf.cv_results_['params']):
        print("%0.3f (+/-%0.03f) for %r"
              % (mean, std * 2, params))
    print()

    print("Detailed classification report:")
    print()
    print("The model is trained on the full development set.")
    print("The scores are computed on the full evaluation set.")
    print()
    y_true, y_pred = y_test, clf.predict(X_test)
    print(classification_report(y_true, y_pred))
    print()

# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
guardiaocl/guardiaocl-servers | guardiancl/guardianService.py | Python | apache-2.0 | 10,091 | 0.00327 | import getpass
import json
import os
import platform
from datetime import datetime
import urllib3
import psutil
from apscheduler.schedulers.blocking import BlockingScheduler
import console
from utils import Utils
from account import Account
from device import Device
utils = Utils()
system_config = utils.get_system_config()
sched = BlockingScheduler()
def authenticate(serial=None, email=None, password=None):
    """Authenticate against the guardiancl web service.

    Missing arguments are prompted for interactively.

    :return: dict {'serial': ..., 'account': Account} on success, otherwise
        None (implicitly when input is invalid, explicitly on errors).
    """
    serial = raw_input("Serial:") if not serial else serial
    email = raw_input("E-mail:") if not email else email
    password = getpass.getpass("Password:") if not password else password
    if not serial or not email or not password:
        console.info("Serial, E-mail or Password Invalid!")
    else:
        http = urllib3.PoolManager()
        url = system_config['ROUTES'].get('auth')
        params = {'email': email, 'password': password}
        # Alias the Account class; instantiated below on success.
        account = Account
        response = {}
        try:
            response = http.request('POST', url, params, encode_multipart=False)
        except Exception, e:
            console.error("Check your connection", exc_info=True)
            return None
        if response.status == 200:
            data = json.loads(response.data)
            # 'user-key' comes back as a response header, not in the body.
            return {'serial': serial, 'account': account(data['email'], response.getheaders()['user-key'], data['apiKey'])}
        else:
            console.error(response.data, True)
            return None
def create_config_file():
    """Authenticate interactively, look up the device whose serial matches
    the one entered, and write the configuration file for it."""
    auth = authenticate()
    if not auth:
        return
    console.info("Checking device...")
    wanted_serial = auth.get('serial')
    device = None
    for candidate in get_devices(auth.get('account')):
        if candidate.serial == wanted_serial:
            device = candidate
    console.info("Creating config file...")
    generate_config_file(auth.get('account'), device)
def list_configs(opt):
    """Print the configured disk or network-interface entries.

    :param opt: 'disk' or 'inet'.
    """
    config = utils.get_config()
    def fmt_entry(state, name):
        # One "[state] - name" line per configured entry.
        return "[%s] %s %s" % (state, "-", name)
    if opt == 'disk':
        section = config[utils.DISK_SECTION]
        console.info('Disks list...', True)
        entries = [fmt_entry(state, name) for name, state in section.iteritems()]
        console.log('\n'.join(entries))
    if opt == 'inet':
        section = config[utils.INET_SECTION]
        console.info('Net Interface list...', True)
        entries = [fmt_entry(state, name) for name, state in section.iteritems()]
        console.log('\n'.join(entries))
def start_monitor():
    """Start the periodic monitoring loop.

    Writes a pid file, schedules collect_job() at the configured refresh
    interval, and blocks on the scheduler until interrupted.
    """
    console.info("Starting guardiancl monitor...", True)
    config = utils.get_config()
    http = urllib3.PoolManager()
    utils.create_pid_file()
    def collect_job():
        # Gather one snapshot of system metrics and POST it to the service.
        config = utils.get_config()
        disks = config[utils.DISK_SECTION]
        interfaces = config[utils.INET_SECTION]
        account = Account(config[utils.GENERAL_SECTION].get('email'),
                          config[utils.GENERAL_SECTION].get('user_key'),
                          config[utils.GENERAL_SECTION].get('api_key'))
        report = {}
        usage = {}
        net = {}
        if os.name == 'nt':
            report['os'] = platform.system()+"-"+platform.win32_ver()[0]+" "+platform.win32_ver()[2]
            report['arch'] = platform.architecture()[0]
        else:
            report['loadAverage'] = {}
            # NOTE(review): 'not os.name == 'nt'' is always true inside this
            # else branch, and 'loadAverage' is recomputed again further
            # down — this whole inner block looks redundant.
            if not os.name == 'nt':
                for idx, la in enumerate(os.getloadavg()):
                    # NOTE(review): getloadavg() is (1min, 5min, 15min), but
                    # idx == 2 maps to "5" and idx == 1 to "15" — the labels
                    # look swapped; confirm against the service's schema.
                    time_la = "1" if idx == 0 else "5" if idx == 2 else "15"
                    report['loadAverage'][time_la] = "{0:.2f}".format(la)
            if platform.system() == 'Linux':
                report['os'] = platform.linux_distribution()[0]+"-"+platform.linux_distribution()[1]+" "+platform.linux_distribution()[2]
                report['arch'] = platform.architecture()[0]
            else:
                report['os'] = "Mac OS X - "+platform.mac_ver()[0]
                report['arch'] = platform.architecture()[0]
        # Disk usage for every enabled, currently-mounted disk.
        for disk in disks.keys():
            if disks[disk] == utils.ENABLED and check_disk(disk):
                usage_temp = psutil.disk_usage(disk)
                usage[disk] = {'total': usage_temp.total, 'used': usage_temp.used, 'free': usage_temp.free,
                               'percentage': usage_temp.percent}
        # Byte counters for every enabled interface (names lower-cased to
        # match the config keys).
        for interf in interfaces.keys():
            if interfaces[interf] == utils.ENABLED:
                net_temp = dict((k.lower(),v) for k, v in psutil.net_io_counters(pernic=True).iteritems())[interf]
                net[interf] = {'sent': net_temp.bytes_sent, 'recv': net_temp.bytes_recv}
        report['inet'] = net
        report['disks'] = usage
        report['processes'] = {'value': len(psutil.pids())}
        report['loadAverage'] = {}
        if not os.name == 'nt':
            for idx, la in enumerate(os.getloadavg()):
                time_la = "1" if idx == 0 else "5" if idx == 2 else "15"
                report['loadAverage'][time_la] = "{0:.2f}".format(la)
        report['users'] = {'value': len(psutil.users())}
        # Uptime as "H:MM:SS" — the split drops fractional seconds.
        report['uptime'] = str(datetime.now() - datetime.fromtimestamp(psutil.boot_time())).split('.')[0]
        report['kindDevice'] = 3
        api_key = account.api_key
        url = "%s/%s" % (system_config['ROUTES'].get('collect'), config[utils.GENERAL_SECTION].get('serial'))
        params = {'apiKey': api_key, 'data': json.dumps(report)}
        try:
            response = http.request('POST', url, params, {'user-key': account.user_key}, encode_multipart=False)
        except Exception, e:
            console.error("Check your connection")
            return
        if response.status == 200:
            console.info("Information sent...")
        else:
            data = json.loads(response.data)
            console.error(data['status'])
    console.info("Sending informations...")
    sched.add_job(collect_job, 'interval', max_instances=1, seconds=int(config[utils.GENERAL_SECTION].get('refresh_time')))
    try:
        sched.start()
    except (KeyboardInterrupt, SystemExit):
        # Clean up the pid file on Ctrl-C / shutdown.
        utils.remove_pid_file()
        pass
def check_disk(disk):
    """Return True if *disk* is the mount point of a currently mounted
    partition.

    :param disk: mount-point path as stored in the configuration.
    """
    # Generator avoids materializing the intermediate list of mount points
    # the previous implementation built just for a membership test.
    return any(part.mountpoint == disk
               for part in psutil.disk_partitions(all=False))
def get_devices(account):
http = urllib3.PoolManager()
url = system_confi | g['ROUTES'].get('devices')
if account is not None:
try:
response = http.request('GET', url, None, {'user-key': account.user_key})
except Exception, e:
console.error("Check your connection", exc_info=True)
if response.status == 200:
devices = []
for dev i | n json.loads(response.data):
devices.append(Device(dev['serial'], dev['description']))
return devices
else:
data = json.loads(response.data)
console.error(data['status'])
return None
def generate_config_file(account, device):
    """Build and persist the initial configuration for *account*/*device*.

    Every currently mounted disk and every network interface is marked
    "enabled" by default; the refresh interval defaults to '5' seconds.
    Does nothing but log when account or device is None.
    """
    if account is not None and device is not None:
        disks_temp = []
        for part in psutil.disk_partitions(all=False):
            disks_temp.append(part.mountpoint)
        disks = dict([(disk, "enabled") for idx, disk in enumerate(disks_temp)])
        net = dict([(net, "enabled") for idx, net in
                    enumerate(psutil.net_io_counters(pernic=True).keys())])
        general = {'email': account.email, 'user_key': account.user_key, 'serial': device.serial,
                   'api_key': account.api_key, 'refresh_time': '5'}
        configs = {'disks': disks, 'net': net, 'general': general}
        utils.create_config_file(configs)
        console.info("Config file created...")
    else:
        console.info("Account or Device not found...")
def update_config_file(type_conf=utils.GENERAL_SECTION):
config = utils.get_config()
if type_conf == utils.GENERAL_SECTION:
serial_conf = config[utils.GENERAL_SECTION].get('serial')
refresh_time_conf = config[utils.GENERAL_SECTION].get('refresh_time')
email_conf = config[utils.GENERAL_SECTION].get('email')
serial = raw_input("Serial [%s]: " % serial_conf) or serial_conf
refresh_time = raw_input("Refresh Time(seconds) [%s]: " % refresh_time_conf) or refresh_time_conf
while refresh_time < 5:
conso |
sparkslabs/kamaelia_ | Tests/Python/Axon/test_Ipc.py | Python | apache-2.0 | 8,217 | 0.023853 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
# Aim: Full coverage testing of the Ipc classes
#
# Test the module loads
import unittest
from Axon.Ipc import *
class ipc_Test(unittest.TestCase):
    def test_SmokeTest(self):
        "ipc - Should be derived from object."
        instance = ipc()
        self.failUnless(isinstance(instance, object), "IPC objects should also be instances of object.")
class newComponent_Test(unittest.TestCase):
    def test___init__SmokeTest_NoArguments(self):
        "newComponent.__init__ - Should work without problems."
        msg = newComponent()
        self.failUnless(isinstance(msg, ipc), "newComponent should be derived from ipc class")
        self.failUnless(len(msg.components()) == 0, "There should be no components in the message if the constructor was called with no arguments.")
    def test___init__SmokeTest(self):
        "newComponent.__init__ - Groups all the arguments as a tuple of components that need to be activated/added to the run queue. Order is unimportant, scheduler doesn't care."
        expected = ("ba", "da", "bing")
        msg = newComponent(*expected)
        self.failUnless(msg.components() == expected, "Component tuple is not as expected.")
    def test_components(self):
        "newComponent.components - Returns a tuple of components that need to be added to the run queue/activated. Same test as for __init__ as they are counterparts."
        expected = ("ba", "da", "bing")
        msg = newComponent(*expected)
        self.failUnless(msg.components() == expected, "components returned something other than expected.")
class shutdownMicroprocess_Test(unittest.TestCase):
    """Tests for the shutdownMicroprocess ipc message."""
    def test___init__SmokeTest_NoArguments(self):
        "shutdownMicroprocess.__init__ - Should work without problems."
        sm=shutdownMicroprocess()
        self.failUnless(isinstance(sm,ipc), "shutdownMicroprocess should be derived from ipc")
        self.failUnless(sm.microprocesses()==(), "Microprocess tuple not empty as expected.")
    def test___init__SmokeTest(self):
        "shutdownMicroprocess.__init__ - Treats all the arguments as a tuple of microprocesses that need to be shutdown."
        sm=shutdownMicroprocess("ba","da","bing")
        self.failUnless(sm.microprocesses()==("ba","da","bing"), "Stored tuple not as expected.")
    def test_microprocesses(self):
        "shutdownMicroprocess.microprocesses- Returns the list of microprocesses that need to be shutdown. This is essentially the counterpart to the __init__ test."
        sm=shutdownMicroprocess("ba","da","bing")
        self.failUnless(sm.microprocesses()==("ba","da","bing"), "Returned tuple not as expected.")
class notify_Test(unittest.TestCase):
    """Tests for the notify ipc message (caller + payload)."""
    def test_SmokeTest_NoArguments(self):
        "notify.__init__ - Called without arguments fails."
        self.failUnlessRaises(TypeError, notify)
    def test_SmokeTest_MinArguments(self):
        "notify.__init__ - Creates a message from a specific caller with some data payload to notify part of the system of an event."
        n=notify("caller", "payload")
        self.failUnless(isinstance(n, ipc), "Expected notify to be an instance of ipc.")
        self.failUnless(n.object == "payload", "Payload argument not stored in object member.")
        self.failUnless(n.caller == "caller", "Caller argument not stored in caller member.")
class status_Test(unittest.TestCase):
    """Tests for the status ipc message."""
    def test_SmokeTest_NoArguments(self):
        "status.__init__ - Called without arguments fails."
        self.failUnlessRaises(TypeError, status)
    def test_SmokeTest_MinArguments(self):
        "status.__init__ - Stores the status message - for extraction by the recipient of the message. Checks object is instance of ipc."
        s=status("Status message.")
        self.failUnless(isinstance(s, ipc), "status should be derived from ipc.")
        self.failUnless(s.status() == "Status message.", "Status message not stored properly.")
    def test_status(self):
        "status.status - Returns the status message stored inside the status object. Counterpart to __init__ test."
        s=status("Status message.")
        self.failUnless(s.status() == "Status message.", "Status message not stored properly.")
class wouldblock_Test(unittest.TestCase):
    def test_SmokeTest_NoArguments(self):
        "wouldblock.__init__ - Called without arguments fails."
        self.failUnlessRaises(TypeError, wouldblock)
    def test_SmokeTest_MinArguments(self):
        "wouldblock.__init__ - Stores the caller in the wouldblock message. Allows the scheduler to make a decision. Checks wouldblock is a subclass of ipc."
        message = wouldblock(self)
        self.failUnless(isinstance(message, ipc), "wouldblock should be derived from ipc")
        self.failUnless(message.caller == self, "caller not properly set by __init__.")
class producerFinished_Test(unittest.TestCase):
    """Tests for the producerFinished shutdown message."""
    def test_SmokeTest_NoArguments(self):
        "producerFinished.__init__ - Called without arguments defaults to a caller of None, message of None. Checks producerFinished is a subclass of ipc"
        pf=producerFinished()
        self.failUnless(isinstance(pf, ipc), "producerFinished should be an derived from ipc.")
        self.failUnless(pf.caller== None, "caller does not default to None")
        self.failUnless(pf.message == None, "message does not default to None")
    def test_SmokeTest_MinArguments(self):
        "test_SmokeTest.__init__ - Creates a producerFinished message with specified caller & shutdown 'last' message."
        pf = producerFinished("caller", "message")
        self.failUnless(pf.caller == "caller", "caller not set correctly by position.")
        self.failUnless(pf.message == "message", "message not set correctly by position.")
        pf2 = producerFinished(message="message", caller="caller")
        self.failUnless(pf2.caller == "caller", "caller not set correctly by name.")
        self.failUnless(pf2.message == "message", "message not set correctly by name.")
class errorInformation_Test(unittest.TestCase):
    """Tests for the errorInformation message (caller, exception, message)."""
    def test_SmokeTest_NoArguments(self):
        "errorInformation.__init__ - Called without arguments fails - must include caller."
        self.failUnlessRaises(TypeError, errorInformation)
    def test_SmokeTest_MinArguments(self):
        "errorInformation.__init__ - Takes the supplied caller, and creates an errorInformation object. Checks errorInformation object is an instance of ipc."
        ei=errorInformation(self)
        self.failUnless(isinstance(ei, ipc), "errorInformation should be derived from ipc.")
        self.failUnless(ei.caller == self, "Caller is not set properly.")
    def test_SmokeTest_MinSensibleArguments(self):
        "errorInformation.__init__ - An exception & message (any object) in addition to the caller to provide a more meaningful errorInformation message where appropriate. ttbw "
        ei=errorInformation("caller", "exception", "message")
        self.failUnless(ei.caller == "caller", "Caller is not set properly by position.")
        self.failUnless(ei.message == "message", "Caller is not set properly by position.")
        self.failUnless(ei.exception == "exception", "Caller is not set properly by position.")
        ei=errorInformation(exception="exception", message="message", caller = "caller")
        self.failUnless(ei.caller == "caller", "Caller is not set properly by name.")
        self.failUnless(ei.message == "message", "Caller is not set properly by name.")
        self.failUnless(ei.exception == "exception", "Caller is not set properly by name.")
if __na |
wikimedia/pywikibot-wikibase | tests/test_itempage.py | Python | mit | 1,946 | 0 | import unittest
import json
import os
from pywikibase import ItemPage, Claim
try:
| unicode = unicode
except NameError:
basestring = (str, bytes)
class TestItemPage(unittest.TestCase):
def setUp(self):
with open(os.path.join(os.path.split(__file__)[0],
'data', 'Q7251.wd')) as f:
self._content = json.load(f)['entities']['Q7251']
self.item_page = ItemPage()
self.item_page.get(content=self._content)
def test_init_item(self):
self.assertEqual(self.item_page.getID(), 'Q7251')
self.asse | rtRaises(RuntimeError, ItemPage, title='Null')
self.assertRaises(RuntimeError, ItemPage, title='P15')
def test_sitelinks(self):
self.assertEqual(len(self.item_page.sitelinks), 134)
self.assertIn('fawiki', self.item_page.sitelinks)
self.assertNotIn('fa', self.item_page.sitelinks)
self.assertIsInstance(self.item_page.sitelinks['enwiki'], basestring)
def test_add_claim(self):
claim = Claim('P17', datatype='wikibase-item')
claim.setTarget(ItemPage('Q91'))
self.item_page.addClaim(claim)
self.assertIn('P17', self.item_page.claims)
self.assertEqual(len(self.item_page.claims['P17']), 1)
self.assertIsInstance(self.item_page.claims['P17'][0], Claim)
def test_remove_claim(self):
claim = self.item_page.claims['P31'][0]
old_claims = self.item_page.claims.copy()
self.item_page.removeClaims(claim)
self.assertNotEqual(self.item_page.claims, old_claims)
self.assertNotIn('P31', self.item_page.claims)
def test_badges(self):
self.assertEqual(len(self.item_page.badges), 4)
self.assertEqual(self.item_page.badges['enwiki'], ['Q17437798'])
self.assertIn('enwiki', self.item_page.badges)
self.assertNotIn('fawiki', self.item_page.badges)
if __name__ == '__main__':
unittest.main()
|
p-montero/py-ans | class9/exercise2/mytest/__init__.py | Python | apache-2.0 | 97 | 0 | f | rom mytes | t.simple import func1
from mytest.whatever import func2
from mytest.world import func3
|
wuqize/FluentPython | chapter8/_weakref.py | Python | lgpl-3.0 | 277 | 0.025271 | # -*- coding: utf-8 -*-
"""
Created | on Wed May 10 23:08:27 2017
"""
import time
import weakref
s1 = {1, 2, 3}
s2 = s1
def bye():
print('Gone with the wind...')
ender = weakref.finalize(s1, bye)
print ender.alive
def s1
print ender.aliv | e
s2 = 'spam'
print ender.alive |
MuckRock/muckrock | muckrock/organization/views.py | Python | agpl-3.0 | 4,827 | 0.000829 | """
Views for the organization application
"""
# Django
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.cache import cache
from django.db.models.query import Prefetch
from django.http.response import Http404
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils.http import is_safe_url
from django.views.generic import DetailView, RedirectView
# MuckRock
from muckrock.core.views import MROrderedListView
from muckrock.foia.models import FOIARequest
from muckrock.organization.models import Organization
class OrganizationListView(MROrderedListView):
"""List of organizations"""
model = Organization
template_name = "organization/list.html"
sort_map = {"name": "name"}
def get_queryset(self):
"""Filter out individual orgs and private orgs for non-staff"""
queryset = (
super(OrganizationListView, self).get_queryset().filter(individual=False)
)
if not self.request.user.is_staff:
queryset = queryset.filter(private=False)
return queryset
class OrganizationDetailView(DetailView):
"""Organization detail view"""
queryset = Organization.objects.filter(individual=False).prefetch_related(
Prefetch("users", queryset=User.objects.select_related("profile"))
)
template_name = "organization/detail.html"
def get_object(self, queryset=None):
"""Get the org"""
org = super(OrganizationDetailView, self).get_object(queryset=queryset)
user = self.request.user
is_member = user.is_authenticated and org.has_member(user)
if org.private and not is_member and not user.is_staff:
raise Http404
return org
def get_ | context_data(self, **kwargs):
"""Add extra context data"""
context = super(OrganizationDetailView, self).get_context_data(**kwargs)
organization = context["organization"]
user = self.request.user
context["is_staff"] = user.is_staff
if user.is_authentic | ated:
context["is_admin"] = organization.has_admin(user)
context["is_member"] = organization.has_member(user)
else:
context["is_owner"] = False
context["is_member"] = False
requests = FOIARequest.objects.organization(organization).get_viewable(user)
context["requests"] = {
"count": requests.count(),
"filed": requests.order_by("-composer__datetime_submitted")[:10],
"completed": requests.get_done().order_by("-datetime_done")[:10],
}
context["members"] = organization.users.all()
if organization.requests_per_month > 0:
context["requests_progress"] = (
float(organization.monthly_requests) / organization.requests_per_month
) * 100
else:
context["requests_progress"] = 0
context["sidebar_admin_url"] = reverse(
"admin:organization_organization_change", args=(organization.pk,)
)
return context
class OrganizationSquareletView(RedirectView):
"""Organization squarelet view redirects to squarelet"""
def get_redirect_url(self, *args, **kwargs):
"""Different URL for individual orgs"""
slug = kwargs["slug"]
organization = get_object_or_404(Organization, slug=slug)
if organization.individual:
user = User.objects.get(profile__uuid=organization.uuid)
return "{}/users/{}/".format(settings.SQUARELET_URL, user.username)
else:
return "{}/organizations/{}/".format(settings.SQUARELET_URL, slug)
@login_required
def activate(request):
"""Activate one of your organizations"""
redirect_url = request.POST.get("next", "/")
redirect_url = (
redirect_url if is_safe_url(redirect_url, allowed_hosts=None) else "/"
)
try:
organization = request.user.organizations.get(
pk=request.POST.get("organization")
)
request.user.profile.organization = organization
# update the navbar header cache
cache.set(
"sb:{}:user_org".format(request.user.username),
organization,
settings.DEFAULT_CACHE_TIMEOUT,
)
messages.success(
request,
"You have switched your active organization to {}".format(
organization.display_name
),
)
except Organization.DoesNotExist:
messages.error(request, "Organization does not exist")
except ValueError:
messages.error(request, "You are not a member of that organization")
return redirect(redirect_url)
|
pitunti/alfaPitunti | plugin.video.alfa/servers/zippyshare.py | Python | gpl-3.0 | 1,478 | 0.006089 | # -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
result = False
message = ''
try:
error_message_file_not_exists = 'File does not exist on this server'
error_message_file_deleted = 'File has expired and does not exi | st anymore on this server'
data = httptools.downloadpage(page_url).data
if error_message_file_not_exists in data:
message = 'File does not exist.'
elif error_message_file_deleted in data:
message = 'File deleted.'
else:
result = True
except Exception as ex:
message = ex.message
return result, message
def get_video_url(page_ | url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
match = re.search('(.+)/v/(\w+)/file.html', page_url)
domain = match.group(1)
patron = 'getElementById\(\'dlbutton\'\).href\s*=\s*(.*?);'
media_url = scrapertools.find_single_match(data, patron)
numbers = scrapertools.find_single_match(media_url, '\((.*?)\)')
url = media_url.replace(numbers, "'%s'" % eval(numbers))
url = eval(url)
mediaurl = '%s%s' % (domain, url)
extension = "." + mediaurl.split('.')[-1]
video_urls.append([extension + " [zippyshare]", mediaurl])
return video_urls
|
davygeek/vitess | test/backup_only.py | Python | apache-2.0 | 14,271 | 0.008689 | #!/usr/bin/env python
# Copyright 2017 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import unittest
import datetime
import MySQLdb
import environment
import tablet
import vtbackup
import utils
from mysql_flavor import mysql_flavor
use_mysqlctld = False
use_xtrabackup = False
stream_mode = 'tar'
tablet_master = None
tablet_replica1 = None
tablet_replica2 = None
backup_tablet = None
xtrabackup_args = []
new_init_db = ''
db_credentials_file = ''
def setUpModule():
global xtrabackup_args
xtrabackup_args = ['-backup_engine_implementation',
'xtrabackup',
'-xtrabackup_stream_mode',
stream_mode,
'-xtrabackup_user=vt_dba',
'-xtrabackup_backup_flags',
'--password=VtDbaPass']
global new_init_db, db_credentials_file
global tablet_master, tablet_replica1, tablet_replica2, backup_tablet
tablet_master = tablet.Tablet(use_mysqlctld=use_mysqlctld,
vt_dba_passwd='VtDbaPass')
tablet_replica1 = tablet.Tablet(use_mysqlctld=use_mysqlctld,
vt_dba_passwd='VtDbaPass')
tablet_replica2 = tablet.Tablet(use_mysqlctld=use_mysqlctld,
vt_dba_passwd='VtDbaPass')
backup_tablet = vtbackup.Vtbackup(vt_dba_passwd='VtDbaPass')
try:
environment.topo_server().setup()
credentials = {
'vt_dba': ['VtDbaPass'],
'vt_app': ['VtAppPass'],
'vt_allprivs': ['VtAllprivsPass'],
'vt_repl': ['VtReplPass'],
'vt_filtered': ['VtFilteredPass'],
}
db_credentials_file = environment.tmproot+'/db_credentials.json'
with open(db_credentials_file, 'w') as fd:
fd.write(json.dumps(credentials))
# Determine which column is used for user passwords in this MySQL version.
proc = tablet_master.init_mysql()
if use_mysqlctld:
tablet_master.wait_for_mysqlctl_socket()
else:
utils.wait_procs([proc])
try:
tablet_master.mquery('mysql', 'select password from mysql.user limit 0',
user='root')
password_col = 'password'
except MySQLdb.DatabaseError:
password_col = 'authentication_string'
utils.wait_procs([tablet_master.teardown_mysql()])
tablet_master.remove_tree(ignore_options=True)
# Create a new init_db.sql file that sets up passwords for all users.
# Then we use a db-credentials-file with the passwords.
new_init_db = environment.tmproot + '/init_db_with_passwords.sql'
with open(environment.vttop + '/config/init_db.sql') as fd:
init_db = fd.read()
with open(new_init_db, ' | w') as fd:
fd.write(init_db)
fd.write(mysql_flavor().change_passwords(password_col))
logging.debug("initilizing mysql %s",str(datetime.datetime.now()))
# start mysql instance external to the test
setup_procs = [
tablet_master.init_mysql(init_db=new_init_db,
extra_args=['-db-credentials-file',
db_credentials_file]),
tablet_replica1.init_mysql(init_db=new_init_db,
| extra_args=['-db-credentials-file',
db_credentials_file]),
tablet_replica2.init_mysql(init_db=new_init_db,
extra_args=['-db-credentials-file',
db_credentials_file])
]
if use_mysqlctld:
tablet_master.wait_for_mysqlctl_socket()
tablet_replica1.wait_for_mysqlctl_socket()
tablet_replica2.wait_for_mysqlctl_socket()
else:
utils.wait_procs(setup_procs)
logging.debug("done initilizing mysql %s",str(datetime.datetime.now()))
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
teardown_procs = [
tablet_master.teardown_mysql(extra_args=['-db-credentials-file',
db_credentials_file]),
tablet_replica1.teardown_mysql(extra_args=['-db-credentials-file',
db_credentials_file]),
tablet_replica2.teardown_mysql(extra_args=['-db-credentials-file',
db_credentials_file])
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
tablet_master.remove_tree()
tablet_replica1.remove_tree()
tablet_replica2.remove_tree()
backup_tablet.remove_tree()
class TestBackup(unittest.TestCase):
def setUp(self):
for t in tablet_master, tablet_replica1:
t.create_db('vt_test_keyspace')
def tearDown(self):
for t in tablet_master, tablet_replica1, tablet_replica2:
t.kill_vttablet()
tablet.Tablet.check_vttablet_count()
environment.topo_server().wipe()
for t in [tablet_master, tablet_replica1, tablet_replica2]:
t.reset_replication()
t.set_semi_sync_enabled(master=False, slave=False)
t.clean_dbs()
for backup in self._list_backups():
self._remove_backup(backup)
def _init_tablets(self,init=True,start=True):
xtra_args = ['-db-credentials-file', db_credentials_file]
if use_xtrabackup:
xtra_args.extend(xtrabackup_args)
tablet_master.init_tablet('replica', 'test_keyspace', '0', start=start,
supports_backups=True,
extra_args=xtra_args)
tablet_replica1.init_tablet('replica', 'test_keyspace', '0', start=start,
supports_backups=True,
extra_args=xtra_args)
if init:
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
tablet_master.tablet_alias])
_create_vt_insert_test = '''create table vt_insert_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB'''
def _insert_data(self, t, index):
"""Add a single row with value 'index' to the given tablet."""
t.mquery(
'vt_test_keyspace',
"insert into vt_insert_test (msg) values ('test %s')" %
index, write=True)
def _check_data(self, t, count, msg):
"""Check that the specified tablet has the expected number of rows."""
timeout = 10
while True:
try:
result = t.mquery(
'vt_test_keyspace', 'select count(*) from vt_insert_test')
if result[0][0] == count:
break
except MySQLdb.DatabaseError:
# ignore exceptions, we'll just timeout (the tablet creation
# can take some time to replicate, and we get a 'table vt_insert_test
# does not exist exception in some rare cases)
logging.exception('exception waiting for data to replicate')
timeout = utils.wait_step(msg, timeout)
def _restore(self, t, tablet_type='replica',wait_for_state='SERVING'):
"""Erase mysql/tablet dir, then start tablet with restore enabled."""
logging.debug("restoring tablet %s",str(datetime.datetime.now()))
self._reset_tablet_dir(t)
xtra_args = ['-db-credentials-file', db_credentials_file]
if use_xtrabackup:
xtra_args.extend(xtrabackup_args)
t.start_vttablet(wait_for_state=wait_for_state,
init_tablet_type=tablet_type,
init_keyspace='test_keyspace',
init_shard='0',
supports_backups=True,
extra_args=xtra_args)
logging. |
benagricola/exabgp | lib/exabgp/reactor/daemon.py | Python | bsd-3-clause | 4,987 | 0.039503 | # encoding: utf-8
"""
daemon.py
Created by Thomas Mangin on 2011-05-02.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
import os
import sys
import pwd
import errno
import socket
from exabgp.configuration.environment import environment
from exabgp.logger import Logger
MAXFD = 2048
class Daemon (object):
def __init__ (self, reactor):
self.pid = environment.settings().daemon.pid
self.user = environment.settings().daemon.user
self.daemonize = environment.settings().daemon.daemonize
self.umask = environment.settings().daemon.umask
self.logger = Logger()
self.reactor = reactor
os.chdir('/')
os.umask(self.umask)
def check_pid (self,pid):
if pid < 0: # user input error
return False
if pid == 0: # all processes
return False
try:
os.kill(pid, 0)
return True
except OSError as err:
if err.errno == errno.EPERM: # a process we were denied access to
return True
if err.errno == errno.ESRCH: # No such process
return False
# should never happen
return False
def savepid (self):
self._saved_pid = False
if not self.pid:
return True
ownid = os.getpid()
flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
mode = ((os.R_OK | os.W_OK) << 6) | (os.R_OK << 3) | os.R_OK
try:
fd = os.open(self.pid,flags,mode)
except OSError:
try:
pid = open(self.pid,'r').readline().strip()
if self.check_pid(int(pid)):
self.logger.daemon("PIDfile already exists and program still running %s" % self.pid)
return False
else:
# If pid is not running, reopen file without O_EXCL
fd = os.open(self.pid,flags ^ os.O_EXCL,mode)
except (OSError,IOError,ValueError):
pass
try:
f = os.fdopen(fd,'w')
line = "%d\n" % ownid
f.write(line)
f.close()
self._saved_pid = True
except IOError:
self.logger.daemon("Can not create PIDfile %s" % self.pid,'warning')
return False
self.logger.daemon("Created PIDfile %s with value %d" % (self.pid,ownid),'warning')
return True
def removepid (self):
if not self.pid or not self._saved_pid:
return
try:
os.remove(self.pid)
except OSError,exc:
if exc.errno == errno.ENOENT:
pass
else:
self.logger.daemon("Can not remove PIDfile %s" % self.pid,'error')
return
self.logger.daemon("Removed PIDfile %s" % self.pid)
def drop_privileges (self):
"""return true if we are left with insecure privileges"""
# os.name can be ['posix', 'nt', 'os2', 'ce', 'java', 'riscos']
if os.name not in ['posix',]:
return True
uid = os.getuid()
gid = os.getgid()
if uid and gid:
return True
try:
user = pwd.getpwnam(self.user)
nuid = int(user.pw_uid)
ngid = int(user.pw_gid)
except KeyError:
return False
# not sure you can change your gid if you do not have a pid of zero
try:
# we must change the GID first otherwise it may fail after change UID
if not gid:
os.setgid(ngid)
if not uid:
os.setuid(nuid)
| cuid = os.getuid()
ceid = os.geteuid()
cgid = os.getgid()
if cuid < 0:
cuid += (1 << 32)
if cgid < 0:
cgid += (1 << 32)
if ceid < 0:
ceid += (1 << 32)
if nuid != cuid or nuid != ceid or ngid != cgid:
return False
except OSError:
return False
return True
def _is_socket ( | self, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)
except ValueError:
# The file descriptor is closed
return False
try:
s.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)
except socket.error,exc:
# It is look like one but it is not a socket ...
if exc.args[0] == errno.ENOTSOCK:
return False
return True
def daemonise (self):
if not self.daemonize:
return
log = environment.settings().log
if log.enable and log.destination.lower() in ('stdout','stderr'):
self.logger.daemon('ExaBGP can not fork when logs are going to %s' % log.destination.lower(),'critical')
return
def fork_exit ():
try:
pid = os.fork()
if pid > 0:
os._exit(0)
except OSError,exc:
self.logger.reactor('Can not fork, errno %d : %s' % (exc.errno,exc.strerror),'critical')
# do not detach if we are already supervised or run by init like process
if self._is_socket(sys.__stdin__.fileno()) or os.getppid() == 1:
return
fork_exit()
os.setsid()
fork_exit()
self.silence()
def silence (self):
# closing more would close the log file too if open
maxfd = 3
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError:
pass
os.open("/dev/null", os.O_RDWR)
os.dup2(0, 1)
os.dup2(0, 2)
# import resource
# if 'linux' in sys.platform:
# nofile = resource.RLIMIT_NOFILE
# elif 'bsd' in sys.platform:
# nofile = resource.RLIMIT_OFILE
# else:
# self.logger.daemon("For platform %s, can not close FDS before forking" % sys.platform)
# nofile = None
# if nofile:
# maxfd = resource.getrlimit(nofile)[1]
# if (maxfd == resource.RLIM_INFINITY):
# maxfd = MAXFD
# else:
# maxfd = MAXFD
|
317070/ppbe-finance | finance/tests.py | Python | unlicense | 725 | 0.008276 | from django.test import TestCase
from finance.models import Banking_Account
class IBANTestCase(TestCase):
def setUp(self):
pass
def test_iban_converter(self):
"""BBAN to IBAN conversion"""
self.assertEqual(Banking_Account.convertBBANToIBAN("091-0002777-90"), 'BE3409100 | 0277790')
self.assertEqual(Banking_Account.convertBBANToIBAN("679-2005502-27"), 'BE48679200550227')
self.assertEqual(Banking_Account.isBBAN("679-2005502-27"), True)
self.assertEqual(Banking_Account.isBBAN('BE48679200550227'), False)
| self.assertEqual(Banking_Account.isIBAN("679-2005502-27"), False)
self.assertEqual(Banking_Account.isIBAN('BE48679200550227'), True)
|
takearest118/coconut | handlers/v1/invitation.py | Python | gpl-3.0 | 2,604 | 0.00192 | # -*- coding: utf-8 -*
from tornado.web import HTTPError
from common.decorators import parse_argument, app_auth_async
from handlers.base import JsonHandler, WSHandler
from models.invitation import InvitationModel
from models.admin import AdminModel
from models.notification import NotificationModel
class SubmitHandler(JsonHandler):
@app_auth_async
async def put(self, *args, **kwargs):
desk_number = self.json_d | ecoded_body.get('desk_number', None)
if not isinstance(desk_number, int):
raise HTTPError(400, 'invalid desk_number')
mobile_number = self.json_decoded_body.get('mobile_number', None)
if not mobile_number or len(mobile_number) == 0:
raise HTTPError(400, 'invalid mobile_number(+821022223333)')
target_admin = await AdminModel.find_one({'desk_number': desk_number})
if not target_admin:
| raise HTTPError(400, 'not exist desk number')
query = {
'mobile_number': mobile_number
}
invitation = await InvitationModel.find_one(query)
if not invitation:
raise HTTPError(400, 'not exist mobile number')
# save notification
notification = NotificationModel(raw_data=dict(
admin_oid=target_admin['_id'],
type='request_auth',
message='requested %s' % invitation['mobile_number'],
data=invitation
))
await notification.insert()
ws_data = dict(
admin_oid=str(target_admin['_id']),
_id=str(invitation['_id']),
name=invitation['name'],
mobile_number=invitation['mobile_number'],
assignee=invitation['assignee'],
type=invitation['type'],
fee=invitation['fee'],
gender=invitation['gender'],
email=invitation['email'],
group=invitation['group'],
entered=invitation['entered'],
birthday=invitation['birthday'],
updated_at=int(invitation['updated_at'].strftime('%s')),
created_at=int(invitation['created_at'].strftime('%s')))
WSHandler.write_to_clients(ws_data)
self.response['data'] = invitation
self.write_json()
class AdminListHandler(JsonHandler):
@app_auth_async
@parse_argument([('start', int, 0), ('size', int, 10), ])
async def get(self, *args, **kwargs):
parsed_args = kwargs.get('parsed_args')
result = await AdminModel.find(skip=parsed_args['start'], limit=parsed_args['size'])
self.response['data'] = result
self.write_json() |
heidtn/quadquad | quadquad_hardware/scripts/servo_interface.py | Python | gpl-3.0 | 2,668 | 0.037856 | #!/usr/bin/env python
import rospy
from std_msgs.msg import Header
from quadquad_hardware.msg import QuadServos
import serial
from serial.tools import list_ports
import sys
ser = None
servomap = {
'BLHip': [0,-1],
'BRHip': [2, 1],
'FLHip': [5,-1],
'FRHip': [6, 1],
'BLLeg': [1, 1],
'BRLeg': [3,-1],
'FLLeg': [4,-1],
'FRLeg': [7, 1]
}
speed = 20
def set_speed(n, speed):
#Quick check that things are in range
if speed > 127 or speed <0:
speed=1
print "WARNING: Speed should be between 0 and 127. Setting speed to 1..."
speed=int(speed)
#set speed (needs 0x80 as first byte, 0x01 as the second, 0x01 is for speed, 0 for servo 0, and 127 for max speed)
bud=chr(0x80)+chr(0x01)+chr(0x01)+chr(n)+chr(speed)
ser.write(bud)
def set_pos(n, angle, direction):
#Check that things are in range
if angle > 180 or angle <0:
angle=90.0
rospy.loginfo("WARNING: Bad angle. Setting angle to 90 degrees to be safe...")
#Check direction of limb
if direction == -1:
angle = 180 - angle
#Valid range is 500-5500
offyougo=int(5000*angle/180)+500
#Get the lowest 7 bits
byteone=offyougo&127
#Get the highest 7 bits
bytetwo=(offyougo-(offyougo&127))/128
#move to an absolute position in 8-bit mode (0x04 for the mode,
#0 for the servo, 0-255 for the position (spread over two bytes))
bud=chr(0x80)+chr(0x01)+chr(0x04)+chr(n)+chr(bytetwo)+chr(byteone)
ser.write(bud)
def initiate_serial():
global ser
ports = list_ports.comports()
if(len(ports) == 0):
print("no com ports found")
rospy.logerr("ERROR: no com ports found")
raise
port = ports[0].device
port = '/dev/ttyS0' #rpi serial port doesn't enumerate properly on pi3
ser = serial.Serial(port)
print("using port: ", port)
ser.baudrate = 38400
for i in servomap:
set_speed(servomap[i][0], speed)
def handle_msg(servoMsg):
rospy.loginfo(rospy.get_caller_id() + "new servo command")
set_pos(servomap['BLHip'][0 | ], servoMsg.BLHip, servomap['BLHip'][1])
set_pos(servomap['BRHip'][0], servoMsg.BRHip, servomap['BRHip'][1])
set_pos(servomap['FLHip'][0], servoMsg.FLHip, servomap['FLHip'][1])
set_pos(servomap['FRHip'][0], servoMsg.FRHip, servomap['FRHip'][1])
set_pos(servomap['BLLeg'][0], ser | voMsg.BLLeg, servomap['BLLeg'][1])
set_pos(servomap['BRLeg'][0], servoMsg.BRLeg, servomap['BRLeg'][1])
set_pos(servomap['FLLeg'][0], servoMsg.FLLeg, servomap['FLLeg'][1])
set_pos(servomap['FRLeg'][0], servoMsg.FRLeg, servomap['FRLeg'][1])
def create_listener_node():
rospy.init_node('quad_servo_controller')
rospy.Subscriber('servo_controller', QuadServos, handle_msg)
rospy.spin()
ser.close()
if __name__ == "__main__":
initiate_serial()
create_listener_node()
|
pqtoan/mathics | mathics/builtin/natlang.py | Python | gpl-3.0 | 48,237 | 0.002218 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Natural language functions
"""
# PYTHON MODULES USED IN HERE
# spacy: everything related to parsing natural language
# nltk: functions using WordNet
# pattern: finding inflections (Pluralize[] and WordData[])
# langid, pycountry: LanguageIdentify[]
# pyenchant: SpellingCorrectionList[]
# If a module is not installed, all functions will gracefully alert the user to the missing functionality/packages.
# All modules above (except for "pattern", see below) are available in Python 2 and Python 3 versions.
# ADDING MORE LANGUAGES TO OMW
# In order to use the Extended Open Multilingual Wordnet with NLTK and use even more languages, you need to install
# them manually. Go to http://compling.hss.ntu.edu.sg/omw/summx.html, download the data, and then create a new folder
# under $HOME/nltk_data/corpora/omw/your_language where you put the file from wiki/wn-wikt-your_language.tab, and
# rename it to wn-data-your_language.tab.
# PATTERN MODULE
# This module uses "pattern" for various tasks (http://www.clips.ua.ac.be/pattern). Pattern currently only supports
# Python 2, but there exists for Python 3 an official, though preliminary version: https://github.com/pattern3/pattern,
# which is not in pip, but can be installed manually. on OS X make sure to "pip install Pillow lxml" and after
# installing, uncompress pattern.egg into folder. you may also need to run "export STATIC_DEPS=true" before you run
# setup.py
# PYENCHANT MODULE
# "pyenchant" OS X python 3 fix: patch enchant/_enchant.py:
# prefix_dir.contents = c_char_p(e_dir.encode('utf8'))
# PORTABLE INSTALLATION
# for nltk, use the environment variable NLTK_DATA to specify a custom data path (instead of $HOME/.nltk).
# for spacy, use SPACY_DATA; the latter is a custom Mathics variable.
from mathics.builtin.base import Builtin, MessageException
from mathics.builtin.randomnumbers import RandomEnv
from mathics.builtin.codetables import iso639_3
from mathics.builtin.strings import to_regex, anchor_pattern
from mathics.core.expression import Expression, String, Integer, Real, Symbol, strip_context
import os
import re
import itertools
from itertools import chain
import heapq
import math
def _parse_nltk_lookup_error(e):
m = re.search("Resource '([^']+)' not found\.", str(e))
if m:
return m.group(1)
else:
return 'unknown'
def _make_forms():
forms = {
'Word': lambda doc: (token for token in doc),
'Sentence': lambda doc: (sent for sent in doc.sents),
'Paragraph': lambda doc: _fragments(doc, re.compile(r"^[\n][\n]+$")),
'Line': lambda doc: _fragments(doc, re.compile(r"^[\n]$")),
'URL': lambda doc: (token for token in doc if token.orth_.like_url()),
'EmailAddress': lambda doc: (token for token in doc if token.orth_.like_email()),
}
def filter_named_entity(label):
def generator(doc):
for ent in doc.ents:
if ent.label == label:
yield ent
return generator
def filter_pos(pos):
def generator(doc):
for token in doc:
if token.pos == pos:
yield token
return generator
for name, symbol in _symbols.items():
forms[name] = filter_named_entity(symbol)
for tag, names in _pos_tags.items():
name, phrase_name = names
forms[name] = filter_pos(tag)
return forms
# the following two may only be accessed after_WordNetBuiltin._load_wordnet has
# been called.
_wordnet_pos_to_type = {}
_wordnet_type_to_pos = {}
try:
import nltk
def _init_nltk_maps():
_wordnet_pos_to_type.update({
nltk.corpus.wordnet.VERB: 'Verb',
nltk.corpus.wordnet.NOUN: 'Noun',
nltk.corpus.wordnet.ADJ: 'Adjective',
nltk.corpus.wordnet.ADJ_SAT: 'Adjective',
nltk.corpus.wordnet.ADV: 'Adverb',
})
_wordnet_type_to_pos.update({
'Verb': [nltk.corpus.wordnet.VERB],
'Noun': [nltk.corpus.wordnet.NOUN],
'Adjective': [nltk.corpus.wordnet.ADJ, nltk.corpus.wordnet.ADJ_SAT],
'Adverb': [nltk.corpus.wordnet.ADV],
})
except ImportError:
pass
try:
import spacy
from spacy.tokens import Span
# Part of speech tags and their public interface names in Mathics
# see http://www.mathcs.emory.edu/~choi/doc/clear-dependency-2012.pdf
_pos_tags = {
spacy.parts_of_speech.ADJ: ('Adjective', ''),
spacy.parts_of_speech.ADP: ('Preposition', 'Prepositional Phrase'),
spacy.parts_of_speech.ADV: ('Adverb', ''),
spacy.parts_of_speech.CONJ: ('Conjunct', ''),
spacy.parts_of_speech.DET: ('Determiner', ''),
spacy.parts_of_speech.INTJ: ('Interjection', ''),
spacy.parts_of_speech.NOUN: ('Noun', 'Noun Phrase'),
spacy.parts_of_speech.NUM: ('Number', ''),
spacy.parts_of_speech.PART: ('Particle', ''),
spacy.parts_of_speech.PRON: ('Pronoun', ''),
spacy.parts_of_speech.PROPN: ('Proposition', ''),
spacy.parts_of_speech.PUNCT: ('Punctuation', ''),
spacy.parts_of_speech.SCONJ: ('Sconj', ''),
spacy.parts_of_speech.SYM: ('Symbol', ''),
spacy.parts_of_speech.VERB: ('Verb', 'Verb Phrase'),
spacy.parts_of_speech.X: ('X', ''),
spacy.parts_of_speech.EOL: ('EOL', ''),
spacy.parts_of_speech.SPACE: ('Space', ''),
}
# Mathics named entitiy names and their corresponding constants in spacy.
_symbols = {
'Person': spacy.symbols.PERSON,
'Company': spacy.symbols.ORG,
'Quantity': spacy.symbols.QUANTITY,
'Number': spacy.symbols.CARDINAL,
'CurrencyAmount': spacy.symbols.MONEY,
'Country': spacy.symbols.GPE, # also includes cities and states
'City': spacy.symbols.GPE, # also includes countries and states
}
# forms are everything one can use in TextCases[] or TextPosition[].
_forms = _make_forms()
except ImportError:
_pos_tags = {}
_symbols = {}
_forms = {}
def _merge_dictionaries(a, b):
c = a.copy()
c.update(b)
return c
def _position(t):
if isinstance(t, Span):
l = t.doc[t.start]
r = t.doc[t.end - 1]
return 1 + l.idx, r.idx + len(r.text)
| else:
return 1 + t.idx, t.idx + len(t.text)
def _fragments(doc, sep):
    """Yield Spans of consecutive tokens from *doc*, split at tokens whose
    text matches the compiled regex *sep*.

    Separator tokens are not included in the yielded fragments; a trailing
    fragment is yielded only if it is non-empty.
    """
    # NOTE: "token" was split in two by a stray dump delimiter in the
    # original; restored here without behavior change.
    start = 0
    for i, token in enumerate(doc):
        if sep.match(token.text):
            yield Span(doc, start, i)
            start = i + 1
    end = len(doc)
    if start < end:
        yield Span(doc, start, end)
class _SpacyBuiltin(Builtin):
requires = (
'spacy',
)
options = {
'Language': '"English"',
}
messages = {
'runtime': 'Spacy gave the following error: ``',
'lang': 'Language "`1`" is currently not supported with `2`[].',
}
_language_codes = {
'English': 'en',
'German': 'de',
}
_spacy_instances = {}
    def _load_spacy(self, evaluation, options):
        """Return a (cached) spacy pipeline for the 'Language' option.

        Emits a 'lang' message and returns None if the language is not in
        ``_language_codes``; emits a 'runtime' message and returns None if
        spacy fails to load its model data.
        """
        language_code = None
        language_name = self.get_option(options, 'Language', evaluation)
        if language_name is None:
            language_name = String('Undefined')
        if isinstance(language_name, String):
            language_code = _SpacyBuiltin._language_codes.get(language_name.get_string_value())
        if not language_code:
            evaluation.message(self.get_name(), 'lang', language_name, strip_context(self.get_name()))
            return None
        # Models are expensive to load, so cache one pipeline per language
        # code for the lifetime of the process (shared across instances).
        instance = _SpacyBuiltin._spacy_instances.get(language_code)
        if instance:
            return instance
        try:
            # SPACY_DATA points at a custom model directory; 'via' is the
            # spacy 1.x data-path keyword — presumably this code targets
            # spacy 1.x (TODO confirm; the argument was removed in 2.x).
            if 'SPACY_DATA' in os.environ:
                instance = spacy.load(language_code, via=os.environ['SPACY_DATA'])
            else:
                instance = spacy.load(language_code)
            _SpacyBuiltin._spacy_instances[language_code] = instance
            return instance
        except RuntimeError as e:
            evaluation.message(self.get_name(), 'runtime', str(e))
            return None
def _nlp(self, text, evaluation, options):
nlp = self._l |
hustlzp/Flask-Boost | flask_boost/project/application/controllers/account.py | Python | mit | 1,215 | 0 | # coding: utf-8
from flask import render_template, Blueprint, redirect, request, url_for
from ..forms import SigninForm, SignupForm
from ..utils.account import signin_user, signout_user
from ..utils.permissions import VisitorPermission, UserPermission
from ..models import db, User
# Blueprint for all account-related routes (corrupted "__name | __" restored).
bp = Blueprint('account', __name__)
@bp.route('/signin', methods=['GET', 'POST'])
@VisitorPermission()
def signin():
    """Sign an existing user in, then redirect to the site index.

    On GET, or on a POST that fails validation, re-renders the signin form.
    """
    # NOTE: "validate_on_submit" was corrupted by a stray dump delimiter in
    # the original; restored here without behavior change.
    form = SigninForm()
    if form.validate_on_submit():
        # form.user is presumably resolved during form validation —
        # confirm against SigninForm.validate().
        signin_user(form.user)
        return redirect(url_for('site.index'))
    return render_template('account/signin/signin.html', form=form)
@bp.route('/signup', methods=['GET', 'POST'])
@VisitorPermission()
def signup():
    """Register a new user, sign them in, and redirect to the site index."""
    form = SignupForm()
    if not form.validate_on_submit():
        return render_template('account/signup/signup.html', form=form)
    # The confirmation field is not a User column, so drop it before
    # constructing the model.
    fields = dict(form.data)
    fields.pop('repassword')
    new_user = User(**fields)
    db.session.add(new_user)
    db.session.commit()
    signin_user(new_user)
    return redirect(url_for('site.index'))
@bp.route('/signout')
def signout():
    """Sign the current user out and return them to where they came from."""
    signout_user()
    destination = request.referrer or url_for('site.index')
    return redirect(destination)
|
Azure/azure-sdk-for-python | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2021_06_01_preview/aio/operations/_scope_maps_operations.py | Python | mit | 24,899 | 0.004659 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._scope_maps_operations import build_create_request_initial, build_delete_request_initial, build_get_request, build_list_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ScopeMapsOperations:
"""ScopeMapsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2021_06_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to send service requests.
        self._client = client
        # Object-model serializer/deserializer pair.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (subscription id, credentials, ...).
        self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
registry_name: str,
scope_map_name: str,
**kwargs: Any
) -> "_models.ScopeMap":
"""Gets the properties of the specified scope map.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param scope_map_name: The name of the scope map.
:type scope_map_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ScopeMap, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.ScopeMap
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_mode | ls.ScopeMap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
| registry_name=registry_name,
scope_map_name=scope_map_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ScopeMap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/scopeMaps/{scopeMapName}'} # type: ignore
    async def _create_initial(
        self,
        resource_group_name: str,
        registry_name: str,
        scope_map_name: str,
        scope_map_create_parameters: "_models.ScopeMap",
        **kwargs: Any
    ) -> "_models.ScopeMap":
        """Send the initial PUT that creates the scope map.

        First leg of the long-running operation driven by ``begin_create``;
        returns the immediate response body (HTTP 200 or 201).
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ScopeMap"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
        # Serialize the model object into the request payload.
        _json = self._serialize.body(scope_map_create_parameters, 'ScopeMap')
        request = build_create_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            scope_map_name=scope_map_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 = updated an existing scope map, 201 = created; both carry a
        # ScopeMap body (generated code deserializes per status code).
        if response.status_code == 200:
            deserialized = self._deserialize('ScopeMap', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('ScopeMap', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/scopeMaps/{scopeMapName}'} # type: ignore
@distributed_trace_async
async def begin_create(
self,
resource_group_name: str,
registry_name: str,
scope_map_name: str,
scope_map_create_parameters: "_models.ScopeMap",
**kwargs: Any
) -> AsyncLROPoller["_models.ScopeMap"]:
"""Creates a scope map for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param scope_map_name: The name of the scope map.
:type scope_map_name: str
:param scope_map_create_parameters: The parameters for creating a scope map.
:type scope_map_create_parameters:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ScopeMap
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass |
openstack/designate | api-ref/source/conf.py | Python | apache-2.0 | 6,722 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# ironic documentation build configuration file, created by
# sphinx-quickstart on Sat May 1 15:17:47 2010.
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# Use the OpenStack docs theme, in its API-reference sidebar configuration.
html_theme = 'openstackdocs'
html_theme_options = {
    "sidebar_mode": "toc",
    "sidebar_dropdown": "api_ref",
}
# os_api_ref supplies the REST API directives used throughout this document.
extensions = [
    'os_api_ref',
    'openstackdocstheme'
]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Designate API Reference'
copyright = u'OpenStack Foundation'
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/designate'
openstackdocs_bug_project = 'designate'
openstackdocs_bug_tag = ''
openstackdocs_auto_name = False
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use
# for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'designatedoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Designate.tex', u'OpenStack DNS API Documentation',
u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
|
telefonicaid/fiware-sinfonier | sinfonier-backend-api/config/config.py | Python | apache-2.0 | 525 | 0 | import os
import sys
from utils.SinfonierConstants import Environment as EnvConst
# Select the per-environment settings module named by the SINFONIER
# environment variable and star-import it so its names become this module's
# configuration.  (Two lines were corrupted by stray dump delimiters in the
# original; restored here without behavior change.)
SINFONIER_API_NAME = os.environ[EnvConst.SINFONIER_ENV_KEY]
if SINFONIER_API_NAME == EnvConst.DEVELOP_ENVIRONMENT:
    from environmentConfig.Develop import *
elif SINFONIER_API_NAME == EnvConst.PROD_ENVIRONMENT:
    from environmentConfig.Production import *
elif SINFONIER_API_NAME == EnvConst.DOCKER_ENVIRONMENT:
    from environmentConfig.Docker import *
else:
    # Fail fast on an unknown environment rather than run half-configured.
    sys.exit('ERROR: Environment not found: ' + EnvConst.SINFONIER_ENV_KEY)
|
Som-Energia/somenergia-tomatic | tomatic/scheduling_test.py | Python | gpl-3.0 | 24,860 | 0.010666 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import unittest
from datetime import datetime, timedelta
from yamlns.dateutils import Date
from yamlns import namespace as ns
from .scheduling import (
weekday,
weekstart,
nextweek,
choosers,
Scheduling,
)
class Scheduling_Test(unittest.TestCase):
    def setUp(self):
        # Disable unittest's diff truncation: the YAML dumps compared in
        # these tests are long.
        self.maxDiff = None
def assertNsEqual(self, dict1, dict2):
def parseIfString(nsOrString):
if type(nsOrString) in (dict, ns):
return nsOrString
return ns.loads(nsOrString)
def sorteddict(d):
if type(d) not in (dict, ns):
return d
return ns(sorted(
(k, sorteddict(v))
for k,v in d.items()
))
dict1 = sorteddict(parseIfString(dict1))
dict2 = sorteddict(parseIfString(dict2))
return self.assertMultiLineEqual(dict1.dump(), dict2.dump())
# weekday
def test_weekday_withSunday(self):
self.assertEqual(
'dg', weekday(Date("2017-10-01")))
def test_weekday_withMonday(self):
self.assertEqual(
'dl', weekday(Date("2017-10-02")))
def test_weekday_withWenesday(self):
self.assertEqual(
'dx', weekday(Date("2017-10-04")))
# weekstart
def test_weekstart_withMonday(self):
self.assertEqual(
weekstart(Date("2017-10-02")),
Date("2017-10-02"))
def test_weekstart_withFriday(self):
self.assertEqual(
weekstart(Date("2017-10-06")),
Date("2017-10-02"))
# nextweek
def test_nextweek_withMonday(self):
self.assertEqual(
nextweek(Date("2017-10-02")),
Date("2017-10-09"))
def test_nextweek_withFriday(self):
self.assertEqual(
nextweek(Date("2017-10-06")),
Date("2017-10-09"))
# Scheduling.extension
def test_extension_existing(self):
schedule = Scheduling("""\
extensions:
cesar: 200
""")
self.assertEqual(
schedule.extension('cesar'),
'200')
def test_extension_badExtension(self):
schedule = Scheduling("""\
extensions:
cesar: 200
""")
self.assertEqual(
schedule.extension('notExisting'),
None)
# extensionToName
def test_extensionToName_stringExtension(self):
schedule = Scheduling("""\
extensions:
cesar: 200
""")
self.assertEqual(
schedule.extensionToName('200'),
'cesar')
def test_extensionToName_intExtension(self):
schedule = Scheduling("""\
extensions:
cesar: 200
""")
self.assertEqual(
schedule.extensionToName(200),
'cesar')
def test_extensionToName_missingExtensionReturnsExtension(self):
schedule = Scheduling("""\
extensions:
cesar: 200
""")
self.assertEqual(
schedule.extensionToName('100'),
'100')
# Scheduling.properName
def test_properName_whenPresent(self):
schedule = Scheduling("""\
names:
cesar: César
""")
self.assertEqual(
schedule.properName('cesar'),
u'César')
def test_properName_missing_usesTitle(self):
schedule = Scheduling("""\
names:
cesar: César
""")
self.assertEqual(
| schedule.properName('perico'),
u'Perico')
def test_properName_noNamesAtAll(self):
schedule = Scheduling("""\
otherkey:
""")
self.assertEqual(
schedule.properName('perico'),
u'Perico')
# Scheduling.intervals
def test_intervals_withOneDate_notEnough(self):
schedule = Scheduling("""\
hours:
- '09:00'
| """)
self.assertEqual(
schedule.intervals(), [
])
def test_intervals_withTwoDates(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
""")
self.assertEqual(
schedule.intervals(), [
'09:00-10:15',
])
def test_intervals_withMoreThanTwo(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
- '11:30'
""")
self.assertEqual(
schedule.intervals(), [
'09:00-10:15',
'10:15-11:30',
])
# Scheduling.peekInterval
def test_peekInterval_beforeAnyInterval(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
- '11:30'
""")
self.assertEqual(
schedule.peekInterval("08:59"),None)
def test_peekInterval_justInFirstInterval(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
- '11:30'
""")
self.assertEqual(
schedule.peekInterval("09:00"),0)
def test_peekInterval_justBeforeNextInterval(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
- '11:30'
""")
self.assertEqual(
schedule.peekInterval("10:14"),0)
def test_peekInterval_justInNextInterval(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
- '11:30'
""")
self.assertEqual(
schedule.peekInterval("10:15"),1)
def test_peekInterval_justAtTheEndOfLastInterval(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
- '11:30'
""")
self.assertEqual(
schedule.peekInterval("11:29"),1)
def test_peekInterval_pastLastInterval(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
- '11:30'
""")
self.assertEqual(
schedule.peekInterval("11:30"),None)
def test_peekInterval_withNoHours(self):
schedule = Scheduling("""\
other:
""")
with self.assertRaises(Exception) as ctx:
schedule.peekInterval("11:30")
self.assertEqual(str(ctx.exception),
"Schedule with no hours attribute")
# choosers
def test_choosers(self):
now = datetime(2017,10,20,15,25,35)
self.assertEqual(
choosers(now),
("2017-10-16", 'dv', "15:25"))
# Scheduling.peekQueue
def test_peekQueue_oneSlot_oneTurn(self):
schedule = Scheduling(u"""\
timetable:
dl:
-
- cesar
hours:
- '00:00'
- '23:59'
extensions:
cesar: 200
""")
self.assertEqual(schedule.peekQueue('dl','12:00'), [
'cesar',
])
def test_peekQueue_oneSlot_twoTurns(self):
schedule = Scheduling(u"""\
timetable:
'dl':
-
- cesar
- eduard
hours:
- '00:00'
- '23:59'
extensions:
cesar: 200
""")
self.assertEqual(schedule.peekQueue('dl','12:00'), [
'cesar',
'eduard',
])
def test_peekQueue_twoTimes(self):
schedule = Scheduling(u"""\
timetable:
dl:
-
- cesar
-
- eduard
hours:
- '00:00'
- '12:00'
- '23:59'
extensions:
cesar: 200
eduard: 201
""")
self.assertEqual(schedule.peekQueue('dl','12:00'), [
'eduard',
])
def test_peekQueu |
shepdelacreme/ansible | test/units/module_utils/test_distribution_version.py | Python | gpl-3.0 | 39,710 | 0.002518 | # -*- coding: utf-8 -*-
# Copyright: (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from itertools import product
import pytest
# the module we are actually testing (sort of)
from ansible.module_utils.facts.system.distribution import DistributionFactCollector
# to generate the testcase data, you can use the script gen_distribution_version_testcase.py in hacking/tests
TESTSETS = [
{
"platform.dist": [
"centos",
"7.2.1511",
"Core"
],
"input": {
"/etc/redhat-release": "CentOS Linux release 7.2.1511 (Core) \n",
"/etc/os-release": (
"NAME=\"CentOS Linux\"\nVERSION=\"7 (Core)\"\nID=\"centos\"\nID_LIKE=\"rhel fedora\"\nVERSION_ID=\"7\"\n"
"PRETTY_NAME=\"CentOS Linux 7 (Core)\"\nANSI_COLOR=\"0;31\"\nCPE_NAME=\"cpe:/o:centos:centos:7\"\n"
"HOME_URL=\"https://www.centos.org/\"\nBUG_REPORT_URL=\"https://bugs.centos.org/\"\n\nCENTOS_MANTISBT_PROJECT=\"CentOS-7\"\n"
"CENTOS_MANTISBT_PROJECT_VERSION=\"7\"\nREDHAT_SUPPORT_PRODUCT=\"centos\"\nREDHAT_SUPPORT_PRODUCT_VERSION=\"7\"\n\n"
),
"/etc/system-release": "CentOS Linux release 7.2.1511 (Core) \n"
},
"name": "CentOS 7.2.1511",
"result": {
"distribution_release": "Core",
"distribution": "CentOS",
"distribution_major_version": "7",
"os_family": "RedHat",
"distribution_version": "7.2.1511",
}
},
{
"name": "CentOS 6.7",
"platform.dist": [
"centos",
"6.7",
"Final"
],
"input": {
"/etc/redhat-release": "CentOS release 6.7 (Final)\n",
"/etc/lsb-release": (
"LSB_VERSION=base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:"
"printing-4.0-amd64:printing-4.0-noarch\n"
),
"/etc/system-release": "CentOS release 6.7 (Final)\n"
},
"result": {
"distribution_release": "Final",
"distribution": "CentOS",
"distribution_major_version": "6",
"os_family": "RedHat",
"distribution_version": "6.7"
}
},
{
"name": "RedHat 7.2",
"platform.dist": [
"redhat",
"7.2",
"Maipo"
],
"input": {
"/etc/redhat-release": "Red Hat Enterprise Linux Server release 7.2 (Maipo)\n",
"/etc/os-release": (
"NAME=\"Red Hat Enterprise Linux Server\"\nVERSION=\"7.2 (Maipo)\"\nID=\"rhel\"\nID_LIKE=\"fedora\"\nVERSION_ID=\"7.2\"\n"
"PRETTY_NAME=\"Red Hat Enterprise Linux Server 7.2 (Maipo)\"\nANSI_COLOR=\"0;31\"\n"
"CPE_NAME=\"cpe:/o:redhat:enterprise_linux:7.2:GA:server\"\nHOME_URL=\"https://www.redhat.com/\"\n"
"BUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\n\nREDHAT_BUGZILLA_PRODUCT=\"Red Hat Enterprise Linux 7\"\n"
"REDHAT_BUGZILLA_PRODUCT_VERSION=7.2\nREDHAT_SUPPORT_PRODUCT=\"Red Hat Enterprise Linux\"\n"
"REDHAT_SUPPORT_PRODUCT_VERSION=\"7.2\"\n"
),
"/etc/system-release": "Red Hat Enterprise Linux Server release 7.2 (Maipo)\n"
},
"result": {
"distribution_release": "Maipo",
"distribution": "RedHat",
"distribution_major_version": "7",
"os_family": "RedHat",
"distribution_version": "7.2"
}
},
{
"name": "RedHat 6.7",
"platform.dist": [
"redhat",
"6.7",
"Santiago"
],
"input": {
"/etc/redhat-release": "Red Hat Enterprise Linux Server release 6.7 (Santiago)\n",
"/etc/lsb-release": (
"LSB_VERSION=base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:"
"printing-4.0-amd64:printing-4.0-noarch\n"
),
"/etc/system-release": "Red Hat Enterprise Linux Server release 6.7 (Santiago)\n"
},
"result": {
"distribution_release": "Santiago",
"distribution": "RedHat",
"distribution_major_version": "6",
"os_family": "RedHat",
"distribution_version": "6.7"
}
},
{
"name": "Virtuozzo 7.3",
"platform.dist": [
"redhat",
"7.3",
""
],
"input": {
"/etc/redhat-release": "Virtuozzo Linux release 7.3\n",
"/etc/os-release": (
"NAME=\"Virtuozzo\"\n"
"VERSION=\"7.0.3\"\n"
"ID=\"virtuozzo\"\n"
"ID_LIKE=\"rhel fedora\"\n"
"VERSION_ID=\"7\"\n"
"PRETTY_NAME=\"Virtuozzo release 7.0.3\"\n"
"ANSI_COLOR=\"0;31\"\n"
"CPE_NAME=\"cpe:/o:virtuozzoproject:vz:7\"\n"
"HOME_URL=\"http://www.virtuozzo.com\"\n"
"BUG_REPORT_URL=\"https://bugs.openvz.org/\"\n"
),
"/etc/system-release": "Virtuozzo release 7.0.3 (640)\n"
},
"result": {
"distribution_release": "NA",
"distribution": "Virtuozzo",
"distribution_major_version": "7",
"os_family": "RedHat",
"distribution_version": "7.3"
}
},
{
"name": "openSUSE Leap 42.1",
"input": {
"/etc/os-release": """
NAME="openSUSE Leap"
VERSION="42.1"
VERSION_ID="42.1"
PRETTY_NAME="openSUSE Leap 42.1 (x86_64)"
ID=opensuse
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:opensuse:opensuse:42.1"
BUG_REPORT_URL="https://bugs.opensuse.org"
HOME_URL="https://opensuse.org/"
ID_LIKE="sus | e"
""",
"/etc/SuSE-release": """
openSUSE 42.1 (x86_64)
VERSION = 42.1
CODENAME = Malachite
# /etc/SuSE-release is deprecated and will be removed in the future, use /etc/os-release instead
"""
},
"platform.dist": ['SuSE', '42.1', 'x86_64'],
"result": {
"distribution": "openSUSE Leap",
"distribution_major_version": "42 | ",
"distribution_release": "1",
"os_family": "Suse",
"distribution_version": "42.1",
}
},
{
'name': 'openSUSE 13.2',
'input': {
'/etc/SuSE-release': """openSUSE 13.2 (x86_64)
VERSION = 13.2
CODENAME = Harlequin
# /etc/SuSE-release is deprecated and will be removed in the future, use /etc/os-release instead
""",
'/etc/os-release': """NAME=openSUSE
VERSION="13.2 (Harlequin)"
VERSION_ID="13.2"
PRETTY_NAME="openSUSE 13.2 (Harlequin) (x86_64)"
ID=opensuse
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:opensuse:opensuse:13.2"
BUG_REPORT_URL="https://bugs.opensuse.org"
HOME_URL="https://opensuse.org/"
ID_LIKE="suse"
"""
},
'platform.dist': ('SuSE', '13.2', 'x86_64'),
'result': {
'distribution': u'openSUSE',
'distribution_major_version': u'13',
'distribution_release': u'2',
'os_family': u'Suse',
'distribution_version': u'13.2'
}
},
{
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/os-release": (
"NAME=\"openSUSE Tumbleweed\"\n# VERSION=\"20160917\"\nID=opensuse\nID_LIKE=\"suse\"\nVERSION_ID=\"20160917\"\n"
"PRETTY_NAME=\"openSUSE Tumbleweed\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:tumbleweed:20160917\"\n"
"BUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://www.opensuse.org/\"\n"
)
},
"name": "openSUSE Tumbleweed 20160917",
"result": {
"distribution_release": "",
"distribution": "openSUSE Tumbleweed",
"distribution_major_version": "20160917",
"os_family |
def print_table(size=9):
    """Print an aligned multiplication table for 1..size.

    Fixes the original formatting defect: the column-header row used a
    1-character indent while data rows used a 3-character prefix, so the
    header digits did not line up with the 3-character product cells.
    """
    print('   Multiplication Table')
    # Header indent matches the 3-character row-label prefix below.
    print('   ', end='')
    for j in range(1, size + 1):
        print(format(j, '3d'), end='')
    print()
    # Separator exactly as wide as one full row: prefix + size cells.
    print('-' * (3 + 3 * size))
    for i in range(1, size + 1):
        print(format(i, '2d'), '|', sep='', end='')
        for j in range(1, size + 1):
            print(format(i * j, '3d'), end='')
        print()


# Preserve the original script behavior: print the table when run/imported.
print_table()
ThinkboxSoftware/Deadline | Custom/events/PriorityClamp/PriorityClamp.py | Python | apache-2.0 | 2,103 | 0.001427 | ###############################################################
# Imports
###############################################################
from System.Diagnostics import *
from System.IO import *
from System import TimeSpan
from Deadline.Events import *
from Deadline.Scripting import *
import re
import sys
import os
###############################################################
# This is the function called by Deadline to get an instance of the job event listener.
###############################################################
def GetDeadlineEventListener():
    """Factory called by Deadline to obtain the plugin's event listener."""
    return JobEventListener()
def CleanupDeadlineEventListener(eventListener):
    """Called by Deadline on unload so the listener can drop its callbacks."""
    eventListener.Cleanup()
###############################################################
# Priority clamp event listener class.
###############################################################
class JobEventListener(DeadlineEventListener):
    """Clamps the priority of newly submitted jobs.

    The 'PriorityMap' plugin config entry is a ';'-separated list of rules
    of the form ``<group> < <max priority>``; a user's cap is the highest
    cap of any group they belong to.  Jobs submitted above that cap are
    lowered to it.  (The original class/def lines were corrupted by stray
    dump delimiters; restored here.)
    """

    def __init__(self):
        self.OnJobSubmittedCallback += self.OnJobSubmitted

    def Cleanup(self):
        del self.OnJobSubmittedCallback

    def OnJobSubmitted(self, job):
        """Clamp the submitted job's priority to its user's group cap."""
        user = job.JobUserName
        priority = 0
        priority_map = self.GetConfigEntry('PriorityMap')
        groups = RepositoryUtils.GetUserGroupsForUser(user)
        for entry in priority_map.split(';'):
            rule = entry.strip()
            # Skip blank/whitespace-only entries (the original crashed on
            # whitespace-only or malformed entries via tuple unpacking).
            if not rule:
                continue
            parts = rule.split('<')
            if len(parts) != 2:
                print("PriorityClamp: ignoring malformed rule {0!r}".format(entry))
                continue
            group = parts[0].strip()
            try:
                group_priority = int(parts[1].strip())
            except ValueError:
                print("PriorityClamp: ignoring malformed rule {0!r}".format(entry))
                continue
            if group in groups:
                priority = max(priority, group_priority)
                print("Allowed job priority upgraded because you're a member of {0}. Currently {1}.".format(group, priority))
        # Only ever lower a job's priority; never raise it above the request.
        if priority > 0 and job.JobPriority > priority:
            job.JobPriority = priority
            RepositoryUtils.SaveJob(job)
|
myuuuuun/ThinkStats2-Notebook | chap3ex.py | Python | gpl-2.0 | 4,243 | 0.006178 | #!/usr/bin/python
#-*- encoding: utf-8 -*-
"""
Sample Codes for ThinkStats2 - Chapter2
Copyright 2015 @myuuuuun
URL: https://github.com/myuuuuun/ThinkStats2-Notebook
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import division, print_function
import sys
sys.path.append('./code')
sys.path.append('../')
import pandas as pd
import nsfg
import relay
import custom_functions as cf
import sys
import math
import numpy as np
import thinkstats2
import thinkplot
# Ex.1
# 実際の1家族あたりの子供の人数の分布を求める
def Pmf(data):
    """Build the actual PMF of children per household from *data*."""
    return thinkstats2.Pmf(data, label='actual pmf')
# 適当な子どもに対してその家庭の子どもの人数を聞いた時に出てくる、バイアスのかかった子供の人数の分布を求める
def BiasedPmf(data):
    """PMF observed when sampling children instead of families.

    Each family is over-represented in proportion to its size, so every
    outcome x is re-weighted by x before renormalizing.
    """
    source = Pmf(data)
    biased = source.Copy(label='biased pmf')
    for size, _prob in source.Items():
        biased.Mult(size, size)
    biased.Normalize()
    return biased
# pmfを与えて、平均を返す
def PmfMean(pmf):
pmf.Normalize()
average = sum([prob * value for value, prob in pmf.Items()])
return average
"""
# 分布を比較
df = cf.ReadFemResp()
numkdhh = df.numkdhh
actual_pmf = Pmf(numkdhh)
biased_pmf = BiasedPmf(numkdhh)
thinkplot.PrePlot(2)
thinkplot.Pmfs([actual_pmf, biased_pmf])
thinkplot.Show(xlabel='class size', ylabel='PMF')
# 平均を比較
print("Actual average: ", PmfMean(actual_pmf))
print("Biased average: ", PmfMean(biased_pmf))
"""
# Ex.2
# pmfを与えて、分散を返す
# Given a pmf, return its variance.
def PmfVar(pmf):
    """Return the variance of *pmf*.

    Uses Var(x) = E[x^2] - (E[x])^2, so only one extra pass over the
    items is needed after computing the mean.

    Fixes the original, whose `sum([prob * pow(value, 2)])` referenced
    `prob` and `value` outside any loop (a NameError at runtime) — the
    generator over pmf.Items() had been lost.
    """
    pmf.Normalize()
    # E[x]; computed inline instead of via PmfMean to avoid a redundant
    # second Normalize and keep this function self-contained.
    average = sum(value * prob for value, prob in pmf.Items())
    # E[x^2] - (E[x])^2
    return sum(prob * value ** 2 for value, prob in pmf.Items()) - average ** 2
"""
df = cf.ReadFemResp()
numkdhh = df.numkdhh
pmf = Pmf(numkdhh)
print("Average(by my func): ", PmfMean(pmf))
print("Average(by method): ", pmf.Mean())
print("Variance(by my func): ", PmfVar(pmf))
print("Variance(by method): ", pmf.Var())
"""
# Ex.3
# caseidから2人以上出産している人を調べ、最初の赤ちゃんとそれ以外の赤ちゃんの妊娠期間に差があるか調べる
# http://www.icpsr.umich.edu/nsfg6/Controller?displayPage=labelDetails&fileCode=PREG§ion=A&subSec=8016&srtLabel=611933
def ex3():
    """Compare pregnancy lengths of first babies vs. later babies.

    Only respondents with at least two live births are considered
    (live births are the rows where birthord is not NaN).  Returns two
    lists of pregnancy lengths in weeks: first babies, and all others.
    """
    df = nsfg.ReadFemPreg()
    birthord = df['birthord']
    prglngth = df['prglngth']
    # Maps each respondent (caseid) to the indices of her pregnancy rows.
    pregmap = nsfg.MakePregMap(df)

    firsts = []
    others = []
    for caseid, indices in pregmap.items():
        # Keep only live births, keyed by birth order.
        live = {int(birthord.loc[idx]): idx for idx in indices
                if not math.isnan(birthord.loc[idx])}
        if len(live) <= 1:
            continue
        for order, idx in live.items():
            target = firsts if order == 1 else others
            target.append(prglngth.loc[idx])
    return firsts, others
"""
weeks_first, weeks_others = ex3()
first = sum(weeks_first) / len(weeks_first)
others = sum(weeks_others) / len(weeks_others)
print("1人目の妊娠期間の平均は: ", first, "weeks")
print("他の妊娠期間の平均は: ", others, "weeks")
print("Cohenのdは: ", cf.CohenEffectSize(np.array(weeks_first), np.array(weeks_others)))
"""
# Ex.4 リレー大会で、選手全員の速度と、自分が7.5mphで走った時にすれ違う選手の速度の分布の違いをplot
def ObservedPmf(pmf, speed):
    """Return the PMF of runner speeds as seen by an observer running
    at `speed` mph.

    A runner whose speed differs from the observer's by `diff` is
    overtaken (or overtakes the observer) at a rate proportional to
    `diff`, so each speed's probability is re-weighted by
    abs(runner_speed - speed).

    Fixes the original, which (a) shadowed the `speed` parameter with
    the loop variable so the observer's speed was never used, and
    (b) weighted by speed * |speed - mean| instead of the relative
    speed to the observer.
    """
    observed = pmf.Copy(label='observed pmf')
    for runner_speed, _prob in pmf.Items():
        observed.Mult(runner_speed, math.fabs(runner_speed - speed))
    observed.Normalize()
    return observed
def ex4():
    """Plot the actual PMF of relay-runner speeds next to the PMF an
    observer running at 7.5 mph would perceive.

    Fixes the original, in which `thinkplot.Show` was corrupted into
    `thinkplot.Sho | w` (a syntax error).
    """
    pmf = relay.pmf()
    observed = ObservedPmf(pmf, 7.5)
    thinkplot.PrePlot(2)
    thinkplot.Pmfs([pmf, observed])
    thinkplot.Show(title='PMF of running speed',
                   xlabel='speed (mph)',
                   ylabel='probability')
#ex4()
|
rkarlberg/opencog | tests/cython/test_agent_finder.py | Python | agpl-3.0 | 400 | 0.0125 | from unittest import TestCase
from agent_finder import find_subclasses
import opencog.cogserver
import test_agent
class HelperTest(TestCase):
    """Checks that find_subclasses locates MindAgent subclasses in a module."""

    def setUp(self):
        # No fixtures required.
        pass

    def tearDown(self):
        pass

    def test_find_agents(self):
        found = find_subclasses(test_agent, opencog.cogserver.MindAgent)
        self.assertEqual(len(found), 1)
        self.assertEqual(found[0][0], 'TestAgent')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.