Columns: repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars)
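Each row below is a fill-in-the-middle style record: a Python source file is described by its repository, path, license, size, and score, and its text is split across the prefix, middle, and suffix columns, so concatenating the three yields the sampled file chunk. The sketch below shows one way to iterate such records and reassemble the files; it assumes the data is loadable with the Hugging Face `datasets` library, and the dataset id `user/python-fim-corpus` is a hypothetical placeholder rather than the real name.

```python
# Minimal sketch, under the assumptions stated above: load the records and
# rebuild each file chunk by concatenating prefix + middle + suffix.
from datasets import load_dataset

# Hypothetical dataset id, used only for illustration.
ds = load_dataset("user/python-fim-corpus", split="train")

for row in ds:
    full_source = row["prefix"] + row["middle"] + row["suffix"]
    print(row["repo_name"], row["path"], row["license"], len(full_source))
```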

WSULib/combine | combine/urls.py | Python | mit | 818 | 0
"""combine URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^combine/', include('core.urls')),
url(r'^admin/', admin.site.urls),
]

hacklab-fi/hhlevents | hhlevents/apps/hhlregistrations/admin.py | Python | bsd-3-clause | 2,455 | 0.003666
# -*- coding: UTF-8 -*-
from django.contrib import admin
from django.db import models
from django_markdown.admin import MarkdownModelAdmin, AdminMarkdownWidget
from django_markdown.models import MarkdownField
from happenings.models import Event as HappeningsEvent
from happenings.admin import EventAdmin as HappeningsEventAdmin
from happenings.admin import CancellationInline
from django.utils.translation import ugettext as _
from .models import Event, Person, Registration
class EventAdmin(HappeningsEventAdmin):
fieldsets = (
(None, {
'fields': ('start_date', 'end_date', 'all_day', 'repeat',
'end_repeat', 'title', 'description',
'created_by', 'extra_url', 'gforms_url', 'image',
)
}),
('Location', {
'fields': ('location',)
}),
('Registrations', {
'classes': ('collapse',),
'fields': ( 'registration_requirement', 'max_registrations', 'close_registrations',
'event_cost', 'materials_cost', 'materials_mandatory',
'payment_due', 'hide_join_checkbox',
)
}),
('Category', {
'classes': ('collapse',),
'fields': ('categories',)
}),
('Tag', {
'classes': ('collapse',),
'fields': ('tags',)
}),
('Color', {
'classes': ('collapse',),
'fields': (
('background_color', 'background_color_custom'),
('font_color', 'font_color_custom'),
)
}),
)
formfield_overrides = {
MarkdownField: {'widget': AdminMarkdownWidget},
models.TextField: {'widget': AdminMarkdownWidget},
}
list_display = ('title', 'start_date', 'end_date', 'repeat', 'end_repeat', 'formLink')
list_filter = ['start_date']
search_fields = ['title']
date_hierarchy = 'start_date'
inlines = [CancellationInline]
class RegistrationAdmin(admin.ModelAdmin):
search_fields = ['event__title', 'person__first_name', 'person__last_name', 'person__email']
list_filter = ['state']
list_display = ('person','event', 'state')
# Remove the happenings event admin
admin.site.unregister(HappeningsEvent)
# And use our own
admin.site.register(Event, EventAdmin)
admin.site.register(Person)
admin.site.register(Registration, RegistrationAdmin)

jeanmask/opps | opps/channels/admin.py | Python | mit | 3,690 | 0.000813
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from mptt.admin import MPTTModelAdmin
from .models import Channel
from .forms import ChannelAdminForm
from opps.core.admin import PublishableAdmin
from opps.core.admin import apply_opps_rules
from opps.core.permissions.admin import AdminViewPermission
from opps.core.utils import get_template_path
import json
@apply_opps_rules('channels')
class ChannelAdmin(PublishableAdmin, MPTTModelAdmin, AdminViewPermission):
prepopulated_fields = {"slug": ("name",)}
list_display = ['name', 'show_channel_path', 'get_parent', 'site',
'date_available', 'homepage', 'order', 'show_in_menu',
'published']
list_filter = ['date_available', 'published', 'site', 'homepage', 'parent',
'show_in_menu']
search_fields = ['name', 'slug', 'long_slug', 'description']
exclude = ('user', 'long_slug')
raw_id_fields = ['parent', 'main_image']
form = ChannelAdminForm
fieldsets = (
(_(u'Identification'), {
'fields': ('site', 'parent', 'name', 'slug', 'layout', 'hat',
'description', 'main_image',
'order', ('show_in_menu', 'include_in_main_rss'),
'homepage', 'group', 'paginate_by')}),
(_(u'Publication'), {
'classes': ('extrapretty'),
'fields': ('published',
'date_available')}),
)
def get_parent(self, obj):
if obj.parent_id:
long_slug, slug = obj.long_slug.rsplit("/", 1)
return long_slug
get_parent.admin_order_field = "parent"
get_parent.short_description = "Parent"
def show_channel_path(self, obj):
return unicode(obj)
show_channel_path.short_description = _(u'Channel Path')
def save_model(self, request, obj, form, change):
long_slug = u"{0}".format(obj.slug)
if obj.parent:
long_slug = u"{0}/{1}".format(obj.parent.slug, obj.slug)
obj.long_slug = long_slug
super(ChannelAdmin, self).save_model(request, obj, form, change)
def get_form(self, request, obj=None, **kwargs):
form = super(ChannelAdmin, self).get_form(request, obj, **kwargs)
channel_json = []
def _get_template_path(_path):
template = get_template_path(_path)
with open(template) as f:
_jsonData = f.read().replace('\n', '')
return json.loads(_jsonData)
def _get_json_channel(_obj):
return _get_template_path(
u'containers/{0}/channel.json'.format(_obj.long_slug))
def _get_json_channel_recursivelly(_obj):
channel_json = []
try:
channel_json = _get_json_channel(_obj)
except:
_is_root = _obj.is_root_node()
if not _is_root:
channel_json = _get_json_channel_recursivelly(_obj.parent)
elif _is_root:
try:
channel_json = _get_template_path(
u'containers/channel.json')
except:
pass
finally:
return channel_json
channel_json = _get_json_channel_recursivelly(obj)
if u'layout' in channel_json:
layout_list = ['default'] + [l for l in channel_json['layout']]
layout_choices = (
(n, n.title()) for n in layout_list)
form.base_fields['layout'].choices = layout_choices
return form
admin.site.register(Channel, ChannelAdmin)

grantmcconnaughey/django-lazy-tags | lazy_tags/templatetags/lazy_tags.py | Python | mit | 2,029 | 0
from django import template
from django.template.loader import render_to_string
from django.conf import settings
from ..utils import get_tag_id, set_lazy_tag_data
register = template.Library()
@register.simple_tag
def lazy_tag(tag, *args, **kwargs):
"""
Lazily loads a template tag after the page has loaded. Requires jQuery
(for now).
Usage:
{% load lazy_tags %}
{% lazy_tag 'tag_lib.tag_name' arg1 arg2 kw1='test' kw2='hello' %}
Args:
tag (str): the tag library and tag name separated by a period. For a
template tag named `do_thing` in a tag library named `thing_tags`
the `tag` argument would be `'thing_tags.do_thing'`.
*args: arguments to be passed to the template tag.
**kwargs: keyword arguments to be passed to the template tag.
"""
tag_id = get_tag_id()
set_lazy_tag_data(tag_id, tag, args, kwargs)
return render_to_string('lazy_tags/lazy_tag.html', {
'tag_id': tag_id,
'STATIC_URL': settings.STATIC_URL,
})
def _render_js(library):
error_message = getattr(settings,
'LAZY_TAGS_ERROR_MESSAGE',
'An error occurred.')
template = 'lazy_tags/lazy_tags_{0}.html'.format(library)
return render_to_string(template, {
'error_message': error_message,
})
@register.simple_tag
def lazy_tags_javascript():
"""Outputs the necessary JavaScript
|
to load tags over AJAX."""
return _render_js('javascript')
@register.simple_tag
def lazy_tags_jquery():
"""Outputs the necessary jQuery to load tags over AJAX."""
return _render_js('jquery')
@register.simple_tag
def lazy_tags_prototype():
"""Outputs the necessary Prototype to load tags over AJAX."""
return _render_js('prototype')
@register.simple_tag
def lazy_tags_js():
"""An alias to the JavaScript library specified in settings."""
library = getattr(settings, 'LAZY_TAGS_AJAX_JS', 'jquery')
return _render_js(library.lower())

ROB-Seismology/oq-hazardlib | openquake/hazardlib/tests/acceptance/disagg_test.py | Python | agpl-3.0 | 10,965 | 0.00073
# The Hazard Library
# Copyright (C) 2012 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy
from openquake.hazardlib.source import AreaSource
from openquake.hazardlib.pmf import PMF
from openquake.hazardlib.scalerel import WC1994
from openquake.hazardlib.gsim.boore_atkinson_2008 import BooreAtkinson2008
from openquake.hazardlib.calc import disagg
from openquake.hazardlib.geo import Point, Polygon, NodalPlane
from openquake.hazardlib.mfd import TruncatedGRMFD
from openquake.hazardlib.imt import SA
from openquake.hazardlib.tom import PoissonTOM
from openquake.hazardlib.site import Site
class DisaggTestCase(unittest.TestCase):
def test_areasource(self):
nodalplane = NodalPlane(strike=0.0, dip=90.0, rake=0.0)
src = AreaSource(
source_id='src_1',
name='area source',
tectonic_region_type='Active Shallow Crust',
mfd=TruncatedGRMFD(a_val=3.5, b_val=1.0, min_mag=5.0,
max_mag=6.5, bin_width=0.1),
nodal_plane_distribution=PMF([(1.0, nodalplane)]),
hypocenter_distribution=PMF([(1.0, 5.0)]),
upper_seismogenic_depth=0.0,
lower_seismogenic_depth=10.0,
magnitude_scaling_relationship = WC1994(),
rupture_aspect_ratio=1.0,
polygon=Polygon([Point(-0.5,-0.5), Point(-0.5,0.5),
Point(0.5,0.5), Point(0.5,-0.5)]),
area_discretization=9.0,
rupture_mesh_spacing=1.0
)
site = Site(location=Point(0.0,0.0),
vs30=800.0,
vs30measured=True,
z1pt0=500.0,
z2pt5=2.0)
gsims = {'Active Shallow Crust': BooreAtkinson2008()}
imt = SA(period=0.1,damping=5.0)
iml = 0.2
time_span = 50.0
truncation_level = 3.0
n_epsilons = 3
mag_bin_width = 0.2
# in km
dist_bin_width = 10.0
# in decimal degree
coord_bin_width = 0.2
# compute disaggregation
bin_edges, diss_matrix = disagg.disaggregation_poissonian(
[src], site, imt, iml, gsims, time_span, truncation_level,
n_epsilons, mag_bin_width, dist_bin_width, coord_bin_width
)
mag_bins, dist_bins, lon_bins, lat_bins, eps_bins, trt_bins = bin_edges
numpy.testing.assert_almost_equal(
mag_bins, [5., 5.2, 5.4, 5.6, 5.8, 6., 6.2, 6.4, 6.6]
)
numpy.testing.assert_almost_equal(
dist_bins, [0., 10., 20., 30., 40., 50., 60., 70., 80.]
)
numpy.testing.assert_almost_equal(
lat_bins, [-0.6, -0.4, -0.2, 0., 0.2, 0.4, 0.6]
)
numpy.testing.assert_almost_equal(
lon_bins, [-0.6, -0.4, -0.2, 0., 0.2, 0.4, 0.6]
)
numpy.testing.assert_almost_equal(
eps_bins, [-3., -1., 1., 3.]
)
self.assertEqual(trt_bins, ['Active Shallow Crust'])
expected_matrix = numpy.fromstring("""\
eJztnXlcTdv7x3eSJuVEKSWOg5LSPVEZytm7lESl5Ia4nG6GuF1FdUWGTcpYMpZolEa5hwgN7OIm
lEYNKOeWBlNFyZDqd/q9vq+v8717da99zz5N9vs/S6+1nr3Ws/Y6e33W8ywIoiCVcM+brec1YbSo
fvtn5mYYmsTNHN+wGP7v/591TK2FLWEoO1H1caMJ/Dc1kcupjGMOYWy8PRQU/REWFiS31xqGLsZ2
ii9e+9WfsZAw3S0TeOUlR+7RFvWgn5clIg/vs6AGh2O0JfZf22VvFJ3UaQhDl1W0LgQtoeYdxd9j
PV05eIIW3k+4j4I37lMSnv8EialczZ2Br/9EveoLNSN8uaeJ8uHYefhyJ5G0dT5Mwe3c35GQ7j8N
X8+8s/uhaB18edO8xfa2k/HlKCQr7kYXXr/N864wHm4IqL947M5VDGq+9xZIcI651SB8/2Pqj/UX
jMOXIwr6MoNGAvxHIzM/4zNLYHs4z+oSz2gL7g9cnzFwNcB+ooQnaLY6jxK8HvRjdtpyEvwclR4/
J08SMK9PmGP6gOcN74BFa8YDxuvLb+MzAOM+YCk5rqDyFuCfT94uPs8V3G+7xbkmbm0bvn705Rsl
pBXQbpLYFI13gPIIkzSVJsHtRH6OzvQdTIIfhlfVlrcA7Pl4ycUA9Fzd1fNcOb+dhPdGt1zMTJz+
5tvrx/Q6tDslAO/DZeLQKwgwj56J7b4C8Ct0j/sSxS9CfK7egmYejFwi4bmwe/HrQ0ioJ3bwoFsY
CfUw20xFrgDq4Ry6axADKOcefm2X24fG13XcuGG3+5A93cHZvWT3eRLsnGfhUpUCqqfO0ecaCfUv
LaiVB/kVp0R9HRn2U1BQUFBQUHx30INWx2VpwZDdp2v2u9fDkEX1xNG/zP/6fREuXxpdaQFDzB+M
tjrP6rnvdLVAhuKHn/D2UFD0R4Zr3R+WugSGRJ4u2juN/dWfZ/wSxkEMet7PnV5XltyYAUP175ct
zLP92u6KJQwDlgkMmdB2Xv/Rlpp3FH+PUo495AvQdxB4/nLvscLznya2vrPPbHz97rki6UXG+PLt
lon2BxYA9qslMcm3uoLbmW3XFtg5HV9PUHJeYwRAF6NZGjvdBOgL+ZnPO/+cILx+G5oXFpKFAMYr
eu9qfTVqvvcW2K+DG2yHAvzEwci6aRK+3Fo91FMToJOim8N/ow8RfBzZ0tCaVD0S/CHrED0aoPMS
xTplUPMdEnSrAO0y2w4S7GEf2Jl3fzi+Hva7qT7VgPFyrb0lrg84JwDdXHVbTOb7mXdIR2nSQoB/
ouJxbl6fhLefyX6EaCbSAP18lKNYDtKd3bSdZoB0lkR1mxIieiVt/89aZfjn4vpHnFsmT4K+bLjl
QhlABycK6qCeWScleD3YQ79pEiTouYiVtTdHGTC/LIwbReUA49Li9X6bKGAcy9pyG2UH4PwqeKSx
8TkJ8wVNkRCpIFCPu4mxeAbg76MfZiyrJMGeJT768wjoy2ipwrtUkJ7eW8yvM9/V2IfsOexok3kP
YM+tnKvL6gS3E82wcLf4SMLzcs30FUC64ZszcVqgcwgpFZ7qQP9fftXkOgn20PfboEG9MI50o1V/
HO1D/kPxDxx8JgfS5UmDVmkXTEL9+QkSjAgyzkvsefDam/JPCgqKAUCLMqdNDYYYjsmH3BxgKGCD
W2UC3/5Yi8tcl+B5MITR3NdfIOGc/LdyZWPKe42leHsoKPoj8fAGiyZ7GMpWassp5otndAqoXllh
CkO6unrtkHnP+Xnsa/kVaYB2PdVKtMvn97w9FP0Tp3Q35R8A+g5X8oL9JRLiPv4Kus61QL+FBbnG
Htu1aM7X+tHS+TbxCjA0I27U2myYL74ydqihthRvHalfvXU7QC9jJ10UXQHQrb6ZABns6WMWxB1j
an5+Jl+7wWefOYgD1s1aucK2KhaUr/vn/lxQfM1rxTs26sKbd1r67PB7gPi4cK85bEyI7VL8PeyN
YrEsgJ4SdH67r+tUfHnAtgmH5QA6KeL3a8BlEvSU/SPjxxQBdG2izJh4pkiMBH3ZdWgA4kOCfyqp
M6FnJPyORe+tj0YUATqXquvBHYB5vbT8WpMioD/ZNum61wDjPlDhzhr5+BJAv8DMo6XlxYTXD9yM
m7PSVb69fuz3I5LHATodlqh0bjWR+WVprrcBsH+LXnh/Q3YMCXqT2V2ddAUC9ayZW7CyGqDH+foc
fDWChHlx3My1FKDjE6VpjJcoHfR+u1z3NhcQV464ag12A4wL223hwXOAedrvaa/1ciUQ39cdaKP9
L8tA+kJ33MSedzwF/L3atftBVSTsi24+G5klQmC8ZGWj9PpQfB/KyMs1e9937IHWJe5K+RNgT7K7
9j0y+s1c9vY6QBw0YeLznuwA6LDYPo8YR5Cefj9z+xtQP684rXkQcN6gW5o8ntvHAf4+asveWaTE
FWpnXCYSDxhbUz/tQR/yH4q/pzg4vpCIvxHF+Xb2JzL80Hdic84jEup5bSiS1JfibSkoehL0PkMF
pfx/oND08K7xI953Bm01G8u3gyF0jb6OFN+534DTmSmMOTAUTqsNk5rYc98RhXNMM1QX4e2hoOiP
zI2MLlCzh6FYF6mCUIuv/ky7ZK1RbgZDElEPz/nDPefnOU9PYlMB7ebIxyaWzO95eyj6Ga5Bzluj
WZDneF13LmB/nu3e8qVICPpXd9C0WtqVdWAoKIQZqWvGp0MZpGvFM/DrCJq1eiV
DHIayrcPGnyJh
f/6vBDRI6pV3xYF4zP1Thl+Pk/L+tGE4fj1FfVRVrJtZEPPJuI2hU8i3BztYtLFqKAyVNW2WOcHi
q99OBJFu5LX7QTbUSwjtUgjGdW3vk+yZ+HGhBZ5I/gz4PYbZ3bazAegLRKnPVA8JJuF3F2eEy9pA
fRLirWyqtg0jIW4roPS8RxYoDosgaKFhmFYHQNc455paAXhe9pU2QytAuwgd9ZlCRL/o56B5ErGg
eCWkxkGvTlqI/bBp3yEjQP5MZENj5c8A3Q0bkT69BRAPxZ12qaONgF6J/ToOcgTEJbG1d62UIkH/
oudHrTkzm
kA9498FVwHiNZCcSgMREvKLYhVPdEVI0NEQy5BP4gDdCouRbXfUwJfTM4fM2QcYF/qT
Y4ExQswn3Gv4Lc52ewnYh7lmWuYMyofZDeiJNyG3iOggK98ahtQD/n6vVo0/gfyW3ZI171EegThE
tKV+tEF739mPQgM5P9kR6H9hg86OKzb4ALDnaHTHIRLixBGbwAqHYUI8t+D8ec1cQNwuOjZPxgQQ
nwu16nqNrCHQ//mMhGE5gL9HbibdIxIX2R0nkh6sKiVQD313SwpIX6bom8Sn6wQUCnG87KLLnMiI
q0WqP3mA3ttEqTBiZADOz1BQfBfEjvkoe5Py/4ECbYiDcxoDhkzulDrnWMAQtne5jV/XPoNr1Pjy
CBY040lc7gsD3r/H7ozzA+SjEBbudUvd8sz57PkPQTqpMX76PW8PBYUgWFnbrnppB0PyxrEt9Xxx
KxwDyysHTGHItfhVygtAHI2w0B3l0XDaBN8u2+ij0fXp+HlHQcEP+uVyWLIs3k/QhWWJGl15rIT1
fn7fWmb8mgVh7Wvj9oh/rT87+XoQrMfz5yrliMN8eXq5RxJ9IzXwdobHpQ5NoQvPzz/qz/dYNhU/
v5D6iuVzlfHrF1cy5aysovDsYZoarL8+AW8PvXU5I3sENd/7HDF1E31535meGl6GF/nvudv5MXIJ
73ubxrw34QeA/oVaOV1QEiSe6Nqr2V9qWFDsxaRXMwRZj2K1mIw6FsTep8deIIj+tWuV7SqePfWs
kNkzSIjbYnN1jQaTcY4rw2fbDv59P8zhpxN/sCDmojrYEvC8tE8ni0sA939x6y7bn/yO9C8koLg4
DaRDTSp/JwbKT0gSaFyrv7wqYL5U6UiFigPaHbUzKwYQx4Rsb7jZSeRey1tbTPcD8u9h9/zC75Cg
N3HdOr/sJqDvoL8PSTsC0G2R04r1UiTEcWBr6otaSPBnROHP8AjAeyz/zcTVNzUB41hpVIYC8kly
tnjMlgHkI+3voAtii+eD7jsz9Z5eRCAfHbbqwqwtBPJVop0Fu84B8hOicpwjBs2C7wthR6QmvCCi
f4VcfbcSpO/0EmizilOkEPO4Eia5QCakEzBej390lyUhThz5bFUeKcT7K9mbT+hKgfLEmjVuVQXd
nxjxoN3uNYH+58zeMhsUv6NvdSeUiI7WHfmiqiWg+Lvu2PLpzQwy2qXoGRiqQz+QoZN2R+vLdSNq
SYjzvXleHiES59sdszKXvGqg/JPiO+WKvfOBPMr/BwxBultcpWGI/eatwpSpMIQFuqhm8L5Dsfqm
tN+6vmM2ZLpqGfP+//XSz1gPnqOrH5PAyDDCtxu7OXfKMeZXOyko+gMfnxx55jEfhoLqrs09wxcv
wzyaVrLUEoY8RX+62iSEOJTuKE44tCjOhNduqtY

bijaydev/Implementation-of-Explicit-congestion-notification-ECN-in-TCP-over-wireless-network-in-ns-3 | utils/tests/test-waf.py | Python | gpl-2.0 | 7,623 | 0.005903
#! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
#
# Copyright (c) 2014 Siddharth Santurkar
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# NOTE: Run this script with the Python3 interpreter if the python3 compatibility
# of the ns-3 unit test runner needs to be tested.
# The following options of waf are being tested for portability by this script.
# To see the options supported by this script, run with the -h option on the command line
#
# build : executes the build (pre: configure, post: clean)
# check : run the equivalent of the old ns-3 unit tests using test.py
# clean : cleans the project
# configure: configures the project (pre: None, post: distclean)
# dist : makes a tarball for redistributing the sources (pre:none )
# distcheck: checks if the project compiles (tarball from 'dist') (pre: dist, post: rm -rf ns-3*.tar.bz2)
# docs : build all the documentation: doxygen, manual, tutorial, models (pre: configure; post: distclean)
# doxygen : do a full build, generate the introspected doxygen and then the doxygen
# install : installs the targets on the system (pre: configure, post: uninstall )
# list : lists the targets to execute (pre: configure)
# shell : run a shell with an environment suitably modified to run locally built programs (pre:configure)
# sphinx : build the Sphinx documentation: manual, tutorial, models
# step : executes tasks in a step-by-step fashion, for debugging (pre: configure)
# uninstall: removes the targets installed (pre: install, post uninstall)
# *update : updates the plugins from the *waflib/extras* directory
from __future__ import print_function
from TestBase import TestBaseClass
import sys
def replace(pre, post, main_cmd_list):
if pre:
pre = pre + ' && '
else:
pre = ''
if post:
post = ' && ' + post
else:
post = ''
return [ pre + main_cmd + post for main_cmd in main_cmd_list ]
def main(argv):
"""
Prepares test cases and executes
"""
runner = TestBaseClass(argv[1:], "Test suite for the ns-3 Waf build system", 'waf')
in_cmds = runner.override_cmds()
if in_cmds:
cmds = in_cmds.split(',')
else:
cmds = ['basic', 'build', 'configure', 'step', 'clean', 'dist', 'list']
config_test_cases = [
"--enable-gcov",
"--enable-sudo",
"--enable-sudo",
"--enable-tests",
"--disable-tests",
"--enable-examples",
"--disable-examples",
"--doxygen-no-build",
"--enable-static",
"--enable-mpi",
"--enable-rpath",
"--enable-modules=build/utils/test-runner.cc.1.o",
"--boost-static",
"--boost-mt",
"--boost-linkage_autodetect",
"--boost-python=33",
"--disable-gtk",
"--int64x64=cairo",
"--disable-pthread",
"--force-planetlab",
"--nopyc",
"--nopyo",
"--disable-python",
"--apiscan=all",
"--with-python=/usr/bin/python2.7",
"--no32bit-scan",
"-o test_out && rm -rf test_out",
"--out=test_out && rm -rf test_out",
"-t test_top && rm -rf test_top",
"--top=test_top && rm -rf test_top",
"--download",
"--check-c-compiler=gc",
"--check-cxx-compiler=g++",
]
basic_test_cases = [
"--version",
"-h",
"--help",
]
build_test_cases = [
"-j10",
"--jobs=10",
"-d optimized",
"-d debug",
"-d release",
"--build-profile optimized",
"--build-profile debug",
"--build-profile release",
"-p",
"--progress",
]
step_test_cases = [
"--files=\"*/main.c,*/test/main.o\"",
]
install_test_cases = [
"-f",
"--force",
"--prefix=./test-prefix && rm -rf ./test-prefix",
"--exec-prefix=.",
"--bindir=./test-prefix/bin --sbindir=./test-prefix/sbin --libexecdir=./test-prefix/libexec --sysconfdir=./test-prefix/etc --sharedstatedir=./test-prefix/com --localstatedir=./test-prefix/var --libdir=./test-prefix/lib --includedir=./test-prefix/include --oldincludedir=./test-prefix/usr/include --datarootdir=./test-prefix/share --datadir=./test-prefix/share_root --infodir=./test-prefix/info --localedir=./test-prefix/locale --mandir=./test-prefix/man --docdir=./test-prefix/doc/package --htmldir=./test-prefix/doc --dvidir=./test-prefix/doc --pdfdir=./test-prefix/doc --psdir=./test-prefix/doc && rm -rf ./test-prefix",
]
common_test_cases = [
"",
"-k",
"--keep",
"-v",
"--verbose",
"--nocache",
"--zones=task_gen",
"--zones=deps",
"--zones=tasks",
"--no-task-lines",
]
test_case_mappings = {
'basic' : basic_test_cases,
'configure' : config_test_cases,
'build' : build_test_cases,
'step' : step_test_cases,
'install' : install_test_cases,
}
waf_string = sys.executable + ' waf'
cmd_execute_list = []
for cmd in cmds:
if cmd == 'basic':
cmd_list = []
else:
cmd_list = ['%s %s %s' % (waf_string, cmd, option) for option in common_test_cases ]
if cmd in test_case_mappings:
cmd_list += ['%s %s %s' % (waf_string, cmd, option) for option in test_case_mappings[cmd] ]
if cmd == 'basic':
cmd_list.append('%s configure && %s build && %s --run scratch/myfirst' % tuple([waf_string]*3))
cmd_list.append('%s configure && %s build && %s --pyrun scratch/myfirst.py' % tuple([waf_string]*3))
if cmd == 'build':
cmd_list = replace(waf_string+' configure', waf_string+' clean', cmd_list)
cmd_list.append('%s configure --enable-gcov && %s build --lcov-report && %s clean' % tuple([waf_string]*3))
elif cmd == 'configure':
cmd_list = replace(None, waf_string+' distclean', cmd_list)
elif cmd == 'distcheck':
cmd_list = replace(waf_string+' dist', 'rm -rf ns-3*.tar.bz2', cmd_list)
elif cmd == 'docs':
cmd_list = replace(waf_string+' configure', waf_string+' distclean', cmd_list)
elif cmd == 'install':
cmd_list = replace(waf_string+' configure', waf_string+' uninstall', cmd_list)
elif cmd == 'list':
cmd_list = replace(waf_string+' configure', waf_string +' distclean', cmd_list)
elif cmd == 'shell':
cmd_list = replace(waf_string+' configure', waf_string+' distclean', cmd_list)
elif cmd == 'step':
cmd_list = replace(waf_string+' configure', waf_string+' distclean', cmd_list)
elif cmd == 'uninstall':
cmd_list = replace(waf_string+' install', None, cmd_list)
cmd_execute_list += cmd_list
return runner.runtests(cmd_execute_list)
if __name__ == '__main__':
sys.exit(main(sys.argv))

wileeam/airflow | airflow/contrib/operators/bigquery_to_gcs.py | Python | apache-2.0 | 1,678 | 0.00298
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.operators.bigquery_to_gcs`."""
import warnings
from airflow.providers.google.cloud.operators.bigquery_to_gcs import BigQueryToGCSOperator
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.operators.bigquery_to_gcs`.",
DeprecationWarning, stacklevel=2
)
class BigQueryToCloudStorageOperator(BigQueryToGCSOperator):
"""
This class is deprecated.
Please use `airflow.providers.google.cloud.operators.bigquery_to_gcs.BigQueryToGCSOperator`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.google.cloud.operators.bigquery_to_gcs.BigQueryToGCSOperator`.""",
DeprecationWarning, stacklevel=2
)
super().__init__(*args, **kwargs)

allenlavoie/tensorflow | tensorflow/python/training/warm_starting_util.py | Python | apache-2.0 | 15,397 | 0.004287
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to warm-start TF.Learn Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_ops
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.VocabInfo", "estimator.VocabInfo")
class VocabInfo(
collections.namedtuple("VocabInfo", [
"new_vocab",
"new_vocab_size",
"num_oov_buckets",
"old_vocab",
"old_vocab_size",
"backup_initializer",
])):
"""Vocabulary information for warm-starting.
See @{tf.estimator.WarmStartSettings$WarmStartSettings} for examples of using
VocabInfo to warm-start.
Attributes:
new_vocab: [Required] A path to the new vocabulary file (used with the
model to be trained).
new_vocab_size: [Required] An integer indicating how many entries of the new
vocabulary will be used in training.
num_oov_buckets: [Required] An integer indicating how many OOV buckets are
associated with the vocabulary.
old_vocab: [Required] A path to the old vocabulary file (used with the
checkpoint to be warm-started from).
old_vocab_size: [Optional] An integer indicating how many entries of the old
vocabulary were used in the creation of the checkpoint. If not provided,
the entire old vocabulary will be used.
backup_initializer: [Optional] A variable initializer used for variables
corresponding to new vocabulary entries and OOV. If not provided, these
entries will be zero-initialized.
"""
def __new__(cls,
new_vocab,
new_vocab_size,
num_oov_buckets,
old_vocab,
old_vocab_size=-1,
backup_initializer=None):
return super(VocabInfo, cls).__new__(
cls,
new_vocab,
new_vocab_size,
num_oov_buckets,
old_vocab,
old_vocab_size,
backup_initializer,
)
def _is_variable(x):
return (isinstance(x, variables_lib.Variable) or
isinstance(x, resource_variable_ops.ResourceVariable))
def _infer_var_name(var):
"""Returns name of the `var`.
Args:
var: A list. The list can contain either of the following:
(i) A single `Variable`
(ii) A single `ResourceVariable`
(iii) Multiple `Variable` objects which must be slices of the same larger
variable.
(iv) A single `PartitionedVariable`
Returns:
Name of the `var`
"""
name_to_var_dict = saver.BaseSaverBuilder.OpListToDict(var)
if len(name_to_var_dict) > 1:
raise TypeError("`var` = %s passed as arg violates the constraints. "
"name_to_var_dict = %s" % (var, name_to_var_dict))
return list(name_to_var_dict.keys())[0]
def _warm_start_var(var, prev_ckpt, prev_tensor_name=None):
"""Warm-starts given variable from `prev_tensor_name` tensor in `prev_ckpt`.
Args:
var: Current graph's variable that needs to be warm-started (initialized).
Can be either of the following:
(i) `Variable`
(ii) `ResourceVariable`
(iii) list of `Variable`: The list must contain slices of the same larger
variable.
(iv) `PartitionedVariable`
prev_ckpt: A string specifying the directory with checkpoint file(s) or path
to checkpoint. The given checkpoint must have tensor with name
`prev_tensor_name` (if not None) or tensor with name same as given `var`.
prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If
None, we lookup tensor with same name as given `var`.
"""
if _is_variable(var):
current_var_name = _infer_var_name([var])
elif isinstance(var, list) and all(_is_variable(v) for v in var):
current_var_name = _infer_var_name(var)
elif isinstance(var, variables_lib.PartitionedVariable):
current_var_name = _infer_var_name([var])
var = var._get_variable_list() # pylint: disable=protected-access
else:
raise TypeError(
"var MUST be one of the following: a Variable, list of Variable or "
"PartitionedVariable, but is {}".format(type(var)))
if not prev_tensor_name:
# Assume tensor name remains the same.
prev_tensor_name = current_var_name
checkpoint_utils.init_from_checkpoint(prev_ckpt, {prev_tensor_name: var})
# pylint: disable=protected-access
# Accesses protected members of tf.Variable to reset the variable's internal
# state.
def _warm_start_var_with_vocab(var,
current_vocab_path,
current_vocab_size,
prev_ckpt,
prev_vocab_path,
previous_vocab_size=-1,
current_oov_buckets=0,
prev_tensor_name=None,
initializer=None):
"""Warm-starts given variable from `prev_tensor_name` tensor in `prev_ckpt`.
Use this method when the `var` is backed by vocabulary. This method stitches
the given `var` such that values corresponding to individual features in the
vocabulary remain consistent irrespective of changing order of the features
between old and new vocabularies.
Args:
var: Current graph's variable that needs to be warm-started (initialized).
Can be either of the following:
(i) `Variable`
(ii) `ResourceVariable`
(iii) list of `Variable`: The list must contain slices of the same larger
variable.
(iv) `PartitionedVariable`
current_vocab_path: Path to the vocab file used for the given `var`.
current_vocab_size: An `int` specifying the number of entries in the current
vocab.
prev_ckpt: A string specifying the directory with checkpoint file(s) or path
to checkpoint. The given checkpoint must have tensor with name
`prev_tensor_name` (if not None) or tensor with name same as given `var`.
prev_vocab_path: Path to the vocab file used for the tensor in `prev_ckpt`.
previous_vocab_size: If provided, will constrain previous vocab to the first
`previous_vocab_size` entries. -1 means use the entire previous vocab.
current_oov_buckets: An `int` specifying the number of out-of-vocabulary
buckets used for given `var`.
prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If
None, we lookup tensor with same name as given `var`.
initializer: Variable initializer to be used for missing entries. If None,
missing entries will be zero-initialized.
Raises:
ValueError: If required args are not provided.
"""
if not (current_vocab_path and current_vocab_size and prev_ckpt and
prev_vocab_path):
raise ValueError("Invalid args: Must provide all of [current_vocab_path, "
"current_vocab_size, prev_ckpt, prev_vocab_path}.")
if _is_variable(var):
var = [var]
elif isinstance(var, list) and all(_is_variable(v) for v in var):
var = var
elif isinstance(var, variables_lib.Partit

iLoop2/ResInsight | ThirdParty/Ert/devel/python/python/ert_gui/shell/shell_tools.py | Python | gpl-3.0 | 2,275 | 0.004396
import fnmatch
import os
import shlex
def autoCompleteList(text, items):
if not text:
completions = items
else:
completions = [item for item in items if item.lower().startswith(text.lower())]
return completions
def containsAny(string, chars):
return True in [char in string for char in chars]
def findRightMostSeparator(text, separators):
max_pos = 0
for separator in separators:
pos = text.rfind(separator)
max_pos = max(pos, max_pos)
return max_pos
def autoCompleteListWithSeparator(text, items, separators=":,@"):
if containsAny(text, separators):
auto_complete_list = autoCompleteList(text, items)
separator_pos = findRightMostSeparator(text, separators)
auto_complete_list = [item[separator_pos + 1:] for item in auto_complete_list]
else:
auto_complete_list = autoCompleteList(text, items)
return auto_complete_list
def createParameterizedHelpFunction(parameters, help_message):
def helpFunction(self):
return parameters, help_message
return helpFunction
def pathify(head, tail):
path = os.path.join(head, tail)
if os.path.isdir(path):
return "%s/" % tail
return tail
def getPossibleFilenameCompletions(text, separators="-"):
head, tail = os.path.split(text.strip())
if head == "": # no head
head = "."
files = os.listdir(head)
separator_pos = 0
if containsAny(tail, separators):
separator_pos = findRightMostSeparator(tail, separators) + 1
return [pathify(head, f)[separator_pos:] for f in files if f.startswith(tail)]
def extractFullArgument(line, endidx):
newstart = line.rfind(" ", 0, endidx)
return line[newstart:endidx].strip()
def matchItems(line, items):
patterns = shlex.split(line)
result_items = set()
for pattern in patterns:
pattern_matches = set()
for item in items:
if fnmatch.fnmatch(item.lower(), pattern.lower()): # case-insensitive matching
pattern_matches.add(item)
if len(pattern_matches) == 0:
print("Error: Name/Pattern '%s' does not match anything." % pattern)
else:
result_items = result_items | pattern_matches
return result_items

froyobin/ironic | ironic/conductor/utils.py | Python | apache-2.0 | 6,324 | 0.000316
# coding=utf-8
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.utils import excutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.conductor import task_manager
from ironic.openstack.common import log
LOG = log.getLogger(__name__)
@task_manager.require_exclusive_lock
def node_set_boot_device(task, device, persistent=False):
"""Set the boot device for a node.
:param task: a TaskManager instance.
:param device: Boot device. Values are vendor-specific.
:param persistent: Whether to set next-boot, or make the change
permanent. Default: False.
:raises: InvalidParameterValue if the validation of the
ManagementInterface fails.
"""
if getattr(task.driver, 'management', None):
task.driver.management.validate(task)
task.driver.management.set_boot_device(task,
device=device,
persistent=persistent)
@task_manager.require_exclusive_lock
def node_power_action(task, new_state):
"""Change power state or reset for a node.
Perform the requested power action if the transition is required.
:param task: a TaskManager instance containing the node to act on.
:param new_state: Any power state from ironic.common.states. If the
state is 'REBOOT' then a reboot will be attempted, otherwise
the node power state is directly set to 'state'.
:raises: InvalidParameterValue when the wrong state is specified
or the wrong driver info is specified.
:raises: other exceptions by the node's power driver if something
wrong occurred during the power action.
"""
node = task.node
target_state = states.POWER_ON if new_state == states.REBOOT else new_state
if new_state != states.REBOOT:
try:
curr_state = task.driver.power.get_power_state(task)
except Exception as e:
with excutils.save_and_reraise_exception():
node['last_error'] = _(
"Failed to change power state to '%(target)s'. "
"Error: %(error)s") % {'target': new_state, 'error': e}
node['target_power_state'] = states.NOSTATE
node.save()
if curr_state == new_state:
# Neither the ironic service nor the hardware has erred. The
# node is, for some reason, already in the requested state,
# though we don't know why. eg, perhaps the user previously
# requested the node POWER_ON, the network delayed those IPMI
# packets, and they are trying again -- but the node finally
# responds to the first request, and so the second request
# gets to this check and stops.
# This isn't an error, so we'll clear last_error field
# (from previous operation), log a warning, and return.
node['last_error'] = None
node['target_power_state'] = states.NOSTATE
node.save()
LOG.warn(_LW("Not going to change_node_power_state because "
"current state = requested state = '%(state)s'."),
{'state': curr_state})
return
if curr_state == states.ERROR:
# be optimistic and continue action
LOG.warn(_LW("Driver returns ERROR power state for node %s."),
node.uuid)
# Set the target_power_state and clear any last_error, if we're
# starting a new operation. This will expose to other processes
# and clients that work is in progress.
if node['target_power_state'] != target_state:
node['target_power_state'] = target_state
node['last_error'] = None
node.save()
# take power action
try:
if new_state != states.REBOOT:
task.driver.power.set_power_state(task, new_state)
else:
task.driver.power.reboot(task)
except Exception as e:
with excutils.save_and_reraise_exception():
node['last_error'] = _(
"Failed to change power state to '%(target)s'. "
"Error: %(error)s") % {'target': target_state, 'error': e}
else:
# success!
node['power_state'] = target_state
LOG.info(_LI('Successfully set node %(node)s power state to '
'%(state)s.'),
{'node': node.uuid, 'state': target_state})
finally:
node['target_power_state'] = states.NOSTATE
node.save()
@task_manager.require_exclusive_lock
def cleanup_after_timeout(task):
"""Cleanup deploy task after timeout.
:param task: a TaskManager instance.
"""
node = task.node
node.provision_state = states.DEPLOYFAIL
node.target_provision_state = states.NOSTATE
msg = (_('Timeout reached while waiting for callback for node %s')
% node.uuid)
node.last_error = msg
LOG.error(msg)
node.save()
error_msg = _('Cleanup failed for node %(node)s after deploy timeout: '
' %(error)s')
try:
task.driver.deploy.clean_up(task)
except exception.IronicException as e:
msg = error_msg % {'node': node.uuid, 'error': e}
LOG.error(msg)
node.last_error = msg
node.save()
except Exception as e:
msg = error_msg % {'node': node.uuid, 'error': e}
LOG.error(msg)
node.last_error = _('Deploy timed out, but an unhandled exception was '
'encountered while aborting. More info may be '
'found in the log file.')
node.save()

elliottd/GroundedTranslation | data_generator.py | Python | bsd-3-clause | 36,237 | 0.003229
"""
Data processing for VisualWordLSTM happens here; this creates a class that
acts as a data generator/feed for model training.
"""
from __future__ import print_function
from collections import defaultdict
import cPickle
import h5py
import logging
import numpy as np
np.set_printoptions(threshold='nan')
import os
import sys
import random
# Set up logger
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger(__name__)
# Strings for beginning, end of sentence, padding
# These get specified indices in word2index
BOS = "<S>" # index 1
EOS = "<E>" # index 2
PAD = "<P>" # index 0
# Dimensionality of image feature vector
IMG_FEATS = 4096
class VisualWordDataGenerator(object):
"""
Creates input arrays for VisualWordLSTM and deals with input dataset in
general. Input dataset must now be in HDF5 format.
Important methods:
random_generator() yields random batches from the training data split
fixed_generator() yields batches in the order it is stored on disk
generation_generator() yields batches with empty word sequences
"""
def __init__(self, args_dict, input_dataset=None):
"""
Initialise data generator: this involves loading the dataset and
generating vocabulary sizes.
If dataset is not given, use flickr8k.h5.
"""
logger.info("Initialising data generator")
self.args = args_dict
# Number of descriptions to return per image.
self.num_sents = args_dict.num_sents # default 5 (for flickr8k)
self.unk = args_dict.unk # default 5
self.run_string = args_dict.run_string
# self.datasets holds 1+ datasets, where additional datasets will
# be used for supertraining the model
self.datasets = []
self.openmode = "r+" if self.args.h5_writeable else "r"
if not input_dataset:
logger.warn("No dataset given, using flickr8k")
self.dataset = h5py.File("flickr8k/dataset.h5", self.openmode)
else:
self.dataset = h5py.File("%s/dataset.h5" % input_dataset, self.openmode)
logger.info("Train/val dataset: %s", input_dataset)
if args_dict.supertrain_datasets is not None:
for path in args_dict.supertrain_datasets:
logger.info("Adding supertrain datasets: %s", path)
self.datasets.append(h5py.File("%s/dataset.h5" % path, "r"))
self.datasets.append(self.dataset)
# hsn doesn't have to be a class variable.
# what happens if self.hsn is false but hsn_size is not zero?
self.use_source = False
if self.args.source_vectors is not None:
self.source_dataset = h5py.File("%s/dataset.h5"
% self.args.source_vectors,
"r")
self.source_encoder = args_dict.source_enc
self.source_type = args_dict.source_type
h5_dataset_keys = self.source_dataset['train']['000000'].keys()
self.h5_dataset_str = next((z for z in h5_dataset_keys if
z.startswith("%s-hidden_feats-%s" % (self.source_type,
self.source_encoder))), None)
#self.h5_dataset_str = "%s-hidden_feats-%s-%d" % (self.source_type,
# self.source_encoder,
# self.source_dim)
assert self.h5_dataset_str is not None
self.hsn_size = len(self.source_dataset['train']['000000']
[self.h5_dataset_str][0])
self.source_dim = self.hsn_size
self.num_hsn = len(self.source_dataset['train']['000000']
[self.h5_dataset_str])
self.use_source = True
logger.info("Reading %d source vectors from %s with %d dims",
self.num_hsn, self.h5_dataset_str, self.hsn_size)
self.use_image = False if self.args.no_image else True
# These variables are filled by extract_vocabulary
self.word2index = dict()
self.index2word = dict()
# This is set to include BOS & EOS padding
self.max_seq_len = 0
# Can check after extract_vocabulary what the actual max seq length is
# (including padding)
self.actual_max_seq_len = 0
# This counts number of descriptions per split
# Ignores test for now (change in extract_vocabulary)
self.split_sizes = {'train': 0, 'val': 0, 'test': 0}
# These are used to speed up the validation process
self._cached_val_input = None
self._cached_val_targets = None
self._cached_references = None
if self.args.use_predicted_tokens and self.args.no_image:
logger.info("Input predicted descriptions")
self.ds_type = 'predicted_description'
else:
logger.info("Input gold descriptions")
self.ds_type = 'descriptions'
def random_generator(self, split):
"""
Generator that produces input/output tuples for a given dataset and split.
Typically used to produce random batches for training a model.
The data is yielded by first shuffling the description indices and
then shuffling the image instances within the split.
"""
# For randomization, we use a independent Random instance.
random_instance = random.Random()
# Make sure that the desired split is actually in the dataset.
assert split in self.dataset
# Get a list of the keys. We will use this list to shuffle and iterate over.
identifiers = self.dataset[split].keys()
# Get the number of descriptions.
first_id = identifiers[0]
num_descriptions = len(self.dataset[split][first_id]['descriptions'])
description_indices = list(range(num_descriptions))
arrays = self.get_batch_arrays(self.args.batch_size)
batch_indices = []
j = 0
# Shuffle the description indices.
random_instance.shuffle(description_indices)
while j <= len(identifiers):
# And loop over them.
i = 0
for desc_idx in description_indices:
# For each iteration over the description indices, also shuffle the
# identifiers.
random_instance.shuffle(identifiers)
# And loop over them.
for ident in identifiers:
if i == self.args.batch_size:
targets = self.get_target_descriptions(arrays[0])
yield_data = self.create_yield_dict(arrays, targets,
batch_indices)
#logger.debug(yield_data['img'][0,0,:])
#logger.debug(' '.join([self.index2word[np.argmax(x)] for x in yield_data['text'][0,:,:]]))
#logger.debug(' '.join([self.index2word[np.argmax(x)] for x in yield_data['output'][0,:,:]]))
yield yield_data
i = 0
arrays = self.get_batch_arrays(self.args.batch_size)
batch_indices = []
description = self.dataset[split][ident]['descriptions'][desc_idx]
img_feats = self.get_image_features(self.dataset, split, ident)
try:
description_array = self.format_sequence(description.split(),
train=True)
arrays[0][i] = description_array
if self.use_image and self.use_source:
if self.args.peeking_source:
arrays[1][i, :] = \
self.get_source_features(split,
ident)
else:
arrays[1][i, 0] = \
self.get_source_features(split,

DT42/BerryNet | berrynet/service/movidius_service.py | Python | gpl-3.0 | 5,991 | 0.000501
# Copyright 2017 DT42
#
# This file is part of BerryNet.
#
# BerryNet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BerryNet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BerryNet. If not, see <http://www.gnu.org/licenses/>.
"""Engine service is a bridge between incoming data and inference engine.
"""
import argparse
import logging
from datetime import datetime
from berrynet import logger
from berrynet.comm import payload
from berrynet.dlmodelmgr import DLModelManager
from berrynet.engine.movidius_engine import MovidiusEngine
from berrynet.engine.movidius_engine import MovidiusMobileNetSSDEngine
from berrynet.service import EngineService
from berrynet.utils import draw_bb
from berrynet.utils import generate_class_color
class MovidiusClassificationService(EngineService):
def __init__(self, service_name, engine, comm_config):
super(MovidiusClassificationService, self).__init__(service_name,
engine,
comm_config)
def result_hook(self, generalized_result):
logger.debug('result_hook, annotations: {}'.format(generalized_result['annotations']))
self.comm.send('berrynet/engine/mvclassification/result',
payload.serialize_payload(generalized_result))
class MovidiusMobileNetSSDService(EngineService):
def __init__(self, service_name, engine, comm_config, draw=False):
super(MovidiusMobileNetSSDService, self).__init__(service_name,
engine,
comm_config)
self.draw = draw
def inference(self, pl):
duration = lambda t: (datetime.now() - t).microseconds / 1000
t = datetime.now()
logger.debug('payload size: {}'.format(len(pl)))
logger.debug('payload type: {}'.format(type(pl)))
jpg_json = payload.deserialize_payload(pl.decode('utf-8'))
jpg_bytes = payload.destringify_jpg(jpg_json['bytes'])
logger.debug('destringify_jpg: {} ms'.format(duration(t)))
t = datetime.now()
bgr_array = payload.jpg2bgr(jpg_bytes)
logger.debug('jpg2bgr: {} ms'.format(duration(t)))
t = datetime.now()
image_data = self.engine.process_input(bgr_array)
output = self.engine.inference(image_data)
model_outputs = self.engine.process_output(output)
logger.debug('Result: {}'.format(model_outputs))
logger.debug('Detection takes {} ms'.format(duration(t)))
classes = self.engine.classes
labels = self.engine.labels
logger.debug('draw = {}'.format(self.draw))
if self.draw is False:
self.result_hook(self.generalize_result(jpg_json, model_outputs))
else:
self.result_hook(
draw_bb(bgr_array,
self.generalize_result(jpg_json, model_outputs),
generate_class_color(class_num=classes),
labels))
def result_hook(self, generalized_result):
logger.debug('result_hook, annotations: {}'.format(generalized_result['annotations']))
self.comm.send('berrynet/engine/mvmobilenetssd/result',
payload.serialize_payload(generalized_result))
def parse_args():
ap = argparse.ArgumentParser()
ap.add_argument('--model',
help='Model file path')
ap.add_argument('--label',
help='Label file path')
ap.add_argument('--model_package',
default='',
help='Model package name')
ap.add_argument('--service_name', required=True,
help='Valid value: Classification, MobileNetSSD')
ap.add_argument('--num_top_predictions', default=5,
help='Display this many predictions')
ap.add_argument('--draw',
action='store_true',
help='Draw bounding boxes on image in result')
ap.add_argument('--debug',
action='store_true',
help='Debug mode toggle')
return vars(ap.parse_args())
def main():
# Test Movidius engine
args = parse_args()
if args['debug']:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
if args['model_package'] != '':
dlmm = DLModelManager()
meta = dlmm.get_model_meta(args['model_package'])
args['model'] = meta['model']
args['label'] = meta['label']
logger.debug('model filepath: ' + args['model'])
logger.debug('label filepath: ' + args['label'])
comm_config = {
'subscribe': {},
'broker': {
'address': 'localhost',
'port': 1883
}
}
if args['service_name'] == 'Classification':
mvng = MovidiusEngine(args['model'], args['label'])
service_functor = MovidiusClassificationService
elif args['service_name'] == 'MobileNetSSD':
mvng = MovidiusMobileNetSSDEngine(args['model'], args['label'])
service_functor = MovidiusMobileNetSSDService
else:
logger.critical('Legal service names are Classification, MobileNetSSD')
engine_service = service_functor(args['service_name'],
mvng,
comm_config,
draw=args['draw'])
engine_service.run(args)
if __name__ == '__main__':
main()

gdietz/OpenMEE | common_wizard_pages/histogram_dataselect_page.py | Python | gpl-3.0 | 1,558 | 0.009628
import sys
#from functools import partial
from PyQt4 import QtCore, QtGui
from PyQt4.Qt import *
from ome_globals import *
import ui_histogram_dataselect_page
class HistogramDataSelectPage(QWizardPage, ui_histogram_dataselect_page.Ui_WizardPage):
def __init__(self, model, prev_hist_var=None, parent=None):
super(HistogramDataSelectPage, self).__init__(parent)
self.setupUi(self)
self.model = model
self.prev_hist_var = prev_hist_var
self._populate_combo_box()
def _populate_combo_box(self):
''' populates combo box with numerical variables '''
variables= self.model.get_variables()
variables.sort(key=lambda var: var.get_label())
default_index = 0
for var in variables:
# store column of var in user data
col = self.model.get_column_assigned_to_variable(var)
self.comboBox.addItem(var.get_label(), userData=QVariant(col))
index_of_item = self.comboBox.count()-1
if self.prev_hist_var == var:
default_index = index_of_item
# set default selection if given
self.comboBox.setCurrentIndex(default_index)
self.completeChanged.emit()
def isComplete(self):
return True
def get_selected_var(self):
idx = self.comboBox.currentIndex()
data = self.comboBox.itemData(idx)
col = data.toInt()[0]
return self.model.get_variable_assigned_to_column(col)

qbeenslee/Nepenthes-Server | utils/jsonutil.py | Python | gpl-3.0 | 1,193 | 0.00172
# coding:utf-8
'''
Author : qbeenslee
Created : 2015/1/20
'''
import json
import datetime
JSON_PARSE_ERROR = u"{'success':0,'data':[],'message':'Json 解析错误'}"
class NeJSONEncoder(json.JSONEncoder):
""" Wrapper
|
class to try calling an object's tojson() method. This allows us to JSONify objects coming from the ORM.
Also handles dates and datetimes. """
def default(self, obj):
try:
if hasattr(obj, '__json__'):
return obj.__json__()
elif isinstance(obj, datetime.datetime):
return obj.isoformat()
elif isinstance(obj, datetime.date):
return obj.isoformat()
except AttributeError:
return json.JSONEncoder.default(self, obj)
def obj2json(obj):
'''
Convert a custom object to JSON.
:param obj:
:return:
'''
if obj is not None:
try:
return json.dumps(obj, cls=NeJSONEncoder)
except TypeError, ins:
return JSON_PARSE_ERROR
else:
return JSON_PARSE_ERROR
def result_set_parse():
'''
Parse a result set.
:return:
'''
pass

minhphung171093/GreenERP | openerp/addons/web_editor/models/ir_translation.py | Python | gpl-3.0 | 578 | 0.00346
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import models, api
class ir_translation(models.Model):
_inherit = 'ir.translation'
@api.model
def _get_terms_mapping(self, field, records):
if self._context.get('edit_translations'):
self.insert_missing(field, records)
return lambda data: '<span data-oe-translation-id="%(id)s" data-oe-translation-state="%(state)s">%(value)s</span>' % data
return super(ir_translation, self)._get_terms_mapping(field, records)

amiraliakbari/sharif-mabani-python | by-session/ta-921/j8/tuple1.py | Python | mit | 263 | 0.011407
a = (1, 2)
b = (1, 3, 5, 7, 8, 11)
print a[0]
#b[3] = 3 # error!
x1 = a[0]
y1 = a[1]
x1, y1 = a
b1, b2, b3, b4, b5, b6 = b
print b4
#b1, b2 = b # error!
a = 1, 2, 3
print a
def f():
return 1, 3
a = f()
x, y = f()
x = f()[0]

ConPaaS-team/conpaas | cps-tools/src/cps_tools/taskfarm.py | Python | bsd-3-clause | 10,346 | 0.002513
# import argcomplete
# import httplib
# import logging
# import simplejson
# import sys
# import urllib2
# from time import strftime, localtime
# from conpaas.core import https
# from .base import BaseClient
# from .config import config
# from .service import ServiceCmd
# MODES = ['DEMO', 'REAL']
# TASKFARM_MNG_PORT = 8475
# def http_jsonrpc_post(hostname, uri, method, port=TASKFARM_MNG_PORT, params=None):
# """Perform a plain HTTP JSON RPC post (
|
for task farming)"""
# if params is None:
# params = {}
# url = "http://%s:%s%s" % (hostname, port, uri)
# data = simplejson.dumps({'method': method,
# 'params': params,
# 'jsonrpc': '2.0',
# 'id': 1,
# })
# req = urllib2.Request(url, data, {'Content-Type': 'application/json'})
# res = urllib2.urlopen(req).read()
# return res
# def http_file_upload_post(host, uri, port=TASKFARM_MNG_PORT, params=None, files=None):
# """Perform a plain HTTP file upload post (for task farming)"""
# if params is None:
# params = {}
# if files is None:
# files = []
# content_type, body = https.client._encode_multipart_formdata(params, files)
# h = httplib.HTTP(host, port)
# h.putrequest('POST', uri)
# h.putheader('content-type', content_type)
# h.putheader('content-length', str(len(body)))
# h.endheaders()
# h.send(body)
# _errcode, _errmsg, _headers = h.getreply()
# return h.file.read()
# class TaskFarmCmd(ServiceCmd):
# def __init__(self, parser, client):
# self.initial_expected_state = 'RUNNING'
# ServiceCmd.__init__(self, parser, client, "taskfarm", ['node'],
# "TaskFarm service sub-commands help")
# self._add_get_mode()
# self._add_set_mode()
# self._add_upload()
# self._add_select_schedule()
# def call_manager(self, app_id, service_id, method, data=None):
# """TaskFarm peculiarities:
# 1) it works via plain HTTP
# 2) it uses port 8475
# 3) the 'shutdown' method is called 'terminate_workers'
# 4) it accepts only POST requests
# 5) it does not have to be started or stopped
# """
# if data is None:
# data = {}
# if method == "shutdown":
# method = "terminate_workers"
# service = self.client.service_dict(app_id, service_id)
# res = http_jsonrpc_post(service['application']['manager'], '/', method, params=data)
# try:
# data = simplejson.loads(res[1])
# except ValueError:
# data = simplejson.loads(res)
# return data.get('result', data)
# def _add_start(self):
# """
# TaskFarm does not have to be started.
# Overrides ServiceCmd._add_start().
# """
# pass
# def _add_stop(self):
# """
# TaskFarm does not have to be stopped.
# Overrides ServiceCmd._add_stop()
# """
# pass
# def _print_res(self, res):
# resres = res['result']
# if 'error' in resres:
# self.client.error("%s" % resres['error'])
# elif 'message' in resres:
# print "%s" % resres['message']
# else:
# print "%s" % res
# # ======= get_mode
# def _add_get_mode(self):
# subparser = self.add_parser('get_mode', help="get TaskFarm mode")
# subparser.set_defaults(run_cmd=self.get_mode, parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# def get_mode(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
# mode = self.get_string_mode(app_id, service_id)
# print "%s" % mode
# def get_string_mode(self, app_id, service_id):
# res = self.call_manager(app_id, service_id, "get_service_info")
# return res['mode']
# # ======= set_mode
# def _add_set_mode(self):
# subparser = self.add_parser('set_mode', help="set TaskFarm mode")
# subparser.set_defaults(run_cmd=self.set_mode, parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# subparser.add_argument('mode', choices=MODES, help="mode")
# def set_mode(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
# old_mode = self.get_string_mode(app_id, service_id)
# if old_mode != 'NA':
# res = {'result': {'error': 'ERROR: mode is already set to %s' % old_mode}}
# else:
# res = self.call_manager(app_id, service_id, "set_service_mode", [args.mode])
# self._print_res(res)
# # ========== upload bag of tasks
# def _add_upload(self):
# subparser = self.add_parser('upload_bot', help="upload bag of tasks")
# subparser.set_defaults(run_cmd=self.upload_bag_of_tasks,
# parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# subparser.add_argument('filename',
# help="file containing the bag of tasks")
# subparser.add_argument('location',
# help="XtreemFS location, e.g., 192.168.122.1/uc3")
# def upload_bag_of_tasks(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
# mode = self.get_string_mode(app_id, service_id)
# if mode == 'NA':
#             res = {'result': {'error': 'ERROR: to upload a bag of tasks, first specify run mode.'}}
# else:
# service = self.client.service_dict(app_id, service_id)
# params = {'uriLocation': args.location,
# 'method': 'start_sampling'}
# filecontents = open(args.filename).read()
# res = http_file_upload_post(service['application']['manager'], '/', params=params,
# files=[('botFile', args.filename, filecontents)])
# res = simplejson.loads(res)
# self._print_res(res)
# # ========= select_schedule
# def _add_select_schedule(self):
#         subparser = self.add_parser('select_schedule', help="select a schedule")
# subparser.set_defaults(run_cmd=self.select_schedule, parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# subparser.add_argument('schedule', type=int, help="schedule identifier")
#     def select_schedule(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
#         mode = self.get_string_mode(app_id, service_id)
# if mode == 'NA':
# return {'result': {'error': 'ERROR: to select a schedule, first specify run mode DEMO or REAL, then upload a bag of tasks '}}
# # check schedule availability
# res = self.call_manager(app_id, service_id, "get_service_info")
# if res['noCompletedTasks'] == 0:
# return {'message': "No schedule available yet: try again later..."}
# if res['state'] != 'RUNNING':
# return {'message': "Busy %s: try again later..." % res['phase']}
# sres = self.call_manager(app_id, service_id, "get_sampling_results")
# sdata = simplejson.
|
ekutlu/raspberry-test
|
test.py
|
Python
|
mit
| 40
| 0.025
|
__au
|
thor__ = 'emre'
|
print "hello world"
|
TunedMystic/taskr
|
webapp/urls.py
|
Python
|
gpl-2.0
| 434
| 0.013825
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.co
|
nf import settings
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'webapp.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r"", include("meeting.urls", namespace = "meeting")),
url(r'^admin/', include(admin.site.urls)),
)
handler404 = "misc.views.han
|
dler404"
handler500 = "misc.views.handler500"
|
ameyavilankar/social-network-recommendation
|
preprocessing/random_benchmark.py
|
Python
|
bsd-2-clause
| 1,033
| 0.006776
|
#! /usr/bin/env python
import random
import utilities
def read_nodes_from_training(file_name):
"""
Returns a list of all the nodes in the graph
"""
node_set = set()
for nodes in utilities.edges_generator(file_name):
for node in nodes
|
:
node_set.add(node)
return list(node_set)
def random_benchmark(train_file, test_file, submission_file, num_predictions):
"""
Runs the random benchmark.
"""
nodes = read_nodes_from_training(train_file)
test_nodes = utilities.read_nodes_list(test_file)
test_predictions = [[random.choice(nodes) for x in range(num_predictions)]
for node i
|
n test_nodes]
utilities.write_submission_file(submission_file,
test_nodes,
test_predictions)
if __name__=="__main__":
random_benchmark("../Data/train.csv",
"../Data/test.csv",
"../Submissions/random_benchmark.csv",
10)
|
ld4apps/lda-examples
|
todo-flask/src/app.py
|
Python
|
apache-2.0
| 1,842
| 0.008143
|
import lda
from flask import Flask, abort, request
app = Flask(__name__, static_url_path='')
@app.route('/td/items', methods=['GET'])
def items():
document, status, headers = lda.get_virtual_container(request.environ, 'ce_item_of')
if status != 200:
abort(status)
document, headers = lda.convert_to_requested_format(document, headers, request.environ)
return document, status, headers
@app.route('/td/items', methods=['POST'])
def create_item():
if request.json.get('rdf_type') != "http://example.org/todo#Item":
abort(400)
#TODO: add more validation
document, status, headers = lda.create_document(request.environ, request.json, 'ce_item_of')
if status != 201:
abort(status)
document, headers = lda.convert_to_requested_format(document, headers, request.environ)
return document, status, headers
@app.route('/td/items/<i>', methods=['GET'])
def read_item(i):
document, status, headers = lda.ge
|
t_document(request.environ)
if status != 200:
abort(status)
document, headers = lda.convert_to_reque
|
sted_format(document, headers, request.environ)
return document, status, headers
@app.route('/td/items/<i>', methods=['DELETE'])
def delete_item(i):
document, status, headers = lda.delete_document(request.environ)
return "", status, headers
@app.route('/td/items/<i>', methods=['PATCH'])
def change_item(i):
#TODO: add validation
document, status, headers = lda.patch_document(request.environ, request.json)
if status != 200:
abort(status)
document, headers = lda.convert_to_requested_format(document, headers, request.environ)
return document, status, headers
@app.route('/td', methods=['DELETE'])
def delete_all():
document, status, headers = lda.delete_document(request.environ)
return "", status, headers
|
vjFaLk/frappe
|
frappe/core/doctype/defaultvalue/defaultvalue.py
|
Python
|
mit
| 607
| 0.021417
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class DefaultValue(Document):
pass
def on_doctype_update():
"""Create indexes for `tabDefaultValue` on `(parent, defkey)`"""
frappe.db.commit()
frappe.db.add_index(doctype='DefaultValue',
|
fields=['parent', 'defkey'],
index_name='defaultvalue_parent_defkey_index')
frappe.db.add_index(doctype='DefaultValue',
fields=['parent', 'p
|
arenttype'],
index_name='defaultvalue_parent_parenttype_index')
|
guardicore/monkey
|
monkey/tests/unit_tests/infection_monkey/telemetry/attack/test_t1197_telem.py
|
Python
|
gpl-3.0
| 917
| 0.001091
|
import json
import pytest
from common.utils.attack_utils import ScanStatus
from infection_monkey.model import VictimHost
from infection_monkey.telemetry.attack.t1197_telem import T1197Telem
DOMAIN_NAME = "domai
|
n-name"
IP = "127.0.0.1"
MACHINE = VictimHost(IP, DOMAIN_NAME)
STATUS = ScanStatus.USED
USAGE_STR = "[Usage info]"
@pytest.fixture
def T1197_telem_test_instance():
return T1197Telem(STATUS, MACHINE, USAGE_STR)
def test_T1197_send(T1197_telem_test_instance, spy_send_telemetry):
T1197_telem_test_instance.send()
expected_data = {
"status": STATUS.value,
"technique": "T1197",
|
"machine": {"domain_name": DOMAIN_NAME, "ip_addr": IP},
"usage": USAGE_STR,
}
expected_data = json.dumps(expected_data, cls=T1197_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data
assert spy_send_telemetry.telem_category == "attack"
|
ghilbut/djign
|
djign/__init__.py
|
Python
|
mit
| 128
| 0.023438
|
# -*- coding:utf-8 -*-
from django.shortcuts import render
def
|
index(request):
return re
|
nder(request, 'djign/index.html')
|
shiroyuki/vireo
|
vireo/drivers/amqp/consumer.py
|
Python
|
mit
| 24,473
| 0.010256
|
import json
import math
import sys
import threading
import traceback
import time
import uuid
from pika import BasicProperties
from pika.exceptions import ConnectionClosed, ChannelClosed, IncompatibleProtocolError
from ...helper import log, debug_mode_enabled
from ...model import Message, RemoteSignal
from .exception import NoConnectionError
from .helper import active_connection, fill_in_the_blank, SHARED_DIRECT_EXCHANGE_NAME, SHARED_TOPIC_EXCHANGE_NAME, SHARED_SIGNAL_CONNECTION_LOSS
IMMEDIATE_RETRY_LIMIT = 10
MAX_RETRY_COUNT = 20
MAX_RETRY_DELAY = 30
PING_MESSAGE = 'ping'
class Consumer(threading.Thread):
""" Message consumer
This is used to handle messages on one particular route/queue.
:param str url: the URL to the server
:param str route: the route to observe
:param callable callback: the callback function / callable object
:param list shared_stream: the internal message queue for thread synchronization
:param bool resumable: the flag to indicate whether the consumption is resumable
    :param bool distributed: the flag to indicate whether the messages are distributed evenly across all consumers on the same route
:param dict queue_options: additional queue options
:param dict exchange_options: additional exchange options
:param bool unlimited_retries: the flag to disable limited retry count.
:param callable on_connect: a callback function w
|
hen the message consumption begins.
:param callable on_disconnect: a callback function when the message consumption is interrupted due to unexpected disconnection.
:param callable on_error: a callback function when the message con
|
sumption is interrupted due to exception raised from the main callback function.
:param str controller_id: the associated controller ID
:param dict exchange_options: the additional options for exchange
:param bool auto_acknowledge: the flag to determine whether the consumer should auto-acknowledge any delivery (default: ``False``)
:param bool send_sigterm_on_disconnect: the flag to force the consumer to terminate the process cleanly on disconnection (default: ``True``)
:param float delay_per_message: the delay per message (any negative numbers are regarded as zero, zero or any equivalent value is regarded as "no delay")
:param int max_retries: the maximum total retries the consumer can have
:param int immediate_retry_limit: the maximum immediate retries the consumer can have before it uses the exponential delay
Here is an example for ``on_connect``.
.. code-block:: Python
def on_connect(consumer = None):
...
Here is an example for ``on_disconnect``.
.. code-block:: Python
def on_disconnect(consumer = None):
...
Here is an example for ``on_error``.
.. code-block:: Python
def on_error(exception, consumer = None):
...
"""
def __init__(self, url, route, callback, shared_stream, resumable, distributed, queue_options,
simple_handling, unlimited_retries = False, on_connect = None, on_disconnect = None,
on_error = None, controller_id = None, exchange_options = None, auto_acknowledge = False,
send_sigterm_on_disconnect = True, delay_per_message = 0, max_retries = MAX_RETRY_COUNT,
immediate_retry_limit = IMMEDIATE_RETRY_LIMIT, max_retry_delay = MAX_RETRY_DELAY):
super().__init__(daemon = True)
queue_options = queue_options if queue_options and isinstance(queue_options, dict) else {}
exchange_options = exchange_options if exchange_options and isinstance(exchange_options, dict) else {}
self.url = url
self.route = route
self.callback = callback
self.resumable = resumable
self.distributed = distributed
self.queue_options = queue_options
self.exchange_options = exchange_options
self.simple_handling = simple_handling
self._retry_count = 0
self._shared_stream = shared_stream
self._channel = None
self._queue_name = None
self._paused = False
self._stopped = False
self._controller_id = controller_id
self._consumer_id = str(uuid.uuid4())
self._max_retries = max_retries
self._max_retry_delay = max_retry_delay
self._immediate_retry_limit = immediate_retry_limit if immediate_retry_limit < max_retries else max_retries
self._send_sigterm_on_disconnect = send_sigterm_on_disconnect
self._delay_per_message = (
delay_per_message
if (
delay_per_message
and isinstance(delay_per_message, (int, float))
and delay_per_message > 0
)
else 0
)
self._auto_acknowledge = auto_acknowledge
self._unlimited_retries = unlimited_retries
self._on_connect = on_connect
self._on_disconnect = on_disconnect
self._on_error = on_error
self._recovery_queue_name = 'RECOVERY.{}'.format(self.route)
        assert not self._on_disconnect or callable(self._on_disconnect), 'The disconnection handler must be callable.'
@staticmethod
def can_handle_route(routing_key):
""" Check if the consumer can handle the given routing key.
.. note:: the default implementation will handle all routes.
:param str routing_key: the routing key
"""
return True
@property
def queue_name(self):
return self._queue_name
@property
def stopped(self):
return self._stopped
def run(self):
log('debug', '{}: Active'.format(self._debug_route_name()))
while not self._stopped:
try:
self._listen()
except NoConnectionError as e:
self._retry_count += 1
remaining_retries = self._max_retries - self._retry_count
can_immediate_retry = self._retry_count <= self._immediate_retry_limit
wait_time = 0 if can_immediate_retry else math.pow(2, self._retry_count - self._immediate_retry_limit - 1)
# Notify the unexpected disconnection
log('warning', '{}: Unexpected disconnection detected due to {} (retry #{})'.format(self._debug_route_name(), e, self._retry_count))
# Attempt to retry and skip the rest of error handling routine.
if remaining_retries >= 0:
log(
'info',
'{}: Will reconnect to the queue in {}s ({} attempt(s) left)'.format(
self._debug_route_name(),
wait_time,
remaining_retries,
)
)
# Give a pause between each retry if the code already retries immediate too often.
if wait_time:
time.sleep(1)
log('warning', '{}: Reconnecting...'.format(self._debug_route_name()))
continue
elif self._on_disconnect:
log('warning', '{}: {} the maximum retries (retry #{}/{})'.format(self._debug_route_name(), 'Reached' if self._retry_count == self._max_retries else 'Exceeded', self._retry_count, self._max_retries))
self._async_invoke_callback(self._on_disconnect)
log('warning', '{}: Passed the error information occurred to the error handler'.format(self._debug_route_name()))
if self._unlimited_retries:
log('info', '{}: Will re-listen to the queue in 5 second (unlimited retries)'.forma
|
titasakgm/brc-stock
|
openerp/addons/ineco_stock/product.py
|
Python
|
agpl-3.0
| 5,430
| 0.009024
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
import re
#from _common import rounding
from openerp import tools
from openerp.osv import osv, fields
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class product_uom_categ(osv.osv):
_inherit = 'product.uom.categ'
_columns = {
'active': fields.boolean('Active', help="By unchecking the active field you can disable a unit of measure without deleting it."),
}
_defaults = {
'active': True,
}
class product_uom(osv.osv):
_inherit = 'product.uom'
_description = 'Product Unit of Measure'
def name_get(self, cr, uid, ids, context=None):
if isinstance(ids, (list, tuple)) and not len(ids):
return []
if isinstance(ids, (long, int)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name','category_id','factor','uom_type'], context=context)
res = []
for record in reads:
name = record['name']
if record['category_id']:
uom_categ = record['category_id']
#print uom_categ
if record['uom_type'] == 'reference':
name = name
elif record['uom_type'] == 'bigger':
name = ('%.0f' % (1/record['factor'])) + ' ' +uom_categ[1] +' / '+name
|
else:
name = ('%.0f' % (record['factor'])) + ' ' +name+' / '+uom_categ[1]
res.append((record['id'], name))
return res
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
i
|
f isinstance(ids, (list, tuple)) and not len(ids):
return []
if isinstance(ids, (long, int)):
ids = [ids]
res = {}
for id in ids:
data = self.browse(cr, uid, id)
if data.uom_type == 'reference':
res[id] = 1
elif data.uom_type == 'bigger':
res[id] = int ('%.0f' % (1/data.factor))
else:
res[id] = int('%.0f' % (data.factor))
return res
_columns = {
'factor_name': fields.function(_name_get_fnc, type="integer", string='Factor'),
}
class product_category(osv.osv):
def _get_product_stock(self, cr, uid, ids, name, arg, context=None):
res = {'product_count': False,
'product_onhand':False,
'product_forecast': False,
}
product_obj = self.pool.get('product.product')
for id in ids:
product_ids = product_obj.search(cr, uid, [('categ_id','=',id)])
onhand = 0
forecast = 0
for product in product_obj.browse(cr, uid, product_ids):
onhand += product.qty_available
forecast += product.virtual_available
res[id] = {
'product_count': len(product_ids),
'product_onhand': onhand,
'product_forecast': forecast
}
return res
_inherit = 'product.category'
_columns = {
'product_count': fields.function(_get_product_stock, string='Product Count', type='integer', multi='inventory', readonly=True),
'product_onhand': fields.function(_get_product_stock, string='On Hand', type='float', multi='inventory', readonly=True),
'product_forecast': fields.function(_get_product_stock, string='Forecast', type='float', multi='inventory', readonly=True),
}
class product_product(osv.osv):
_inherit = 'product.product'
def _get_last_incoming_shipment_date(self, cr, uid, ids, name, arg, context=None):
res = {}
dbs = cr.dbname
if dbs == 'BS100':
dest_id = 16
else:
dest_id = 17
for id in ids:
cr.execute('select create_date::timestamp::date from stock_move where product_id=%s and location_dest_id=%s order by create_date desc limit 1',[id,dest_id])
dict = cr.dictfetchone()
if dict is None:
res[id] = 'NA'
else:
res[id] = dict.get('create_date')
return res
_columns = {
'internal_barcode': fields.char('Internal Barcode', size=64),
'last_ship_date': fields.function(_get_last_incoming_shipment_date, string='Last Shipped', type='char', readonly=True),
}
|
rkhleics/wagtailmenus
|
wagtailmenus/migrations/0010_auto_20160201_1558.py
|
Python
|
mit
| 408
| 0
|
# -*- c
|
oding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailmenus', '0009_auto_20160201_0859'),
]
operations = [
migrations.RenameField(
model_name='mainmenuitem',
old_name='add_subnav',
new_nam
|
e='allow_subnav',
),
]
|
bitmazk/webfaction-django-boilerplate
|
website/webapps/django/project/settings/test_settings.py
|
Python
|
mit
| 371
| 0
|
# flake8: noqa
"
|
""Settings to be used for running tests."""
from settings import *
INSTALLED_APPS.append('integration_tests')
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
EMAIL_SUBJECT_PREFIX = '[test] '
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
SO
|
UTH_TESTS_MIGRATE = False
|
Crystal-SDS/dashboard
|
crystal_dashboard/dashboards/crystal/containers/browsers.py
|
Python
|
gpl-3.0
| 1,168
| 0
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CON
|
DITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import browsers
from crystal_dashboard.dashboards.crystal.containers import tables
class ContainerBrowser(browsers.ResourceBrowser):
name = "swift"
verbose_name = _("Swift")
navigation_table_class = tables.ContainersTable
|
content_table_class = tables.ObjectsTable
navigable_item_name = _("Container")
navigation_kwarg_name = "container_name"
content_kwarg_name = "subfolder_path"
has_breadcrumb = True
breadcrumb_url = "horizon:crystal:containers:index"
|
cmelange/ansible
|
lib/ansible/modules/network/nxos/nxos_vlan.py
|
Python
|
gpl-3.0
| 13,912
| 0.001509
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_vlan
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages VLAN resources and attributes.
description:
- Manages VLAN configurations on NX-OS switches.
author: Jason Edelman (@jedelman8)
options:
vlan_id:
description:
- Single VLAN ID.
required: false
default: null
vlan_range:
description:
- Range of VLANs such as 2-10 or 2,5,10-15, etc.
required: false
default: null
name:
description:
- Name of VLAN.
required: false
default: null
vlan_state:
description:
- Manage the vlan operational state of the VLAN
        (equivalent to state {active | suspend} command).
required: false
default: active
choices: ['active','suspend']
admin_state:
description:
- Manage the VLAN administrative state of the VLAN equivalent
to shut/no shut in VLAN config mode.
required: false
default: up
choices: ['up','down']
mapped_vni:
description:
- The Virtual Network Identifier (VNI) ID that is mapped to the
VLAN. Valid values are integer and keyword 'default'.
required: false
default: null
version_added: "2.2"
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Ensure a range of VLANs are not present on the switch
nxos_vlan:
vlan_range: "2-10,20,50,55-60,100-150"
host: 68.170.147.165
username: cisco
password: cisco
state: absent
transport: nxapi
- name: Ensure VLAN 50 exists with the name WEB and is in the shutdown state
nxos_vlan:
vlan_id: 50
host: 68.170.147.165
admin_state: down
name: WEB
transport: nxapi
username: cisco
password: cisco
- name: Ensure VLAN is NOT on the device
nxos_vlan:
vlan_id: 50
host: 68.170.147.165
state: absent
transport: nxapi
username: cisco
password: cisco
'''
RETURN = '''
proposed_vlans_list:
description: list of VLANs being proposed
returned: when debug enabled
type: list
sample: ["100"]
existing_vlans_list:
description: list of existing VLANs on the switch prior to making changes
returned: when debug enabled
type: list
sample: ["1", "2", "3", "4", "5", "20"]
end_state_vlans_list:
description: list of VLANs after the module is executed
returned: when debug enabled
type: list
sample: ["1", "2", "3", "4", "5", "20", "100"]
proposed:
description: k/v pairs of parameters passed into module (does not include
vlan_id or vlan_range)
returned: when debug enabled
type: dict or null
sample: {"admin_state": "down", "name": "app_vlan",
"vlan_state": "suspend", "mapped_vni": "5000"}
existing:
description: k/v pairs of existing vlan or null when using vlan_range
returned: when debug enabled
type: dict
|
sample: {"admin_state": "down", "name": "app_vlan",
"vlan_id": "20", "vlan_state": "suspend", "mapped_vni": ""}
end_state:
description: k/v pairs of the VLAN after executing module or null
when using vlan_range
re
|
turned: when debug enabled
type: dict or null
sample: {"admin_state": "down", "name": "app_vlan", "vlan_id": "20",
"vlan_state": "suspend", "mapped_vni": "5000"}
updates:
description: command string sent to the device
returned: always
type: list
sample: ["vlan 20", "vlan 55", "vn-segment 5000"]
commands:
description: command string sent to the device
returned: always
type: list
sample: ["vlan 20", "vlan 55", "vn-segment 5000"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def vlan_range_to_list(vlans):
result = []
if vlans:
for part in vlans.split(','):
if part == 'none':
break
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
result.extend(range(a, b + 1))
else:
a = int(part)
result.append(a)
return numerical_sort(result)
return result
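# Illustrative expansion (assumed inputs): the range string is flattened into
# individual IDs and then sorted numerically as strings, e.g.
#   vlan_range_to_list('2-4,10') -> ['2', '3', '4', '10']
#   vlan_range_to_list('')       -> []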
def numerical_sort(string_int_list):
"""Sort list of strings (VLAN IDs) that are digits in numerical order.
"""
as_int_list = []
as_str_list = []
for vlan in string_int_list:
as_int_list.append(int(vlan))
as_int_list.sort()
for vlan in as_int_list:
as_str_list.append(str(vlan))
return as_str_list
def build_commands(vlans, state):
commands = []
for vlan in vlans:
if state == 'present':
command = 'vlan {0}'.format(vlan)
commands.append(command)
elif state == 'absent':
command = 'no vlan {0}'.format(vlan)
commands.append(command)
return commands
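# Illustrative output (assumed inputs):
#   build_commands(['2', '3'], 'present') -> ['vlan 2', 'vlan 3']
#   build_commands(['2'], 'absent')       -> ['no vlan 2']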
def get_vlan_config_commands(vlan, vid):
"""Build command list required for VLAN configuration
"""
reverse_value_map = {
"admin_state": {
"down": "shutdown",
"up": "no shutdown"
}
}
if vlan.get('admin_state'):
# apply value map when making change to the admin state
# note: would need to be a loop or more in depth check if
# value map has more than 1 key
vlan = apply_value_map(reverse_value_map, vlan)
VLAN_ARGS = {
'name': 'name {0}',
'vlan_state': 'state {0}',
'admin_state': '{0}',
'mode': 'mode {0}',
'mapped_vni': 'vn-segment {0}'
}
commands = []
for param, value in vlan.items():
if param == 'mapped_vni' and value == 'default':
command = 'no vn-segment'
else:
command = VLAN_ARGS.get(param).format(vlan.get(param))
if command:
commands.append(command)
commands.insert(0, 'vlan ' + vid)
commands.append('exit')
return commands
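# Illustrative output (assumed input with no admin_state change, so the value
# map above is not applied):
#   get_vlan_config_commands({'name': 'WEB'}, '20') -> ['vlan 20', 'name WEB', 'exit']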
def get_list_of_vlans(module):
body = run_commands(module, ['show vlan | json'])
vlan_list = []
vlan_table = body[0].get('TABLE_vlanbrief')['ROW_vlanbrief']
if isinstance(vlan_table, list):
for vlan in vlan_table:
vlan_list.append(str(vlan['vlanshowbr-vlanid-utf']))
else:
vlan_list.append('1')
return vlan_list
def get_vni(vlanid, module):
flags = str('all | section vlan.{0}'.format(vlanid)).split(' ')
body = get_config(module, flags=flags)
#command = 'show run all | section vlan.{0}'.format(vlanid)
#body = execute_show_command(command, module, command_type='cli_show_ascii')[0]
value = ''
if body:
REGEX = re.compile(r'(?:vn-segment\s)(?P<value>.*)$', re.M)
|
fire-uta/iiix-data-parser
|
cli.py
|
Python
|
mit
| 1,078
| 0.008349
|
import optparse
class CLI:
pass
CLI.parser = optparse.OptionParser()
CLI.parser.add_option("-q", "--queries", dest="queries", help="Queries csv file", metavar="FILE")
CLI.parser.add_option("-l", "--logs", dest="logs", help="Path to log files contain
|
ing directory", metavar="DIR")
CLI.parser.add_option("-r", "--results", dest="results",
help="Path to result files containing directory", metavar="DIR")
CLI.parser.add_option("-g", "--gains", dest="gains"
|
,
help="Comma-separated list of gains for different relevance levels, eg. 0,1,10", metavar="LIST")
CLI.parser.add_option("-s", "--serp-len", dest="serp_len",
help="Number of results on a single Search Engine Result Page [default: %default]",
default=10, metavar="N")
CLI.parser.add_option("-c", action="store_true", dest="use_combined_log_parser", help="Use combined log parser")
CLI.parser.add_option("-a", action="store_true", dest="use_alt_log_format", help="Use alternative log format")
CLI.parsedArgs = CLI.parser.parse_args()
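# Hedged usage sketch (hypothetical file names): optparse's parse_args() returns
# an (options, args) tuple, so the parsed options live in CLI.parsedArgs[0], e.g.
#   python run.py -q queries.csv -l logs/ -r results/ -g 0,1,10
#   CLI.parsedArgs[0].gains -> "0,1,10"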
|
jindongh/kombu
|
kombu/tests/transport/test_redis.py
|
Python
|
bsd-3-clause
| 40,008
| 0.000025
|
from __future__ import absolute_import
import socket
import types
from collections import defaultdict
from itertools import count
from kombu import Connection, Exchange, Queue, Consumer, Producer
from kombu.exceptions import InconsistencyError, VersionMismatch
from kombu.five import Empty, Queue as _Queue
from kombu.transport import virtual
from kombu.utils import eventio # patch poll
from kombu.utils.json import dumps, loads
from kombu.tests.case import (
Case, Mock, call, module_exists, skip_if_not_module, patch,
)
class _poll(eventio._select):
def register(self, fd, flags):
if flags & eventio.READ:
self._rfd.add(fd)
def poll(self, timeout):
events = []
for fd in self._rfd:
if fd.data:
events.append((fd.fileno(), eventio.READ))
return events
eventio.poll = _poll
from kombu.transport import redis # must import after poller patch
class ResponseError(Exception):
pass
class Client(object):
queues = {}
sets = defaultdict(set)
hashes = defaultdict(dict)
shard_hint = None
def __init__(self, db=None, port=None, connection_pool=None, **kwargs):
self._called = []
self._connection = None
self.bgsave_raises_ResponseError = False
self.connection = self._sconnection(self)
def bgsave(self):
self._called.append('BGSAVE')
if self.bgsave_raises_ResponseError:
raise ResponseError()
def delete(self, key):
self.queues.pop(key, None)
def exists(self, key):
return key in self.queues or key in self.sets
def hset(self, key, k, v):
self.hashes[key][k] = v
def hget(self, key, k):
return self.hashes[key].get(k)
def hdel(self, key, k):
self.hashes[key].pop(k, None)
def sadd(self, key, member, *args):
self.sets[key].add(member)
zadd = sadd
def smembers(self, key):
return self.sets.get(key, set())
def srem(self, key, *args):
self.sets.pop(key, None)
zrem = srem
def llen(self, key):
try:
return self.queues[key].qsize()
except KeyError:
return 0
def lpush(self, key, value):
self.queues[key].put_nowait(value)
def parse_response(self, connection, type, **options):
cmd, queues = self.connection._sock.data.pop()
assert cmd == type
self.connection._sock.data = []
if type == 'BRPOP':
item = self.brpop(queues, 0.001)
if item:
return item
raise Empty()
def brpop(self, keys, timeout=None):
key = keys[0]
try:
item = self.queues[key].get(timeout=timeout)
except Empty:
pass
else:
return key, item
def rpop(self, key):
try:
return self.queues[key].get_nowait()
except KeyError:
pass
def __contains__(self, k):
return k in self._called
def pipeline(self):
return Pipeline(self)
def encode(self, value):
return str(value)
def _new_queue(self, key):
self.queues[key] = _Queue()
class _sconnection(object):
disconnected = False
class _socket(object):
blocking = True
filenos = count(30)
def __init__(self, *args):
self._fileno = next(self.filenos)
self.data = []
def fileno(self):
return self._fileno
def setblocking(self, blocking):
self.blocking = blocking
def __init__(self, client):
self.client = client
self._sock = self._socket()
def disconnect(self):
self.disconnected = True
def send_command(self, cmd, *args):
self._sock.data.append((cmd, args))
def info(self):
return {'foo': 1}
def pubsub(self, *args, **kwargs):
connection = self.connection
class ConnectionPool(object):
def get_connection(self, *args, **kwargs):
return connection
self.connection_pool = ConnectionPool()
return self
class Pipeline(object):
def __init__(self, client):
self.client = client
self.stack = []
def __getattr__(self, key):
if key not in self.__dict__:
def _add(*args, **kwargs):
self.stack.append((getattr(self.client, key), args, kwargs))
return self
return _add
return self.__dict__[key]
def execute(self):
stack = list(self.stack)
self.stack[:] = []
return [fun(*args, **kwargs) for fun, args, kwargs in stack]
class Channel(redis.Channel):
def _get_client(self):
return Client
def _get_pool(self):
return Mock()
def _get_response_error(self):
return ResponseError
def _new_queue(self, queue, **kwargs):
self.client._new_queue(queue)
def pipeline(self):
return Pipeline(Client())
class Transport(redis.Transport):
Channel = Channel
def _get_errors(self):
return ((KeyError,), (IndexError,))
class test_Channel(Case):
@skip_if_not_module('redis')
def setup(self):
self.connection = self.create_connection()
self.channel = self.connection.default_channel
def create_connection(self, **kwargs):
kwargs.setdefault('transport_options', {'fanout_patterns': True})
return Connection(transport=Transport, **kwargs)
def _get_one_delivery_tag(self, n='test_uniq_tag'):
with self.create_connection() as conn1:
chan = conn1.default_channel
chan.exchange_declare(n)
chan.queue_declare(n)
chan.queue_bind(n, n, n)
msg = chan.prepare_message('quick brown fox')
chan.basic_publish(msg, n, n)
q, payload = chan.client.brpop([n])
self.assertEqual(q, n)
self.assertTrue(payload)
pymsg = chan.message_to_python(loads(payload))
return pymsg.delivery_tag
def test_delivery_tag_is_uuid(self):
seen = set()
for i in range(100):
tag = self._get_one_delivery_tag()
self.assertNotIn(tag, seen)
seen.add(tag)
with self.assertRaises(ValueError):
int(tag)
self.assertEqual(len(tag), 36)
def test_disable_ack_emulation(self):
conn = Connection(transport=Transport, transport_options={
|
'ack_emulation': False,
})
chan = conn.channel()
self.assertFalse(chan.ack_emulation)
self.assertEqual(chan.QoS, virtual.QoS)
def test_redis_info_raises(self):
pool = Mock(name='pool')
pool_at_init = [pool]
client = Mock(name='client')
class XChannel(Channel):
def __init__(self, *args, **k
|
wargs):
self._pool = pool_at_init[0]
super(XChannel, self).__init__(*args, **kwargs)
def _get_client(self):
return lambda *_, **__: client
class XTransport(Transport):
Channel = XChannel
conn = Connection(transport=XTransport)
client.info.side_effect = RuntimeError()
with self.assertRaises(RuntimeError):
conn.channel()
pool.disconnect.assert_called_with()
pool.disconnect.reset_mock()
pool_at_init = [None]
with self.assertRaises(RuntimeError):
conn.channel()
self.assertFalse(pool.disconnect.called)
def test_after_fork(self):
self.channel._pool = None
self.channel._after_fork()
self.channel._pool = Mock(name='pool')
self.channel._after_fork()
self.channel._pool.disconnect.assert_called_with()
def test_next_delivery_tag(self):
self.assertNotEqual(
self.channel._next_delivery_tag(),
self.channel._next_delivery_tag(),
)
def test_do_restore_message(self):
client = Mock(name='client')
pl1 = {'body': 'BODY'}
spl1
|
sherrardTr4129/FaceHarold
|
getCSVData.py
|
Python
|
mit
| 2,092
| 0.010516
|
import csv
import math
from CSHLDAP import CSHLDAP #get this from the wiki
from subprocess import call
your_gender_list = []
with open('match_scores.csv', 'rb') as f:
reader = csv.reader(f)
your_gender_list = list(reader)
print(your_gender_list)
tempList = [0] * len(your_gender_list)
peopleList = [0] * len(your_gender_list)
for x in range(len(your_gender_list)):
tempList[x] = your_gender_list[x][1]
peopleList[x] = your_gender_list[x][0]
tempList[0] = -10
finalPeopleList = []
for x in range(len(peopleList)):
name = peopleList[x].split("/")
nametemp = name[len(name) - 1]
finalPeopleList += [nametemp]
finalPeopleList[0] = ""
print(finalPeopleList)
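# Select the index of the candidate whose score in tempList is numerically
# closest to 11.5, the threshold used in the expression below.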
index = min(range(len(tempList)), key=lambda i: abs(round(float(tempList[i]), 3)-11.5))
print(finalPeopleList,tempList)
base_dn = 'ou=Users,dc=csh,dc=rit,dc=edu'
host = 'ldap://ldap.csh.rit.edu'
password = '<put password here>'
print("Prediction: ", finalPeopleList[index] , " Confidence: ", tempList[index])
ldap_con = CSHLDAP("<put user name here>", password)
name = finalPeopleList[index].split(" ")
nameStr = name[0].strip() + " " + name[1].strip()
print(nameStr)
result = ldap_con.search(cn=nameStr)
msgString = "Hello " + nameStr
file = open("names.txt", "a+")
file.write(nameStr +" Confidence: " + tempList[index] + "\n")
file.close()
onFloor = result[0][1]['onfloor']
skills =[]
if('skills' in result[0][1]):
skills = result[0][1]['skills']
if(int(onFloor[0])):
msgString += " you are an on floor member "
else:
msgString += " you are an off floor member "
skillsStr = ""
if(skills != []):
for x in range(len(skills)):
if(x == 0):
|
skillsStr += skills[x] + ", "
elif(x == len(skills)-1):
skillsStr += ", and " + skills[x]
else:
skillsStr += ", " + skills[x]
msgString += "with skills in " + skillsStr
print(msgString)
call(["pico2wave", "
|
-w", "msg.wav", msgString])
|
leb2dg/osf.io
|
osf/models/node.py
|
Python
|
apache-2.0
| 117,415
| 0.001823
|
import functools
import itertools
import logging
import re
import urlparse
import warnings
import bson
from django.db.models import Q
from dirtyfields import DirtyFieldsMixin
from django.apps import apps
from django.contrib.contenttypes.fields import GenericRelation
from django.core.paginator import Paginator
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models, transaction, connection
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from django.utils.functional import cached_property
from keen import scoped_keys
from psycopg2._psycopg import AsIs
from typedmodels.models import TypedModel, TypedModelManager
from include import IncludeManager
from framework import status
from framework.celery_tasks.handlers import enqueue_task
from framework.exceptions import PermissionsError
from framework.sentry import log_exception
from addons.wiki.utils import to_mongo_key
from osf.exceptions import ValidationValueError
from osf.models.contributor import (Contributor, RecentlyAddedContributor,
get_contributor_permissions)
from osf.models.identifiers import Identifier, IdentifierMixin
from osf.models.licenses import NodeLicenseRecord
from osf.models.mixins import (AddonModelMixin, CommentableMixin, Loggable,
NodeLinkMixin, Taggable)
from osf.models.node_relation import NodeRelation
from osf.models.nodelog import NodeLog
from osf.models.sanctions import RegistrationApproval
from osf.models.private_link import PrivateLink
from osf.models.spam import SpamMixin
from osf.models.tag import Tag
from osf.models.user import OSFUser
from osf.models.validators import validate_doi, validate_title
from framework.auth.core import Auth, get_user
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.utils.fields import NonNaiveDateTimeField
from osf.utils.requests import DummyRequest, get_request_and_user_id
from osf.utils.workflows import DefaultStates
from website import language, settings
from website.citations.utils import datetime_to_csl
from website.exceptions import (InvalidTagError, NodeStateError,
TagNotFoundError, UserNotAffiliatedError)
from website.project.licenses import set_license
from website.mails import mails
from website.project import signals as project_signals
from website.project import tasks as node_tasks
from website.project.model import NodeUpdateError
from website.identifiers.tasks import update_ezid_metadata_on_change
from website.util import (api_url_for, api_v2_url, get_headers_from_request,
sanitize, web_url_for)
from website.util.permissions import (ADMIN, CREATOR_PERMISSIONS,
DEFAULT_CONTRIBUTOR_PERMISSIONS, READ,
WRITE, expand_permissions,
reduce_permissions)
from .base import BaseModel, Guid, GuidMixin, GuidMixinQuerySet
logger = logging.getLogger(__name__)
class AbstractNodeQuerySet(GuidMixinQuerySet):
def get_roots(self):
return self.filter(id__in=self.exclude(type='osf.collection').exclude(type='osf.quickfilesnode').values_list('root_id', flat=True))
def get_children(self, root, active=False):
# If `root` is a root node, we can use the 'descendants' related name
# rather than doing a recursive query
if root.id == root.root_id:
query = root.descendants.exclude(id=root.id)
if active:
query = query.filter(is_deleted=False)
return query
else:
sql = """
WITH RECURSIVE descendants AS (
SELECT
parent_id,
child_id,
1 AS LEVEL,
ARRAY[parent_id] as pids
FROM %s
%s
WHERE is_node_link IS FALSE AND parent_id = %s %s
UNION ALL
SELECT
d.parent_id,
s.child_id,
d.level + 1,
d.pids || s.parent_id
FROM descendants AS d
JOIN %s AS s
ON d.child_id = s.parent_id
WHERE s.is_node_link IS FALSE AND %s = ANY(pids)
) SELECT array_agg(DISTINCT child_id)
FROM descendants
WHERE parent_id = %s;
"""
with connection.cursor() as cursor:
node_relation_table = AsIs(NodeRelation._meta.db_table)
cursor.execute(sql, [
node_relation_table,
AsIs('LEFT JOIN osf_abstractnode ON {}.child_id = osf_abstractnode.id'.format(node_relation_table) if active else ''),
root.pk,
AsIs('AND osf_abstractnode.is_deleted IS FALSE' if active else ''),
node_relation_table,
root.pk,
root.pk])
row = cursor.fetchone()[0]
if not row:
return AbstractNode.objects.none()
return AbstractNode.objects.filter(id__in=row)
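    # Illustrative call (hypothetical node), assuming `project` is a saved
    # AbstractNode: AbstractNode.objects.get_children(project, active=True)
    # returns the non-deleted descendants reachable through non-link relations.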
def can_view(self, user=None, private_link=None):
qs = self.filter(is_public=True)
if private_link is not None:
if isinstance(private_link, PrivateLink):
private_link = private_link.key
if not isinstance(private_link, basestring):
raise TypeError('"private_link" must be either {} or {}. Got {!r}'.format(str, PrivateLink, private_link))
qs |= self.filter(private_links__is_deleted=False, private_links__key=private_link)
if user is not None:
if isinstance(user, OSFUser):
user = user.pk
if not isinstance(user, int):
raise TypeError('"user" must be either {} or {}. Got {!r}'.format(int, OSFUser, user))
sqs = Contributor.objects.filter(node=models.OuterRef('pk'), user__id=user, read=True)
qs |= self.annotate(can_view=models.Exists(sqs)).filter(can_view=True)
qs |= self.extra(where=['''
"osf_abstractnode".id in (
WITH RECURSIVE implicit_read AS (
SELECT "osf_contributor"."node_id"
FROM "osf_contributor"
WHERE "osf_contributor"."user_id" = %s
AND "osf_contributor"."admin" is TRUE
UNION ALL
SELECT "osf_noderelation"."child_id"
FROM "implicit_read"
LEFT JOIN "osf_noderelation" ON "osf_noderelation"."parent_id" = "implicit_read"."node_id"
WHERE "osf_noderelation"."is_node_link" IS FALSE
) SELECT * FROM implicit_read
)
'''], params=(user, ))
return qs
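    # Illustrative call (hypothetical user): AbstractNode.objects.can_view(user=viewer)
    # combines public nodes, contributor access and implicit admin access into a
    # single queryset; passing a PrivateLink or its key widens it further.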
class AbstractNodeManager(TypedModelManager, IncludeManager):
def get_queryset(self):
qs = AbstractNodeQuerySet(self.model, using=self._db)
# Filter by typedmodels type
return self._filter_by_type(qs)
# AbstractNodeQuerySet methods
def get_roots(self):
return self.get_queryset().get_roots()
def get_children(self, root, active=False):
return self.get_queryset().get_children(root, active=active)
def can_view(self, user=None, private_link=None):
return self.get_queryset().can_view(user=user, private_link=private_link)
class AbstractNode(DirtyFieldsMixin, TypedModel, AddonModelMixin, IdentifierMixin,
NodeLinkMixin, CommentableMixin, SpamMixin,
Taggable, Loggable, GuidMixin, BaseModel):
"""
All things that i
|
nherit from AbstractNode will
|
appear in
the same table and will be differentiated by the `type` column.
"""
#: Whether this is a pointer or not
primary = True
settings_type = 'node' # Needed for addons
FIELD_ALIASES = {
# TODO: Find a bet
|
shawncaojob/LC
|
LINTCODE/41_maximum_subarray.py
|
Python
|
gpl-3.0
| 410
| 0.004878
|
class Solution:
"""
@param: nums: A list of integers
@return: A integer indicate the sum of max subarray
"""
def maxSubArray(self, nums):
|
# write your code here
if not nums or len(nums) == 0: return 0
cur = res = nums[0]
for i in xrange(1, len(nums)):
cur = max(cur
|
+ nums[i], nums[i])
res = max(res, cur)
return res
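# Illustrative check (assumed input, not part of the original solution):
#   Solution().maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) returns 6
# Kadane's scan keeps `cur` as the best sum ending at index i and `res` as the
# best sum seen so far.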
|
plamut/superdesk
|
server/apps/publish/content/__init__.py
|
Python
|
agpl-3.0
| 524
| 0
|
# -*- coding: utf-8; -*-
#
# This file is part of
|
Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from .publish import ArchivePublishResource, ArchivePublishService # NOQA
from .
|
kill import KillPublishResource, KillPublishService # NOQA
from .correct import CorrectPublishResource, CorrectPublishService # NOQA
|
DailyActie/Surrogate-Model
|
01-codes/OpenMDAO-Framework-dev/openmdao.main/src/openmdao/main/test/test_aningvtree.py
|
Python
|
mit
| 742
| 0
|
import unittest
from openmdao.main.api import VariableTree, Component, Assembly
from openmdao.main.datatypes.api import Float, VarTree
class VT(VariableTree):
x = Float()
class C(Component):
x = Float(iotype='in')
out = Float(iotype='out')
def execute(self):
self.out = 2 * self.x
class A(Assembly):
v
|
t = VarTree(VT(), iotype='in')
def configure(self):
self.add('c', C())
self.driver.workflow.add(['c'])
self.connect('vt.x', 'c.x')
self.create_passthrough('c.out')
class TestCase(unittest.TestCase):
def test_vtree(self):
a = A()
a.vt.x = 1.0
a.run()
self.assertEqual(a.out, 2.0)
if __name
|
__ == '__main__':
unittest.main()
|
AndyHannon/ctrprogress
|
ranker.py
|
Python
|
mit
| 11,305
| 0.008138
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import webapp2,jinja2,os
import logging
import wowapi
from datetime import datetime
from google.appengine.ext import ndb
from google.appengine.api.memcache import Client
from google.appengine.api import taskqueue
from google.appengine.api.taskqueue import Queue
from google.appengine.api.taskqueue import QueueStatistics
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'])
class Progression(ndb.Model):
raidname = ndb.StringProperty(indexed = True, required = True)
numbosses = ndb.IntegerProperty(default = 0, required = True)
normal = ndb.IntegerProperty(default = 0, required = True)
heroic = ndb.IntegerProperty(default = 0, required = True)
mythic = ndb.IntegerProperty(default = 0, required = True)
class Group(ndb.Model):
name = ndb.StringProperty(indexed=True, required = True)
toons = ndb.StringProperty(repeated=True)
brf = ndb.StructuredProperty(Progression, required = True)
hm = ndb.StructuredProperty(Progression, required = True)
lastupdated = ndb.DateTimeProperty(auto_now=True)
avgilvl = ndb.IntegerProperty(default = 0)
class Global(ndb.Model):
lastupdated = ndb.DateTimeProperty(auto_now=True)
class ProgressBuilder(webapp2.RequestHandler):
difficulties = ['normal','heroic','mythic']
hmbosses = ['Kargath Bladefist','The Butcher','Brackenspore','Tectus','Twin Ogron','Ko\'ragh','Imperator Mar\'gok']
brfbosses = ['Oregorger','Gruul','The Blast Furnace','Hans\'gar and Franzok','Flamebender Ka\'graz','Kromog','Beastlord Darmac','Operator Thogar','The Iron Maidens','Blackhand']
def post(self):
start = self.request.get('start')
end = self.request.get('end')
logging.info('%s %s' % (start,end))
importer = wowapi.Importer()
q = Group.query()
groups = q.fetch()
logging.info('Builder task for range %s to %s started' % (start, end))
for group in groups:
firstchar = group.name[0]
if firstchar < start or firstchar > end:
continue
data = list()
importer.load(group.toons, data)
progress = dict()
self.parse(ProgressBuilder.difficulties, ProgressBuilder.hmbosses,
data, 'Highmaul', progress)
self.parse(ProgressBuilder.difficulties, ProgressBuilder.brfbosses,
data, 'Blackrock Foundry', progress)
# calculate the avg ilvl values from the toon data
group.avgilvl = 0
numtoons = 0
for toon in data:
if 'items' in toon:
numtoons += 1
group.avgilvl += toon['items']['averageItemLevel']
if numtoons != 0:
group.avgilvl /= numtoons
self.response.write(group.name + " data generated<br/>")
# update the entry in ndb with the new progression data for this
# group. this also checks to make sure that the progress only ever
            # increases, in case of weirdness with the data.
group.brf.normal = max(group.brf.normal,
progress['Blackrock Foundry']['normal'])
group.brf.heroic = max(group.brf.heroic,
progress['Blackrock Foundry']['heroic'])
group.brf.mythic = max(group.brf.mythic,
progress['Blackrock Foundry']['mythic'])
group.hm.normal = max(group.hm.normal,
progress['Highmaul']['normal'])
group.hm.heroic = max(group.hm.heroic,
progress['Highmaul']['heroic'])
group.hm.mythic = max(group.hm.mythic,
progress['Highmaul']['mythic'])
group.put()
logging.info('Finished building group %s' % group.name)
logging.info('Builder task for range %s to %s completed' % (start, end))
# update the last updated for the whole dataset. don't actually
# have to set the time here, the auto_now flag on the property does
# it for us.
q = Global.query()
r = q.fetch()
if (len(r) == 0):
g = Global()
else:
g = r[0]
g.put()
def parse(self, difficulties, bosses, toondata, raidname, progress):
progress[raidname] = dict()
bossdata = dict()
for boss in bosses:
bossdata[boss] = dict()
for d in difficulties:
bossdata[boss][d] = dict()
bossdata[boss][d]['times'] = list()
bossdata[boss][d]['timeset'] = set()
bossdata[boss][d]['killed'] = True
bossdata[boss][d]['killtime'] = 0
bossdata[boss][d]['killinv'] = 0
# loop through each toon in the data from the blizzard API
for toon in toondata:
if 'progression' not in toon:
continue
# get just the raid data for this toon
raids = toon['progression']['raids']
# this filters the raid data down to just the raid we're looking
# at this pass
raid = [d for d in raids if d['name'] == raidname][0]
# loop through the individual bosses and get the timestamp for
# the last kill for this toon for each boss
for boss in bosses:
# this filters the raid data down to just a single boss
b = [d for d in raid['bosses'] if d['name'] == boss][0]
# loop through each difficulty level and grab each timestamp.
# skip any timestamps of zero. that means the toon never
# killed the boss.
for d in difficulties:
if b[d+'Timestamp'] != 0:
bossdata[boss][d]['times'].append(b[d+'Timestamp'])
bossdata[boss][d]['timeset'].add(b[d+'Timestamp'])
# loop back through the difficulties and bosses and build up the
# progress data
for d in difficulties:
progress[raidname][d] = 0
for boss in bosses:
# for each boss, grab the set of unique timestamps and sort it
# with the last kill first
timelist = list(bossdata[boss][d]['timeset'])
timelist.sort(reverse=True)
# now loop through that time list. a kill involving 5 or more
# players from the group is considered a kill for the whole
# group and counts towards progress.
for t in timelist:
count = bossdata[boss][d]['times'].count(t)
if count >= 5:
bossdata[boss][d]['killed'] = True
bossdata[boss][d]['killtime'] = t
bossdata[boss][d]['killinv'] = count
progress[raidname][d] += 1
ts = datetime.fromtimestamp(t/1000)
# logging.info('count for %s %s at time %s (involved %d members)' % (boss, d, ts.strftime("%Y-%m-%d %H:%M:%S"), count))
br
|
eak
class Ranker(webapp2.RequestHandler):
def get(self):
queue = Queue()
stats = queue.fetch_statistics()
template_values={
'tasks': stats.tasks,
'in_flight': stats.in_flight,
}
template = JINJA_ENVIRONMENT.get_template('templates/ranker.html')
self.response.write(template.render(template_values))
def post(self):
|
# refuse to start the tasks if there are some already running
queue = Queue()
stats = queue.fetch_statistics()
if stats.tasks == 0:
print 'nop'
taskqueue.add(url='/builder', params={'start':'A', 'end':'B'})
taskqueue.add(url='/builder', params={'start':'C', 'end':'E'})
taskqueue.add(url='/builder', params={'start':'F', 'end':'G'})
|
endlessm/eos-event-recorder-daemon
|
tests/test-opt-out-integration.py
|
Python
|
gpl-2.0
| 8,113
| 0.002588
|
#!/usr/bin/env python3
# Copyright 2014, 2015, 2017 Endless Mobile, Inc.
# This file is part of eos-event-recorder-daemon.
#
# eos-event-recorder-daemon is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at your
# option) any later version.
#
# eos-event-recorder-daemon is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with eos-event-recorder-daemon. If not, see
# <http://www.gnu.org/licenses/>.
import configparser
import dbus
import os
import shlex
import subprocess
import taptestrunner
import tempfile
import time
import unittest
import dbusmock
_METRICS_IFACE = 'com.endlessm.Metrics.EventRecorderServer'
class TestOptOutIntegration(dbusmock.DBusTestCase):
"""
Makes sure the Enabled property can be set and retrieved.
"""
@classmethod
def setUpClass(klass):
"""Set up a mock system bus."""
klass.start_system_bus()
klass.dbus_con = klass.get_dbus(system_bus=True)
def setUp(self):
"""Start the event recorder on the mock system bus."""
# Put polkitd mocks onto the mock system bus.
(self.polkit_popen, self.polkit_obj) = self.spawn_server_template('polkitd')
self.test_dir = tempfile.TemporaryDirectory(
|
prefix='eos-event-recorder-daemon-test.')
persistent_cache_directory = os.path.join(self.test_dir.name, 'cache')
os.mkdir(persistent_cache_directory)
escaped_dir = shlex.quote(persistent_cache_directory)
persistent_cache_dir_arg = '--persistent-cache-directory=' + escaped_dir
self.config_file = os.path.join(self.test_dir.name, 'permissions.conf')
config_file_arg = '--config-file-path={}'.format(self.config_file)
# TODO: The daemon attem
|
pts to create CONFIG_DIR / cache-size.conf when
# launched; this will typically fail while running this test because
# either CONFIG_DIR does not exist, or it exists and is not owned by
# the user running the test. The daemon logs a warning in this case.
# (If the test is running as root or the metrics user, and the
# directory exists, then the test will overwrite the file within!)
# TODO: The daemon assumes it is running on an OSTree system and
# attempts to open /ostree/repo/config to determine whether to adjust
# the environment in its own configuration (self.config_file above).
# When running on a non-OSTree system such as a build server or
# development container, this fails, and logs a warning. (This could
# be addressed by, for example, checking ostree_sysroot_is_booted().)
# TODO: Address both issues above, then enable fatal warnings.
daemon_path = os.environ.get('EMER_PATH', './eos-metrics-event-recorder')
self.daemon = subprocess.Popen([daemon_path,
persistent_cache_dir_arg,
config_file_arg])
# Wait for the service to come up
self.wait_for_bus_object('com.endlessm.Metrics',
'/com/endlessm/Metrics', system_bus=True)
metrics_object = self.dbus_con.get_object('com.endlessm.Metrics',
'/com/endlessm/Metrics')
self.interface = dbus.Interface(metrics_object, _METRICS_IFACE)
def tearDown(self):
self.polkit_popen.terminate()
self.daemon.terminate()
self.polkit_popen.wait()
self.assertEqual(self.daemon.wait(), 0)
self.test_dir.cleanup()
def test_opt_out_readable(self):
"""Make sure the Enabled property exists."""
self.interface.Get(_METRICS_IFACE, 'Enabled',
dbus_interface=dbus.PROPERTIES_IFACE)
def test_opt_out_not_writable(self):
"""Make sure the Enabled property is not writable."""
        with self.assertRaisesRegex(dbus.DBusException, r'org\.freedesktop\.DBus\.Error\.InvalidArgs'):
self.interface.Set(_METRICS_IFACE, 'Enabled', False,
dbus_interface=dbus.PROPERTIES_IFACE)
def test_set_enabled_authorized(self):
"""
Make sure the Enabled property's value persists and accessing SetEnabled
succeeds when it is set to allowed.
"""
# Check defaults look good and erase the file before our next change
self._check_config_file(enabled='true', uploading_enabled='false')
self.polkit_obj.SetAllowed(['com.endlessm.Metrics.SetEnabled'])
self.interface.SetEnabled(True)
self.assertTrue(self.interface.Get(_METRICS_IFACE, 'Enabled',
dbus_interface=dbus.PROPERTIES_IFACE))
self._check_config_file(enabled='true', uploading_enabled='true')
self.interface.SetEnabled(False)
self.assertFalse(self.interface.Get(_METRICS_IFACE, 'Enabled',
dbus_interface=dbus.PROPERTIES_IFACE))
self._check_config_file(enabled='false', uploading_enabled='false')
def test_set_enabled_unauthorized(self):
"""
Make sure that accessing SetEnabled fails if not explicitly authorized.
"""
        with self.assertRaisesRegex(dbus.DBusException, r'org\.freedesktop\.DBus\.Error\.AuthFailed'):
self.interface.SetEnabled(True)
def test_upload_doesnt_change_config(self):
"""
Make sure that calling UploadEvents() doesn't spontaneously enable
uploading. This seems implausible but did actually happen.
UploadEvents() causes the config to be re-read, triggering a change
notification on EmerPermissionsProvider:enabled, triggering a (no-op)
update of the Enabled D-Bus property to TRUE, which was bound to
EmerPermissionsProvider:uploading-enabled so caused that property to
be set to TRUE.
"""
# Check defaults look good and erase the file before our next change
self._check_config_file(enabled='true', uploading_enabled='false')
with self.assertRaisesRegex(dbus.exceptions.DBusException,
r'uploading is disabled') as context:
self.interface.UploadEvents()
self.assertEqual(context.exception.get_dbus_name(),
"com.endlessm.Metrics.Error.UploadingDisabled")
self._check_config_file(enabled='true', uploading_enabled='false')
def test_UploadEvents_fails_if_disabled(self):
self.polkit_obj.SetAllowed(['com.endlessm.Metrics.SetEnabled'])
self.interface.SetEnabled(False)
with self.assertRaisesRegex(dbus.exceptions.DBusException,
r'metrics system is disabled') as context:
self.interface.UploadEvents()
self.assertEqual(context.exception.get_dbus_name(),
"com.endlessm.Metrics.Error.MetricsDisabled")
def _check_config_file(self, enabled, uploading_enabled):
# the config file is written asynchronously by the daemon,
# so may not exist immediately after a change is made - wait
# for up to 1 second for it to be written
for i in range(20):
if os.path.exists(self.config_file):
break
else:
time.sleep(0.05)
config = configparser.ConfigParser()
self.assertEqual(config.read(self.config_file), [self.config_file])
self.assertEqual(config.get("global", "enabled"), enabled)
self.assertEqual(config.get("global", "uploading_enabled"), uploading_enabled)
# erase the file after reading it to guarantee that the next time it
# exists, it's up to date. the daemon doesn't read it once started.
os.unlink(self.config_file)
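    # For reference, the permissions file the daemon writes is a plain key-file
    # read back by _check_config_file above; a representative (illustrative, not
    # byte-exact) example looks like:
    #
    #   [global]
    #   enabled=true
    #   uploading_enabled=false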
if __name__ == '__main__':
unittest.main(testRunner=taptestrunner.TAPTestRunner())
|
mrucci/moto
|
moto/ec2/models.py
|
Python
|
apache-2.0
| 121,494
| 0.001523
|
from __future__ import unicode_literals
import boto
import copy
import itertools
import re
import six
from collections import defaultdict
from datetime import datetime
from boto.ec2.instance import Instance as BotoInstance, Reservation
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from boto.ec2.spotinstancerequest import SpotInstanceRequest as BotoSpotRequest
from boto.ec2.launchspecification import LaunchSpecification
from moto.core import BaseBackend
from moto.core.models import Model
from moto.core.utils import iso_8601_datetime_with_milliseconds
from .exceptions import (
EC2ClientError,
DependencyViolationError,
MissingParameterError,
InvalidParameterValueError,
InvalidParameterValueErrorTagNull,
InvalidDHCPOptionsIdError,
MalformedDHCPOptionsIdError,
InvalidKeyPairNameError,
InvalidKeyPairDuplicateError,
InvalidInternetGatewayIdError,
GatewayNotAttachedError,
ResourceAlreadyAssociatedError,
InvalidVPCIdError,
InvalidSubnetIdError,
InvalidNetworkInterfaceIdError,
InvalidNetworkAttachmentIdError,
InvalidSecurityGroupDuplicateError,
InvalidSecurityGroupNotFoundError,
InvalidPermissionNotFoundError,
InvalidRouteTableIdError,
InvalidRouteError,
InvalidInstanceIdError,
MalformedAMIIdError,
InvalidAMIIdError,
InvalidAMIAttributeItemValueError,
InvalidSnapshotIdError,
InvalidVolumeIdError,
InvalidVolumeAttachmentError,
InvalidDomainError,
InvalidAddressError,
InvalidAllocationIdError,
InvalidAssociationIdError,
InvalidVPCPeeringConnectionIdError,
InvalidVPCPeeringConnectionStateTransitionError,
TagLimitExceeded,
InvalidID,
InvalidCIDRSubnetError,
InvalidNetworkAclIdError,
InvalidVpnGatewayIdError,
InvalidVpnConnectionIdError,
InvalidCustomerGatewayIdError,
)
from .utils import (
EC2_RESOURCE_TO_PREFIX,
EC2_PREFIX_TO_RESOURCE,
random_ami_id,
random_dhcp_option_id,
random_eip_allocation_id,
random_eip_association_id,
random_eni_attach_id,
random_eni_id,
random_instance_id,
random_internet_gateway_id,
random_ip,
random_nat_gateway_id,
random_key_pair,
random_private_ip,
random_public_ip,
random_reservation_id,
random_route_table_id,
generate_route_id,
split_route_id,
random_security_group_id,
random_snapshot_id,
random_spot_request_id,
random_subnet_id,
random_subnet_association_id,
random_volume_id,
random_vpc_id,
random_vpc_peering_connection_id,
generic_filter,
is_valid_resource_id,
get_prefix,
    simple_aws_filter_to_re,
is_valid_cidr,
filter_internet_gateways,
filter_reservations,
random_network_acl_id,
random_network_acl_subnet_association_id,
random_vpn_gateway_id,
random_vpn_connection_id,
random_customer_gateway_id,
is_tag_filter,
)
def utc_date_and_time():
return datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z')
def validate_resource_ids(resource_ids):
for resource_id in resource_ids:
if not is_valid_resource_id(resource_id):
raise InvalidID(resource_id=resource_id)
return True
class InstanceState(object):
def __init__(self, name='pending', code=0):
self.name = name
self.code = code
class StateReason(object):
def __init__(self, message="", code=""):
self.message = message
self.code = code
class TaggedEC2Resource(object):
def get_tags(self, *args, **kwargs):
tags = self.ec2_backend.describe_tags(filters={'resource-id': [self.id]})
return tags
def add_tag(self, key, value):
self.ec2_backend.create_tags([self.id], {key: value})
def get_filter_value(self, filter_name):
tags = self.get_tags()
if filter_name.startswith('tag:'):
tagname = filter_name.replace('tag:', '', 1)
for tag in tags:
if tag['key'] == tagname:
return tag['value']
return ''
if filter_name == 'tag-key':
return [tag['key'] for tag in tags]
if filter_name == 'tag-value':
return [tag['value'] for tag in tags]
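    # Illustrative filter results for a resource carrying the hypothetical tag
    # {'Name': 'web'}, per the branches above:
    #   'tag:Name'  -> 'web'
    #   'tag-key'   -> ['Name']
    #   'tag-value' -> ['web']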
class NetworkInterface(TaggedEC2Resource):
def __init__(self, ec2_backend, subnet, private_ip_address, device_index=0,
public_ip_auto_assign=True, group_ids=None):
self.ec2_backend = ec2_backend
self.id = random_eni_id()
self.device_index = device_index
self.private_ip_address = private_ip_address
self.subnet = subnet
self.instance = None
self.attachment_id = None
self.public_ip = None
self.public_ip_auto_assign = public_ip_auto_assign
self.start()
self.attachments = []
# Local set to the ENI. When attached to an instance, @property group_set
# returns groups for both self and the attached instance.
self._group_set = []
group = None
if group_ids:
for group_id in group_ids:
group = self.ec2_backend.get_security_group_from_id(group_id)
if not group:
# Create with specific group ID.
group = SecurityGroup(self.ec2_backend, group_id, group_id, group_id, vpc_id=subnet.vpc_id)
self.ec2_backend.groups[subnet.vpc_id][group_id] = group
if group:
self._group_set.append(group)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
security_group_ids = properties.get('SecurityGroups', [])
ec2_backend = ec2_backends[region_name]
subnet_id = properties.get('SubnetId')
if subnet_id:
subnet = ec2_backend.get_subnet(subnet_id)
else:
subnet = None
private_ip_address = properties.get('PrivateIpAddress', None)
network_interface = ec2_backend.create_network_interface(
subnet,
private_ip_address,
group_ids=security_group_ids
)
return network_interface
def stop(self):
if self.public_ip_auto_assign:
self.public_ip = None
def start(self):
self.check_auto_public_ip()
def check_auto_public_ip(self):
if self.public_ip_auto_assign:
self.public_ip = random_public_ip()
@property
def group_set(self):
if self.instance and self.instance.security_groups:
return set(self._group_set) | set(self.instance.security_groups)
else:
return self._group_set
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'PrimaryPrivateIpAddress':
return self.private_ip_address
elif attribute_name == 'SecondaryPrivateIpAddresses':
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "SecondaryPrivateIpAddresses" ]"')
raise UnformattedGetAttTemplateException()
@property
def physical_resource_id(self):
return self.id
def get_filter_value(self, filter_name):
if filter_name == 'network-interface-id':
return self.id
elif filter_name in ('addresses.private-ip-address', 'private-ip-address'):
return self.private_ip_address
elif filter_name == 'subnet-id':
return self.subnet.id
elif filter_name == 'vpc-id':
return self.subnet.vpc_id
elif filter_name == 'group-id':
return [group.id for group in self._group_set]
filter_value = super(NetworkInterface, self).get_filter_value(filter_name)
if filter_value is None:
self.ec2_backend.raise_not_implemented_error(
"The filter '{0}' for DescribeNetworkInterfaces".format(filter_name)
)
return filter_value
class NetworkInterfaceBackend(object):
def __init__(self):
self.enis = {}
        super(NetworkInterfaceBackend, self).__init__()
|
jos4uke/getSeqFlankBlatHit
|
lib/python2.7/site-packages/pybedtools/scripts/peak_pie.py
|
Python
|
gpl-2.0
| 5,681
| 0.000176
|
#!/usr/bin/env python
"""
Make a pie chart where peaks fall in annotations; see \
:mod:`pybedtools.contrib.Classifier` for more flexibility.
The results here are similar to CEAS (http://liulab.dfci.harvard.edu/CEAS/).
However, multi-featuretype classes are reported. That is, if a peak falls in
an exon in one isoform and an intron in another isoform, the class is "exon,
intron".
"""
import sys
import urllib
import urllib2
import argparse
import pybedtools
from collections import defaultdict
def make_pie(bed, gff, stranded=False, out='out.png',
include=None, exclude=None, thresh=0):
a = pybedtools.BedTool(bed)
b = pybedtools.BedTool(gff).remove_invalid()
c = a.intersect(b,
wao=True,
s=stranded,
stream=True)
# So we can grab just `a` features later...
afields = a.field_count()
# Where we can find the featuretype in the -wao output. Assumes GFF.
type_idx = afields + 2
# 3 different code paths depending on include/exclude to cut down on
# if/else checks.
#
# For un-included featuretypes, put them in the '.' category (unnannotated)
if include and exclude:
raise ValueError('Can only specify one of `include` or `exclude`.')
d = defaultdict(set)
if include:
for feature in c:
featuretype = feature[type_idx]
key = '\t'.join(feature[:afields])
if featuretype in include:
d[key].update([featuretype])
else:
d[key].update(['.'])
elif exclude:
for feature in c:
featuretype = feature[type_idx]
key = '\t'.join(feature[:afields])
if featuretype not in exclude:
d[key].update([featuretype])
else:
d[key].update(['.'])
else:
for feature in c:
featuretype = feature[type_idx]
key = '\t'.join(feature[:afields])
d[key].update([featuretype])
def labelmaker(x):
x.difference_update('.')
label = []
for i in list(x):
if i == 'three_prime_UTR':
i = "3' UTR"
if i == 'five_prime_UTR':
i = "5' UTR"
label.append(i)
return ', '.join(sorted(label))
# Prepare results for Google Charts API
npeaks = float(len(d))
count_d = defaultdict(int)
for peak, featuretypes in d.items():
if featuretypes == set('.'):
featuretype = 'unannotated'
else:
featuretype = labelmaker(featuretypes)
count_d[featuretype] += 1
results = count_d.items()
results.sort(key=lambda x: x[1])
labels, counts = zip(*results)
labels = []
counts_to_use = []
for label, count in results:
perc = count / npeaks * 100
if perc > thresh:
labels.append('%s: %s (%.1f%%)' % (label,
count,
perc))
counts_to_use.append(perc)
# Set up the Gchart data
data = {'cht': 'p',
'chs': '750x350',
'chd': 't:' + ','.join(map(str, counts_to_use)),
'chl': '|'.join(labels)}
# Encode it correctly
encoded_data = urllib.urlencode(data)
# Send request and get data; write to file
url = 'https://chart.googleapis.com/chart?'
req = urllib2.Request(url, encoded_data)
response = urllib2.urlopen(req)
f = open(out, 'w')
f.write(response.read())
f.close()
def main():
"""
Make a pie chart of features overlapping annotations (e.g., peaks in
introns, exons, etc)
"""
ap = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
ap.add_argument('--bed', help='BED file of e.g. peaks')
ap.add_argument('--gff', help='GFF file of e.g. annotations')
ap.add_argument('--out', default='out.png', help='Output PNG file')
ap.add_argument('--stranded', action='store_true',
help='Use strand-specific intersections')
ap.add_argument('--include', nargs='*', help='Featuretypes to include')
ap.add_argument('--exclude', nargs='*', help='Featuretypes to exclude')
ap.add_argument('--thresh', type=float,
help='Threshold percentage below which output will be '
'suppressed')
ap.add_argument('--test', action='store_true',
help='Run test, overwriting all other args. Result will '
'be "out.png" in current directory.')
args = ap.parse_args()
if not (args.bed and args.gff) and not args.test:
ap.print_help()
sys.exit(1)
if not args.test:
if args.include and args.exclude:
raise ValueError('Cannot specify both --include and --exclude')
make_pie(bed=args.bed,
gff=args.gff,
out=args.out,
thresh=args.thresh,
stranded=args.stranded,
include=args.include,
exclude=args.exclude)
else:
make_pie(bed=pybedtools.example_filename('gdc.bed'),
gff=pybedtools.example_filename('gdc.gff'),
stranded=True,
out='out.png',
include=['exon',
'CDS',
'intron',
'five_prime_UTR',
'three_prime_UTR'])
if __name__ == "__main__":
import doctest
if doctest.testmod(optionflags=doctest.ELLIPSIS).failed == 0:
main()
|
TaylorOshan/spint
|
spint/tests/test_gravity.py
|
Python
|
bsd-3-clause
| 67,670
| 0.004699
|
"""
Tests for gravity-style spatial interaction models
Test data is the Austria migration dataset used in Dennet's (2012) practical primer
on spatial interaction modeling. The data was made available through the
following dropbox link: http://dl.dropbox.com/u/8649795/AT_Austria.csv.
The data has been pre-filtered so that there are no intra-zonal flows.
Dennett, A. (2012). Estimating flows between geographical locations: get me
started in spatial interaction modelling (Working Paper No. 184). UCL: Citeseer.
"""
import unittest
import math
import numpy as np
from ..gravity import BaseGravity, Gravity, Production, Attraction, Doubly
class TestGravity(unittest.TestCase):
"""Tests for gravity-type models"""
def setUp(self):
self.f = np.array([1131, 1887, 69, 738, 98, 31, 43, 19, 1633,
14055, 416, 1276, 1850, 388, 303, 159, 2301, 20164,
1080, 1831, 1943, 742, 674, 407, 85, 379, 1597,
1608, 328, 317, 469, 114, 762, 1110, 2973, 1252,
1081, 622, 425, 262, 196, 2027, 3498, 346, 1332,
2144, 821, 274, 49, 378, 1349, 310, 851, 2117,
630, 106, 87, 424, 978, 490, 670, 577, 546,
569, 33, 128, 643, 154, 328, 199, 112, 587])
self.o = np.array(['AT11',
'AT11',
'AT11',
'AT11',
'AT11',
'AT11',
'AT11',
'AT11',
'AT12',
'AT12',
'AT12',
'AT12',
'AT12',
'AT12',
'AT12',
'AT12',
'AT13',
'AT13',
'AT13',
'AT13',
'AT13',
'AT13',
'AT13',
'AT13',
'AT21',
'AT21',
'AT21',
'AT21',
'AT21',
'AT21',
'AT21',
'AT21',
'AT22',
'AT22',
'AT22',
'AT22',
'AT22',
'AT22',
'AT22',
'AT22',
'AT31',
'AT31',
'AT31',
'AT31',
'AT31',
'AT31',
'AT31',
'AT31',
'AT32',
'AT32',
'AT32',
'AT32',
'AT32',
'AT32',
'AT32',
'AT32',
'AT33',
'AT33',
'AT33',
'AT33',
'AT33',
'AT33',
'AT33',
'AT33',
'AT34',
'AT34',
'AT34',
'AT34',
'AT34',
'AT34',
'AT34',
'AT34'])
self.d = np.array(['AT12',
'AT13',
'AT21',
'AT22',
'AT31',
'AT32',
'AT33',
'AT34',
'AT11',
'AT13',
'AT21',
'AT22',
'AT31',
'AT32',
'AT33',
'AT34',
'AT11',
'AT12',
'AT21',
'AT22',
'AT31',
'AT32',
'AT33',
'AT34',
'AT11',
'AT12',
'AT13',
'AT22',
'AT31',
'AT32',
'AT33',
'AT34',
'AT11',
'AT12',
'AT13',
'AT21',
'AT31',
'AT32',
'AT33',
'AT34',
'AT11',
'AT12',
'AT13',
'AT21',
'AT22',
'AT32',
'AT33',
'AT34',
'AT11',
'AT12',
'AT13',
'AT21',
'AT22',
'AT31',
'AT33',
'AT34',
'AT11',
'AT12',
'AT13',
'AT21',
'AT22',
'AT31',
'AT32',
'AT34',
'AT11',
'AT12',
'AT13',
'AT21',
'AT22',
'AT31',
'AT32',
'AT33'])
self.dij = np.array([103.001845,
84.204666,
220.811933,
132.00748,
214.511814,
246.933305,
390.85611,
505.089539,
103.001845,
45.796272,
216.994739,
129.878172,
140.706671,
201.232355,
343.50075,
453.515594,
84.204666,
45.796272,
249.932874,
158.630661,
186.420738,
244.108305,
387.61776,
498.407152,
220.811933,
216.994739,
249.932874,
92.407958,
151.777157,
92.894408,
194.851669,
306.105825,
132.00748,
129.878172,
158.630661,
92.407958,
124.563096,
122.433524,
261.893783,
376.34667,
214.511814,
140.706671,
186.420738,
151.777157,
|
KronosKoderS/py_pushover
|
pypushover/__init__.py
|
Python
|
mit
| 324
| 0.006173
|
__version__ = "0
|
.2.7"
from pypushover.Constants import PRIORITIES, SOUNDS, OS
from pypushover._base import BaseManager, send, base_url, PushoverError
from pypushover import client, groups, license, message, verification
__all__ = ['PRIORITIES', 'SOUNDS', 'OS', 'client', 'groups', 'license', 'message', 'verification']
|
Samweli/inasafe
|
safe/impact_functions/volcanic/volcano_point_population/impact_function.py
|
Python
|
gpl-3.0
| 8,892
| 0
|
# coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Volcano Point on
Population Impact Function.
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import numpy
from safe.impact_functions.bases.classified_vh_continuous_re import \
ClassifiedVHContinuousRE
from safe.impact_functions.volcanic.volcano_point_population\
.metadata_definitions import VolcanoPointPopulationFunctionMetadata
from safe.impact_functions.core import population_rounding, has_no_data
from safe.engine.interpolation import assign_hazard_values_to_exposure_data
from safe.storage.raster import Raster
from safe.utilities.i18n import tr
from safe.common.utilities import (
format_int,
humanize_class,
create_classes,
create_label
)
from safe.gui.tools.minimum_needs.needs_profile import add_needs_parameters, \
filter_needs_parameters, get_needs_provenance_value
from safe.impact_reports.population_exposure_report_mixin import \
PopulationExposureReportMixin
from safe.definitions import no_data_warning
class VolcanoPointPopulationFunction(
ClassifiedVHContinuousRE,
PopulationExposureReportMixin):
"""Impact Function for Volcano Point on Population."""
_metadata = VolcanoPointPopulationFunctionMetadata()
def __init__(self):
super(VolcanoPointPopulationFunction, self).__init__()
PopulationExposureReportMixin.__init__(self)
# AG: Use the proper minimum needs, update the parameters
self.parameters = add_needs_parameters(self.parameters)
# TODO: alternatively to specifying the question here we should
# TODO: consider changing the 'population' metadata concept to 'people'
self.question = (
'In the event of a volcano point how many people might be '
'impacted?')
self.no_data_warning = False
# A set of volcano names
self.volcano_names = set()
self.hazard_zone_attribute = 'radius'
def notes(self):
"""Return the notes section of the report.
:return: The notes that should be attached to this impact report.
:rtype: list
"""
if get_needs_provenance_value(self.parameters) is None:
needs_provenance = ''
else:
needs_provenance = tr(get_needs_provenance_value(self.parameters))
if self.volcano_names:
sorted_volcano_names = ', '.join(sorted(self.volcano_names))
else:
sorted_volcano_names = tr('Not specified in data')
fields = [
tr('Map shows buildings affected in each of the volcano buffered '
'zones.'),
tr('Total population in the analysis area: %s') %
format_int(population_rounding(self.total_population)),
tr('<sup>1</sup>People need evacuation if they are within the '
'volcanic hazard zones.'),
tr('Volcanoes considered: %s.') % sorted_volcano_names,
]
if needs_provenance:
fields.append(needs_provenance)
if self.no_data_warning:
            fields = fields + no_data_warning
# include any generic exposure specific notes from definitions.py
fields = fields + self.exposure_notes()
# include any generic hazard specific notes from definitions.py
        fields = fields + self.hazard_notes()
return fields
def run(self):
"""Run volcano point population evacuation Impact Function.
Counts number of people exposed to volcano event.
:returns: Map of population exposed to the volcano hazard zone.
The returned dict will include a table with number of people
evacuated and supplies required.
:rtype: dict
:raises:
* Exception - When hazard layer is not vector layer
* RadiiException - When radii are not valid (they need to be
monotonically increasing)
"""
# Parameters
radii = self.parameters['distances'].value
# Get parameters from layer's keywords
volcano_name_attribute = self.hazard.keyword('volcano_name_field')
data_table = self.hazard.layer.get_data()
# Get names of volcanoes considered
if volcano_name_attribute in self.hazard.layer.get_attribute_names():
# Run through all polygons and get unique names
for row in data_table:
self.volcano_names.add(row[volcano_name_attribute])
# Run interpolation function for polygon2raster
interpolated_layer, covered_exposure_layer = \
assign_hazard_values_to_exposure_data(
self.hazard.layer,
self.exposure.layer,
attribute_name=self.target_field
)
# Initialise affected population per categories
impact_category_ordering = []
for radius in radii:
category = tr('Radius %s km ' % format_int(radius))
self.affected_population[category] = 0
impact_category_ordering.append(category)
self.impact_category_ordering = impact_category_ordering
if has_no_data(self.exposure.layer.get_data(nan=True)):
self.no_data_warning = True
# Count affected population per polygon and total
for row in interpolated_layer.get_data():
# Get population at this location
population = row[self.target_field]
if not numpy.isnan(population):
population = float(population)
# Update population count for this category
category = tr('Radius %s km ' % format_int(
row[self.hazard_zone_attribute]))
self.affected_population[category] += population
# Count totals
self.total_population = population_rounding(
int(numpy.nansum(self.exposure.layer.get_data())))
self.minimum_needs = [
parameter.serialize() for parameter in
filter_needs_parameters(self.parameters['minimum needs'])
]
# Create style
colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
'#FFCC00', '#FF6600', '#FF0000', '#7A0000']
classes = create_classes(
covered_exposure_layer.get_data().flat[:], len(colours))
interval_classes = humanize_class(classes)
# Define style info for output polygons showing population counts
style_classes = []
for i in xrange(len(colours)):
style_class = dict()
style_class['label'] = create_label(interval_classes[i])
if i == 1:
label = create_label(
interval_classes[i],
tr('Low Population [%i people/cell]' % classes[i]))
elif i == 4:
label = create_label(
interval_classes[i],
tr('Medium Population [%i people/cell]' % classes[i]))
elif i == 7:
label = create_label(
interval_classes[i],
tr('High Population [%i people/cell]' % classes[i]))
else:
label = create_label(interval_classes[i])
style_class['label'] = label
style_class['quantity'] = classes[i]
style_class['colour'] = colours[i]
style_class['transparency'] = 0
style_classes.append(style_class)
# Override style info with new classes and name
style_info = dict(
target_field=None,
style_classes=style_classes,
style_type='rasterStyle')
impact_data = self.generate_data()
# Create vector layer and return
extra_keywords = {
'target_field': self.target_field,
'map_title': self.map_title(),
'legend_notes': self.metadata().key('legend_notes'),
|
Shinichi-Nakagawa/xp2015_baseball_tools
|
service/__init__.py
|
Python
|
mit
| 29
| 0
|
__author__ = 'shinyorke_mbp'
| |
nwokeo/supysonic
|
venv/lib/python2.7/site-packages/psycopg2/tests/test_cancel.py
|
Python
|
agpl-3.0
| 3,785
| 0.000264
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# test_cancel.py - unit test for query cancellation
#
# Copyright (C) 2010-2011 Jan Urbański <wulczer@wulczer.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import time
import threading
import psycopg2
import psycopg2.extensions
from psycopg2 import extras
from testconfig import dsn
from testutils import unittest, ConnectingTestCase, skip_before_postgres, slow
class CancelTests(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
cur = self.conn.cursor()
cur.execute('''
CREATE TEMPORARY TABLE table1 (
id int PRIMARY KEY
)''')
self.conn.commit()
def test_empty_cancel(self):
self.conn.cancel()
@slow
@skip_before_postgres(8, 2)
def test_cancel(self):
errors = []
def neverending(conn):
cur = conn.cursor()
try:
self.assertRaises(psycopg2.extensions.QueryCanceledError,
cur.execute, "select pg_sleep(60)")
# make sure the connection still works
conn.rollback()
cur.execute("select 1")
self.assertEqual(cur.fetchall(), [(1, )])
except Exception, e:
errors.append(e)
raise
def canceller(conn):
cur = conn.cursor()
try:
conn.cancel()
except Exception, e:
errors.append(e)
raise
del cur
thread1 = threading.Thread(target=neverending, args=(self.conn, ))
# wait a bit to make sure that the other thread is already in
# pg_sleep -- ugly and racy, but the chances are ridiculously low
thread2 = threading.Timer(0.3, canceller, args=(self.conn, ))
thread1.start()
thread2.start()
thread1.join()
thread2.join()
self.assertEqual(errors, [])
@slow
@skip_before_postgres(8, 2)
def test_async_cancel(self):
async_conn = psycopg2.connect(dsn, async_=True)
self.assertRaises(psycopg2.OperationalError, async_conn.cancel)
extras.wait_select(async_conn)
cur = async_conn.cursor()
cur.execute("select pg_sleep(10)")
time.sleep(1)
self.assertTrue(async_conn.isexecuting())
async_conn.cancel()
self.assertRaises(psycopg2.extensions.QueryCanceledError,
extras.wait_select, async_conn)
cur.execute("select 1")
extras.wait_select(async_conn)
self.assertEqual(cur.fetchall(), [(1, )])
def test_async_connection_cancel(self):
async_conn = psycopg2.connect(dsn, async_=True)
async_conn.close()
self.assertTrue(async_conn.closed)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
coddingtonbear/inthe.am
|
docs/conf.py
|
Python
|
agpl-3.0
| 8,300
| 0.000602
|
# type: ignore
#
# Inthe.AM documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 13 15:53:25 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.todo",
"sphinx.ext.imgmath",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Inthe.AM"
copyright = "2015, Adam Coddington"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "IntheAMdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
("index", "IntheAM.tex", "Inthe.AM Documentation", "Adam Coddington", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "intheam", "Inthe.AM Documentation", ["Adam Coddington"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"IntheAM",
"Inthe.AM Documentation",
"Adam Coddington",
"IntheAM",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_sho
|
buchuki/pyjaco
|
tests/basic/lambda2.py
|
Python
|
mit
| 67
| 0.014925
|
x = lambda x: (lambda x: (lambda x: x + 2)(x+2))(x+2)
print x(2)
|
miyuchina/mistletoe
|
test/test_latex_token.py
|
Python
|
mit
| 544
| 0
|
import unittest
from mistletoe.span_token import tokenize_inner
from mistletoe.latex_token import Math
from mistletoe.latex_renderer import LaTeXRenderer
class TestLaTeXToken(unittest.TestCase):
def setUp(self):
self.renderer = LaTeXRenderer()
self.renderer.__enter__()
self.addCleanup(self.renderer.__exit__, None, None, None)
def test_span(self):
token = next(iter(tokenize_inner('$ 1 + 2 = 3 $')))
self.assertIsInstance(token, Math)
self.assertEqual(token.content, '$ 1 + 2 = 3 $')
|
ar4s/django
|
django/db/models/fields/__init__.py
|
Python
|
bsd-3-clause
| 90,874
| 0.000694
|
import collections.abc
import copy
import datetime
import decimal
import math
import operator
import uuid
import warnings
from base64 import b64decode, b64encode
from functools import partialmethod, total_ordering
from django import forms
from django.apps import apps
from django.conf import settings
from django.core import checks, exceptions, validators
from django.db import connection, connections, router
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin
from django.utils import timezone
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.duration import duration_microseconds, duration_string
from django.utils.functional import Promise, cached_property
from django.utils.ipv6 import clean_ipv6_address
from django.utils.itercompat import is_iterable
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy as _
__all__ = [
'AutoField', 'BLANK_CHOICE_DASH', 'BigAutoField', 'BigIntegerField',
'BinaryField', 'BooleanField', 'CharField', 'CommaSeparatedIntegerField',
'DateField', 'DateTimeField', 'DecimalField', 'DurationField',
'EmailField', 'Empty', 'Field', 'FilePathField', 'FloatField',
'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED',
'NullBooleanField', 'PositiveBigIntegerField', 'PositiveIntegerField',
'PositiveSmallIntegerField', 'SlugField', 'SmallAutoField',
'SmallIntegerField', 'TextField', 'TimeField', 'URLField', 'UUIDField',
]
class Empty:
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field(field_name)
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
def return_None():
return None
@total_ordering
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
system_check_deprecated_details = None
system_check_removed_details = None
# Field flags
hidden = False
many_to_many = None
many_to_one = None
one_to_many = None
one_to_one = None
related_model = None
descriptor_class = DeferredAttribute
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=(),
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.remote_field = rel
self.is_relation = self.remote_field is not None
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
if isinstance(choices, collections.abc.Iterator):
choices = list(choices)
self.choices = choices
self.help_text = help_text
self.db_index = db_index
self.db_column = db_column
self._db_tablespace = db_tablespace
self.auto_created = auto_created
        # Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = list(validators) # Store for deconstruction later
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
"""
Return "app_label.model_label.field_name" for fields attached to
models.
"""
if not hasattr(self, 'model'):
return super().__str__()
model = self.model
return '%s.%s' % (model._meta.label, self.name)
def __repr__(self):
"""Display the module, class, and name of the field."""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__qualname__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
return [
*self._check_field_name(),
*self._check_choices(),
*self._check_db_index(),
*self._check_null_allowed_for_primary_keys(),
*self._check_backend_specific_checks(**kwargs),
*self._check_validators(),
*self._check_deprecation_details(),
]
def _check_field_name(self):
"""
Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk".
"""
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
obj=self,
id='fields.E001',
)
]
elif LOOKUP_SEP in self.name:
return [
|
zcbenz/cefode-chromium
|
tools/git/move_source_file.py
|
Python
|
bsd-3-clause
| 5,348
| 0.010845
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Moves a C++ file to a new location, updating any include paths that
point to it, and re-ordering headers as needed. Updates include
guards in moved header files. Assumes Chromium coding style.
Attempts to update paths used in .gyp(i) files, but does not reorder
or restructure .gyp(i) files in any way.
Updates full-path references to files in // comments in source files.
Must run in a git checkout, as it relies on git grep for a fast way to
find files that reference the moved file.
"""
import os
import re
import subprocess
import sys
import mffr
if __name__ == '__main__':
# Need to add the directory containing sort-headers.py to the Python
# classpath.
sys.path.append(os.path.abspath(os.path.join(sys.path[0], '..')))
sort_headers = __import__('sort-headers')
HANDLED_EXTENSIONS = ['.cc', '.mm', '.h', '.hh']
def MakeDestinationPath(from_path, to_path):
"""Given the from and to paths, return a correct destination path.
The initial destination path may either a full path or a directory,
in which case the path must end with /. Also does basic sanity
checks.
"""
if os.path.splitext(from_path)[1] not in HANDLED_EXTENSIONS:
raise Exception('Only intended to move individual source files.')
dest_extension = os.path.splitext(to_path)[1]
if dest_extension not in HANDLED_EXTENSIONS:
if to_path.endswith('/') or to_path.endswith('\\'):
to_path += os.path.basename(from_path)
else:
raise Exception('Destination must be either full path or end with /.')
return to_path
def MoveFile(from_path, to_path):
"""Performs a git mv command to move a file from |from_path| to |to_path|.
"""
if not os.system('git mv %s %s' % (from_path, to_path)) == 0:
raise Exception('Fatal: Failed to run git mv command.')
def UpdatePostMove(from_path, to_path):
"""Given a file that has moved from |from_path| to |to_path|,
updates the moved file's include guard to match the new path and
updates all references to the file in other source files. Also tries
to update references in .gyp(i) files using a heuristic.
"""
# Include paths always use forward slashes.
from_path = from_path.replace('\\', '/')
to_path = to_path.replace('\\', '/')
if os.path.splitext(from_path)[1] in ['.h', '.hh']:
UpdateIncludeGuard(from_path, to_path)
# Update include/import references.
files_with_changed_includes = mffr.MultiFileFindReplace(
r'(#(include|import)\s*["<])%s([>"])' % re.escape(from_path),
r'\1%s\3' % to_path,
['*.cc', '*.h', '*.m', '*.mm'])
# Reorder headers in files that changed.
for changed_file in files_with_changed_includes:
def AlwaysConfirm(a, b): return True
sort_headers.FixFileWithConfirmFunction(changed_file, AlwaysConfirm)
# Update comments; only supports // comments, which are primarily
# used in our code.
#
# This work takes a bit of time. If this script starts feeling too
# slow, one good way to speed it up is to make the comment handling
# optional under a flag.
mffr.MultiFileFindReplace(
r'(//.*)%s' % re.escape(from_path),
r'\1%s' % to_path,
['*.cc', '*.h', '*.m', '*.mm'])
# Update references in .gyp(i) files.
def PathMinusFirstComponent(path):
"""foo/bar/baz -> bar/baz"""
parts = re.split(r"[/\\]", path, 1)
if len(parts) == 2:
return parts[1]
else:
return parts[0]
mffr.MultiFileFindReplace(
r'([\'"])%s([\'"])' % re.escape(PathMinusFirstComponent(from_path)),
r'\1%s\2' % PathMinusFirstComponent(to_path),
['*.gyp*'])
def MakeIncludeGuardName(path_from_root):
"""Returns an include guard name given a path from root."""
guard = path_from_root.replace('/', '_')
guard = guard.replace('\\', '_')
guard = guard.replace('.', '_')
guard += '_'
return guard.upper()
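# For example, MakeIncludeGuardName('chrome/browser/foo_bar.h') yields
# 'CHROME_BROWSER_FOO_BAR_H_' (hypothetical path, purely to illustrate the
# substitutions above).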
def UpdateIncludeGuard(old_path, new_path):
"""Updates the include guard in a file now residing at |new_path|,
previously residing at |old_path|, with an up-to-date include guard.
Errors out if an include guard per Chromium style guide cannot be
found for the old path.
"""
old_guard = MakeIncludeGuardName(old_path)
new_guard = MakeIncludeGuardName(new_path)
with open(new_path) as f:
contents = f.read()
  new_contents = contents.replace(old_guard, new_guard)
if new_contents == contents:
raise Exception(
'Error updating include guard; perhaps old guard is not per style guide?')
with open(new_path, 'w') as f:
f.write(new_contents)
def main():
if not os.path.isdir('.git'):
print 'Fatal: You must run from the root of a git checkout.'
return 1
args = sys.argv[1:]
if not len(args) in [2, 3]:
print ('Usage: move_source_file.py [--already-moved] FROM_PATH TO_PATH'
'\n\n%s' % __doc__)
return 1
  already_moved = False
if args[0] == '--already-moved':
args = args[1:]
already_moved = True
from_path = args[0]
to_path = args[1]
to_path = MakeDestinationPath(from_path, to_path)
if not already_moved:
MoveFile(from_path, to_path)
UpdatePostMove(from_path, to_path)
return 0
if __name__ == '__main__':
sys.exit(main())
|
mparra-mpz/CURIOSITY
|
CURIOSITY/test/CommunicationTest.py
|
Python
|
gpl-2.0
| 1,149
| 0.007833
|
#! /usr/bin/env python
import unittest
import time
from Communication import Communication
class CommunicationTest(unittest.TestCase):
def setUp(self):
'''
Verify environment is setup properly.
'''
self.controller = Communication()
self.b_list = self.controller.get_bluetooth_list()
def tearDown(self):
'''
        Verify environment is torn down properly.
'''
pass
def test_get_bluetooth_list(self):
'''
Verify that the bluetooth list was retrieve without problems.
'''
value = False
if "Empty" not in self.b_list[0]:
value = True
self.assertTrue(value)
def test_send(self):
'''
Verify that the instruction was send without problems.
'''
for b_name in self.b_list:
if "CURIOSITY"in b_name:
break
self.controller.connect(b_name)
value = self.controller.send("Hello")
time.sleep(5)
self.controller.disconnect()
self.assertTrue(value)
if __name__ == '__main__':
unittest.main()
|
maminian/skewtools
|
scripts/animate_particles_2d_labframe.py
|
Python
|
gpl-3.0
| 1,101
| 0.070845
|
from numpy import *
from matplotlib import pyplot
import scripts.skewtools as st
import sys
X,Y,t,Pe = st.importDatasets(sys.argv[1],'X','Y','Time','Peclet')
figscale = 5.
fig,ax = pyplot.subplots(1,1,figsize=(4*figscale,figscale))
uwall = 2./3.
xmax = X.max() + uwall*Pe*t[-1]
for i in range(len(t)):
#for i in [52]:
ax.cla()
# ax.hold(True)
ax.plot([0,xmax],[1,1],linewidth=0.5,color='k')
ax.plot([0,xmax],[-1,-1],linewidth=0.5,color='k')
subset1 = ((Y[:,i]<1.)*(Y[:,i] > -1.))
    subset2 = ~subset1
ax.scatter(X[subset1,i],Y[subset1,i],facecolor=[0,0,0.9],edgecolor=[0,0,0,0],s=1,alpha=0.2)
    ax.scatter(X[subset2,i],Y[subset2,i],facecolor=[0.9,0,0],edgecolor=[0,0,0,0],s=1,alpha=0.2)
# ax.hist2d(X[subset,i] + uwall*Pe*t[i],Y[subset,i],cmap=pyplot.cm.inferno,)
# ax.hold(False)
ax.set_xlim([0.,xmax])
ax.set_ylim([-1.05,1.05])
print '%i active particles, %i of %i frames'%(sum(subset1),i,len(t)-1)
pyplot.savefig('cloudframe_'+str(i).zfill(4)+'.png',dpi=80,bbox_inches='tight')
# end for
#pyplot.tight_layout()
|
wilsonkichoi/zipline
|
zipline/sources/data_frame_source.py
|
Python
|
apache-2.0
| 5,146
| 0
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools to generate data sources.
"""
import numpy as np
import pandas as pd
from zipline.gens.utils import hash_args
from zipline.sources.data_source import DataSource
class DataFrameSource(DataSource):
"""
Data source that yields from a pandas DataFrame.
:Axis layout:
* columns : sids
* index : datetime
:Note:
Bars where the price is nan are filtered out.
"""
def __init__(self, data, **kwargs):
assert isinstance(data.index, pd.tseries.index.DatetimeIndex)
        # Only accept integer SIDs as the columns of the DataFrame
assert isinstance(data.columns, pd.Int64Index)
# TODO is ffilling correct/necessary?
# Forward fill prices
self.data = data.fillna(method='ffill')
# Unpack config dictionary with default values.
self.start = kwargs.get('start', self.data.index[0])
self.end = kwargs.get('end', self.data.index[-1])
self.sids = self.data.columns
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
self.started_sids = set()
@property
def mapping(self):
return {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt, series in self.data.iterrows():
for sid, price in series.iteritems():
# Skip SIDs that can not be forward filled
if np.isnan(price) and \
sid not in self.started_sids:
continue
self.started_sids.add(sid)
event = {
'dt': dt,
'sid': sid,
'price': price,
# Just chose something large
# if no volume available.
'volume': 1e9,
}
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
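# A small construction sketch for DataFrameSource (hypothetical prices; per the
# asserts in __init__, the index must be a DatetimeIndex and the columns integer
# SIDs):
#
#   import pandas as pd
#   index = pd.date_range('2014-01-01', periods=3)
#   prices = pd.DataFrame({0: [10.0, 10.5, 11.0]}, index=index)
#   source = DataFrameSource(prices, start=index[0], end=index[-1])
#   events = list(source.raw_data)  # dicts with 'dt', 'sid', 'price', 'volume'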
class DataPanelSource(DataSource):
"""
Data source that yields from a pandas Panel.
:Axis layout:
* items : sids
* major_axis : datetime
* minor_axis : price, volume, ...
:Note:
Bars where the price is nan are filtered out.
"""
def __init__(self, data, **kwargs):
assert isinstance(data.major_axis, pd.tseries.index.DatetimeIndex)
# Only accept integer SIDs as the items of the Panel
assert isinstance(data.items, pd.Int64Index)
# TODO is ffilling correct/necessary?
# forward fill with volumes of 0
self.data = data.fillna(value={'volume': 0})
        # Unpack config dictionary with default values.
self.start = kwargs.get('start', self.data.major_axis[0])
self.end = kwargs.get('end', self.data.major_axis[-1])
self.sids = self.data.items
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
self.started_sids = set()
@property
def mapping(self):
mapping = {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
# Add additional fields.
for field_name in self.data.minor_axis:
if field_name in ['price', 'volume', 'dt', 'sid']:
continue
mapping[field_name] = (lambda x: x, field_name)
return mapping
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt in self.data.major_axis:
df = self.data.major_xs(dt)
for sid, series in df.iteritems():
# Skip SIDs that can not be forward filled
if np.isnan(series['price']):
continue
self.started_sids.add(sid)
event = {
'dt': dt,
'sid': sid,
}
for field_name, value in series.iteritems():
event[field_name] = value
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
|
mgadi/naemonbox
|
sources/psdash/pyzmq-13.1.0/examples/mongodb/controller.py
|
Python
|
gpl-2.0
| 3,050
| 0.003934
|
#-----------------------------------------------------------------------------
# Copyright (c) 2010 Justin Riley
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import zmq
import pymongo
import pymongo.json_util
import json
class MongoZMQ(object):
"""
ZMQ server that adds/fetches documents (ie dictionaries) to a MongoDB.
NOTE: mongod must be started before using this class
"""
def __init__(self, db_name, table_name, bind_addr="tcp://127.0.0.1:5000"):
"""
bind_addr: address to bind zmq socket on
        db_name: name of database to write to (created if it doesn't exist)
        table_name: name of mongodb 'table' in the db to write to (created if it doesn't exist)
"""
self._bind_addr = bind_addr
self._db_name = db_name
self._table_name = table_name
self._conn = pymongo.Connection()
self._db = self._conn[self._db_name]
self._table = self._db[self._table_name]
def _doc_to_json(self, doc):
return json.dumps(doc,default=pymongo.json_util.default)
def add_document(self, doc):
"""
Inserts a document (dictionary) into mongo database table
"""
        print 'adding document %s' % (doc)
try:
self._table.insert(doc)
except Exception,e:
return 'Error: %s' % e
def get_document_by_keys(self, keys):
"""
Attempts to return a single document from database table that matches
each key/value in keys dictionary.
"""
print 'attempting to retrieve document using keys: %s' % keys
try:
return self._table.find_one(keys)
except Exception,e:
return 'Error: %s' % e
def start(self):
context = zmq.Context()
socket = context.socket(zmq.ROUTER)
socket.bind(self._bind_addr)
while True:
msg = socket.recv_multipart()
print "Received msg: ", msg
if len(msg) != 3:
                error_msg = 'invalid message received: %s' % msg
print error_msg
reply = [msg[0], error_msg]
socket.send_multipart(reply)
continue
id = msg[0]
operation = msg[1]
contents = json.loads(msg[2])
# always send back the id with ROUTER
reply = [id]
if operation == 'add':
self.add_document(contents)
reply.append("success")
elif operation == 'get':
doc = self.get_document_by_keys(contents)
json_doc = self._doc_to_json(doc)
reply.append(json_doc)
else:
print 'unknown request'
socket.send_multipart(reply)
def main():
MongoZMQ('ipcontroller','jobs').start()
if __name__ == "__main__":
main()
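A minimal client-side sketch for the server above: the ROUTER socket receives three frames [identity, operation, json-contents], and a DEALER socket adds the identity frame automatically, so a client only sends the last two. The address and document keys below are illustrative and assume MongoZMQ's default bind_addr.
import json
import zmq

def demo_client(addr="tcp://127.0.0.1:5000"):
    ctx = zmq.Context()
    sock = ctx.socket(zmq.DEALER)
    sock.connect(addr)
    # insert a document
    sock.send_multipart(['add', json.dumps({'name': 'job-1', 'state': 'queued'})])
    print(sock.recv_multipart())    # ['success'] when the insert worked
    # fetch it back by matching keys
    sock.send_multipart(['get', json.dumps({'name': 'job-1'})])
    print(sock.recv_multipart())    # [json-encoded document]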
|
pelson/conda-build
|
tests/test_render.py
|
Python
|
bsd-3-clause
| 1,460
| 0.000685
|
import os
from conda_build import api
from conda_build import render
def test_output_with_noarch_says_noarch(testing_metadata):
    testing_metadata.meta['build']['noarch'] = 'python'
output = api.get_output_file_path(testing_metadata)
assert os.path.sep + "noarch" + os.path.sep in output[0]
def test_output_with_noarch_python_says_noarch(testing_metadata):
    testing_metadata.meta['build']['noarch_python'] = True
output = api.get_output_file_path(testing_metadata)
assert os.path.sep + "noarch" + os.path.sep in output[0]
def test_reduce_duplicate_specs(testing_metadata):
reqs = {'build': ['exact', 'exact 1.2.3 1', 'exact >1.0,<2'],
'host': ['exact', 'exact 1.2.3 1']
}
testing_metadata.meta['requirements'] = reqs
render._simplify_to_exact_constraints(testing_metadata)
assert (testing_metadata.meta['requirements']['build'] ==
testing_metadata.meta['requirements']['host'])
simplified_deps = testing_metadata.meta['requirements']
assert len(simplified_deps['build']) == 1
assert 'exact 1.2.3 1' in simplified_deps['build']
def test_pin_run_as_build_preserve_string(testing_metadata):
m = testing_metadata
m.config.variant['pin_run_as_build']['pkg'] = {
'max_pin': 'x.x'
}
dep = render.get_pin_from_build(
m,
'pkg * somestring*',
{'pkg': '1.2.3 somestring_h1234'}
)
assert dep == 'pkg >=1.2.3,<1.3.0a0 somestring*'
|
openstack/horizon
|
openstack_dashboard/test/unit/api/rest/test_keystone.py
|
Python
|
apache-2.0
| 37,897
| 0
|
# Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from django.conf import settings
from oslo_serialization import jsonutils
from openstack_dashboard import api
from openstack_dashboard.api.rest import keystone
from openstack_dashboard.test import helpers as test
class KeystoneRestTestCase(test.TestCase):
#
# Version
#
@test.create_mocks({api.keystone: ['get_version']})
def test_version_get(self):
request = self.mock_rest_request()
self.mock_get_version.return_value = '3'
response = keystone.Version().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"version": "3"})
self.mock_get_version.assert_called_once_with()
#
# Users
#
@test.create_mocks({api.keystone: ['user_get']})
def test_user_get(self):
request = self.mock_rest_request()
self.mock_user_get.return_value.to_dict.return_value = {'name': 'Ni!'}
response = keystone.User().get(request, 'the_id')
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"name": "Ni!"})
self.mock_user_get.assert_called_once_with(
request, 'the_id', admin=False)
@test.create_mocks({api.keystone: ['user_get']})
def test_user_get_current(self):
request = self.mock_rest_request(**{'user.id': 'current_id'})
self.mock_user_get.return_value.to_dict.return_value = {'name': 'Ni!'}
response = keystone.User().get(request, 'current')
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"name": "Ni!"})
self.mock_user_get.assert_called_once_with(
request, 'current_id', admin=False)
@test.create_mocks({api.keystone: ['user_list']})
def test_user_get_list(self):
request = self.mock_rest_request(**{
'session.get': mock.Mock(return_value='the_domain'),
'GET': {},
})
self.mock_user_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'name': 'Ni!'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'Ptang!'}})
]
response = keystone.Users().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [{"name": "Ni!"}, {"name": "Ptang!"}]})
self.mock_user_list.assert_called_once_with(request, project=None,
domain='the_domain',
group=None,
filters=None)
@test.create_mocks({api.keystone: ['user_list']})
def test_user_get_list_with_filters(self):
filters = {'enabled': True}
request = self.mock_rest_request(**{
'session.get': mock.Mock(return_value='the_domain'),
'GET': dict(**filters),
})
self.mock_user_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'name': 'Ni!'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'Ptang!'}})
]
response = keystone.Users().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [{"name": "Ni!"}, {"name": "Ptang!"}]})
self.mock_user_list.assert_called_once_with(request, project=None,
domain='the_domain',
group=None,
filters=filters)
def test_user_create_full(self):
self._test_user_create(
'{"name": "bob", '
'"password": "sekrit", "project": "123", '
'"email": "spam@company.example", '
'"description": "hello, puff"}',
{
'name': 'bob',
'password': 'sekrit',
'email': 'spam@company.example',
'project': '123',
'domain': 'the_domain',
'enabled': True,
'description': 'hello, puff'
}
)
def test_user_create_existing_role(self):
self._test_user_create(
'{"name": "bob", '
'"password": "sekrit", "project": "123", '
'"email": "spam@company.example"}',
{
'name': 'bob',
'password': 'sekrit',
'email': 'spam@company.example',
'project': '123',
'domain': 'the_domain',
'enabled': True,
'description': None
}
)
def test_user_create_no_project(self):
self._test_user_create(
'{"name": "bob", '
'"password": "sekrit", "project": "", '
'"email": "spam@company.example"}',
{
'name': 'bob',
'password': 'sekrit',
'email': 'spam@company.example',
'project': None,
'domain': 'the_domain',
'enabled': True,
'description': None
}
)
def test_user_create_partial(self):
self._test_user_create(
'{"name": "bob", "project": ""}',
{
'name': 'bob',
'password': None,
'email': None,
'project': None,
'domain': 'the_domain',
'enabled': True,
'description': None
}
)
    @test.create_mocks({api.keystone: ['get_default_domain',
                                       'user_create']})
def _test_user_create(self, supplied_body, add_user_call):
request = self.mock_rest_request(body=supplied_body)
self.mock_get_default_domain.return_value = \
mock.Mock(**{'id': 'the_domain'})
self.mock_user_create.return_value = mock.Mock(**{
'id': 'user123',
'to_dict.return_value': {'id': 'user123', 'name': 'bob'}
})
response = keystone.Users().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response['location'],
'/api/keystone/users/user123')
self.assertEqual(response.json,
{"id": "user123", "name": "bob"})
self.mock_user_create.assert_called_once_with(request, **add_user_call)
self.mock_get_default_domain.assert_called_once_with(request)
@test.create_mocks({api.keystone: ['user_delete']})
def test_user_delete_many(self):
request = self.mock_rest_request(body='''
["id1", "id2", "id3"]
''')
self.mock_user_delete.return_value = None
response = keystone.Users().delete(request)
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
self.mock_user_delete.assert_has_calls([
mock.call(request, 'id1'),
mock.call(request, 'id2'),
mock.call(request, 'id3'),
])
@test.create_mocks({api.keystone: ['user_delete']})
def test_user_delete(self):
request = self.mock_rest_request()
self.mock_user_delete.return_value = None
response = keystone.User().delete(request, 'the_id')
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
self.mock_user_delete.assert_called_once_with(request, 'the_id')
@test.create_mocks({api.keystone: ['user_get',
'user_update_password']})
|
datakortet/django-cms
|
cms/tests/menu.py
|
Python
|
bsd-3-clause
| 52,671
| 0.001956
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import copy
from django.db import connection
from cms.api import create_page
from cms.menu import CMSMenu, get_visible_pages
from cms.models import Page
from cms.models.permissionmodels import GlobalPagePermission, PagePermission
from cms.test_utils.fixtures.menus import (MenusFixture, SubMenusFixture,
SoftrootFixture, ExtendedMenusFixture)
from cms.test_utils.testcases import SettingsOverrideTestCase
from cms.test_utils.util.context_managers import (SettingsOverride,
LanguageOverride)
from cms.test_utils.util.mock import AttributeObject
from cms.utils import get_cms_setting
from cms.utils.i18n import force_language
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, User, Permission, Group
from django.contrib.sites.models import Site
from django.template import Template, TemplateSyntaxError
from django.utils.translation import activate
from menus.base import NavigationNode
from menus.menu_pool import menu_pool, _build_nodes_inner_for_one_menu
from menus.models import CacheKey
from menus.utils import mark_descendants, find_selected, cut_levels
from django.utils.unittest.case import skipUnless
class BaseMenuTest(SettingsOverrideTestCase):
def _get_nodes(self, path='/'):
node1 = NavigationNode('1', '/1/', 1)
node2 = NavigationNode('2', '/2/', 2, 1)
node3 = NavigationNode('3', '/3/', 3, 2)
node4 = NavigationNode('4', '/4/', 4, 2)
node5 = NavigationNode('5', '/5/', 5)
nodes = [node1, node2, node3, node4, node5]
tree = _build_nodes_inner_for_one_menu([n for n in nodes], "test")
request = self.get_request(path)
menu_pool.apply_modifiers(tree, request)
return tree, nodes
def setUp(self):
super(BaseMenuTest, self).setUp()
if not menu_pool.discovered:
menu_pool.discover_menus()
self.old_menu = menu_pool.menus
menu_pool.menus = {'CMSMenu': self.old_menu['CMSMenu']}
menu_pool.clear(settings.SITE_ID)
activate("en")
def tearDown(self):
menu_pool.menus = self.old_menu
super(BaseMenuTest, self).tearDown()
def get_page(self, num):
return Page.objects.public().get(title_set__title='P%s' % num)
class ExtendedFixturesMenuTests(ExtendedMenusFixture, BaseMenuTest):
"""
Tree from fixture:
+ P1
| + P2
| + P3
| + P9
| + P10
| + P11
+ P4
| + P5
+ P6 (not in menu)
+ P7
+ P8
"""
def get_page(self, num):
return Page.objects.public().get(title_set__title='P%s' % num)
def get_level(self, num):
return Page.objects.public().filter(level=num)
def get_all_pages(self):
return Page.objects.public()
def test_menu_failfast_on_invalid_usage(self):
context = self.get_context()
context['child'] = self.get_page(1)
# test standard show_menu
with SettingsOverride(DEBUG=True, TEMPLATE_DEBUG=True):
tpl = Template("{% load menu_tags %}{% show_menu 0 0 0 0 'menu/menu.html' child %}")
self.assertRaises(TemplateSyntaxError, tpl.render, context)
def test_show_submenu_nephews(self):
context = self.get_context(path=self.get_page(2).get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_sub_menu 100 1 1 %}")
tpl.render(context)
nodes = context["children"]
# P2 is the selected node
self.assertTrue(nodes[0].selected)
# Should include P10 but not P11
self.assertEqual(len(nodes[1].children), 1)
self.assertFalse(nodes[1].children[0].children)
tpl = Template("{% load menu_tags %}{% show_sub_menu 100 1 %}")
tpl.render(context)
nodes = context["children"]
# should now include both P10 and P11
self.assertEqual(len(nodes[1].children), 1)
self.assertEqual(len(nodes[1].children[0].children), 1)
class FixturesMenuTests(MenusFixture, BaseMenuTest):
"""
Tree from fixture:
+ P1
| + P2
| + P3
+ P4
| + P5
+ P6 (not in menu)
+ P7
+ P8
"""
def get_page(self, num):
return Page.objects.public().get(title_set__title='P%s' % num)
def get_level(self, num):
return Page.objects.public().filter(level=num)
def get_all_pages(self):
return Page.objects.public()
def test_menu_failfast_on_invalid_usage(self):
context = self.get_context()
context['child'] = self.get_page(1)
# test standard show_menu
with SettingsOverride(DEBUG=True, TEMPLATE_DEBUG=True):
tpl = Template("{% load menu_tags %}{% show_menu 0 0 0 0 'menu/menu.html' child %}")
self.assertRaises(TemplateSyntaxError, tpl.render, context)
def test_basic_cms_menu(self):
self.assertEqual(len(menu_pool.menus), 1)
with force_language("en"):
response = self.client.get(self.get_pages_root()) # path = '/'
self.assertEquals(response.status_code, 200)
request = self.get_request()
# test the cms menu class
menu = CMSMenu()
nodes = menu.get_nodes(request)
self.assertEqual(len(nodes), len(self.get_all_pages()))
def test_show_menu(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 2)
self.assertEqual(nodes[0].selected, True)
self.assertEqual(nodes[0].sibling, False)
self.assertEqual(nodes[0].descendant, False)
self.assertEqual(nodes[0].children[0].descendant, True)
self.assertEqual(nodes[0].children[0].children[0].descendant, True)
self.assertEqual(nodes[0].get_absolute_url(), self.get_pages_root())
self.assertEqual(nodes[1].get_absolute_url(), self.get_page(4).get_absolute_url())
self.assertEqual(nodes[1].sibling, True)
self.assertEqual(nodes[1].selected, False)
@skipUnless(settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3', 'transaction queries')
def test_show_menu_num_queries(self):
context = self.get_context()
# test standard show_menu
with self.assertNumQueries(5):
"""
The queries should be:
get all pages
get all page permissions
get all titles
get the menu cache key
set the menu cache key
"""
tpl = Template("{% load menu_tags %}{% show_menu %}")
tpl.render(context)
def test_show_menu_cache_key_leak(self):
context = self.get_context()
tpl = Template("{% load menu_tags %}{% show_menu %}")
self.assertEqual(CacheKey.objects.count(), 0)
tpl.render(context)
self.assertEqual(CacheKey.objects.count(), 1)
tpl.render(context)
self.assertEqual(CacheKey.objects.count(), 1)
def test_menu_keys_duplicate_truncates(self):
"""
When two objects with the same characteristics are present in the
database, get_or_create truncates the database table to "invalidate"
the cache, before retrying. This can happen after migrations, and since
it's only cache, we don't want any propagation of errors.
"""
CacheKey.objects.create(language="fr", site=1, key="a")
CacheKey.objects.create(language="fr", site=1, key="a")
CacheKey.objects.get_or_create(language="fr", site=1, key="a")
self.assertEqual(CacheKey.objects.count(), 1)
def test_only_active_tree(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes[1].children), 0)
self.assertEqual(len(nodes[0].children), 1)
|
meskio/bitmask_client
|
src/leap/bitmask/backend/leapbackend.py
|
Python
|
gpl-3.0
| 20,848
| 0
|
# -*- coding: utf-8 -*-
# leapbackend.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Backend for GUI/Logic communication.
"""
import logging
from Queue import Queue, Empty
from twisted.internet import reactor
from twisted.internet import threads, defer
from twisted.internet.task import LoopingCall
import zope.interface
import zope.proxy
from leap.bitmask.backend.leapsignaler import Signaler
from leap.bitmask.backend import components
logger = logging.getLogger(__name__)
class Backend(object):
"""
Backend for everything, the UI should only use this class.
"""
PASSED_KEY = "passed"
ERROR_KEY = "error"
def __init__(self, bypass_checks=False):
"""
Constructor for the backend.
"""
# Components map for the commands received
self._components = {}
# Ongoing defers that will be cancelled at stop time
self._ongoing_defers = []
# Signaler object to translate commands into Qt signals
self._signaler = Signaler()
# Objects needed by several components, so we make a proxy and pass
# them around
self._soledad_proxy = zope.proxy.ProxyBase(None)
self._keymanager_proxy = zope.proxy.ProxyBase(None)
# Component registration
self._register(components.Provider(self._signaler, bypass_checks))
self._register(components.Register(self._signaler))
self._register(components.Authenticate(self._signaler))
self._register(components.EIP(self._signaler))
self._register(components.Soledad(self._soledad_proxy,
self._keymanager_proxy,
self._signaler))
self._register(components.Keymanager(self._keymanager_proxy,
self._signaler))
self._register(components.Mail(self._soledad_proxy,
self._keymanager_proxy,
self._signaler))
# We have a looping call on a thread executing all the
# commands in queue. Right now this queue is an actual Queue
# object, but it'll become the zmq recv_multipart queue
self._lc = LoopingCall(threads.deferToThread, self._worker)
        # Temporary call_queue for the worker; will be replaced with
        # recv_multipart or something equivalent in the looping call
self._call_queue = Queue()
@property
def signaler(self):
"""
Public signaler access to let the UI connect to its signals.
"""
return self._signaler
def start(self):
"""
Starts the looping call
"""
logger.debug("Starting worker...")
self._lc.start(0.01)
def stop(self):
"""
Stops the looping call and tries to cancel all the defers.
"""
reactor.callLater(2, self._stop)
def _stop(self):
"""
Delayed stopping of worker. Called from `stop`.
"""
logger.debug("Stopping worker...")
if self._lc.running:
self._lc.stop()
else:
logger.warning("Looping call is not running, cannot stop")
logger.debug("Cancelling ongoing defers...")
while len(self._ongoing_defers) > 0:
d = self._ongoing_defers.pop()
d.cancel()
logger.debug("Defers cancelled.")
def _register(self, component):
"""
Registers a component in this backend
:param component: Component to register
:type component: any object that implements ILEAPComponent
"""
# TODO: assert that the component implements the interfaces
# expected
try:
self._components[component.key] = component
except Exception:
logger.error("There was a problem registering %s" % (component,))
def _signal_back(self, _, signal):
"""
Helper method to signal back (callback like behavior) to the
UI that an operation finished.
:param signal: signal name
:type signal: str
"""
self._signaler.signal(signal)
def _worker(self):
"""
Worker method, called from a different thread and as a part of
a looping call
"""
try:
# this'll become recv_multipart
cmd = self._call_queue.get(block=False)
# cmd is: component, method, signalback, *args
func = getattr(self._components[cmd[0]], cmd[1])
d = func(*cmd[3:])
if d is not None: # d may be None if a defer chain is cancelled.
# A call might not have a callback signal, but if it does,
# we add it to the chain
if cmd[2] is not None:
d.addCallbacks(self._signal_back, logger.error, cmd[2])
d.addCallbacks(self._done_action, logger.error,
callbackKeywords={"d": d})
d.addErrback(logger.error)
self._ongoing_defers.append(d)
except Empty:
# If it's just empty we don't have anything to do.
pass
except defer.CancelledError:
logger.debug("defer cancelled somewhere (CancelledError).")
except Exception as e:
# But we log the rest
logger.exception("Unexpected exception: {0!r}".format(e))
def _done_action(self, _, d):
"""
Remover of the defer once it's done
:param d: defer to remove
:type d: twisted.internet.defer.Deferred
"""
if d in self._ongoing_defers:
self._ongoing_defers.remove(d)
    # XXX: Temporary interface until we migrate to zmq
    # We simulate the calls to zmq.send_multipart. Once we separate
    # this into two processes, the methods below can be changed to
# send_multipart and this backend class will be really simple.
def provider_setup(self, provider):
"""
Initiate the setup for a provider.
:param provider: URL for the provider
:type provider: unicode
Signals:
prov_unsupported_client
prov_unsupported_api
prov_name_resolution -> { PASSED_KEY: bool, ERROR_KEY: str }
prov_https_connection -> { PASSED_KEY: bool, ERROR_KEY: str }
            prov_download_provider_info -> { PASSED_KEY: bool, ERROR_KEY: str }
"""
self._call_queue.put(("provider", "setup_provider", None, provider))
def provider_cancel_setup(self):
"""
Cancel the ongoing setup provider (if any).
"""
self._call_queue.put(("provider", "cancel_setup_provider", None))
def provider_bootstrap(self, provider):
"""
Second stage of bootstrapping for a provider.
:param provider: URL for the provider
:type provider: unicode
Signals:
prov_problem_with_provider
prov_download_ca_cert -> {PASSED_KEY: bool, ERROR_KEY: str}
prov_check_ca_fingerprint -> {PASSED_KEY: bool, ERROR_KEY: str}
prov_check_api_certificate -> {PASSED_KEY: bool, ERROR_KEY: str}
"""
self._call_queue.put(("provider", "bootstrap", None, provider))
def provider_get_supported_services(self, domain):
"""
Signal a list of supported services provided by the given provider.
:param domain: the provider to get the services from.
:type domain: str
Signals:
|
ilausuch/CacheServer
|
src/test/testServer_speed.py
|
Python
|
mit
| 1,247
| 0.035285
|
import sys
__author__ = "ilausuch"
__date__ = "$13-jun-2017 20:05:19$"
sys.path.append( "../Addons" )
from Client import Client
from Timer import Timer
count=1000
def test1():
print ("Test 1: Multiple entry set same connection (count={0})".format(count))
    client = Client("localhost", 10001)
timer=Timer()
for i in range(0,count):
client.entry_set("test speed",i,i)
client.close()
print ("Seconds: {0}".format(timer.end()))
def test2():
print ("Test 2: Multiple entry set opening/closing connection (count={0})".format(count))
timer=Timer()
for i in range(0,count):
client = Client("localhost", 10001)
client.entry_set("test speed",i,i)
client.close()
client.close()
print ("Seconds: {0}".format(timer.end()))
def test3():
print ("Test 3: Multiple entry get (count={0})".format(count))
client = Client("localhost", 10001)
timer=Timer()
for i in range(0,count):
client.entry_get("test speed",i)
client.close()
print ("Seconds: {0}".format(timer.end()))
def main():
test1()
test2()
test3()
if __name__ == "__main__":
main()
|
googleapis/python-dialogflow
|
samples/generated_samples/dialogflow_generated_dialogflow_v2_versions_create_version_async.py
|
Python
|
apache-2.0
| 1,488
| 0.000672
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateVersion
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2_Versions_CreateVersion_async]
from google.cloud import dialogflow_v2
async def sample_create_version():
# Create a client
client = dialogflow_v2.VersionsAsyncClient()
# Initialize request argument(s)
request = dialogflow_v2.CreateVersionRequest(
parent="parent_value",
)
# Make the request
response = await client.create_version(request=request)
# Handle the response
print(response)
# [END dialogflow_generated_dialogflow_v2_Versions_CreateVersion_async]
|
feigaochn/leetcode
|
p576_out_of_boundary_paths.py
|
Python
|
mit
| 1,552
| 0
|
#!/usr/bin/env python3
# coding: utf-8
import sys
class Solution:
def findPaths(self, m, n, N, i, j):
"""
:type m: int
:type n: int
:type N: int
:type i: int
:type j: int
:rtype: int
"""
if N == 0:
return 0
from collections import defaultdict
mod = 10 ** 9 + 7
ret = 0
        maps = [defaultdict(int), defaultdict(int)]
for c in range(n):
maps[1][(0, c)] += 1
maps[1][(m - 1, c)] += 1
for r in range(m):
maps[1][(r, 0)] += 1
maps[1][(r, n - 1)] += 1
ret += maps[1].get((i, j), 0)
for step in range(2, N + 1):
midx = step % 2
for r in range(m):
for c in range(n):
maps[midx][(r, c)] = (maps[1 - midx].get((r - 1, c), 0) +
maps[1 - midx].get((r + 1, c), 0) +
maps[1 - midx].get((r, c - 1), 0) +
maps[1 - midx].get((r, c + 1), 0))
if maps[midx][(r, c)] > mod:
maps[midx][(r, c)] %= mod
ret = (ret + maps[midx].get((i, j), 0)) % mod
# print(step, maps[midx])
return ret
def main(args):
sol = Solution()
print(sol.findPaths(2, 2, 2, 0, 0))
print(sol.findPaths(1, 3, 3, 0, 1))
print(sol.findPaths(50, 50, 50, 0, 0))
return
if __name__ == '__main__':
main(sys.argv[1:])
|
totalgood/twote
|
twote/model_utils.py
|
Python
|
mit
| 2,398
| 0.003336
|
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
IMPORTANT_FIELD_GUESSES = ['id', 'pk', 'name', 'last', 'first', 'full_name', 'summary', 'description', 'user', 'person']
def representation(model, field_names=[], max_fields=None):
"""Unicode representation of Django model instance (object/record/row)"""
representation.max_fields = max_fields if max_fields is not None else representation.max_fields
if not field_names:
field_names = getattr(model, 'IMPORTANT_FIELDS', None)
if field_names is None:
field_names = []
    # model_fields = set([f.name for f in model._meta.fields])
for f in model._meta.fields:
field_names += [f.name] if f.name in IMPORTANT_FIELD_GUESSES else []
retval = model.__class__.__name__ + u'('
retval += ', '.join("{}".format(repr(getattr(model, s, '') or ''))
                        for s in field_names[:min(len(field_names), representation.max_fields)])
return retval + u')'
representation.max_fields = 5
def name_similarity():
"""Compute the similarity (inverse distance) matrix between committe names"""
pass
class LongCharField(models.CharField):
"An unlimited-length CharField to satisfy by Django and postgreSQL varchar."
description = _("Unlimited-length string")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = int(1e9) # Satisfy management validation.
super(models.CharField, self).__init__(*args, **kwargs)
# Don't add max-length validator like CharField does.
def get_internal_type(self):
# This has no function, since this value is used as a lookup in
# db_type(). Put something that isn't known by django so it
# raises an error if it is ever used.
return 'LongCharField'
def db_type(self, connection):
# *** This is probably only compatible with Postgres.
# 'varchar' with no max length is equivalent to 'text' in Postgres,
# but put 'varchar' so we can tell LongCharFields from TextFields
# when we're looking at the db.
return 'varchar'
def formfield(self, **kwargs):
# Don't pass max_length to form field like CharField does.
return super(models.CharField, self).formfield(**kwargs)
models.LongCharField = LongCharField
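A short usage sketch for representation(); the Tweet model below is hypothetical and assumes a configured Django app.
from django.db import models
from twote.model_utils import representation

class Tweet(models.Model):
    name = models.CharField(max_length=80)
    description = models.TextField()

    def __unicode__(self):
        # 'id', 'name' and 'description' are picked up via IMPORTANT_FIELD_GUESSES
        return representation(self)

# unicode(Tweet(name='hello', description='world'))
# -> u"Tweet('', 'hello', 'world')"   (id stays empty until the row is saved)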
|
Data2Semantics/linkitup
|
linkitup/bio2rdf/plugin.py
|
Python
|
mit
| 1,736
| 0.021889
|
"""
Module: plugin.py
Author: Rinke Hoekstra
Created: 2 October 2012
Copyright (c) 2012, Rinke Hoekstra, VU University Amsterdam
http://github.com/Data2Semantics/linkitup
"""
from flask.ext.login import login_required
import re
from linkitup import app
from linkitup.util.baseplugin import plugin, SPARQLPlugin
from linkitup.util.provenance import provenance
app.logger.debug("Initializing DrugBank")
endpoints = ['http://drugbank.bio2rdf.org/sparql','http://bioportal.bio2rdf.org/sparql','http://kegg.bio2rdf.org/sparql','http://affymetrix.bio2rdf.org/sparql']
@app.route('/bio2rdf', methods=['POST'])
@login_required
@plugin(fields=[('tags','id','name'),('categories','id','name')], link='mapping')
@provenance()
def link_to_bio2rdf(*args,**kwargs):
# Retrieve the article from the post
article_id = kwargs['article']['id']
match_items = kwargs['inputs']
match_type = kwargs['link']
app.logger.debug("Running Bio2RDF plugin for article {}".format(article_id))
try :
# Initialize the plugin
plugin = SPARQLPlugin(endpoint = endpoints,
template = "bio2rdf.query",
match_type = match_type,
id_base = 'label',
all=True)
# Run the plugin, and retrieve matches using the default label property (rdfs:label)
matches = plugin.match(match_items)
app.logger.debug("Plugin is done, returning the following matches")
app.logger.debug(matches)
# Return the matches
return matches
except Exception as e:
app.logger.error(e.message)
return {'error': e.message }
|
timlinux/geonode
|
geonode/contrib/ows_api/views.py
|
Python
|
gpl-3.0
| 2,017
| 0.000496
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.views.generic import View
from django.conf import settings
from geonode.base.enumerations import LINK_TYPES as _LT
# from geonode.base.models import Link
from geonode.utils import json_response
from geonode.geoserver import ows
LINK_TYPES = [L for L in _LT if L.startswith("OGC:")]
class OWSListView(View):
def get(self, request):
out = {'success': True}
data = []
out['data'] = data
# per-layer links
# for link in Link.objects.filter(link_type__in=LINK_TYPES): # .distinct('url'):
# data.append({'url': link.url, 'type': link.link_type})
data.append({'url': ows._wcs_get_capabilities(), 'type': 'OGC:WCS'})
data.append({'url': ows._wfs_get_capabilities(), 'type': 'OGC:WFS'})
data.append({'url': ows._wms_get_capabilities(), 'type': 'OGC:WMS'})
# catalogue from configuration
for catname, catconf in settings.CATALOGUE.items():
data.append({'url': catconf['URL'], 'type': 'OGC:CSW'})
# main site url
data.append({'url': settings.SITEURL, 'type': 'WWW:LINK'})
return json_response(out)
ows_endpoints = OWSListView.as_view()
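For reference, the JSON returned by this endpoint has the shape sketched below (URLs are placeholders, not real values):
# {
#     "success": true,
#     "data": [
#         {"url": "<WCS GetCapabilities URL>", "type": "OGC:WCS"},
#         {"url": "<WFS GetCapabilities URL>", "type": "OGC:WFS"},
#         {"url": "<WMS GetCapabilities URL>", "type": "OGC:WMS"},
#         {"url": "<catalogue URL from settings.CATALOGUE>", "type": "OGC:CSW"},
#         {"url": "<settings.SITEURL>", "type": "WWW:LINK"}
#     ]
# }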
|
jballanc/openmicroscopy
|
docs/hudson/launcher.py
|
Python
|
gpl-2.0
| 4,193
| 0.0031
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# $Id$
#
# Copyright 2010 Glencoe Software, Inc. All rights reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# Hudson launcher script which properly launches the script
# on the right system. This is used by most jobs via:
#
# cd src
# cd docs
# cd hudson
# python launcher.py
#
# which will:
#
# * download <BRANCH>.log from hudson
# * create hudson.log
# * run sh docs/hudson/OMERO-<BRANCH>-<COMPONENT>.sh
# or docs\hudson\OMERO-<BRANCH>-<COMPONENT>.bat
#
import os
import re
import sys
import urllib
import platform
import subprocess
LOG_URL = "http://hudson.openmicroscopy.org.uk/job/OMERO-%(BRANCH)s/lastSuccessfulBuild/artifact/src/target/%(BRANCH)s.log"
JOB_NAME_STR = "^OMERO-([^-]+)-(.*?)(/(.*))?$"
JOB_NAME_REG = re.compile(JOB_NAME_STR)
class ConfigOpener(urllib.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
if errcode and errcode > 400:
raise Exception("Error loading %s: %s" % (url, errcode))
if __name__ == "__main__":
#
# FIND JOB NAME
#
job_name = os.environ["JOB_NAME"]
m = JOB_NAME_REG.match(job_name)
if not m:
print "Bad job name: %s doesn't match %r" % (job_name, JOB_NAME_STR)
sys.exit(1)
branch = m.group(1)
build = m.group(2)
axises = m.group(4)
if axises:
values = {}
for axis in axises.split(","):
parts = axis.split("=")
values[parts[0]] = parts[1]
job = values["component"]
label = values["label"]
else:
job = build
#
# SETUP
#
os.chdir("..") # docs
os.chdir("..") # OMERO_HOME
top = os.path.abspath(".")
build_log = os.path.join(top, "%s.log" % branch)
hudson_log = os.path.join(top, "hudson.log")
config_file = os.path.join(top, "%s.config" % branch)
#
# LOG FILES
#
log_url = LOG_URL % {"BRANCH": branch}
print "Loading %s ..." % log_url
url = urllib.urlopen(log_url)
build_log_text = url.read()
url.close()
f = open(build_log, "w")
for line in build_log_text.split("\n"):
f.write(line)
f.write("\n")
# Also import the file into the environment
line = line.strip()
parts = line.split("=")
if parts and parts[0]:
k = str(parts[0])
try:
v = str(parts[1])
os.environ[k] = v
except:
os.environ[k] = ""
f.close()
f = open(hudson_log, "w")
for key in sorted(os.environ):
f.write("%s=%s\n" % (key, os.environ[key]))
    f.close()
#
# CONFIG FILE
# -----------
# If this is not the "start" job, then download
# the <BRANCH>.config file created by start in
# order to access the server.
#
if axises and job != "start":
build_url = os.environ["BUILD_URL"]
build_url = build_url.replace("component=%s" % job, "component=start")
# These jobs don't have their own
# "start" component,
|
so let them use
# the "linux" label.
if label == "macosx" or label == "matlab":
build_url = build_url.replace("label=%s" % label, "label=linux")
build_url = "%s/%s" % (build_url, "artifact/src/%s.config" % branch)
if os.path.exists(config_file):
print "Removing %s ..." % config_file
            os.remove(config_file)
print "Downloading %s ... " % build_url
ConfigOpener().retrieve(build_url, filename=config_file)
os.environ["ICE_CONFIG"] = config_file
#
# BUILD COMMAND
#
path = os.path.join("docs", "hudson")
base = "OMERO-%s" % job
if "Windows" == platform.system():
name = base + ".bat"
cmd = []
else:
name = base + ".sh"
cmd = ["sh"]
path = os.path.join(path, name)
cmd.append(path)
#
# RUN
#
print "Launching", " ".join(cmd)
print "="*60
popen = subprocess.Popen(cmd, env = os.environ)
rcode = popen.wait()
if rcode != 0:
print "="*60
print "Build failed with rcode=%s" % rcode
sys.exit(rcode)
|
alexandregz/simian
|
src/simian/mac/munki/handlers/uauth.py
|
Python
|
apache-2.0
| 2,569
| 0.011288
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""Module to handle real user logins via GAE SSO"""
import logging
from google.appengine.api import users
from simian import auth as auth_init
from simian.auth import base
from simian.auth import gaeserver
from simian.mac.common import auth
from simian.mac.munki import handlers
class Error(Exception):
"""Base error."""
class NotAuthenticated(Error, base.NotAuthenticated):
"""Not Authenticated Error."""
class UserAuth(handlers.AuthenticationHandler):
"""Handle for user auth which provides Auth1 token."""
def get(self):
"""Handle GET."""
try:
# already munki authenticated? return, nothing to do.
gaeserver.DoMunkiAuth()
#logging.info('Uauth: session is already authenticated')
return
except gaeserver.NotAuthenticated:
pass
user = users.get_current_user()
if not user:
#logging.error('Uauth: user is not logged in')
raise NotAuthenticated
email = user.email()
if auth.IsAdminUser(email):
a = gaeserver.AuthSimianServer()
output = a.SessionCreateUserAuthToken(email, level=gaeserver.LEVEL_ADMIN)
elif auth.IsSupportUser(email):
a = gaeserver.AuthSimianServer()
output = a.SessionCreateUserAuthToken(email, level=gaeserver.LEVEL_BASE)
else:
logging.error('Uauth: user %s is not an admin', email)
raise NotAuthenticated
if output:
#logging.info('Uauth: success, token = %s', output)
      self.response.headers['Set-Cookie'] = '%s=%s; secure; httponly;' % (
auth_init.AUTH_TOKEN_COOKIE, output)
self.response.out.write(auth_init.AUTH_TOKEN_COOKIE)
else:
#logging.info('Uauth: unknown token')
raise NotAuthenticated
def post(self):
"""Handle POST.
Because the appengine_rpc module, used by simian.client.UAuth class, uses
    the POST http method, define this handler which mirrors the functionality
of the GET method.
"""
return self.get()
|
charbec1/pokemapfuntimesyay
|
pogom/pgoapi/pgoapi.py
|
Python
|
mit
| 6,103
| 0.00934
|
"""
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
import logging
import re
import requests
from utilities import f2i, h2f
from rpc_api import RpcApi
from auth_ptc import AuthPtc
from auth_google import AuthGoogle
from exceptions import AuthException, NotLoggedInException, ServerBusyOrOfflineException
import protos.RpcEnum_pb2 as RpcEnum
logger = logging.getLogger(__name__)
class PGoApi:
API_ENTRY = 'https://pgorelease.nianticlabs.com/plfe/rpc'
def __init__(self):
self.log = logging.getLogger(__name__)
self._auth_provider = None
self._api_endpoint = None
self._position_lat = 0
self._position_lng = 0
self._position_alt = 0
self._req_method_list = []
def call(self):
if not self._req_method_list:
return False
if self._auth_provider is None or not self._auth_provider.is_login():
self.log.info('Not logged in')
return False
player_position = self.get_position()
request = RpcApi(self._auth_provider)
if self._api_endpoint:
api_endpoint = self._api_endpoint
else:
api_endpoint = self.API_ENTRY
self.log.info('Execution of RPC')
response = None
try:
response = request.request(api_endpoint, self._req_method_list, player_position)
except ServerBusyOrOfflineException as e:
self.log.info('Server seems to be busy or offline - try again!')
# cleanup after call execution
self.log.info('Cleanup of request!')
self._req_method_list = []
return response
#def get_player(self):
def list_curr_methods(self):
for i in self._req_method_list:
print("{} ({})".format(RpcEnum.RequestMethod.Name(i),i))
def set_logger(self, logger):
        self.log = logger or logging.getLogger(__name__)
def get_position(self):
return (self._position_lat, self._position_lng, self._position_alt)
def set_position(self, lat, lng, alt):
self.log.debug('Set Position - Lat: %s Long: %s Alt: %s', lat, lng, alt)
self._position_lat = f2i(lat)
self._position_lng = f2i(lng)
self._position_alt = f2i(alt)
def __getattr__(self, func):
def function(**kwargs):
if not self._req_method_list:
self.log.info('Create new request...')
name = func.upper()
if kwargs:
self._req_method_list.append( { RpcEnum.RequestMethod.Value(name): kwargs } )
self.log.info("Adding '%s' to RPC request including arguments", name)
self.log.debug("Arguments of '%s': \n\r%s", name, kwargs)
else:
self._req_method_list.append( RpcEnum.RequestMethod.Value(name) )
self.log.info("Adding '%s' to RPC request", name)
return self
if func.upper() in RpcEnum.RequestMethod.keys():
return function
else:
raise AttributeError
def login(self, provider, username, password):
if not isinstance(username, basestring) or not isinstance(password, basestring):
raise AuthException("Username/password not correctly specified")
if provider == 'ptc':
self._auth_provider = AuthPtc()
elif provider == 'google':
self._auth_provider = AuthGoogle()
else:
raise AuthException("Invalid authentication provider - only ptc/google available.")
self.log.debug('Auth provider: %s', provider)
if not self._auth_provider.login(username, password):
self.log.info('Login process failed')
return False
self.log.info('Starting RPC login sequence (app simulation)')
# making a standard call, like it is also done by the client
self.get_player()
self.get_hatched_eggs()
self.get_inventory()
self.check_awarded_badges()
self.download_settings(hash="4a2e9bc330dae60e7b74fc85b98868ab4700802e")
response = self.call()
if not response:
self.log.info('Login failed!')
return False
if 'api_url' in response:
self._api_endpoint = ('https://{}/rpc'.format(response['api_url']))
self.log.debug('Setting API endpoint to: %s', self._api_endpoint)
else:
self.log.error('Login failed - unexpected server response!')
return False
if 'auth_ticket' in response:
self._auth_provider.set_ticket(response['auth_ticket'].values())
self.log.info('Finished RPC login sequence (app simulation)')
self.log.info('Login process completed')
return True
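A minimal driving sketch for this wrapper, mirroring the call pattern used inside login() above; the import path follows this file's location, and the credentials and coordinates are placeholders.
from pogom.pgoapi.pgoapi import PGoApi

def demo():
    api = PGoApi()
    api.set_position(40.7128, -74.0060, 10.0)       # lat, lng, alt
    if not api.login('ptc', 'some_user', 'some_password'):
        return
    # __getattr__ queues RPC methods and returns self, so calls chain
    api.get_player().get_inventory()
    response = api.call()                           # executes the queued request
    print(response)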
|
dimkal/mne-python
|
mne/beamformer/tests/test_lcmv.py
|
Python
|
bsd-3-clause
| 15,822
| 0
|
import os.path as op
from nose.tools import assert_true, assert_raises
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import warnings
import mne
from mne import compute_covariance
from mne.datasets import testing
from mne.beamformer import lcmv, lcmv_epochs, lcmv_raw, tf_lcmv
from mne.beamformer._lcmv import _lcmv_source_power
from mne.externals.six import advance_iterator
from mne.utils import run_tests_if_main, slow_test
data_path = testing.data_path(download=False)
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_event = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw-eve.fif')
label = 'Aud-lh'
fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
warnings.simplefilter('always') # enable b/c these tests throw warnings
def read_forward_solution_meg(*args, **kwargs):
fwd = mne.read_forward_solution(*args, **kwargs)
return mne.pick_types_forward(fwd, meg=True, eeg=False)
def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True,
epochs_preload=True, data_cov=True):
"""Read in data used in tests
"""
label = mne.read_label(fname_label)
events = mne.read_events(fname_event)
raw = mne.io.Raw(fname_raw, preload=True)
forward = mne.read_forward_solution(fname_fwd)
if all_forward:
forward_surf_ori = read_forward_solution_meg(fname_fwd, surf_ori=True)
forward_fixed = read_forward_solution_meg(fname_fwd, force_fixed=True,
surf_ori=True)
forward_vol = read_forward_solution_meg(fname_fwd_vol, surf_ori=True)
else:
forward_surf_ori = None
forward_fixed = None
forward_vol = None
event_id, tmin, tmax = 1, tmin, tmax
# Setup for reading the raw data
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels
if epochs:
# Set up pick list: MEG - bad channels
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True,
eog=True, ref_meg=False, exclude='bads',
selection=left_temporal_channels)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0),
preload=epochs_preload,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
if epochs_preload:
epochs.resample(200, npad=0, n_jobs=2)
evoked = epochs.average()
info = evoked.info
else:
epochs = None
evoked = None
info = raw.info
noise_cov = mne.read_cov(fname_cov)
noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05,
eeg=0.1, proj=True)
if data_cov:
with warnings.catch_warnings(record=True):
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
else:
data_cov = None
return raw, epochs, evoked, data_cov, noise_cov, label, forward,\
forward_surf_ori, forward_fixed, forward_vol
@slow_test
@testing.requires_testing_data
def test_lcmv():
"""Test LCMV with evoked data and single trials
"""
raw, epochs, evoked, data_cov, noise_cov, label, forward,\
forward_surf_ori, forward_fixed, forward_vol = _get_data()
for fwd in [forward, forward_vol]:
stc = lcmv(evoked, fwd, noise_cov, data_cov, reg=0.01)
stc.crop(0.02, None)
stc_pow = np.sum(stc.data, axis=1)
        idx = np.argmax(stc_pow)
max_stc = stc.data[idx]
tmax = stc.times[np.argmax(max_stc)]
assert_true(0.09 < tmax < 0.105, tmax)
assert_true(0.9 < np.max(max_stc) < 3., np.max(max_stc))
if fwd is forward:
# Test picking normal orientation (surface source space only)
stc_normal = lcmv(evoked, forward_surf_ori, noise_cov, data_cov,
                              reg=0.01, pick_ori="normal")
stc_normal.crop(0.02, None)
stc_pow = np.sum(np.abs(stc_normal.data), axis=1)
idx = np.argmax(stc_pow)
max_stc = stc_normal.data[idx]
tmax = stc_normal.times[np.argmax(max_stc)]
assert_true(0.04 < tmax < 0.11, tmax)
assert_true(0.4 < np.max(max_stc) < 2., np.max(max_stc))
# The amplitude of normal orientation results should always be
# smaller than free orientation results
assert_true((np.abs(stc_normal.data) <= stc.data).all())
# Test picking source orientation maximizing output source power
stc_max_power = lcmv(evoked, fwd, noise_cov, data_cov, reg=0.01,
pick_ori="max-power")
stc_max_power.crop(0.02, None)
stc_pow = np.sum(stc_max_power.data, axis=1)
idx = np.argmax(stc_pow)
max_stc = stc_max_power.data[idx]
tmax = stc.times[np.argmax(max_stc)]
assert_true(0.09 < tmax < 0.11, tmax)
assert_true(0.8 < np.max(max_stc) < 3., np.max(max_stc))
# Maximum output source power orientation results should be similar to
# free orientation results
assert_true((stc_max_power.data - stc.data < 1).all())
# Test if fixed forward operator is detected when picking normal or
# max-power orientation
assert_raises(ValueError, lcmv, evoked, forward_fixed, noise_cov, data_cov,
reg=0.01, pick_ori="normal")
assert_raises(ValueError, lcmv, evoked, forward_fixed, noise_cov, data_cov,
reg=0.01, pick_ori="max-power")
# Test if non-surface oriented forward operator is detected when picking
# normal orientation
assert_raises(ValueError, lcmv, evoked, forward, noise_cov, data_cov,
reg=0.01, pick_ori="normal")
# Test if volume forward operator is detected when picking normal
# orientation
assert_raises(ValueError, lcmv, evoked, forward_vol, noise_cov, data_cov,
reg=0.01, pick_ori="normal")
# Now test single trial using fixed orientation forward solution
# so we can compare it to the evoked solution
stcs = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov, reg=0.01)
stcs_ = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov, reg=0.01,
return_generator=True)
assert_array_equal(stcs[0].data, advance_iterator(stcs_).data)
epochs.drop_bad_epochs()
assert_true(len(epochs.events) == len(stcs))
# average the single trial estimates
stc_avg = np.zeros_like(stcs[0].data)
for this_stc in stcs:
stc_avg += this_stc.data
stc_avg /= len(stcs)
# compare it to the solution using evoked with fixed orientation
stc_fixed = lcmv(evoked, forward_fixed, noise_cov, data_cov, reg=0.01)
assert_array_almost_equal(stc_avg, stc_fixed.data)
# use a label so we have few source vertices and delayed computation is
# not used
stcs_label = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov,
reg=0.01, label=label)
assert_array_almost_equal(stcs_label[0].data, stcs[0].in_label(label).data)
@testing.requires_testing_data
def test_lcmv_raw():
"""Test LCMV with raw data
"""
raw, _, _, _, noise_cov, label, forward, _, _, _ =\
_get_data(all_forward=False, epochs=False, data_cov=False)
tmin, tmax = 0, 20
start, stop = raw.time_as_index([tmin, tmax])
# use only the left-temporal MEG channels for LCMV
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, exclude='bads',
selection=left_temporal_channe
|
mkhuthir/learnPython
|
Book_learning-python-r1.1/ch2/sequences.py
|
Python
|
mit
| 1,614
| 0.019279
|
# sequences.py
# strings
>>> # 4 ways to make a string
>>> str1 = 'This is a string. We built it with single quotes.'
>>> str2 = "This is also a string, but built with double quotes."
>>> str3 = '''This is built using triple quotes,
... so it can span multiple lines.'''
>>> str4 = """This too
... is a multiline one
... built with triple double-quotes."""
>>> str4 #A
'This too\nis a multiline one\nbuilt with triple double-quotes.'
>>> print(str4) #B
This too
is a multiline one
built with triple double-quotes.
>>>
# encode / decode
>>> s = "This is üŋíc0de" # unicode string: code points
>>> type(s)
<class 'str'>
>>> encoded_s = s.encode('utf-8') # utf-8 encoded version of s
>>> encoded_s
b'This is \xc3\xbc\xc5\x8b\xc3\xadc0de' # result: bytes object
>>> type(encoded_s) # another way to verify it
<class 'bytes'>
>>> encoded_s.decode('utf-8') # let's revert to the original
'This is üŋíc0de'
>>> bytes_obj = b"A bytes object" # a bytes object
>>> type(bytes_obj)
<class 'bytes'>
# length
>>> len(str1)
49
# indexing and slicing
>>> s = "The trouble is you think you have time."
>>> s[0] # indexing at position 0, which is the first char
'T'
>>> s[5] # indexing at position 5, which is the sixth char
'r'
>>> s[:4] # slicing, we specify only the stop position
'The '
>>> s[4:] # slicing, we specify only the start position
'trouble is you think you have time.'
>>> s[2:14] # slicing, both start and stop positions
'e trouble is'
>>> s[2:14:3] # slicing, start, stop and step (every 3 chars)
'erb '
>>> s[:] # quick way of making a copy
'The trouble is you think you have time.'
|
bis12/yapwaf
|
yapwaf/controller.py
|
Python
|
mit
| 2,107
| 0.001424
|
"""
The controller base class
"""
from .routes import Route
from .view import View
class Controller(object):
def __init__(self, entity, env):
"""Instantiate a controller with the name of the entity and the
environment dict.
"""
self.entity = entity.strip('/^$')
if not self.entity:
self.entity = 'index'
self.routes = []
self.register_routes()
self.env = env
def register_routes(self):
"""Simple internal method
|
to run through all of the methods of this class
and see if they've been decorated to be endpoints.
"""
for funcname in dir(self):
func = getattr(self, funcname)
if hasattr(func, '_method') and hasattr(func, '_path'):
self.update_routes(func._method, func._path, func)
def update_routes(self, method, matcher, endpoint):
"""Adds an endpoint into the possible endpoints of a path based on
its HTTP method
"""
for route in self.routes:
if route.key == matcher:
route.update(method, endpoint)
return
# If the route has not been added to the routes yet
self.routes.append(Route(method, matcher, endpoint))
def route(self, env):
"""Called by the application to route the requests to the proper endpoint
in this controller.
"""
for route in self.routes:
if self.entity == 'index':
path = '/' + '/'.join(env['PATH_INFO'].split('/')[1:])
else:
path = '/' + '/'.join(env['PATH_INFO'].split('/')[2:])
if route.match(path):
ans = route.call(env['REQUEST_METHOD'], env['PATH_INFO'], env)
if ans[1] == 'no_template':
return ans[0]
if '/' in ans[0]:
view = View(ans[0].split('/')[0])
return view.render(ans[0], ans[1])
else:
view = View(self.entity)
return view.render(ans[0], ans[1])
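register_routes() only looks for callables carrying _method and _path attributes, so a controller can be wired up as in the sketch below; the endpoint decorator is a stand-in for whatever helper yapwaf actually ships, and the path syntax depends on Route.match().
def endpoint(method, path):
    """Attach the attributes register_routes() inspects."""
    def decorate(func):
        func._method = method
        func._path = path
        return func
    return decorate

class Posts(Controller):
    @endpoint('GET', '^/$')
    def index(self, env):
        # returning (body, 'no_template') skips the View rendering branch
        return 'hello from /posts', 'no_template'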
|
rizar/attention-lvcsr
|
lvsr/expressions.py
|
Python
|
mit
| 2,325
| 0.00129
|
from theano import tensor
from theano.tensor.nnet import conv2d
def weights_std(weights, mask_outputs=None):
positions = tensor.arange(weights.shape[2])
expected = (weights * positions).sum(axis=2)
expected2 = (weights * positions ** 2).sum(axis=2)
result = (expected2 - expected ** 2) ** 0.5
if mask_outputs:
result *= mask_outputs
return result.sum() / weights.shape[0]
def monotonicity_penalty(weights, mask_x=None):
cumsums = tensor.cumsum(weights, axis=2)
    penalties = tensor.maximum(cumsums[1:] - cumsums[:-1], 0).sum(axis=2)
if mask_x:
penalties *= mask_x[1:]
return penalties.sum()
def entropy(weights, mask_x):
entropies = (weights * tensor.log(weights + 1e-7)).sum(axis=2)
entropies *= mask_x
return entropies.sum()
def conv1d(sequences, masks, **kwargs):
"""Wraps Theano conv2d to perform 1D convolution.
Parameters
----------
    sequences : :class:`~theano.Variable`
(batch_size, length)
masks : :class:`~theano.Variable`
(num_filters, filter_length)
**kwargs
Will be passed to `conv2d`
Returns
-------
result : :class:`~theano.Variable`
(batch_size, num_filters, position)
"""
# For testability
sequences = tensor.as_tensor_variable(sequences)
masks = tensor.as_tensor_variable(masks)
image = sequences.dimshuffle('x', 'x', 0, 1)
filters = masks.dimshuffle(0, 'x', 'x', 1)
result = conv2d(image, filters, **kwargs)
# Now number of rows is the actual batch size
result = result.dimshuffle(2, 1, 3, 0)
return result.reshape(result.shape[:-1], ndim=3)
def pad_to_a_multiple(tensor_, k, pad_with):
"""Pad a tensor to make its first dimension a multiple of a number.
Parameters
----------
tensor_ : :class:`~theano.Variable`
k : int
The number, multiple of which the length of tensor is made.
pad_with : float or int
The value for padding.
"""
new_length = (
tensor.ceil(tensor_.shape[0].astype('float32') / k) * k).astype('int64')
new_shape = tensor.set_subtensor(tensor_.shape[:1], new_length)
canvas = tensor.alloc(pad_with, tensor.prod(new_shape)).reshape(
new_shape, ndim=tensor_.ndim)
return tensor.set_subtensor(canvas[:tensor_.shape[0]], tensor_)
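A quick sanity check for pad_to_a_multiple, as a standalone sketch (assumes Theano is installed and this module is importable as lvsr.expressions):
import numpy as np
import theano
from theano import tensor
from lvsr.expressions import pad_to_a_multiple

x = tensor.matrix('x')
pad_fn = theano.function([x], pad_to_a_multiple(x, 4, 0.0))
out = pad_fn(np.ones((5, 3), dtype=theano.config.floatX))
print(out.shape)  # (8, 3): rows padded up to the next multiple of 4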
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/pandas/core/nanops.py
|
Python
|
gpl-3.0
| 18,949
| 0.000422
|
from pandas import compat
import sys
import itertools
import functools
import numpy as np
from pandas.core.common import isnull, notnull, _values_from_object, is_float
import pandas.core.common as com
import pandas.lib as lib
import pandas.algos as algos
import pandas.hashtable as _hash
import pandas.tslib as tslib
from pandas.compat import builtins
try:
import bottleneck as bn
_USE_BOTTLENECK = True
except ImportError: # pragma: no cover
_USE_BOTTLENECK = False
class disallow(object):
def __init__(self, *dtypes):
super(disallow, self).__init__()
self.dtypes = tuple(np.dtype(dtype).type for dtype in dtypes)
def check(self, obj):
return hasattr(obj, 'dtype') and issubclass(obj.dtype.type,
self.dtypes)
def __call__(self, f):
@functools.wraps(f)
def _f(*args, **kwargs):
obj_iter = itertools.chain(args, compat.itervalues(kwargs))
if any(self.check(obj) for obj in obj_iter):
raise TypeError('reduction operation {0!r} not allowed for '
'this dtype'.format(f.__name__.replace('nan',
'')))
return f(*args, **kwargs)
return _f
class bottleneck_switch(object):
def __init__(self, zero_value=None, **kwargs):
self.zero_value = zero_value
self.kwargs = kwargs
def __call__(self, alt):
bn_name = alt.__name__
try:
bn_func = getattr(bn, bn_name)
except (AttributeError, NameError): # pragma: no cover
bn_func = None
@functools.wraps(alt)
def f(values, axis=None, skipna=True, **kwds):
if len(self.kwargs) > 0:
for k, v in compat.iteritems(self.kwargs):
if k not in kwds:
kwds[k] = v
try:
if self.zero_value is not None and values.size == 0:
if values.ndim == 1:
return 0
else:
result_shape = (values.shape[:axis] +
values.shape[axis + 1:])
result = np.empty(result_shape)
result.fill(0)
return result
                if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype):
result = bn_func(values, axis=axis, **kwds)
# prefer to treat inf/-inf as NA, but must compute the func
# twice :(
if _has_infs(result):
result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
except Exception:
result = alt(values, axis=axis, skipna=skipna, **kwds)
return result
return f
def _bn_ok_dtype(dt):
# Bottleneck chokes on datetime64
time_types = np.datetime64, np.timedelta64
return dt != np.object_ and not issubclass(dt.type, time_types)
def _has_infs(result):
if isinstance(result, np.ndarray):
if result.dtype == 'f8':
return lib.has_infs_f8(result)
elif result.dtype == 'f4':
return lib.has_infs_f4(result)
return False
return np.isinf(result) or np.isneginf(result)
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None):
""" return the correct fill value for the dtype of the values """
if fill_value is not None:
return fill_value
if _na_ok_dtype(dtype):
if fill_value_typ is None:
return np.nan
else:
if fill_value_typ == '+inf':
return np.inf
else:
return -np.inf
else:
if fill_value_typ is None:
return tslib.iNaT
else:
if fill_value_typ == '+inf':
# need the max int here
return np.iinfo(np.int64).max
else:
return tslib.iNaT
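# Illustrative outcomes of the branches above (assumed, for reference only):
#   _get_fill_value(np.dtype('float64'))                        -> np.nan
#   _get_fill_value(np.dtype('float64'), fill_value_typ='+inf') -> np.inf
#   _get_fill_value(np.dtype('int64'), fill_value_typ='+inf')   -> np.iinfo(np.int64).max
#   _get_fill_value(np.dtype('M8[ns]'))                         -> tslib.iNaT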
def _get_values(values, skipna, fill_value=None, fill_value_typ=None,
isfinite=False, copy=True):
""" utility to get the values view, mask, dtype
if necessary copy and mask using the specified fill_value
copy = True will force the copy """
values = _values_from_object(values)
if isfinite:
mask = _isfinite(values)
else:
mask = isnull(values)
dtype = values.dtype
dtype_ok = _na_ok_dtype(dtype)
# get our fill value (in case we need to provide an alternative
# dtype for it)
fill_value = _get_fill_value(dtype, fill_value=fill_value,
fill_value_typ=fill_value_typ)
if skipna:
if copy:
values = values.copy()
if dtype_ok:
np.putmask(values, mask, fill_value)
# promote if needed
else:
values, changed = com._maybe_upcast_putmask(values, mask,
fill_value)
elif copy:
values = values.copy()
values = _view_if_needed(values)
return values, mask, dtype
def _isfinite(values):
if issubclass(values.dtype.type, (np.timedelta64, np.datetime64)):
return isnull(values)
elif isinstance(values.dtype, object):
return -np.isfinite(values.astype('float64'))
return -np.isfinite(values)
def _na_ok_dtype(dtype):
return not issubclass(dtype.type, (np.integer, np.datetime64,
np.timedelta64))
def _view_if_needed(values):
if issubclass(values.dtype.type, (np.datetime64, np.timedelta64)):
return values.view(np.int64)
return values
def _wrap_results(result, dtype):
""" wrap our results if needed """
if issubclass(dtype.type, np.datetime64):
if not isinstance(result, np.ndarray):
result = lib.Timestamp(result)
else:
result = result.view(dtype)
elif issubclass(dtype.type, np.timedelta64):
if not isinstance(result, np.ndarray):
# this is a scalar timedelta result!
# we have series convert then take the element (scalar)
# as series will do the right thing in py3 (and deal with the numpy
# 1.6.2 bug that results in a dtype of timedelta64[us])
from pandas import Series
# coerce float to results
if is_float(result):
result = int(result)
result = Series([result], dtype='timedelta64[ns]')
else:
result = result.view(dtype)
return result
def nanany(values, axis=None, skipna=True):
values, mask, dtype = _get_values(values, skipna, False, copy=skipna)
return values.any(axis)
def nanall(values, axis=None, skipna=True):
values, mask, dtype = _get_values(values, skipna, True, copy=skipna)
return values.all(axis)
@disallow('M8')
@bottleneck_switch(zero_value=0)
def nansum(values, axis=None, skipna=True):
values, mask, dtype = _get_values(values, skipna, 0)
the_sum = values.sum(axis)
the_sum = _maybe_null_out(the_sum, axis, mask)
return the_sum
@disallow('M8')
@bottleneck_switch()
def nanmean(values, axis=None, skipna=True):
values, mask, dtype = _get_values(values, skipna, 0)
the_sum = _ensure_numeric(values.sum(axis))
count = _get_counts(mask, axis)
if axis is not None:
the_mean = the_sum / count
ct_mask = count == 0
if ct_mask.any():
the_mean[ct_mask] = np.nan
else:
the_mean = the_sum / count if count > 0 else np.nan
return _wrap_results(the_mean, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmedian(values, axis=None, skipna=True):
values, mask, dtype = _get_values(values, skipna)
def get_median(x):
mask = notnull(x)
if not skipna and not mask.all():
return np.nan
return algos.median(_values_from_object(x[mask]))
if values.dtype != np.float64:
values = values.astype('f8')
|
ultrabug/uhashring
|
uhashring/__init__.py
|
Python
|
bsd-3-clause
| 70
| 0
|
from uhashri
|
ng.ring import HashRing
__all__ = ["Has
|
hRing", "monkey"]
|
dthoreau/rpi_rally
|
services/daemons/uniserve-flask/app.py
|
Python
|
mit
| 302
| 0.003311
|
#!/usr/bin/env python3
import connexion
if _
|
_name__ == '__main__':
app = connexion.App(__name__, specification
|
_dir='./swagger/')
app.add_api('swagger.yaml', arguments={'title': 'A second cut at writing the code initial formed in web.py for the PiMoroni UnicornHAT\n'})
app.run(port=8080)
|
grimoirelab/perceval
|
tests/test_meetup.py
|
Python
|
gpl-3.0
| 37,799
| 0.001587
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2020 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Santiago Dueñas <sduenas@bitergia.com>
# Stephan Barth <stephan.barth@gmail.com>
# Valerio Cosentino <valcos@bitergia.com>
# Miguel Ángel Fernández <mafesan@bitergia.com>
# Harshal Mittal <harshalmittal4@gmail.com>
#
import copy
import datetime
import dateutil.tz
import httpretty
import os
import pkg_resources
import time
import unittest
import unittest.mock
import warnings
import requests
pkg_resources.declare_namespace('perceval.backends')
from perceval.backend import BackendCommandArgumentParser
from perceval.errors import RateLimitError, RepositoryError
from perceval.utils import DEFAULT_DATETIME
from perceval.backends.core.meetup import (Meetup,
MeetupCommand,
MeetupClient,
MIN_RATE_LIMIT)
from base import TestCaseBackendArchive
warnings.filterwarnings("ignore")
MEETUP_URL = 'https://api.meetup.com'
MEETUP_GROUP_URL = MEETUP_URL + '/sqlpass-es'
MEETUP_EVENTS_URL = MEETUP_GROUP_URL + '/events'
MEETUP_EVENT_1_URL = MEETUP_EVENTS_URL + '/1'
MEETUP_EVENT_2_URL = MEETUP_EVENTS_URL + '/2'
MEETUP_EVENT_3_URL = MEETUP_EVENTS_URL + '/3'
MEETUP_EVENT_1_COMMENTS_URL = MEETUP_EVENT_1_URL + '/comments'
MEETUP_EVENT_2_COMMENTS_URL = MEETUP_EVENT_2_URL + '/comments'
MEETUP_EVENT_3_COMMENTS_URL = MEETUP_EVENT_3_URL + '/comments'
MEETUP_EVENT_1_RSVPS_URL = MEETUP_EVENT_1_URL + '/rsvps'
MEETUP_EVENT_2_RSVPS_URL = MEETUP_EVENT_2_URL + '/rsvps'
MEETUP_EVENT_3_RSVPS_URL = MEETUP_EVENT_3_URL + '/rsvps'
MEETUP_COMMENTS_URL = [
MEETUP_EVENT_1_COMMENTS_URL,
MEETUP_EVENT_2_COMMENTS_URL,
MEETUP_EVENT_3_COMMENTS_URL
]
MEETUP_RSVPS_URL = [
MEETUP_EVENT_1_RSVPS_URL,
MEETUP_EVENT_2_RSVPS_URL,
MEETUP_EVENT_3_RSVPS_URL
]
def read_file(filename
|
, mode='r'):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), filename), mode) as f:
|
content = f.read()
return content
def setup_http_server(rate_limit=-1, reset_rate_limit=-1):
"""Setup a mock HTTP server"""
http_requests = []
events_bodies = [
read_file('data/meetup/meetup_events.json', 'rb'),
read_file('data/meetup/meetup_events_next.json', 'rb')
]
events_range_body = read_file('data/meetup/meetup_events_range.json', 'rb')
events_empty_body = read_file('data/meetup/meetup_events_empty.json', 'rb')
event_comments_body = read_file('data/meetup/meetup_comments.json', 'rb')
event_rsvps_body = read_file('data/meetup/meetup_rsvps.json', 'rb')
def request_callback(method, uri, headers, too_many_requests=False):
last_request = httpretty.last_request()
if uri.startswith(MEETUP_EVENT_1_COMMENTS_URL):
body = event_comments_body
elif uri.startswith(MEETUP_EVENT_2_COMMENTS_URL):
body = event_comments_body
elif uri.startswith(MEETUP_EVENT_3_COMMENTS_URL):
body = event_comments_body
elif uri.startswith(MEETUP_EVENT_1_RSVPS_URL):
body = event_rsvps_body
elif uri.startswith(MEETUP_EVENT_2_RSVPS_URL):
body = event_rsvps_body
elif uri.startswith(MEETUP_EVENT_3_RSVPS_URL):
body = event_rsvps_body
elif uri.startswith(MEETUP_EVENTS_URL):
params = last_request.querystring
scroll = params.get('scroll', None)
if scroll and scroll[0] == 'since:2016-09-25T00:00:00.000Z':
# Last events and no pagination
body = events_bodies[-1]
elif scroll and scroll[0] == 'since:2016-04-08T00:00:00.000Z':
body = events_range_body
elif scroll and scroll[0] == 'since:2017-01-01T00:00:00.000Z':
body = events_empty_body
else:
body = events_bodies.pop(0)
if events_bodies:
# Mock the 'Link' header with a fake URL
headers['Link'] = '<' + MEETUP_EVENTS_URL + '>; rel="next"'
if rate_limit != -1:
headers['X-RateLimit-Remaining'] = str(rate_limit)
if reset_rate_limit != -1:
headers['X-RateLimit-Reset'] = str(reset_rate_limit)
else:
raise
if rate_limit == -1:
headers['X-RateLimit-Remaining'] = '10000000'
if reset_rate_limit == -1:
headers['X-RateLimit-Reset'] = '0'
http_requests.append(last_request)
return (200, headers, body)
httpretty.register_uri(httpretty.GET,
MEETUP_EVENTS_URL,
responses=[
httpretty.Response(body=request_callback)
for _ in range(2)
])
for url in MEETUP_COMMENTS_URL:
httpretty.register_uri(httpretty.GET,
url,
responses=[
httpretty.Response(body=request_callback)
])
for url in MEETUP_RSVPS_URL:
httpretty.register_uri(httpretty.GET,
url,
responses=[
httpretty.Response(body=request_callback)
])
return http_requests
class MockedMeetupClient(MeetupClient):
"""Mocked meetup client for testing"""
def __init__(self, token, max_items, min_rate_to_sleep, sleep_for_rate):
super().__init__(token, max_items=max_items,
min_rate_to_sleep=min_rate_to_sleep,
sleep_for_rate=sleep_for_rate)
self.rate_limit_reset_ts = -1
class TestMeetupBackend(unittest.TestCase):
"""Meetup backend tests"""
def setUp(self):
warnings.simplefilter("ignore")
def test_initialization(self):
"""Test whether attributes are initialized"""
meetup = Meetup('mygroup', 'aaaa', max_items=5, tag='test',
sleep_for_rate=True, min_rate_to_sleep=10, sleep_time=60)
self.assertEqual(meetup.origin, 'https://meetup.com/')
self.assertEqual(meetup.tag, 'test')
self.assertEqual(meetup.group, 'mygroup')
self.assertEqual(meetup.max_items, 5)
self.assertIsNone(meetup.client)
self.assertTrue(meetup.ssl_verify)
# When tag is empty or None it will be set to
# the value in URL
meetup = Meetup('mygroup', 'aaaa', ssl_verify=False)
self.assertEqual(meetup.origin, 'https://meetup.com/')
self.assertEqual(meetup.tag, 'https://meetup.com/')
self.assertFalse(meetup.ssl_verify)
meetup = Meetup('mygroup', 'aaaa', tag='')
self.assertEqual(meetup.origin, 'https://meetup.com/')
self.assertEqual(meetup.tag, 'https://meetup.com/')
def test_has_archiving(self):
"""Test if it returns True when has_archiving is called"""
self.assertTrue(Meetup.has_archiving())
def test_has_resuming(self):
"""Test if it returns True when has_resuming is called"""
self.assertTrue(Meetup.has_resuming())
@httpretty.activate
def test_fetch(self):
"""Test whether it fetches a set of events"""
http_requests = setup_http_server()
meetup = Meetup('sqlpass-es', 'aaaa', max_items=2)
events = [event for event in
|
nelsonw2014/CSGOInvCacheConverter
|
cicc/image.py
|
Python
|
mit
| 729
| 0.001372
|
from PIL import ImageFile, Image
class CSGOInventoryCacheFile(ImageFile.ImageFile):
format = "IIC"
format_description = "CS:GO Inventory Image Cache"
def _open(self):
self.mode = "RGBA"
|
self.size = 512, 384
self.tile = [
("raw", (0, 0) + self.size, 0, ("BGRA", 0, 1))
]
def convert_cache_to_image(original_location, new_location):
Image.register_open("IIC", CSGOInventoryCacheFile)
Image.register_extension("IIC", ".iic")
try:
|
with open(original_location, "rb") as original_img:
img = Image.open(original_img)
img.save(new_location)
except Exception as e:
raise Exception("Originating file does not exist: ", e)
|
itensionanders/distcc
|
include_server/cache_basics.py
|
Python
|
gpl-2.0
| 37,214
| 0.004407
|
#! /usr/bin/env python3
# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
#
"""Classes enabling definition and composition of caches.
This file defines caches used to speed up the does-this-file-exist
test that forms the basis of the C preprocessor's include-file
handling, and takes most of its time.
When the preprocessor sees a line like "#include <foo/bar.h>" it looks
for a file named "bar.h" in many directories: /usr/include/foo/bar.h,
./foo/bar.h, and so forth. More precisely, the preprocessor is given
a "search path", which is a list of directory-names. (By default, the
search-path looks like ['/usr/include', '/usr/local/include', ...],
but it's often extended via gcc flags like -I, -isystem, -iprefix,
etc.) To resolve a single #include like "#include <foo/bar.h>", the
preprocessor goes through every directory in the search path, running
os.stat(os.path.join(current_working_dir, search_dir, 'foo/bar.h'))
until the stat call succeeds. With dozens of search-dirs to look
through, dozens of #include lines per source file, and hundreds of
source files per compilation, this can add up to millions of stat
calls. Many of these calls are exactly the same, so caching is a big
win.
The cache of stat calls takes a filename as input and produces a bool
as output, saying if the filename exists. For reasons that will
become clear in a moment, we actually represent the input filename as
a triple that breaks the filename into its three components:
1) currdir: the current working directory (usually os.path.absdir('.'))
2) searchdir: an element of the search path (eg '/usr/include', 'base')
3) includepath: the thing that comes after "#include" in source files
("foo/bar.h" in our examples above).
Why do we break the input into three parts? Consider what cache-lookups
we have to do for a single source file:
cache[os.path.join(currdir, searchdir1, includepath1)] # #include <ipath1>
cache[os.path.join(currdir, searchdir2, includepath1)] # #include <ipath1>
cache[os.path.join(currdir, searchdir3, includepath1)] # #include <ipath1>
[etc...until the cache-lookup returns True]
cache[os.path.join(currdir, searchdir1, includepath2)] # #include <ipath2>
cache[os.path.join(currdir, searchdir2, includepath2)] # #include <ipath2>
cache[os.path.join(currdir, searchdir3, includepath2)] # #include <ipath2>
[etc]
By having the key be a triple, we avoid all those unnecessary
os.path.join calls. But even if we do this, we notice bigger fish
to fry: the Python interpreter still has to do a string-hash of
currdir for every lookup, and also has to string-hash searchdirX and
includepathX many times. It would be much more efficient if we did
those hashes ourselves, reducing the number of string-hashes from
O(|search-path| * |#include lines|) to
O(|search-path| + |#include lines|).
This motivates (finally!) the data structures in this file. We have
three string-to-number maps, for mapping each currdir, searchdir, and
includepath to a small integer. We put that all together in a cache,
that takes a triple of integers as its key and produces True if the
file exists, False if it does not, or None if its status is unknown.
The String-to-number Map(s)
---------------------------
The basic map that converts a filepath-path -- a currdir, searchdir,
or includepath -- to a small integer is called MapToIndex. MapToIndex
provides mapping in both directions:
index: a dictionary mapping paths (strings) to indices in 1..N, and
string: an array of size N + 1 that implements the reverse mapping
So:
obj.string[obj.index[path_as_string]] == path_as_string
obj.index[obj.string[path_as_number]] == path_as_number
Note we map from 1..N, and not 0..N-1, which leaves us 0 free to use as
a synonym for None or False.
There are also classes that specialize MapToIndex for specific purposes.
DirectoryMapToIndex assumes the input is a directory, and in
particular a directory that does not have a slash at the end of it (eg
"/etc"). It adds the trailing sla
|
sh before inserting into the map.
This is useful because it allows us to use + to join this directory
with a relative filename, rather than the slower os.path.join().
RelpathMapToIndex assumes the input is a relative filepath, that is,
one that does not start with /. When combined with DirectoryMapToIndex
entries, + can be used as a fast alternative to os.path.join().
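For example (illustrative values, not taken from a real run): if
DirectoryMapToIndex stores '/usr/include' as '/usr/include/' under index 3,
and RelpathMapToIndex stores 'foo/bar.h' under index 7, then
directory_map.string[3] + includepath_map.string[7] gives
'/usr/include/foo/bar.h' with plain string concatenation.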
CanonicalMapToIndex is a MapToIndex that canonicalizes its
|
input before
inserting it into the map: resolving symlinks, getting rid of ..'s,
etc. It takes an absolute path as input.
Other Caches
------------
Besides the maps from strings to integers, there are three other caches.
One is the realpath-cache, that takes a filename and returns
os.path.realpath(filename). We cache this because os.path.realpath()
is very slow. This is called CanonicalPath.
The second cache, the DirnameCache, maps an arbitrary pathname to
dirname(pathname), that is, the directory the pathname is in. The
input pathname is represented by a (currdir_idx, searchdir_idx,
includepath_idx) triple. The output is likewise represented as a
number: an index into the DirectoryMapToIndex structure.
The third cache is called SystemdirPrefixCache. It tells you, for a
given absolute filepath, whether it is prefixed by a systemdir (that
is, one of the searchdirs that's built into cpp, such as /usr/include).
This is useful to cache because there are several systemdirs, and it's
expensive to check them all each time.
Naming Conventions
------------------
currdir: the current working dir.
searchdir: an element of the search-path (places cpp looks for .h files).
includepath: the string a source file #includes.
realpath: a full filepath with all its symlinks resolved:
os.path.realpath(os.path.join(currdir, searchdir, includepath))
FOO_idx: the small integer associated with the string FOO.
includepath_map: the map that takes includepaths to their idx and back
(a RelpathMapToIndex).
directory_map: the map that takes currdirs and searchdirs to their
idx and back. It also is used to store dirname(filepath) for arbitrary
filepaths -- basically, anything we know is a directory (a
DirectoryMapToIndex).
realpath_map: the map that takes full filepaths to their idx and back,
canonicalizing them first (by resolving symlinks) (a
CanonicalMapToIndex).
searchlist: a list of searchdirs. In gcc/cpp documentation, this is
called the "search path", but for consistency, in this code we reserve
the name "path" to mean "filesystem component," never "list of dirs".
(A list of strings).
systemdir: a searchdir that's built into cpp, rather than set via -I.
(A string.)
resolved_filepath: given an includepath, and a (possibly implicit)
currdir and searchlist, the resolved_filepath is
os.path.join(currdir, searchdir, includepath)
for the first searchdir in searchlist for which the joined string
exists. This path can be represented in many ways: 1) a string like
"foo/bar/baz.h" (if so, this string has been canonicalized to resolve
symlinks and the like); 2) an index into realpath_map associated with
that string; 3) a triple of indices; or 4) a pair of indices plus an
assumption that os.getcwd() == currdir.
Pair Representation of Filepaths
-------------------------------
A file is uniquely determined by the triple
(currdir_idx, searchd
|
alexis-roche/nipy
|
nipy/core/image/__init__.py
|
Python
|
bsd-3-clause
| 518
| 0.001931
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The Image class provides the interface which should be used
by users at the application level. The image provides a coordinate map,
and the data itself.
"""
from __future__ import
|
absolute_import
__docformat__ = 'restructuredtext'
# You'd usually use nipy.core.api for these
from . import image
from .image import Image
from nipy.t
|
esting import Tester
test = Tester().test
bench = Tester().bench
|
designcc/django-ccpages
|
ccpages/templatetags/ccpages_tags.py
|
Python
|
bsd-3-clause
| 921
| 0.005429
|
from django import template
from django.conf import settings
from ccpages.models import Page
register = template.Library()
@register.inclusion_tag('ccpages/_js.html')
def ccpages_js():
return {
'STATIC_URL': settings.STATIC_URL,
}
@register.inclusion_tag('ccpages/_css.html')
def ccpages_css():
return {
'STATIC_URL': settings.STATIC_URL,
}
|
@register.inclusion_tag('ccpages/_nav_breadcrumb.html')
def ccpages_nav_breadcrumbs(page):
"""returns a breadcrumb"""
return {
'pages': Page.objects.nav_breadcrumbs(pa
|
ge),
'page': page,
}
@register.inclusion_tag('ccpages/_nav_local.html')
def ccpages_nav_local(page):
"""returns the local nav for a given page's root"""
return {
'pages': Page.objects.nav_local(page)
}
@register.assignment_tag
def ccpages_nav_global():
"""returns the global pages"""
return Page.objects.nav_global()
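# Hedged template usage sketch (tag names as registered above, template syntax assumed):
#   {% load ccpages_tags %}
#   {% ccpages_css %} {% ccpages_js %}
#   {% ccpages_nav_global as global_pages %}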
|
hexagonist/RedditDaltonizerBot
|
get_imgur_tokens.py
|
Python
|
mit
| 540
| 0.003704
|
from imgurpython import ImgurClient
import webbrowser
import credentials
client = ImgurClient(cre
|
dentials.imgur['client_id'], credentials.imgur['client_secret'])
# Authorization flow, pin example (see docs for other auth types)
authorization_url = client.get_auth_url('pin')
print authorization_url
webbrowser.open(authorization_url)
pin = raw_input("Enter pin : ")
credentials = client.authorize(pin, "pin")
print "Imgur Access token : %s" % credentials["access_token"]
print "Imgur Ref
|
resh token : %s" % credentials["refresh_token"]
|
tylerdave/cookiecutter
|
tests/test_is_vcs_installed.py
|
Python
|
bsd-3-clause
| 305
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_is_vcs_installed
---------------------
"""
from cookiecutter import vcs
def test_existing_repo_type():
assert vcs.is_vcs_installed("git")
|
def
|
test_non_existing_repo_type():
assert not vcs.is_vcs_installed("stringthatisntashellcommand")
|
roboime/pyroboime
|
roboime/utils/mathutils.py
|
Python
|
agpl-3.0
| 2,108
| 0.001423
|
#
# Copyright (C) 2013-2015 RoboIME
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
import numpy as np
from numpy import math
_trigonom_ = ['sin', 'cos', 'tan']
_invtrigonom_ = ['a' + f for f in _trigonom_] + ['atan2']
_restricted_ = ['trunc']
for fun in dir(math):
if fun in _restricted_:
pass
elif fun in _trigonom_:
exec '{0} = lambda x: math.{0}(math.radians(x))'.format(fun) in globals()
elif fun == 'atan2':
exec '{0} = lambda y, x: math.degrees(math.{0}(y, x))'.format(fun) in globals()
elif fun in _invtrigonom_:
exec '{0} = lambda x: math.degrees(math.{0}(x))'.format(fun) in globals()
else:
exec '{0} = math.{0}'.format(fun)
def norm(vector):
""" Returns the norm (length) of the vector."""
# note: this is a very hot function, hence the odd optimization
# Unoptimized it is: return np.sqrt(np.sum(np.square(vector)))
return np.sqrt(np.dot(vector, vector))
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / norm(vector)
def angle_between(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'::
>>> angle_between((1, 0, 0), (0, 1, 0))
1.5707
|
963267948966
>>> angle_between((1, 0, 0), (1, 0, 0))
0.0
>>> angle_between((1, 0, 0), (-1, 0, 0))
3.141592653589793
"""
v1_u = unit_vector(v1)
v2_u = unit_v
|
ector(v2)
angle = np.arccos(np.dot(v1_u, v2_u))
if math.isnan(angle):
if (v1_u == v2_u).all():
return 0.0
else:
return 180
return math.degrees(angle)
|
hugobuddel/orange3
|
Orange/tests/test_instance.py
|
Python
|
gpl-3.0
| 14,298
| 0.00028
|
from math import isnan
import warnings
import unittest
from unittest.mock import MagicMock
import numpy as np
from numpy.testing import assert_array_equal
from Orange.data import \
Instance, Domain, Unknown, Value, \
DiscreteVariable, ContinuousVariable, StringVariable
class TestInstance(unittest.TestCase):
attributes = ["Feature %i" % i for i in range(10)]
class_vars = ["Class %i" % i for i in range(1)]
metas = [DiscreteVariable("Meta 1", values="XYZ"),
ContinuousVariable("Meta 2"),
StringVariable("Meta 3")]
def mock_domain(self, with_classes=False, with_metas=False):
attributes = self.attributes
class_vars = self.class_vars if with_classes else []
metas = self.metas if with_metas else []
variables = attributes + class_vars
return MagicMock(Domain,
attributes=attributes,
class_vars=class_vars,
metas=metas,
variables=variables)
def create_domain(self, attributes=(), classes=(), metas=()):
attr_vars = [ContinuousVariable(name=a) if isinstance(a, str) else a
for a in attributes]
class_vars = [ContinuousVariable(name=c) if isinstance(c, str) else c
for c in classes]
meta_vars = [DiscreteVariable(name=m, values=map(str, range(5)))
if isinstance(m, str) else m
for m in metas]
domain = Domain(attr_vars, class_vars, meta_vars)
return domain
def test_init_x_no_data(self):
domain = self.mock_domain()
inst = Instance(domain)
self.assertIsInstance(inst, Instance)
self.assertIs(inst.domain, domain)
self.assertEqual(inst._x.shape, (len(self.attributes), ))
self.assertEqual(inst._y.shape, (0, ))
self.assertEqual(inst._metas.shape, (0, ))
self.assertTrue(all(isnan(x) for x in inst._x))
def test_init_xy_no_data(self):
domain = self.mock_domain(with_classes=True)
inst = Instance(domain)
self.assertIsInstance(inst, Instance)
self.assertIs(inst.domain, domain)
self.assertEqual(inst._x.shape, (len(self.attributes), ))
self.assertEqual(inst._y.shape, (len(self.class_vars), ))
self.assertEqual(inst._metas.shape, (0, ))
self.assertTrue(all(isnan(x) for x in inst._x))
self.assertTrue(all(isnan(x) for x in inst._y))
def test_init_xym_no_data(self):
domain = self.mock_domain(with_classes=True, with_metas=True)
inst = Instance(domain)
self.assertIsInstance(inst, Instance)
self.assertIs(inst.domain, domain)
self.assertEqual(inst._x.shape, (len(self.attributes), ))
self.assertEqual(inst._y.shape, (len(self.class_vars), ))
self.assertEqual(inst._metas.shape, (3, ))
self.assertTrue(all(isnan(x) for x in inst._x))
self.assertTrue(all(isnan(x) for x in inst._y))
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
assert_array_equal(inst._metas, np.array([Unknown, Unknown, None]))
def test_init_x_arr(self):
domain = self.create_domain(["x", DiscreteVariable("g", values="MF")])
vals = np.array([42, 0])
inst = Instance(domain, vals)
assert_array_equal(inst._x, vals)
self.assertEqual(inst._y.shape, (0, ))
self.assertEqual(inst._metas.shape, (0, ))
domain = self.create_domain()
inst = Instance(domain, np.empty((0,)))
self.assertEqual(inst._x.shape, (0, ))
self.assertEqual(inst._y.shape, (0, ))
self.assertEqual(inst._metas.shape, (0, ))
def test_init_x_list(self):
domain = self.create_domain(["x", DiscreteVariable("g", values="MF")])
lst = [42, 0]
vals = np.array(lst)
inst = Instance(domain, vals)
assert_array_equal(inst._x, vals)
self.assertEqual(inst._y.shape, (0, ))
self.assertEqual(inst._metas.shape, (0, ))
domain = self.create_domain()
inst = Instance(domain, [])
self.assertEqual(inst._x.shape, (0, ))
self.assertEqual(inst._y.shape, (0, ))
self.assertEqual(inst._metas.shape, (0, ))
def test_init_xy_arr(self):
domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
[DiscreteVariable("y", values="ABC")])
vals = np.array([42, 0, 1])
inst = Instance(domain, vals)
assert_array_equal(inst._x, vals[:2])
self.assertEqual(inst._y.shape, (1, ))
self.assertEqual(inst._y[0], 1)
self.assertEqual(inst._metas.shape, (0, ))
def test_init_xy_list(self):
domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
[DiscreteVariable("y", values="ABC")])
lst = [42, "M", "C"]
vals = np.array([42, 0, 2])
inst = Instance(domain, vals)
assert_array_equal(inst._x, vals[:2])
self.assertEqual(inst._y.shape, (1, ))
self.assertEqual(inst._y[0], 2)
self.assertEqual(inst._metas.shape, (0, ))
def test_init_xym_arr(self):
domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
[DiscreteVariable("y", values="ABC")],
|
self.metas)
vals = np.array([42, "M", "B", "X", 43, "Foo"], dtype=object)
inst = Instance(domain, vals)
self.assertIsInstance(inst, Instance)
self.assertIs(inst.domain, domain)
self.assertEqual(inst._x.shape, (2, ))
|
self.assertEqual(inst._y.shape, (1, ))
self.assertEqual(inst._metas.shape, (3, ))
assert_array_equal(inst._x, np.array([42, 0]))
self.assertEqual(inst._y[0], 1)
assert_array_equal(inst._metas, np.array([0, 43, "Foo"], dtype=object))
def test_init_xym_list(self):
domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
[DiscreteVariable("y", values="ABC")],
self.metas)
vals = [42, "M", "B", "X", 43, "Foo"]
inst = Instance(domain, vals)
self.assertIsInstance(inst, Instance)
self.assertIs(inst.domain, domain)
self.assertEqual(inst._x.shape, (2, ))
self.assertEqual(inst._y.shape, (1, ))
self.assertEqual(inst._metas.shape, (3, ))
assert_array_equal(inst._x, np.array([42, 0]))
self.assertEqual(inst._y[0], 1)
assert_array_equal(inst._metas, np.array([0, 43, "Foo"], dtype=object))
def test_init_inst(self):
domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
[DiscreteVariable("y", values="ABC")],
self.metas)
vals = [42, "M", "B", "X", 43, "Foo"]
inst = Instance(domain, vals)
inst2 = Instance(domain, inst)
assert_array_equal(inst2._x, np.array([42, 0]))
self.assertEqual(inst2._y[0], 1)
assert_array_equal(inst2._metas, np.array([0, 43, "Foo"], dtype=object))
domain2 = self.create_domain(["z", domain[1], self.metas[1]],
domain.class_vars,
[self.metas[0], "w", domain[0]])
inst2 = Instance(domain2, inst)
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
assert_array_equal(inst2._x, np.array([Unknown, 0, 43]))
self.assertEqual(inst2._y[0], 1)
assert_array_equal(inst2._metas, np.array([0, Unknown, 42],
dtype=object))
def test_get_item(self):
domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
[DiscreteVariable("y", values="ABC")],
self.metas)
vals = [42, "M", "B", "X", 43, "Foo"]
inst = Instance(domain, vals)
val = ins
|
asterix135/whoshouldivotefor
|
explorer/migrations/0005_auto_20170625_0617.py
|
Python
|
mit
| 474
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-25 10:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('explorer', '0004_district_shapefile_link')
|
,
]
operations = [
migrations.AlterField(
model_name='district',
name='shapefile_link'
|
,
field=models.URLField(blank=True, null=True),
),
]
|
goerz/pelican
|
pelican/tests/test_contents.py
|
Python
|
agpl-3.0
| 24,186
| 0.001778
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import six
from sys import platform
import locale
import os.path
from pelican.tests.support import unittest, get_settings
from pelican.contents import Page, Article, Static, URLWrapper, Author, Category
from pelican.settings import DEFAULT_CONFIG
from pelican.utils import path_to_url, truncate_html_words, SafeDatetime, posix_join
from pelican.signals import content_object_init
from jinja2.utils import generate_lorem_ipsum
# generate one paragraph, enclosed with <p>
TEST_CONTENT = str(generate_lorem_ipsum(n=1))
TEST_SUMMARY = generate_lorem_ipsum(n=1, html=False)
class TestPage(unittest.TestCase):
def setUp(self):
super(TestPage, self).setUp()
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, str('C'))
self.page_kwargs = {
'content': TEST_CONTENT,
'context': {
'localsiteurl': '',
},
'metadata': {
'summary': TEST_SUMMARY,
'title': 'foo bar',
'author': Author('Blogger', DEFAULT_CONFIG),
},
'source_path': '/path/to/file/foo.ext'
}
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale)
def test_use_args(self):
# Creating a page with arguments passed to the constructor should use
# them to initialise object's attributes.
metadata = {'foo': 'bar', 'foobar': 'baz', 'title': 'foobar', }
page = Page(TEST_CONTENT, metadata=metadata,
context={'localsiteurl': ''})
for key, value in metadata.items():
self.assertTrue(hasattr(page, key))
self.assertEqual(value, getattr(page, key))
self.assertEqual(page.content, TEST_CONTENT)
def test_mandatory_properties(self):
# If the title is not set, must throw an exception.
page = Page('content')
with self.assertRaises(NameError):
page.check_properties()
page = Page('content', metadata={'title': 'foobar'})
page.check_properties()
def test_summary_from_metadata(self):
# If a :summary: metadata is given, it should be used
page = Page(**self.page_kwargs)
self.assertEqual(page.summary, TEST_SUMMARY)
def test_summary_max_length(self):
# If a :SUMMARY_MAX_LENGTH: is set, and there is no other summary,
# generated summary should not exceed the given length.
page_kwargs = self._copy_page_kwargs()
settings = get_settings()
page_kwargs['settings'] = settings
del page_kwargs['metadata']['summary']
settings['SUMMARY_MAX_LENGTH'] = None
page = Page(**page_kwargs)
self.assertEqual(page.summary, TEST_CONTENT)
settings['SUMMARY_MAX_LENGTH'] = 10
page = Page(**page_kwargs)
self.assertEqual(page.summary, truncate_html_words(TEST_CONTENT, 10))
settings['SUMMARY_MAX_LENGTH'] = 0
page = Page(**page_kwargs)
self.assertEqual(page.summary, '')
def test_slug(self):
page_kwargs = self._copy_page_kwargs()
settings = get_settings()
page_kwargs['settings'] = settings
settings['SLUGIFY_SOURCE'] = "title"
page = Page(**page_kwargs)
self.assertEqual(page.slug, 'foo-bar')
settings['SLUGIFY_SOURCE'] = "basename"
page = Page(**page_kwargs)
self.assertEqual(page.slug, 'foo')
def test_defaultlang(self):
# If no lang is given, default to the default one.
page = Page(**self.page_kwargs)
self.assertEqual(page.lang, DEFAULT_CONFIG['DEFAULT_LANG'])
# it is possible to specify the lang in the metadata infos
self.page_kwargs['metadata'].update({'lang': 'fr', })
page = Page(**self.page_kwargs)
self.assertEqual(page.lang, 'fr')
def test_save_as(self):
# If a lang is not the default lang, save_as should be set
# accordingly.
# if a title is defined, save_as should be set
page = Page(**self.page_kwargs)
self.assertEqual(page.save_as, "pages/foo-bar.html")
# if a language is defined, save_as should include it accordingly
self.page_kwargs['metadata'].update({'lang': 'fr', })
page = Page(**self.page_kwargs)
self.assertEqual(page.save_as, "pages/foo-bar-fr.html")
def test_metadata_url_format(self):
# Arbitrary metadata should be passed through url_format()
page = Page(**self.page_kwargs)
self.assertIn('summary', page.url_format.keys())
page.metadata['directory'] = 'test-dir'
page.settings = get_settings(PAGE_SAVE_AS='{directory}/{slug}')
self.assertEqual(page.save_as, 'test-dir/foo-bar')
def test_datetime(self):
# If DATETIME is set to a tuple, it should be used to override LOCALE
dt = SafeDatetime(2015, 9, 13)
page_kwargs = self._copy_page_kwargs()
# set its date to dt
page_kwargs['metadata']['date'] = dt
page = Page(**page_kwargs)
# page.locale_date is a unicode string in both python2 and python3
|
dt_date = dt.strftime(DEFAULT_CONFIG['DEFAULT_DATE_FORMAT'])
# dt_date is a byte string in python2, and a unicode string in python3
# Let's make sure it is a unicode string (relies on python 3.3 supporting the u prefix)
if type(dt_date) != type(u''):
# python2:
dt_date = unicode(dt_date, 'utf8')
|
self.assertEqual(page.locale_date, dt_date )
page_kwargs['settings'] = get_settings()
# I doubt this can work on all platforms ...
if platform == "win32":
locale = 'jpn'
else:
locale = 'ja_JP.utf8'
page_kwargs['settings']['DATE_FORMATS'] = {'jp': (locale,
'%Y-%m-%d(%a)')}
page_kwargs['metadata']['lang'] = 'jp'
import locale as locale_module
try:
page = Page(**page_kwargs)
self.assertEqual(page.locale_date, '2015-09-13(\u65e5)')
except locale_module.Error:
# The constructor of ``Page`` will try to set the locale to
# ``ja_JP.utf8``. But this attempt will failed when there is no
# such locale in the system. You can see which locales there are
# in your system with ``locale -a`` command.
#
# Until we find some other method to test this functionality, we
# will simply skip this test.
unittest.skip("There is no locale %s in this system." % locale)
def test_template(self):
# Pages default to page, metadata overwrites
default_page = Page(**self.page_kwargs)
self.assertEqual('page', default_page.template)
page_kwargs = self._copy_page_kwargs()
page_kwargs['metadata']['template'] = 'custom'
custom_page = Page(**page_kwargs)
self.assertEqual('custom', custom_page.template)
def _copy_page_kwargs(self):
# make a deep copy of page_kwargs
page_kwargs = dict([(key, self.page_kwargs[key]) for key in
self.page_kwargs])
for key in page_kwargs:
if not isinstance(page_kwargs[key], dict):
break
page_kwargs[key] = dict([(subkey, page_kwargs[key][subkey])
for subkey in page_kwargs[key]])
return page_kwargs
def test_signal(self):
# If a title is given, it should be used to generate the slug.
def receiver_test_function(sender, instance):
pass
content_object_init.connect(receiver_test_function, sender=Page)
Page(**self.page_kwargs)
self.assertTrue(content_object_init.has_receivers_for(Page))
def test_get_content(self):
# Test that the content is updated with the relative links to
# filenames, tags and categories.
settings = get_settings()
args = self.page_kwargs.copy()
args['settings'] = settings
#
|
sirex/datapackage-py
|
datapackage/resource.py
|
Python
|
mit
| 11,528
| 0.000087
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import six
import six.moves.urllib as urllib
import tabulator
from .resource_file import (
InlineResourceFile,
LocalResourceFile,
RemoteResourceFile,
)
class Resource(object):
'''Base class for all Data Package's resource types.
This classes will usually be created by :class:`DataPackage`, and not by
you. If you need to create one, use the :func:`Resource.load` factory
method.
The resources' attributes should only be altered through the
:data:`metadata` dict.
'''
@classmethod
def load(cls, metadata, default_base_path=None):
'''Factory method that loads the resource described in ``metadata``.
It'll first try to load the resource defined in ``metadata`` as a
:class:`TabularResource`. If that
|
fails, it'll fall
|
back to loading it
as a :class:`Resource`.
Args:
metadata (dict): The dict with the resource's metadata
default_base_path (str, optional): The base path to be used in case
the resource's data is in the local disk. Usually this would be
the base path of the `datapackage.json` this resource is in.
Returns:
Resource: The returned resource's class will depend on the type of
resource. If it was tabular, a :class:`TabularResource` will be
returned, otherwise, it'll be a :class:`Resource`.
'''
if TabularResource.can_handle(metadata):
resource_class = TabularResource
else:
resource_class = Resource
return resource_class(metadata, default_base_path)
def __init__(self, metadata, default_base_path=None):
self._metadata = metadata
self._base_path = default_base_path
@property
def metadata(self):
'''dict: The metadata this resource was created with.'''
return self._metadata
@property
def data(self):
'''Returns this resource's data.
The data should not be changed.
Returns:
bytes or data's type: This resource's data. If the data was
inlined, the return type will have the data's type. If not,
it'll be bytes.
Raises:
IOError: If there was some problem opening the data file (e.g. it
doesn't exist or we don't have permissions to read it).
'''
if not hasattr(self, '_data') or \
self._metadata_data_has_changed(self.metadata):
self._data = self._parse_data(self.metadata)
return self._data
@property
def local_data_path(self):
'''str: The absolute local path for the data.'''
path = self._absolute_path(self.metadata.get('path'))
if path:
return os.path.abspath(path)
@property
def remote_data_path(self):
'''str: The remote path for the data, if it exists.
The URL will only be returned if it has a scheme (e.g. http, https,
etc.) by itself or when considering the datapackage's or resource's
base path.
'''
url = self.metadata.get('url')
if url:
return url
else:
path = self._absolute_path(self.metadata.get('path'))
if path and _is_url(path):
return path
@property
def _resource_file(self):
if self._metadata_data_has_changed(self.metadata):
resource_file = self._load_resource_file()
else:
try:
resource_file = self.__resource_file
except AttributeError:
resource_file = self._load_resource_file()
self.__resource_file = resource_file
return self.__resource_file
def iter(self):
'''Lazily iterates over the data.
This method is useful when you don't want to load all data in memory at
once. The returned iterator behaviour depends on the type of the data.
If it's a string, it'll iterate over rows **without removing the
newlines**. The returned data type will be bytes, not string. If it's
any other type, the iterator will simply return it.
Returns:
iter: An iterator that yields this resource.
Raises:
IOError: If there was some problem opening the data file (e.g. it
doesn't exist or we don't have permissions to read it).
'''
if self._resource_file:
return iter(self._resource_file)
else:
raise ValueError('Resource has no data')
def _metadata_data_has_changed(self, metadata):
changed = False
metadata_data_ids = self._metadata_data_ids(metadata)
try:
changed = metadata_data_ids != self._original_metadata_data_ids
except AttributeError:
self._original_metadata_data_ids = metadata_data_ids
return changed
def _metadata_data_ids(self, metadata):
return {
'data_id': id(metadata.get('data')),
'data_path_id': id(metadata.get('path')),
'data_url_id': id(metadata.get('url'))
}
def _load_resource_file(self):
inline_data = self.metadata.get('data')
data_path = self.metadata.get('path')
data_url = self.metadata.get('url')
if inline_data:
return InlineResourceFile(inline_data)
if self.local_data_path and os.path.isfile(self.local_data_path):
return LocalResourceFile(self.local_data_path)
elif self.remote_data_path:
try:
return RemoteResourceFile(self.remote_data_path)
except IOError as e:
if data_url:
return RemoteResourceFile(data_url)
raise e
elif data_url:
return RemoteResourceFile(data_url)
if inline_data or data_path or data_url:
raise IOError('Couldn\'t load resource.')
def _parse_data(self, metadata):
return self._load_data()
def _load_data(self):
if self._resource_file:
return self._resource_file.read()
def _absolute_path(self, path):
if path is None or self._base_path is None:
return path
return os.path.join(self._base_path, path)
class TabularResource(Resource):
'''Subclass of :class:`Resource` that deals with tabular data.
It currently supports CSV, TSV, XLS, XLSX and JSON.
'''
@classmethod
def can_handle(cls, metadata):
'''bool: Returns True if this class can handle the resource in
metadata.'''
def get_extension(path_or_url):
path = urllib.parse.urlparse(path_or_url).path
return path.split('.')[-1].lower()
TABULAR_RESOURCE_FORMATS = ('csv', 'tsv', 'xls', 'xlsx', 'json')
metadata_data = metadata.get('data')
if metadata_data:
try:
cls._raise_if_isnt_tabular_data(metadata_data)
return True
except ValueError:
pass
metadata_format = metadata.get('format', '').lower()
metadata_path = metadata.get('path', '')
metadata_url = metadata.get('url', '')
if metadata_format in TABULAR_RESOURCE_FORMATS or \
get_extension(metadata_path) in TABULAR_RESOURCE_FORMATS or \
get_extension(metadata_url) in TABULAR_RESOURCE_FORMATS:
return True
return False
@staticmethod
def _raise_if_isnt_tabular_data(data):
tabular_types = (
list,
tuple,
)
valid = False
for tabular_type in tabular_types:
if isinstance(data, tabular_type):
valid = True
break
if not valid:
types_str = ', '.join([t.__name__ for t in tabular_types])
msg = 'Expected data type to be any of \'{0}\' but it was \'{1}\''
raise ValueError(msg.format(types_str, type(data).__name__))
d
|
Ladeia/QueroJogar
|
games/migrations/0001_initial.py
|
Python
|
mpl-2.0
| 1,032
| 0.002907
|
# Generated by Django 2.0.1 on 2018-01-19 00:49
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
|
migrations.CreateModel(
name='GameType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=60)),
],
),
migrations.CreateModel(
name='Table',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
('title', models.CharField(max_length=100)),
('description', models.TextField()),
('game_days', models.CharField(max_length=100)),
('game_address', models.TextField()),
('game_type', models.ForeignKey(on_delete='cascade', to='games.GameType')),
],
),
]
|
janbender/PositionBasedDynamics
|
data/scenes/CarScene.py
|
Python
|
mit
| 4,517
| 0.019491
|
from SceneGenerator import *
import math
import random
def scale(vec, s):
vec[0] *= s
vec[1] *= s
vec[2] *= s
return vec
s = 1
scene = generateScene('CarScene', camPosition=[1,5,20], camLookat=[0,0,0])
addParameters(scene, h=0.005, maxIter=50, maxIterVel=50, contactTolerance=0.01, gravity=[0,-2,0], numberOfStepsPerRenderUpdate=10)
# floor
floorScale=[1000, 1, 1000]
floorScale = scale(floorScale, s)
floorT = [0,-0.5,0]
floorT = scale(floorT, s)
addRigidBody(scene, '../models/cube.obj', 2, coScale=floorScale,
scale=floorScale, translation=floorT,
dynamic=0, rest= 0.5)
carX = [0,2,0]
# chassis
restitution = 0.6
frict = 0.4
chassisX = add_vector(carX, [0,-0.1,0.1])
chassisScale = [2.5,0.4,1]
chassisScale = scale(chassisScale, s)
chassis = addRigidBody(scene, '../models/cube.obj', 0, coScale=chassisScale, scale=chassisScale,
translation=chassisX, dynamic=1, rest=restitution, friction=0.0, density = 200)
# damper bodies
damperScale = [0.2,0.1,0.2]
damperScale = scale(damperScale, s)
damperDensity = 50000;
damperX1 = add_vector(carX, [1.75, -0.7, 0.5])
dBody1 = addRigidBody(scene, '../models/cube.obj', 0, coScale=damperScale, scale=damperScale,
translation=damperX1, dynamic=1, rest=restitution, density = damperDensity)
damperX2 = add_vector(carX, [1.75, -0.7, -0.5])
dBody2 = addRigidBody(scene, '../models/cube.obj', 0, coScale=damperScale, scale=damperScale,
translation=damperX2, dynamic=1, rest=restitution, density = damperDensity)
damperX3 = add_vector(carX, [-1.75, -0.7, 0.5])
dBody3 = addRigidBody(scene, '../models/cube.obj', 0, coScale=damperScale, scale=damperScale,
translation=damperX3, dynamic=1, rest=restitution, density = damperDensity)
damperX4 = add_vector(carX, [-1.75, -0.7, -0.5])
dBody4 = addRigidBody(scene, '../models/cube.obj', 0, coScale=damperScale, scale=damperScale,
translation=damperX4, dynamic=1, rest=restitution, density = damperDensity)
# steering
steeringBodyX = add_vector(carX, [-1.75, -0.15, 0])
steeringBodyScale = [0.2,0.1,1]
steeringBodyScale = scale(steeringBodyScale, s)
steeringBody = addRigidBody(scene, '../models/cube.obj', 0, coScale=steeringBodyScale, scale=steeringBodyScale,
translation=steeringBodyX, dynamic=1, rest=restitution, density = 10000)
steeringMotorX = add_vector(carX, [-1.75, -0.4, 0])
addTargetAngleMotorHingeJoint(scene, chassis, steeringBody, steeringMotorX, [0, 1, 0], 0.707, [0,0, 2, 0.707, 8, 0.707, 12, -0.707, 18, -0.707, 20, 0], 1)
# wheels
wheelScale = [0.3,0.3,0.3]
wheelScale = scale(wheelScale, s)
wheelDensity = 600
wheelX1 = add_vector(carX, [1.75, -0.7, 0.9])
wheel1 = addRigidBody(scene, '../models/sphere.obj', 1, coScale=wheelScale, scale=wheelScale,
translation=wheelX1, dynamic=1, rest=restitution, friction=frict, density=wheelDensity)
wheelX2 = add_vector(carX, [1.75, -0.7, -0.9])
wheel2 = addRigidBody(scene, '../models/sphere.obj', 1, coScale=wheelScale, scale=wheelScale,
translation=wheelX2, dynamic=1, rest=restitution, friction=frict, density=wheelDensity)
wheelX3 = add_vector(carX, [-1.75, -0.7, 0.9])
wheel3 = addRigidBody(scene, '../models/sphere.obj', 1, coScale=wheelScale, scale=wheelScale,
translation=wheelX3, dynamic=1, rest=restitution, friction=frict, de
|
nsity=wheelDensity)
wheelX4 = add_vector(carX, [-1.75, -0.7, -0.9])
wheel4 = addRigidBody(scene, '../models/sphere.obj', 1, coScale=wheelScale, scale=wheelScale,
translation=wheelX4, dynamic=1, rest=restitution, friction=frict, density=wheelDensity)
motorX1 = add_vector(carX, [1.75, -0.7, 0.7])
motorX2 = add_vector(carX, [1.75, -0.7, -0.7])
motorX3 = add_vector(carX, [-1.75, -0.7, 0.7])
motorX4 = add_vector(carX, [-1.75, -0.7, -0.7])
addTargetVelocityMotorHingeJoint(scene, dB
|
ody1, wheel1, motorX1, [0, 0, 1], 10.0)
addTargetVelocityMotorHingeJoint(scene, dBody2, wheel2, motorX2, [0, 0, 1], 10.0)
addTargetVelocityMotorHingeJoint(scene, dBody3, wheel3, motorX3, [0, 0, 1], 10.0)
addTargetVelocityMotorHingeJoint(scene, dBody4, wheel4, motorX4, [0, 0, 1], 10.0)
addDamperJoint(scene, chassis, dBody1, [0, 1, 0], 500000.0)
addDamperJoint(scene, chassis, dBody2, [0, 1, 0], 500000.0)
addDamperJoint(scene, steeringBody, dBody3, [0, 1, 0], 500000.0)
addDamperJoint(scene, steeringBody, dBody4, [0, 1, 0], 500000.0)
writeScene(scene, 'CarScene.json')
|
owlabs/incubator-airflow
|
tests/sensors/test_timeout_sensor.py
|
Python
|
apache-2.0
| 3,102
| 0.000322
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import time
from datetime import timedelta
import pytest
from airflow import DAG
from airflow.exceptions import AirflowSensorTimeout, AirflowSkipException
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
from airflow.utils.timezone import datetime
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_test_dag'
class TimeoutTestSensor(BaseSensorOperator):
"""
Sensor that always returns the return_value provided
:param return_value: The value that the sensor's poke method will always return
:type return_value: any
"""
@apply_defaults
def __init__(self,
return_value=False,
*args,
**kwargs):
self.return_value = return_value
super(TimeoutTestSensor, self).__init__(*args, **kwargs)
def poke(self, context):
return self.return_value
def execute(self, context):
started_at = timezone.utcnow()
time_jump = self.params.get('time_jump')
while not self.poke(context):
if time_jump:
started_at -= time_jump
if (timezone.utcnow() - started_at).total_seconds() > self.timeout:
if self.soft_fail:
raise AirflowSkipException('Snap. Time is OUT.')
else:
raise AirflowSensorTimeout('Snap. Time is OUT.')
time.sleep(self.poke_interval)
self.log.info("Success criteria met. Exiting.")
class SensorTimeoutTest(unittest.TestCase):
def setUp(self):
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG(TEST_DAG_ID, default_args
|
=args)
@pytest.mark.quarantined
def test_timeout(self):
t = TimeoutTestSensor(
|
task_id='test_timeout',
execution_timeout=timedelta(days=2),
return_value=False,
poke_interval=5,
params={'time_jump': timedelta(days=2, seconds=1)},
dag=self.dag
)
self.assertRaises(
AirflowSensorTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True
)
|
MozillaSecurity/FuzzManager
|
server/crashmanager/migrations/0001_squashed_0020_add_app_permissions.py
|
Python
|
mpl-2.0
| 15,946
| 0.000564
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-12-09 00:06
from __future__ import unicode_literals
from django.conf import settings
import django.core.files.storage
from django.db import migrations, models
import django.db.migrations.operations.special
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
replaces = [
("crashmanager", "0001_initial"),
("crashmanager", "0002_bugzillatemplate_security"),
("crashmanager", "0003_bucket_frequent"),
("crashmanager", "0004_add_tool"),
("crashmanager", "0005_add_user"),
("crashmanager", "0006_user_defaultproviderid"),
("crashmanager", "0007_bugzillatemplate_comment"),
("crashmanager", "0008_crashentry_crashaddressnumeric"),
("crashmanager", "0009_copy_crashaddress"),
("crashmanager", "0010_bugzillatemplate_security_group"),
("crashmanager", "0011_bucket_permanent"),
("crashmanager", "0012_crashentry_cachedcrashinfo"),
("crashmanager", "0013_init_cachedcrashinfo"),
("crashmanager", "0014_bugzillatemplate_testcase_filename"),
("crashmanager", "0015_crashentry_triagedonce"),
("crashmanager", "0016_auto_20160308_1500"),
("crashmanager", "0017_user_restricted"),
("crashmanager", "0018_auto_20170620_1503"),
("crashmanager", "0019_bucket_optimizedsignature"),
("crashmanager", "0020_add_app_permissions"),
]
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Bucket",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
ser
|
ialize=False,
verbose_name="ID",
),
),
("signature", models.TextField()),
("shortDescription", models.CharField(blank=True, max_length=1023)),
],
),
migrations.CreateModel(
name="Bug",
fields=[
(
|
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("externalId", models.CharField(blank=True, max_length=255)),
("closed", models.DateTimeField(blank=True, null=True)),
],
),
migrations.CreateModel(
name="BugProvider",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("classname", models.CharField(max_length=255)),
("hostname", models.CharField(max_length=255)),
("urlTemplate", models.CharField(max_length=1023)),
],
),
migrations.CreateModel(
name="BugzillaTemplate",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.TextField()),
("product", models.TextField()),
("component", models.TextField()),
("summary", models.TextField(blank=True)),
("version", models.TextField()),
("description", models.TextField(blank=True)),
("whiteboard", models.TextField(blank=True)),
("keywords", models.TextField(blank=True)),
("op_sys", models.TextField(blank=True)),
("platform", models.TextField(blank=True)),
("priority", models.TextField(blank=True)),
("severity", models.TextField(blank=True)),
("alias", models.TextField(blank=True)),
("cc", models.TextField(blank=True)),
("assigned_to", models.TextField(blank=True)),
("qa_contact", models.TextField(blank=True)),
("target_milestone", models.TextField(blank=True)),
("attrs", models.TextField(blank=True)),
("security", models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name="Client",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name="CrashEntry",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created", models.DateTimeField(default=django.utils.timezone.now)),
("rawStdout", models.TextField(blank=True)),
("rawStderr", models.TextField(blank=True)),
("rawCrashData", models.TextField(blank=True)),
("metadata", models.TextField(blank=True)),
("env", models.TextField(blank=True)),
("args", models.TextField(blank=True)),
("crashAddress", models.CharField(blank=True, max_length=255)),
("shortSignature", models.CharField(blank=True, max_length=255)),
(
"bucket",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="crashmanager.Bucket",
),
),
(
"client",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="crashmanager.Client",
),
),
],
),
migrations.CreateModel(
name="OS",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=63)),
("version", models.CharField(blank=True, max_length=127, null=True)),
],
),
migrations.CreateModel(
name="Platform",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=63)),
],
),
migrations.CreateModel(
name="Product",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbos
|
Urumasi/Flask-Bones
|
app/data/__init__.py
|
Python
|
mit
| 4,084
| 0.001959
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
from .models import Group, Firma, User, oauth, OAuthSignIn
from random import randint
from app.utils import fake_firma, fake_user
def populate_db(num_users=5, num_groups=15, num_firms=5):
"""
Fills the data will fake data.
"""
admin_username = 'cburmeister'
admin_email = 'cburmeister@discogs.com'
admin_password = 'test123'
users = []
for _ in range(int(num_users)):
users.append(
fake_user()
)
"""users.append(
User(
admin_username,
admin_email,
admin_password,
fake.ipv4(),
active=True,
is_sadmin=True
)
)"""
for user in users:
db.session.add(user)
firms = []
for _ in range(int(num_firms)):
firms.append(
fake_firma()
)
for firm in firms:
db.session.add(firm)
db.session.commit()
class DataTable(object):
"""
Represents a sortable, filterable, searchable, and paginated set of data,
generated by arguments in the request values.
TODO:
- flask-ext for access to request values?
- throw some custom errors when getting fields, etc
- get rid of the 4 helpers that do the same thing
- should this generate some html to help with visualizing the data?
"""
def __init__(self, model, columns, sortable, searchable, filterable, limits, request):
self.model = model
self.query = self.model.query
self.columns = columns
self.sortable = sortable
self.orders = ['asc', 'desc']
self.searchable = searchable
self.filterable = filterable
self.limits = limits
self.get_selected(request)
for f in self.filterable:
self.selected_filter = request.values.get(f.name, None)
self.filter(f.name, self.selected_filter)
self.search(self.selected_query)
self.sort(self.selected_sort, self.selected_order)
self.paginate(self.selected_page, self.selected_limit)
def get_selected(self, request):
self.selected_sort = request.values.get('sort', self.sortables[0])
self.selected_order = request.values.get('order', self.orders[0])
self.selected_query = request.values.get('query', None)
self.selected_limit = request.values.get('limit', self.limits[1], type=int)
self.selected_page = request.values.get('page', 1, type=int)
@property
def _columns(self):
return [x.name for x in self.columns]
@property
def sortables(self):
return [x.name for x in self.sortable]
@property
def searchables(self):
return [x.name for x i
|
n self.searchable]
@prop
|
erty
def filterables(self):
return [x.name for x in self.filterable]
@property
def colspan(self):
"""Length of all columns."""
return len(self.columns) + len(self.sortable) + len(self.searchable)
def sort(self, field, order):
"""Sorts the data based on a field & order."""
if field in self.sortables and order in self.orders:
field = getattr(getattr(self.model, field), order)
self.query = self.query.order_by(field())
def filter(self, field, value):
"""Filters the query based on a field & value."""
if field and value:
field = getattr(self.model, field)
self.query = self.query.filter(field==value)
def search(self, search_query):
"""Filters the query based on a list of fields & search query."""
if search_query:
search_query = '%%%s%%' % search_query
from sqlalchemy import or_
fields = [getattr(self.model, x) for x in self.searchables]
self.query = self.query.filter(or_(*[x.like(search_query) for x in fields]))
def paginate(self, page, limit):
"""Paginate the query based on a page & limit."""
self.query = self.query.paginate(page, limit)
|
1065865483/0python_script
|
Python/01.py
|
Python
|
mit
| 415
| 0.014164
|
# # a=1
# # b=a
# # print(a,b)
# #
# # for i in range(1,
|
10,2):
# # print(i)
#
# def fun():
# a=10
# print(a)
# return a +100
#
# sun = fun()
# # sun + 100
#
# print(fun())
# print(sun)
file = open('my file.txt','r') # open the file for reading
# content = file.readline() reads only the first line
content = file.readlines() # read all lines and store them as a list
content[3]
print
|
(content[3])
|
tuxfux-hlp-notes/python-batches
|
archieves/batch-64/14-oop/sixth.py
|
Python
|
gpl-3.0
| 462
| 0.025974
|
#!/usr/bin
|
/python
# bigcinemas
class InvalidAge(Exception):
def __init__(self,age):
self.age = age
def validate_age(age):
if age < 18:
raise InvalidAge(age)
else:
return "Welcome to the movies!!"
age = int(raw_input("please enter your age:"))
#print validate_age(age)
try:
validate_age(age)
# except Exception as e:
except InvalidAge as e:
print "Buddy!! you are very young at {}!! Grow up a bit.".format(e.age)
else:
print valida
|
te_age(age)
|
vetu11/piloco
|
bin/paybot.py
|
Python
|
gpl-3.0
| 4,476
| 0.007872
|
# coding=utf-8
from .token import PAYMENT_PROVIDER_TOKEN
from telegram import (LabeledPrice, InlineKeyboardButton, InlineKeyboardMarkup, ParseMode)
from telegram.ext import (MessageHandler, CallbackQueryHandler, Filters, PreCheckoutQueryHandler, ShippingQueryHandler)
import logging
logger = logging.getLogger(__name__)
class TelegramDonation:
def manual_cuantity(self, bot, update):
try:
return 0 # CONVERSATION.END
except: pass
def mostrar_donacion(self, bot, update):
if update.message:
pass # TODO
else:
donation_amount = int(update.callback_query.data.split("-")[1])
if donation_amount - 100:
less = donation_amount - 50
else:
less = donation_amount
more = donation_amount + 50
eur_num_to_text = ("%s" % (donation_amount / 100.0) + "0").replace(".", ",")
msg = "_TEST_ actualmente detrás del código de Piloco hay una única persona trabajando para mejorarlo. " \
"Bla bla bla.\nSi puedes hacer una aportación económica sería de gran ayuda, para mantener los se" \
"rvidores y para poder dedicar más tiempo a Piloco."
keyboard = [[InlineKeyboardBut
|
ton("Cancelar 💔", callback_data="donation_cancel")],
[InlineKeyboardButton("➖", callback_data="donation_new-%s" % less),
Inli
|
neKeyboardButton("%s €" % eur_num_to_text, callback_data="donation_custom"),
InlineKeyboardButton("➕", callback_data="donation_new-%s" % more)],
[InlineKeyboardButton("Donar %s € ❤️" % eur_num_to_text, callback_data="donate-%s" % donation_amount)]]
if update.message:
update.message.reply_text(msg, reply_markup=InlineKeyboardMarkup(keyboard))
else:
update.callback_query.message.edit_text(msg, reply_markup=InlineKeyboardMarkup(keyboard))
update.callback_query.answer()
def start_without_shipping_callback(self, bot, update):
chat_id = update.callback_query.message.chat_id
title = "Donación"
description = "Aportación económica para el mantenimiento y desarrollo de Pilocobot."
payload = "Custom-Payload"
provider_token = PAYMENT_PROVIDER_TOKEN
start_parameter = "test-payment"
currency = "EUR"
price = int(update.callback_query.data.split("-")[1])
prices = [LabeledPrice("Donacion", price)]
update.callback_query.message.edit_reply_markup(reply_markup=InlineKeyboardMarkup([]))
# optionally pass need_name=True, need_phone_number=True,
# need_email=True, need_shipping_address=True, is_flexible=True
bot.sendInvoice(chat_id, title, description, payload,
provider_token, start_parameter, currency, prices)
# after (optional) shipping, it's the pre-checkout
def precheckout_callback(self, bot, update):
query = update.pre_checkout_query
# check the payload, is this from your bot?
if query.invoice_payload != 'Custom-Payload':
# answer False pre_checkout_query
bot.answer_pre_checkout_query(pre_checkout_query_id=query.id, ok=False,
error_message="Parece que ha habido un error")
else:
bot.answer_pre_checkout_query(pre_checkout_query_id=query.id, ok=True)
# finally, after contacting to the payment provider...
def successful_payment_callback(self, bot, update):
# do something after successful receive of payment
update.message.reply_text(
"¡La transacción se ha completado con éxito! Gracias por tu aportación, "
"has recibido %s puntos reputación." % update.message.successful_payment.total_amount)
TelegramDonation = TelegramDonation()
def main():
# Optional handler if your product requires shipping
dp.add_handler(ShippingQueryHandler(shipping_callback))
# Pre-checkout handler to final check
dp.add_handler(PreCheckoutQueryHandler(precheckout_callback))
# Success! Notify your user!
dp.add_handler(MessageHandler(Filters.successful_payment, successful_payment_callback))
dp.add_handler(CallbackQueryHandler(mostrar_donacion, pattern="^donation_new-\d*"))
dp.add_handler(CallbackQueryHandler(start_without_shipping_callback, pattern="^donate-\d*"))
# Start the Bot
updater.start_polling()
|
nawawi/wkhtmltopdf
|
webkit/Source/WebCore/make-file-arrays.py
|
Python
|
lgpl-3.0
| 5,529
| 0.000723
|
#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LI
|
ABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Usage: make-file-arrays.py [--condition=condition-string] --out-h=<header-file-name> --out-cpp=<cpp-file-name> <input-file>...
import os.path
import re
import sys
from optparse import OptionParser
def make_variable_name_and_read(file_name):
result = re.match(r"([\w\d_]+)\.([\w\d_]+)", os.path.basename(file_name))
if not result:
print "Invalid input file name:", os.path.basename(file_name)
sys.exit(1)
variable_name = result.group(1)[0].lower() + result.group(1)[1:] + result.group(2).capitalize()
file = open(file_name, "rb")
content = file.read()
file.close()
return (variable_name, content)
def strip_whitespace_and_comments(file_name, content):
result = re.match(r".*\.([^.]+)", file_name)
if not result:
print "The file name has no extension:", file_name
sys.exit(1)
extension = result.group(1).lower()
multi_line_comment = re.compile(r"/\*.*?\*/", re.MULTILINE | re.DOTALL)
single_line_comment = re.compile(r"//.*$", re.MULTILINE)
repeating_space = re.compile(r"[ \t]+", re.MULTILINE)
leading_space = re.compile(r"^[ \t]+", re.MULTILINE)
trailing_space = re.compile(r"[ \t]+$", re.MULTILINE)
empty_line = re.compile(r"\n+")
if extension == "js":
content = multi_line_comment.sub("", content)
content = single_line_comment.sub("", content)
content = repeating_space.sub(" ", content)
content = leading_space.sub("", content)
content = trailing_space.sub("", content)
content = empty_line.sub("\n", content)
elif extension == "css":
content = multi_line_comment.sub("", content)
content = repeating_space.sub(" ", content)
content = leading_space.sub("", content)
content = trailing_space.sub("", content)
content = empty_line.sub("\n", content)
return content
def main():
parser = OptionParser()
parser.add_option("--out-h", dest="out_header")
parser.add_option("--out-cpp", dest="out_cpp")
parser.add_option("--condition", dest="flag")
(options, args) = parser.parse_args()
if len(args) < 1:
parser.error("Need one or more input files")
if not options.out_header:
parser.error("Need to specify --out-h=filename")
if not options.out_cpp:
parser.error("Need to specify --out-cpp=filename")
if options.flag:
options.flag = options.flag.replace(" AND ", " && ")
options.flag = options.flag.replace(" OR ", " || ")
header_file = open(options.out_header, "w")
if options.flag:
header_file.write("#if " + options.flag + "\n")
header_file.write("namespace WebCore {\n")
cpp_file = open(options.out_cpp, "w")
cpp_file.write("#include \"config.h\"\n")
cpp_file.write("#include \"" + os.path.basename(options.out_header) + "\"\n")
if options.flag:
cpp_file.write("#if " + options.flag + "\n")
cpp_file.write("namespace WebCore {\n")
for file_name in args:
(variable_name, content) = make_variable_name_and_read(file_name)
content = strip_whitespace_and_comments(file_name, content)
size = len(content)
header_file.write("extern const char %s[%d];\n" % (variable_name, size))
cpp_file.write("const char %s[%d] = {\n" % (variable_name, size))
for index in range(size):
char_code = ord(content[index])
if char_code < 128:
cpp_file.write("%d" % char_code)
else:
cpp_file.write("'\\x%02x'" % char_code)
cpp_file.write("," if index != len(content) - 1 else "};\n")
if index % 20 == 19:
cpp_file.write("\n")
cpp_file.write("\n")
header_file.write("}\n")
if options.flag:
header_file.write("#endif\n")
header_file.close()
cpp_file.write("}\n")
if options.flag:
cpp_file.write("#endif\n")
cpp_file.close()
if __name__ == "__main__":
main()
|
Nitrate/Nitrate
|
src/tcms/core/models/__init__.py
|
Python
|
gpl-2.0
| 2,359
| 0.001696
|
# -*- coding: utf-8 -*-
# isort: skip_file
from django.contrib.auth.models import User
from django.db import models
# This line cannot move to the below according to the isort linter.
# Resolve it firstly, then apply isort again.
from .base import TCMSContentTypeBaseModel # noqa
from tcms.logs.views import TCMSLog
from tcms.testruns import signals as run_watchers # noqa
from tcms.xmlrpc.serializer import XMLRPCSerializer
from .base import UrlMixin
User._meta.ordering = ["username"]
class TCMSActionModel(models.Model, UrlMixin):
"""
TCMS action models.
Use for global log system.
"""
class Meta:
abstract = True
@classmethod
def to_xmlrpc(cls, query={}):
"""
Convert the query set for XMLRPC
"""
s = XMLRPCSerializer(queryset=cls.objects.filter(**query).order_by("pk"))
return s.serialize_queryset()
def serialize(self):
"""
Convert the model for XMLPRC
"""
s = XMLRPCSerializer(model=self)
return s.serialize_model()
def log(self):
log = TCMSLog(model=self)
return log.list()
def log_ac
|
tion(self, who, new_value, field="", original_value=""):
log = TCMSLog(model=self)
log.make(who=who, field=field, original_value=original_value, new_value=new_value)
return log
def clean(self):
strip_types = (
models.CharField,
models.TextField,
models.URLField,
models.EmailField,
models.IPAddressField,
models.Gen
|
ericIPAddressField,
models.SlugField,
)
# FIXME: reconsider alternative solution
# It makes no sense to add field name each time when a new field is
# added and it accepts values containing either \t, \r and \n.
ignored_fields = ("notes", "issue_report_params", "issue_report_templ")
for field in self._meta.fields:
# TODO: hardcode 'notes' here
if field.name not in ignored_fields and isinstance(field, strip_types):
value = getattr(self, field.name)
if value:
setattr(
self,
field.name,
value.replace("\t", " ").replace("\n", " ").replace("\r", " "),
)
|
Shaps/ansible
|
lib/ansible/modules/files/file.py
|
Python
|
gpl-3.0
| 37,706
| 0.003739
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: file
version_added: historical
short_description: Manage files and file properties
extends_documentation_fragment: files
description:
- Set attributes of files, symlinks or directories.
- Alternatively, remove files, symlinks or directories.
- Many other modules support the same options as the C(file) module - including M(copy), M(template), and M(assemble).
- For Windows targets, use the M(win_file) module instead.
options:
path:
description:
- Path to the file being managed.
type: path
required: yes
aliases: [ dest, name ]
state:
description:
- If C(absent), directories will be recursively deleted, and files or symlinks will
be unlinked. In the case of a directory, if C(diff) is declared, you will see the files and folders deleted listed
under C(path_contents). Note that C(absent) will not cause C(file) to fail if the C(path) does
not exist as the state did not change.
- If C(directory), all intermediate subdirectories will be created if they
do not exist. Since Ansible 1.7 they will be created with the supplied permissions.
- If C(file), without any other options this works mostly as a 'stat' and will return the current state of C(path).
Even with other options (i.e C(mode)), the file will be modified but will NOT be created if it does not exist;
see the C(touch) value or the M(copy) or M(template) module if you want that behavior.
- If C(hard), the hard link will be created or changed.
- If C(link), the symbolic link will be created or changed.
- If C(touch) (new in 1.4), an empty file will be created if the C(path) does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way C(touch) works from the command line).
type: str
default: file
choices: [ absent, directory, file, hard, link, touch ]
src:
description:
- Path of the file to link to.
- This applies only to C(state=link) and C(state=hard).
- For C(state=link), this will also accept a non-existing path.
- Relative paths are relative to the file being created (C(path)) which is how
the Unix command C(ln -s SRC DEST) treats relative paths.
type: path
recurse:
description:
- Recursively set the specified file attributes on directory contents.
- This applies only when C(state) is set to C(directory).
type: bool
default: no
version_added: '1.1'
force:
description:
- >
Force the creation of the symlinks in two cases: the source file does
not exist (but will appear later); the destination exists and is a file (so, we need to unlink the
C(path) file and create symlink to the C(src) file in place of it).
type: boo
|
l
default: no
follow:
description:
- This flag indicates that filesys
|
tem links, if they exist, should be followed.
- Previous to Ansible 2.5, this was C(no) by default.
type: bool
default: yes
version_added: '1.8'
modification_time:
description:
- This parameter indicates the time the file's modification time should be set to.
- Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or C(now).
- Default is None meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is default for C(state=touch).
type: str
version_added: "2.7"
modification_time_format:
description:
- When used with C(modification_time), indicates the time format that must be used.
- Based on default Python format (see time.strftime doc).
type: str
default: "%Y%m%d%H%M.%S"
version_added: '2.7'
access_time:
description:
- This parameter indicates the time the file's access time should be set to.
- Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or C(now).
- Default is C(None) meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is default for C(state=touch).
type: str
version_added: '2.7'
access_time_format:
description:
- When used with C(access_time), indicates the time format that must be used.
- Based on default Python format (see time.strftime doc).
type: str
default: "%Y%m%d%H%M.%S"
version_added: '2.7'
seealso:
- module: assemble
- module: copy
- module: stat
- module: template
- module: win_file
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = r'''
- name: Change file ownership, group and permissions
file:
path: /etc/foo.conf
owner: foo
group: foo
mode: '0644'
- name: Give insecure permissions to an existing file
file:
path: /work
owner: root
group: root
mode: '1777'
- name: Create a symbolic link
file:
src: /file/to/link/to
dest: /path/to/symlink
owner: foo
group: foo
state: link
- name: Create two hard links
file:
src: '/tmp/{{ item.src }}'
dest: '{{ item.dest }}'
state: hard
loop:
- { src: x, dest: y }
- { src: z, dest: k }
- name: Touch a file, using symbolic modes to set the permissions (equivalent to 0644)
file:
path: /etc/foo.conf
state: touch
mode: u=rw,g=r,o=r
- name: Touch the same file, but add/remove some permissions
file:
path: /etc/foo.conf
state: touch
mode: u+rw,g-wx,o-rwx
- name: Touch again the same file, but dont change times this makes the task idempotent
file:
path: /etc/foo.conf
state: touch
mode: u+rw,g-wx,o-rwx
modification_time: preserve
access_time: preserve
- name: Create a directory if it does not exist
file:
path: /etc/some_directory
state: directory
mode: '0755'
- name: Update modification and access time of given file
file:
path: /etc/some_file
state: file
modification_time: now
access_time: now
- name: Set access time based on seconds from epoch value
file:
path: /etc/another_file
state: file
access_time: '{{ "%Y%m%d%H%M.%S" | strftime(stat_var.stat.atime) }}'
- name: Recursively change ownership of a directory
file:
path: /etc/foo
state: directory
recurse: yes
owner: foo
group: foo
- name: Remove file (delete file)
file:
path: /etc/foo.txt
state: absent
- name: Recursively remove directory
file:
path: /etc/foo
state: absent
'''
RETURN = r'''
'''
import errno
import os
import shutil
import sys
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
# There will only be a single AnsibleModule object per module
module = None
class AnsibleModuleError(Exception):
def __init__(self, results):
self.results = results
def __repr__(self):
print('AnsibleModuleError(results={0})'.format(self.results))
class ParameterError(AnsibleModuleError):
pass
class Sentinel(object):
def __new__(cls, *args, **kwargs):
return cls
def _ansible_excepthook(exc_type, exc_value, tb):
# Using an exception allows us to catch it if the calling code knows it can recover
if issubclass(exc_type, AnsibleModuleError):
module.fail_json(**exc_value.results)
else:
sys.__excepthook__(exc_type, exc_value, tb)
def additional_parameter_handling(params):
"""Additional parameter validation and reformatting"""
# When path is a directory, rewrite the pathname to be the file inside of the directory
# TODO: Why do we exclude link? Why don't we exclude directory? Should we exclude touch?
# I think this is where we want to be in the futu
|
DataONEorg/d1_python
|
gmn/src/d1_gmn/app/revision.py
|
Python
|
apache-2.0
| 18,605
| 0.002687
|
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for manipulating revision chains in the database."""
import d1_common.types.exceptions
import d1_gmn.app
import d1_gmn.app.did
import d1_gmn.app.model_util
import d1_gmn.app.models
def create_or_update_chain(pid, sid, obsoletes_pid, obsoleted_by_pid):
chain_model = _get_chain_by_pid(pid
|
)
if chain_model:
_set_chain_sid(chain_model, sid)
else:
_add_sciobj(pid, sid, obsoletes_
|
pid, obsoleted_by_pid)
_update_sid_to_last_existing_pid_map(pid)
def delete_chain(pid):
pid_to_chain_model = d1_gmn.app.models.ChainMember.objects.get(pid__did=pid)
chain_model = pid_to_chain_model.chain
pid_to_chain_model.delete()
if not d1_gmn.app.models.ChainMember.objects.filter(chain=chain_model).exists():
if chain_model.sid:
# Cascades back to chain_model.
d1_gmn.app.models.IdNamespace.objects.filter(
did=chain_model.sid.did
).delete()
else:
chain_model.delete()
def cut_from_chain(sciobj_model):
"""Remove an object from a revision chain.
The object can be at any location in the chain, including the head or tail.
Preconditions:
- The object with the pid is verified to exist and to be a member of an
revision chain. E.g., with:
d1_gmn.app.views.asserts.is_existing_object(pid)
d1_gmn.app.views.asserts.is_in_revision_chain(pid)
Postconditions:
- The given object is a standalone object with empty obsoletes, obsoletedBy and
seriesId fields.
- The previously adjacent objects in the chain are adjusted to close any gap that
was created or remove dangling reference at the head or tail.
- If the object was the last object in the chain and the chain has a SID, the SID
reference is shifted over to the new last object in the chain.
"""
if _is_head(sciobj_model):
old_pid = sciobj_model.obsoletes.did
_cut_head_from_chain(sciobj_model)
elif _is_tail(sciobj_model):
old_pid = sciobj_model.obsoleted_by.did
_cut_tail_from_chain(sciobj_model)
else:
old_pid = sciobj_model.obsoleted_by.did
_cut_embedded_from_chain(sciobj_model)
_update_sid_to_last_existing_pid_map(old_pid)
def get_all_pid_by_sid(sid):
return [c.pid.did for c in _get_all_chain_member_queryset_by_sid(sid)]
# def set_revision(pid, obsoletes_pid=None, obsoleted_by_pid=None):
# sciobj_model = d1_gmn.app.util.get_sci_model(pid)
# set_revision_links(sciobj_model, obsoletes_pid, obsoleted_by_pid)
# sciobj_model.save()
def resolve_sid(sid):
"""Get the PID to which the ``sid`` currently maps.
Preconditions:
- ``sid`` is verified to exist. E.g., with d1_gmn.app.views.asserts.is_sid().
"""
return d1_gmn.app.models.Chain.objects.get(sid__did=sid).head_pid.did
def get_sid_by_pid(pid):
"""Given the ``pid`` of the object in a chain, return the SID for the chain.
Return None if there is no SID for the chain. This operation is also valid
for standalone objects which may or may not have a SID.
This is the reverse of resolve.
All known PIDs are associated with a chain.
Preconditions:
- ``pid`` is verified to exist. E.g., with
d1_gmn.app.views.asserts.is_existing_object().
"""
return d1_gmn.app.did.get_did_by_foreign_key(_get_chain_by_pid(pid).sid)
def set_revision_links(sciobj_model, obsoletes_pid=None, obsoleted_by_pid=None):
if obsoletes_pid:
sciobj_model.obsoletes = d1_gmn.app.did.get_or_create_did(obsoletes_pid)
_set_revision_reverse(sciobj_model.pid.did, obsoletes_pid, is_obsoletes=False)
if obsoleted_by_pid:
sciobj_model.obsoleted_by = d1_gmn.app.did.get_or_create_did(obsoleted_by_pid)
_set_revision_reverse(sciobj_model.pid.did, obsoleted_by_pid, is_obsoletes=True)
sciobj_model.save()
def is_obsoletes_pid(pid):
"""Return True if ``pid`` is referenced in the obsoletes field of any object.
This will return True even if the PID is in the obsoletes field of an object that
does not exist on the local MN, such as replica that is in an incomplete chain.
"""
return d1_gmn.app.models.ScienceObject.objects.filter(obsoletes__did=pid).exists()
def is_obsoleted_by_pid(pid):
"""Return True if ``pid`` is referenced in the obsoletedBy field of any object.
This will return True even if the PID is in the obsoletes field of an object that
does not exist on the local MN, such as replica that is in an incomplete chain.
"""
return d1_gmn.app.models.ScienceObject.objects.filter(
obsoleted_by__did=pid
).exists()
def is_revision(pid):
"""Return True if ``pid`` is referenced in the obsoletes or obsoletedBy field of any
object.
This will return True even if the PID is in the obsoletes field of an object that
does not exist on the local MN, such as replica that is in an incomplete chain.
"""
return is_obsoletes_pid(pid) or is_obsoleted_by_pid(pid)
def _add_sciobj(pid, sid, obsoletes_pid, obsoleted_by_pid):
is_added = _add_to_chain(pid, sid, obsoletes_pid, obsoleted_by_pid)
if not is_added:
# if not obsoletes_pid and not obsoleted_by_pid:
_add_standalone(pid, sid)
# else:
def _add_standalone(pid, sid):
# assert_sid_unused(sid)
_create_chain(pid, sid)
def _add_to_chain(pid, sid, obsoletes_pid, obsoleted_by_pid):
_assert_sid_is_in_chain(sid, obsoletes_pid)
_assert_sid_is_in_chain(sid, obsoleted_by_pid)
obsoletes_chain_model = _get_chain_by_pid(obsoletes_pid)
obsoleted_by_chain_model = _get_chain_by_pid(obsoleted_by_pid)
sid_chain_model = _get_chain_by_sid(sid) if sid else None
chain_model = obsoletes_chain_model or obsoleted_by_chain_model or sid_chain_model
if not chain_model:
return False
if obsoletes_chain_model and obsoletes_chain_model != chain_model:
_merge_chains(chain_model, obsoletes_chain_model)
if obsoleted_by_chain_model and obsoleted_by_chain_model != chain_model:
_merge_chains(chain_model, obsoleted_by_chain_model)
_add_pid_to_chain(chain_model, pid)
_set_chain_sid(chain_model, sid)
return True
def _merge_chains(chain_model_a, chain_model_b):
"""Merge two chains.
For use when it becomes known that two chains that were created separately
actually are separate sections of the same chain
E.g.:
- A obsoleted by X is created. A has no SID. X does not exist yet. A chain is
created for A.
- B obsoleting Y is created. B has SID. Y does not exist yet. A chain is created
for B.
- C obsoleting X, obsoleted by Y is created. C tells us that X and Y are in the
same chain, which means that A and B are in the same chain. At this point, the
two chains need to be merged. Merging the chains causes A to take on the SID of
B.
"""
_set_chain_sid(
chain_model_a, d1_gmn.app.did.get_did_by_foreign_key(chain_model_b.sid)
)
for member_model in _get_all_chain_member_queryset_by_chain(chain_model_b):
member_model.chain = chain_model_a
member_model.save()
chain_model_b.delete()
def _add_pid_to_chain(chain_model, pid):
chain_member_model = d1_gmn.app.models.ChainMember(
chain=chain_model, pid=d1_gmn.app.did.get_or_create_did(pid)
)
chain_member_model.save()
de
|
luzfcb/django-configurations
|
tests/test_values.py
|
Python
|
bsd-3-clause
| 12,772
| 0.000313
|
import decimal
import os
from contextlib import contextmanager
from django.test import TestCase
from django.core.exceptions import ImproperlyConfigured
from mock import patch
from configurations.values import (Value, BooleanValue, IntegerValue,
FloatValue, DecimalValue, ListValue,
TupleValue, SetValue, DictValue,
URLValue, EmailValue, IPValue,
RegexValue, PathValue, SecretValue,
DatabaseURLValue, EmailURLValue,
CacheURLValue, BackendsValue,
CastingMixin, SearchURLValue)
@contextmanager
def env(**kwargs):
with patch.dict(os.environ, clear=True, **kwargs):
yield
class FailingCasterValue(CastingMixin, Value):
caster = 'non.existing.caster'
class ValueTests(TestCase):
def test_value(self):
value = Value('default', environ=False)
self.assertEqual(value.setup('TEST'), 'default')
with env(DJANGO_TEST='override'):
self.assertEqual(value.setup('TEST'), 'default')
@patch.dict(os.environ, clear=True, DJANGO_TEST='override')
def test_env_var(self):
value = Value('default')
self.assertEqual(value.setup('TEST'), 'override')
self.assertNotEqual(value.setup('TEST'), value.default)
self.assertEqual(value.to_python(os.environ['DJANGO_TEST']),
value.setup('TEST'))
def test_value_reuse(self):
value1 = Value('default')
value2 = Value(value1)
self.assertEqual(value1.setup('TEST1'), 'default')
self.assertEqual(value2.setup('TEST2'), 'default')
with env(DJANGO_TEST1='override1', DJANGO_TEST2='override2'):
self.assertEqual(value1.setup('TEST1'), 'override1')
self.assertEqual(value2.setup('TEST2'), 'override2')
def test_env_var_prefix(self):
with patch.dict(os.environ, clear=True, ACME_TEST='override'):
value = Value('default', environ_prefix='ACME')
self.assertEqual(value.setup('TEST'), 'override')
with patch.dict(os.environ, clear=True, TEST='override'):
value = Value('default', environ_prefix='')
self.assertEqual(value.setup('TEST'), 'override')
def test_boolean_values_true(self):
value = BooleanValue(False)
for truthy in value.true_values:
with env(DJANGO_TEST=truthy):
self.assertTrue(value.setup('TEST'))
def test_boolean_values_faulty(self):
self.assertRaises(ValueError, BooleanValue, 'false')
def test_boolean_values_false(self):
value = BooleanValue(True)
for falsy in value.false_values:
with env(DJANGO_TEST=falsy):
self.assertFalse(value.setup('TEST'))
def test_boolean_values_nonboolean(self):
value = BooleanValue(True)
with env(DJANGO_TEST='nonboolean'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_integer_values(self):
value = IntegerValue(1)
with env(DJANGO_TEST='2'):
self.assertEqual(value.setup('TEST'), 2)
with env(DJANGO_TEST='noninteger'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_float_values(self):
value = FloatValue(1.0)
with env(DJANGO_TEST='2.0'):
self.assertEqual(value.setup('TEST'), 2.0)
with env(DJANGO_TEST='noninteger'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_decimal_values(self):
value = DecimalValue(decimal.Decimal(1))
with env(DJANGO_TEST='2'):
self.assertEqual(value.setup('TEST'), decimal.Decimal(2))
with env(DJANGO_TEST='nondecimal'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_failing_caster(self):
self.assertRaises(ImproperlyConfigured, FailingCasterValue)
def test_list_values_default(self):
value = ListValue()
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), ['2', '2'])
with env(DJANGO_TEST='2, 2 ,'):
self.assertEqual(value.setup('TEST'), ['2', '2'])
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), [])
def test_list_values_separator(self):
value = ListValue(separator=':')
with env(DJANGO_TEST='/usr/bin:/usr/sbin:/usr/local/bin'):
self.assertEqual(value.setup('TEST'),
['/usr/bin', '/usr/sbin', '/usr/local/bin'])
def test_List_values_converter(self):
value = ListValue(converter=int)
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), [2, 2])
value = ListValue(converter=float)
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), [2.0, 2.0])
def test_list_values_custom_converter(self):
value = ListValue(converter=lambda x: x * 2)
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), ['22', '22'])
def test_list_values_converter_exception(self):
value = ListValue(converter=int)
with env(DJANGO_TEST='2,b'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_tuple_values_default(self):
value = TupleValue()
|
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), ('2', '2'
|
))
with env(DJANGO_TEST='2, 2 ,'):
self.assertEqual(value.setup('TEST'), ('2', '2'))
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), ())
def test_set_values_default(self):
value = SetValue()
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), set(['2', '2']))
with env(DJANGO_TEST='2, 2 ,'):
self.assertEqual(value.setup('TEST'), set(['2', '2']))
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), set())
def test_dict_values_default(self):
value = DictValue()
with env(DJANGO_TEST='{2: 2}'):
self.assertEqual(value.setup('TEST'), {2: 2})
expected = {2: 2, '3': '3', '4': [1, 2, 3]}
with env(DJANGO_TEST="{2: 2, '3': '3', '4': [1, 2, 3]}"):
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST="""{
2: 2,
'3': '3',
'4': [1, 2, 3],
}"""):
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), {})
with env(DJANGO_TEST='spam'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_email_values(self):
value = EmailValue('spam@eg.gs')
with env(DJANGO_TEST='spam@sp.am'):
self.assertEqual(value.setup('TEST'), 'spam@sp.am')
with env(DJANGO_TEST='spam'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_url_values(self):
value = URLValue('http://eggs.spam')
with env(DJANGO_TEST='http://spam.eggs'):
self.assertEqual(value.setup('TEST'), 'http://spam.eggs')
with env(DJANGO_TEST='httb://spam.eggs'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_ip_values(self):
value = IPValue('0.0.0.0')
with env(DJANGO_TEST='127.0.0.1'):
self.assertEqual(value.setup('TEST'), '127.0.0.1')
with env(DJANGO_TEST='::1'):
self.assertEqual(value.setup('TEST'), '::1')
with env(DJANGO_TEST='spam.eggs'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_regex_values(self):
value = RegexValue('000--000', regex=r'\d+--\d+')
with env(DJANGO_TEST='123--456'):
self.assertEqual(value.setup('TEST'), '123--456')
with env(DJANGO_TEST='123456'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_path_values_with_check(self):
value = PathValue()
|