"""
This module provides functions for generating libxml2 documents (xmlDoc).
Constructors must receive a Response object and return an xmlDoc object.
"""
import libxml2
xml_parser_options = libxml2.XML_PARSE_RECOVER + \
libxml2.XML_PARSE_NOERROR + \
libxml2.XML_PARSE_NOWARNING
html_parser_options = libxml2.HTML_PARSE_RECOVER + \
libxml2.HTML_PARSE_NOERROR + \
libxml2.HTML_PARSE_NOWARNING
utf8_encodings = set(('utf-8', 'UTF-8', 'utf8', 'UTF8'))
def body_as_utf8(response):
if response.encoding in utf8_encodings:
return response.body
else:
return response.body_as_unicode().encode('utf-8')
def xmlDoc_from_html(response):
"""Return libxml2 doc for HTMLs"""
utf8body = body_as_utf8(response) or ' '
try:
lxdoc = libxml2.htmlReadDoc(utf8body, response.url, 'utf-8', \
html_parser_options)
except TypeError: # libxml2 doesn't parse text with null bytes
lxdoc = libxml2.htmlReadDoc(utf8body.replace("\x00", ""), response.url, \
'utf-8', html_parser_options)
return lxdoc
def xmlDoc_from_xml(response):
"""Return libxml2 doc for XMLs"""
utf8body = body_as_utf8(response) or ' '
try:
lxdoc = libxml2.readDoc(utf8body, response.url, 'utf-8', \
xml_parser_options)
except TypeError: # libxml2 doesn't parse text with null bytes
lxdoc = libxml2.readDoc(utf8body.replace("\x00", ""), response.url, \
'utf-8', xml_parser_options)
return lxdoc
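# A minimal usage sketch (assumes Scrapy's HtmlResponse; the Response type
# itself is not defined in this module):
#
#     from scrapy.http import HtmlResponse
#     response = HtmlResponse(url='http://example.com',
#                             body='<html><body><p>hi</p></body></html>')
#     doc = xmlDoc_from_html(response)
#     try:
#         print [n.content for n in doc.xpathEval('//p')]
#     finally:
#         doc.freeDoc()  # libxml2 documents must be freed explicitly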
| {
"content_hash": "c395e77363b7bcfef09b42b8ab05540c",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 81,
"avg_line_length": 35.17777777777778,
"alnum_prop": 0.6279216677195198,
"repo_name": "ndemir/scrapy",
"id": "44dc4f94f4d5becebe12a58735029aeae0e79bdd",
"size": "1583",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "scrapy/selector/factories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1009598"
},
{
"name": "Shell",
"bytes": "4928"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import logging
from dataclasses import dataclass
from typing import Iterable
from pants.backend.cc.lint.clangformat.subsystem import ClangFormat
from pants.backend.cc.target_types import CCSourceField
from pants.backend.python.util_rules.pex import Pex, PexProcess, PexRequest
from pants.core.goals.fmt import FmtResult, FmtTargetsRequest
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.core.util_rules.partitions import PartitionerType
from pants.engine.fs import Digest, MergeDigests
from pants.engine.process import ProcessResult
from pants.engine.rules import Get, MultiGet, Rule, collect_rules, rule
from pants.engine.target import FieldSet
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class ClangFormatFmtFieldSet(FieldSet):
required_fields = (CCSourceField,)
sources: CCSourceField
class ClangFormatRequest(FmtTargetsRequest):
field_set_type = ClangFormatFmtFieldSet
tool_subsystem = ClangFormat
partitioner_type = PartitionerType.DEFAULT_SINGLE_PARTITION
@rule(level=LogLevel.DEBUG)
async def clangformat_fmt(request: ClangFormatRequest.Batch, clangformat: ClangFormat) -> FmtResult:
# Look for any/all of the clang-format configuration files (recurse sub-dirs)
config_files_get = Get(
ConfigFiles,
ConfigFilesRequest,
clangformat.config_request(request.snapshot.dirs),
)
clangformat_pex, config_files = await MultiGet(
Get(Pex, PexRequest, clangformat.to_pex_request()), config_files_get
)
# Merge source files, config files, and clang-format pex process
input_digest = await Get(
Digest,
MergeDigests(
[
request.snapshot.digest,
config_files.snapshot.digest,
clangformat_pex.digest,
]
),
)
result = await Get(
ProcessResult,
PexProcess(
clangformat_pex,
argv=(
"--style=file", # Look for .clang-format files
"--fallback-style=webkit", # Use WebKit if there is no config file
"-i", # In-place edits
"--Werror", # Formatting warnings as errors
*clangformat.args, # User-added arguments
*request.files,
),
input_digest=input_digest,
output_files=request.files,
description=f"Run clang-format on {pluralize(len(request.files), 'file')}.",
level=LogLevel.DEBUG,
),
)
return await FmtResult.create(request, result, strip_chroot_path=True)
def rules() -> Iterable[Rule | UnionRule]:
return (
*collect_rules(),
*ClangFormatRequest.rules(),
)
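# Sketch of how a plugin's register.py would typically expose these rules
# (the import path mirrors this module's location and is an assumption):
#
#     from pants.backend.cc.lint.clangformat import rules as clangformat_rules
#
#     def rules():
#         return clangformat_rules.rules()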
| {
"content_hash": "c7e3efb1e9382ab737993d055cb8dd46",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 100,
"avg_line_length": 32.96590909090909,
"alnum_prop": 0.6780420544639779,
"repo_name": "pantsbuild/pants",
"id": "516e1d07faeb6ea444f725436f0cc663f39d8466",
"size": "3033",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/cc/lint/clangformat/rules.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Dockerfile",
"bytes": "1132"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "97190"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3771"
},
{
"name": "Python",
"bytes": "7582858"
},
{
"name": "Rust",
"bytes": "1657282"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31400"
},
{
"name": "Starlark",
"bytes": "76892"
}
],
"symlink_target": ""
} |
import zmq
class Sender(object):
def __init__(self):
self.port = '5555'
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PAIR)
self.socket.connect("tcp://localhost:%s" % self.port)
    def send(self, command):
        try:
            self.socket.send(command, flags=zmq.NOBLOCK)
        except zmq.ZMQError:
            # A non-blocking send fails when no peer is connected or the
            # queue is full; the command is deliberately dropped then.
            pass | {
"content_hash": "cb4423d114c71d08f14d014e00079852",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 61,
"avg_line_length": 25.4,
"alnum_prop": 0.5695538057742782,
"repo_name": "uberspaceguru/GiantTetris",
"id": "c50815b9eee3f35000c96b08b540fdf599199f32",
"size": "381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/tetris_web/app/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "3785"
},
{
"name": "CSS",
"bytes": "450"
},
{
"name": "Python",
"bytes": "8871"
}
],
"symlink_target": ""
} |
def default_getter(attribute=None):
"""a default method for missing renderer method
for example, the support to write data in a specific file type
is missing but the support to read data exists
"""
def none_presenter(_, **__):
"""docstring is assigned a few lines down the line"""
raise NotImplementedError("%s getter is not defined." % attribute)
none_presenter.__doc__ = "%s getter is not defined." % attribute
return none_presenter
def default_setter(attribute=None):
"""a default method for missing parser method
for example, the support to read data in a specific file type
is missing but the support to write data exists
"""
def none_importer(_x, _y, **_z):
"""docstring is assigned a few lines down the line"""
raise NotImplementedError("%s setter is not defined." % attribute)
none_importer.__doc__ = "%s setter is not defined." % attribute
return none_importer
def make_a_property(
cls,
attribute,
doc_string,
getter_func=default_getter,
setter_func=default_setter,
):
"""
create custom attributes for each class
"""
getter = getter_func(attribute)
setter = setter_func(attribute)
attribute_property = property(
# note:
# without fget, fset, pypy 5.4.0 crashes randomly.
fget=getter,
fset=setter,
doc=doc_string,
)
if "." in attribute:
attribute = attribute.replace(".", "_")
else:
attribute = attribute
setattr(cls, attribute, attribute_property)
setattr(cls, "get_%s" % attribute, getter)
setattr(cls, "set_%s" % attribute, setter)
| {
"content_hash": "109f6632bd50384561b4a57225377463",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 74,
"avg_line_length": 29.732142857142858,
"alnum_prop": 0.6408408408408408,
"repo_name": "chfw/pyexcel",
"id": "ad08709330840db0fc7d0bd27d393017de1900c0",
"size": "1665",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyexcel/internal/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "455"
},
{
"name": "Makefile",
"bytes": "145"
},
{
"name": "Python",
"bytes": "403196"
},
{
"name": "Shell",
"bytes": "566"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from mox3.mox import IsA # noqa
from openstack_dashboard.test import helpers as test
from nec_portal.dashboards.admin.links import fixture
from nec_portal.dashboards.admin.links import views
INDEX_URL = reverse('horizon:admin:links:index')
class LinksViewTests(test.BaseAdminViewTests):
"""A test of the screen of links's index.
CheckPoint 1. A expected template is used.
CheckPoint 2. A expected context is returned.
"""
@test.create_stubs({views.IndexView: ('_get_admin_links',)})
@test.create_stubs({views.IndexView: ('_get_user_roles',)})
def test_links(self):
views.IndexView._get_user_roles().AndReturn(fixture.ROLES)
views.IndexView._get_admin_links().AndReturn(fixture.GET_LINKS)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'admin/links/_index.html')
self.assertEqual(res.context['link_list'], fixture.RESULT_LINKS)
| {
"content_hash": "0c1ea3ea86abbf0c4e4901ff2f23484a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 72,
"avg_line_length": 33.12903225806452,
"alnum_prop": 0.6893865628042843,
"repo_name": "NECCSiPortal/NECCSPortal-dashboard",
"id": "4ee807408eeb294e8f1f70ba74c6653a9a92b8e3",
"size": "1595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nec_portal/dashboards/admin/links/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44207"
},
{
"name": "HTML",
"bytes": "19750"
},
{
"name": "Python",
"bytes": "131623"
},
{
"name": "Shell",
"bytes": "18954"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="side",
parent_name="layout.coloraxis.colorbar.title",
**kwargs
):
super(SideValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["right", "top", "bottom"]),
**kwargs
)
| {
"content_hash": "e8bec0a01cec045c6dfbcedc6f30ef89",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 70,
"avg_line_length": 31.833333333333332,
"alnum_prop": 0.5671902268760908,
"repo_name": "plotly/python-api",
"id": "efd98fe30662de3c57a760d10750fa2a2632bb3f",
"size": "573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/coloraxis/colorbar/title/_side.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from atados_core.models import Nonprofit, Project, User, Address, Role, Work, Job, City, AddressProject, Volunteer, Apply, Question, Banner, GoogleAddress, UploadedImage, Newsletter
from atados_core.models import JobDate
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.admin.util import lookup_field
from django.utils.html import strip_tags
from django.contrib import messages
from pyExcelerator import Workbook
from atados import settings
from django.utils.translation import ugettext_lazy as _
from django.utils.html import format_html
from django.core.urlresolvers import reverse
from django import forms
class UserInline(admin.TabularInline):
model = User
class ProjectInline(admin.TabularInline):
model = Project
class WorkInline(admin.TabularInline):
model = Work
class JobInline(admin.TabularInline):
model = Job
class JobDateInline(admin.TabularInline):
model = JobDate
class OvpModelAdmin(admin.ModelAdmin):
    # Subclass instead of aliasing admin.ModelAdmin, so the flag is not set
    # on Django's ModelAdmin globally.
    display_on_main_menu = False
class UserBasedMixin(object):
def email(self, obj):
return obj.user.email
email.short_description = "Email"
def phone(self, obj):
return obj.user.phone
phone.short_description = _('Phone')
def city_state(self, obj):
user = obj.user
if user.address:
return user.address.get_city_state()
elif user.googleaddress:
return user.googleaddress.get_city_state()
else:
return _('(Undefined)')
city_state.short_description = _('City, State')
def user_url(self, obj):
if obj.user_id:
user_change_url = reverse("admin:{}_{}_change".format(obj.user._meta.app_label, obj.user._meta.model_name), args=(obj.user_id,))
return format_html(u"<a href='{}' target='_blank'>{} [# {}]</a>", user_change_url, _('Alterar'), obj.user_id)
else:
return _('Non related')
user_url.short_description = _('User')
user_url.allow_tags = True
user_url.admin_order_field = "user__name"
def is_email_verified(self, obj):
return obj.user.is_email_verified
is_email_verified.short_description = "Email Verificado"
def user_hidden_address(self, obj):
return obj.user.hidden_address
user_hidden_address.short_description = "Esconder Endereço?"
user_url.admin_order_field = "user__hidden_address"
class NonprofitAdminForm(forms.ModelForm):
user_hidden_address = forms.BooleanField(label=_("Endereco escondido."), required=False)
class Meta:
model = Nonprofit
fields = []
widgets = {
'description': forms.Textarea(attrs={'cols': 80, 'rows': 3}),
}
class NonprofitAdmin(OvpModelAdmin, UserBasedMixin):
form = NonprofitAdminForm
# removed from fields: 'image', 'image_tag', 'cover', 'cover_tag'
fields = [
('id', 'user_url'), 'owner', 'name', 'person_name', 'url', 'change_address', 'user_hidden_address', 'description', 'details', ('published', 'deleted', 'highlighted'), 'visit_status', ('uploaded_image', 'uploaded_image_tag'),
('uploaded_cover', 'uploaded_cover_tag'), 'causes',
'website', 'facebook_page'#, 'google_page', 'twitter_handle'
]
list_display = ['id', 'created_date_proxy', 'name', 'email', 'phone', 'city_state', 'published', 'deleted', 'highlighted', 'modified_date_proxy', 'visit_status']
list_filter = ('published', 'deleted', 'visit_status')
list_editable = ['published', 'deleted', 'highlighted', 'visit_status']
search_fields = ['name', 'owner__email']
actions = ['make_published']
readonly_fields = ['id', 'url', 'user_url', 'change_address', 'image_tag', 'cover_tag', 'uploaded_image_tag', 'uploaded_cover_tag']
filter_horizontal = ('causes',)
def uploaded_image_tag(self, obj):
return u'<div><img style="max-width: 100%" src="{}" /></div>'.format(obj.get_image_url())
uploaded_image_tag.short_description = 'Logo 200x200'
uploaded_image_tag.allow_tags = True
def uploaded_cover_tag(self, obj):
return u'<div><img style="max-width: 100%" src="{}" /></div>'.format(obj.get_cover_url())
uploaded_cover_tag.short_description = 'Cover 1450x340'
uploaded_cover_tag.allow_tags = True
def created_date_proxy(self, obj):
return obj.created_date.strftime('%d/%m/%Y')
created_date_proxy.short_description = "Criada em"
created_date_proxy.admin_order_field = 'created_date'
def modified_date_proxy(self, obj):
return obj.modified_date.strftime('%d/%m/%Y')
modified_date_proxy.short_description = "Modificada em"
modified_date_proxy.admin_order_field = 'modified_date'
#-def visit_status_proxy(self, obj):
#- return obj.get_visit_status_display()
#-visit_status_proxy.short_description = _('Status de Visita')
def url(self, obj):
return format_html(u"<a href='https://www.atados.com.br/ong/{0}' target='_blank'>Clique para ver ong no site</a>", obj.user.slug)
def change_address(self, obj):
nonprofit_user = obj.user
if nonprofit_user.googleaddress:
return format_html(u"<a href='/admin/atados_core/googleaddress/{}/' target='_blank'>{}</a>", obj.user.googleaddress.id, obj.user.googleaddress)
elif nonprofit_user.address:
return format_html(u"<a href='/admin/atados_core/address/{}/' target='_blank'>{}</a>", nonprofit_user.address.id, nonprofit_user.address)
else:
return format_html(u"<a href='/admin/atados_core/googleaddress/add/' target='_blank'>Cadastrar</a>")
change_address.short_description = 'Endereço:'
def make_published(self, request, queryset):
queryset.update(published=True)
make_published.short_description = _("Mark selected Nonprofits as published")
def get_form(self, request, obj=None, **kwargs):
form = super(NonprofitAdmin, self).get_form(request, obj, **kwargs)
try:
form.base_fields['user_hidden_address'].initial = obj.user.hidden_address
except (AttributeError):
pass
return form
def save_model(self, request, obj, form, change):
if form.cleaned_data.get('user_hidden_address', None):
obj.user.hidden_address = form.cleaned_data['user_hidden_address']
obj.user.save()
super(NonprofitAdmin, self).save_model(request, obj, form, change)
class AddressAdmin(OvpModelAdmin):
fields = ['city', 'addressline', 'addressline2', 'addressnumber', 'neighborhood', 'zipcode', ('latitude', 'longitude')]
readonly_fields = ['id']
raw_id_fields = ['city']
related_lookup_fields = {
'city': ['city'],
}
list_display = ['id', 'object', 'addressline', 'addressnumber', 'neighborhood', 'city', 'zipcode', 'latitude', 'longitude']
search_fields = ['id', 'addressline']
    def object(self, obj):
        # Label the address by its owner: project (Ato), user (ONG or
        # Voluntário) or company (Empresa).
        try:
            return u"(Ato) %s" % obj.project
        except Exception:
            try:
                user = obj.user
                try:
                    user.nonprofit
                    return u"(ONG) %s" % user
                except Exception:
                    return u"(Voluntário) %s" % user
            except Exception:
                return u"(Empresa) %s" % obj.company
class GoogleAddressAdmin(OvpModelAdmin):
fields = ['typed_address', 'typed_address2']
readonly_fields = ['address_line']
list_display = ['address_line', 'typed_address', 'typed_address2']
search_fields = ['address_line', 'typed_address', 'typed_address2']
class CityAdmin(OvpModelAdmin):
list_display = ('id', 'name', 'state', 'active', 'highlight')
search_fields = ['name', 'id']
list_filter = ['active']
class RoleAdmin(OvpModelAdmin):
list_display = ('id', 'name', 'prerequisites', 'details', 'vacancies')
search_fields = ['name', 'id']
class ProjectAdminForm(forms.ModelForm):
class Meta:
model = Project
fields = []
widgets = {
'description': forms.Textarea(attrs={'cols': 80, 'rows': 3}),
}
class ProjectAdmin(OvpModelAdmin):
form = ProjectAdminForm
fields = [('id', 'url'), ('work', 'job'), ('name', 'slug'),
'nonprofit', 'description', 'details', 'change_address', ('highlighted', 'coral', 'gdd_highlighted'), ('uploaded_image', 'uploaded_image_tag'),
'responsible', 'phone', 'email',
('published', 'closed', 'deleted', 'email_status', 'post_fb', 'news'),
'roles', 'skills', 'causes']
list_display = ('id', 'created_date', 'name', 'nonprofit_name', 'city', 'work_or_job', 'highlighted', 'published', 'closed', 'deleted', 'post_fb', 'news', 'email_status')
list_filter = ['published', 'deleted', 'closed', 'email_status']
list_editable = ['published', 'closed', 'email_status', 'highlighted', 'post_fb', 'news', 'deleted']
search_fields = ['name', 'slug', 'nonprofit__name']
readonly_fields = ['id', 'url', 'work', 'job', 'change_address', 'uploaded_image_tag']
filter_horizontal = ('roles', 'skills', 'causes')
raw_id_fields = ['nonprofit']
def job_admin_url(self, job_obj):
return reverse("admin:%s_%s_change" % (job_obj._meta.app_label, job_obj._meta.model_name), args=(job_obj.id,))
def job(self, obj):
if obj.has_job():
return u'{} [<a href="{}" target="_blank">Alterar</a>]'.format(obj.job, self.job_admin_url(obj.job))
else:
return u"Não Pontual"
job.short_description = 'Ato Pontual'
job.allow_tags = True
def work_admin_url(self, work_obj):
return reverse("admin:%s_%s_change" % (work_obj._meta.app_label, work_obj._meta.model_name), args=(work_obj.id,))
def work(self, obj):
if obj.has_work():
return u'{} [<a href="{}" target="_blank">Alterar</a>]'.format(obj.work, self.work_admin_url(obj.work))
else:
return u"Não Recorrente"
work.short_description = 'Ato Recorrente'
work.allow_tags = True
def uploaded_image_tag(self, obj):
return u'<img style="max-width: 100%" src="{}" />'.format(obj.get_image_url())
uploaded_image_tag.short_description = 'Imagem'
uploaded_image_tag.allow_tags = True
def nonprofit_name(self, obj):
return obj.nonprofit.name
def work_or_job(self, obj):
if obj.has_work():
return 'Recorrente'
elif obj.has_job():
return 'Pontual'
else:
return 'Nenhum'
work_or_job.short_description = _('Work or Job')
def city(self, obj):
if obj.address:
return obj.address.get_city_state()
elif obj.googleaddress:
return obj.googleaddress.get_city_state()
else:
return u"Não Cadastrado"
def change_address(self, obj):
if obj.googleaddress:
return format_html(u"<a href='/admin/atados_core/googleaddress/{}/' target='_blank'>{}</a>", obj.googleaddress.id, obj.googleaddress)
elif obj.address:
return format_html(u"<a href='/admin/atados_core/address/{}/' target='_blank'>{}</a>", obj.address.id, obj.address)
else:
return format_html(u"<a href='/admin/atados_core/googleaddress/add/' target='_blank'>Cadastrar</a>")
change_address.short_description = 'Endereço:'
def url(self, obj):
return format_html(u"<a href='https://www.atados.com.br/ato/{0}' target='_blank'>Clique para ver ato no site</a>", obj.slug)
def save_model(self, request, obj, form, change):
obj.image_small = obj.image
obj.image_medium = obj.image
obj.image_large = obj.image
obj.save()
class JobAdmin(OvpModelAdmin):
    exclude = ['dates']
list_display = ['id', 'project', 'start_date', 'end_date']
search_fields = ['id', 'project__name', 'project__nonprofit__name']
inlines = (
JobDateInline,
)
class JobDateAdmin(OvpModelAdmin):
list_display = ['id', 'start_date', 'end_date']
raw_id_fields = ['job']
class WorkAdmin(OvpModelAdmin):
list_display = ['id', 'project', 'weekly_hours', 'can_be_done_remotely']
search_fields = ['id', 'project__name', 'project__nonprofit__name']
filter_horizontal = ['availabilities']
class UserAdmin(OvpModelAdmin):
fields = ('name', 'slug', 'email', 'phone', 'googleaddress', 'is_staff', 'is_email_verified', 'hidden_address')
list_display = ('slug', 'email', 'name', 'last_login', 'address', 'is_staff', 'is_email_verified', 'hidden_address')
list_filter = ('last_login', 'joined_date')
list_editable = ['is_staff', 'is_email_verified']
search_fields = ['email', 'slug']
#raw_id_fields = ['googleaddress']
class VolunteerAdmin(OvpModelAdmin, UserBasedMixin):
fields = ['user_url', 'facebook_url', 'birthDate', 'image', ('created_date', 'modified_date')]
list_display = ['id', 'name', 'email', 'phone', 'city_state', 'is_email_verified', 'is_staff']
filter_horizontal = ('causes','skills')
search_fields = ['user__name', 'user__email']
readonly_fields = ['user_url', 'created_date', 'modified_date', 'facebook_url']
#inlines = (
# UserInline,
#)
def name(self, obj):
return obj.user.name
def is_staff(self, obj):
return obj.user.is_staff
def facebook_url(self, obj):
user = obj.user
if user.facebook_uid is not None and len(user.facebook_uid) > 0:
return u'<a href="https://www.facebook.com/{}" target="_blank">acessar</a>'.format(user.facebook_uid)
else:
return u'Perfil não sincronizado.'
def image_tag(self, obj):
image_url = obj.get_image_url(self)
if image_url:
return u'<div><img style="max-width: 100%" src="{}" /></div>'.format(image_url)
else:
return ""
class ApplyAdmin(OvpModelAdmin):
fields = ['id', 'project', 'volunteer', 'status', 'canceled', 'canceled_date', 'date']
readonly_fields = ['id', 'project', 'volunteer', 'date']
search_fields = ['email']
list_display = ['id', 'nonprofit_name', 'project_name', 'date', 'status', 'volunteer_name', 'volunteer_email', 'volunteer_phone']
def nonprofit_name(self, obj):
return obj.project.nonprofit.name
def project_name(self, obj):
return obj.project.name
def volunteer_name(self, obj):
return obj.volunteer.user.name
def volunteer_email(self, obj):
return obj.volunteer.user.email
def volunteer_phone(self, obj):
return obj.volunteer.user.phone
class AddressProjectAdmin(OvpModelAdmin):
fields = ['name', 'slug', 'googleaddress']
list_display = ('id', 'name', 'slug', 'nonprofit', 'googleaddress')
raw_id_fields = ['address']
def get_queryset(self, request):
return self.model.objects.all()
class QuestionAdmin(OvpModelAdmin):
fields = ['id', 'name', 'description', 'priority', 'screenshot']
readonly_fields = ['id']
list_display = ['id', 'name', 'description']
class BannerAdmin(OvpModelAdmin):
list_display = ['id', 'title', 'text', 'link', 'link_text', 'order', 'active']
search_fields = ['link', 'text']
list_editable = ['order', 'active']
readonly_fields = ['id']
class UploadedImageAdmin(OvpModelAdmin):
fields = ['id', 'image', 'image_small', 'image_medium', 'image_large']
list_display = ['id', 'image']
readonly_fields = ['id', 'image_small', 'image_medium', 'image_large']
class NewsletterAdmin(OvpModelAdmin):
list_display = ['id', 'name', 'email', 'googleaddress']
search_fields = ['id', 'name', 'email', 'googleaddress']
#
QuestionAdmin.display_on_main_menu = False
AddressProjectAdmin.display_on_main_menu = False
AddressAdmin.display_on_main_menu = False
UserAdmin.display_on_main_menu = False
RoleAdmin.display_on_main_menu = False
WorkAdmin.display_on_main_menu = False
JobAdmin.display_on_main_menu = False
CityAdmin.display_on_main_menu = False
GoogleAddressAdmin.display_on_main_menu = False
UploadedImageAdmin.display_on_main_menu = False
#
VolunteerAdmin.display_on_main_menu = True
ProjectAdmin.display_on_main_menu = True
ApplyAdmin.display_on_main_menu = True
NonprofitAdmin.display_on_main_menu = True
BannerAdmin.display_on_main_menu = True
NewsletterAdmin.display_on_main_menu = True
########
admin.site.register(Question, QuestionAdmin)
admin.site.register(Nonprofit, NonprofitAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(AddressProject, AddressProjectAdmin)
admin.site.register(Address, AddressAdmin)
admin.site.register(User, UserAdmin)
admin.site.register(Volunteer, VolunteerAdmin)
admin.site.register(Apply, ApplyAdmin)
admin.site.register(Role, RoleAdmin)
admin.site.register(Work, WorkAdmin)
admin.site.register(JobDate, JobDateAdmin)
admin.site.register(Job, JobAdmin)
admin.site.register(City, CityAdmin)
admin.site.register(Banner, BannerAdmin)
admin.site.register(GoogleAddress, GoogleAddressAdmin)
admin.site.register(UploadedImage, UploadedImageAdmin)
admin.site.register(Newsletter, NewsletterAdmin)
def export_emails(modeladmin, request, queryset):
"""
Generic xls export admin action.
"""
if queryset.count() > settings.EXPORT_RECORDS_LIMIT:
messages.error(request, "Can't export more then %s Records in one go. Narrow down your criteria using filters or search" % str(settings.EXPORT_RECORDS_LIMIT))
return HttpResponseRedirect(request.path_info)
fields = []
#PUT THE LIST OF FIELD NAMES YOU DON'T WANT TO EXPORT
#exclude_fields = ['password', 'id', 'last_login', 'slug', 'is_staff', 'is_email_verified', 'is_active', 'joined_date', 'modified_date', 'address_id', 'phone', 'legacy_uid', 'hidden_address', 'name', 'company_id', 'token', 'site']
#foreign key related fields
extras = []
if not request.user.is_staff:
raise PermissionDenied
for f in modeladmin.list_display:
if f == 'email':
fields.append(f)
opts = modeladmin.model._meta
wb = Workbook()
ws0 = wb.add_sheet('0')
col = 0
field_names = []
# write header row
for field in fields:
ws0.write(0, col, field)
field_names.append(field)
col = col + 1
row = 1
# Write data rows
for obj in queryset:
col = 0
for field in field_names:
            if field in extras:
                try:
                    # plain attribute access replaces the old eval() call
                    val = [getattr(obj, field)]
                except Exception:
                    val = ['None']
            else:
                try:
                    val = lookup_field(field, obj, modeladmin)
                except Exception:
                    val = ['None']
            if val[-1] is not None:
                if isinstance(val[-1], bool):
                    ws0.write(row, col, strip_tags(str(val[-1])))
                elif not isinstance(val[-1], basestring):
                    ws0.write(row, col, strip_tags(unicode(val[-1])))
elif val[-1]:
ws0.write(row, col, strip_tags(val[-1]))
else:
ws0.write(row, col, strip_tags(''))
col = col + 1
row = row + 1
wb.save('/tmp/output.xls')
    response = HttpResponse(open('/tmp/output.xls', 'rb').read(),
content_type='application/ms-excel')
response['Content-Disposition'] = 'attachment; filename=%s.xls' % unicode(opts).replace('.', '_')
return response
def export_as_xls(modeladmin, request, queryset):
"""
Generic xls export admin action.
"""
if queryset.count() > settings.EXPORT_RECORDS_LIMIT:
messages.error(request, "Can't export more then %s Records in one go. Narrow down your criteria using filters or search" % str(settings.EXPORT_RECORDS_LIMIT))
return HttpResponseRedirect(request.path_info)
fields = []
#PUT THE LIST OF FIELD NAMES YOU DON'T WANT TO EXPORT
exclude_fields = []
#foreign key related fields
extras = ['']
if not request.user.is_staff:
raise PermissionDenied
for f in modeladmin.list_display:
if f not in exclude_fields:
fields.append(f)
fields.extend(extras)
opts = modeladmin.model._meta
wb = Workbook()
ws0 = wb.add_sheet('0')
col = 0
field_names = []
# write header row
for field in fields:
ws0.write(0, col, field)
field_names.append(field)
col = col + 1
row = 1
# Write data rows
for obj in queryset:
col = 0
for field in field_names:
            if field in extras:
                try:
                    # plain attribute access replaces the old eval() call
                    val = [getattr(obj, field)]
                except Exception:
                    val = ['None']
            else:
                try:
                    val = lookup_field(field, obj, modeladmin)
                except Exception:
                    val = ['None']
            if val[-1] is not None:
                if isinstance(val[-1], bool):
                    ws0.write(row, col, strip_tags(str(val[-1])))
                elif not isinstance(val[-1], basestring):
                    if isinstance(val[-1], int):
                        ws0.write(row, col, str(val[-1]))
                    else:
                        ws0.write(row, col, strip_tags(unicode(val[-1])))
elif val[-1]:
ws0.write(row, col, strip_tags(val[-1]))
else:
ws0.write(row, col, strip_tags(''))
col = col + 1
row = row + 1
wb.save('/tmp/output.xls')
    response = HttpResponse(open('/tmp/output.xls', 'rb').read(),
content_type='application/ms-excel')
response['Content-Disposition'] = 'attachment; filename=%s.xls' % unicode(opts).replace('.', '_')
return response
export_as_xls.short_description = _("Export selected to XLS")
export_emails.short_description = _("Export emails to XLS")
admin.site.add_action(export_as_xls)
admin.site.add_action(export_emails)
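# The two actions above are registered site-wide; to expose an action on a
# single admin only, list it in that admin's 'actions' instead (sketch):
#
#     class SomeAdmin(OvpModelAdmin):
#         actions = [export_as_xls]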
| {
"content_hash": "1374526900876abb1e39181ee86b5fd4",
"timestamp": "",
"source": "github",
"line_count": 603,
"max_line_length": 234,
"avg_line_length": 35.18905472636816,
"alnum_prop": 0.6442810688533861,
"repo_name": "atados/api",
"id": "0df86c5e82f271691225633f5748cc1482262c1a",
"size": "21251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atados_core/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31943"
},
{
"name": "HTML",
"bytes": "138142"
},
{
"name": "JavaScript",
"bytes": "5492"
},
{
"name": "Makefile",
"bytes": "1381"
},
{
"name": "Python",
"bytes": "394268"
},
{
"name": "Shell",
"bytes": "1060"
}
],
"symlink_target": ""
} |
"""
Base backend structures.
This module defines the base classes needed to define custom OpenID or OAuth
auth services from third parties. These custom services must subclass an Auth
and a Backend class; check the current implementations for examples.
Also the modules *must* define a BACKENDS dictionary with the backend name
(which is used for URL matching) and the Auth class, otherwise it won't be
enabled.
"""
from urllib2 import Request, urlopen, HTTPError
from urllib import urlencode
from urlparse import urlsplit
from openid.consumer.consumer import Consumer, SUCCESS, CANCEL, FAILURE
from openid.consumer.discover import DiscoveryFailure
from openid.extensions import sreg, ax
from oauth2 import Consumer as OAuthConsumer, Token, Request as OAuthRequest
from django.db import models
from django.contrib.auth import authenticate
from django.contrib.auth.backends import ModelBackend
from django.utils import simplejson
from django.utils.importlib import import_module
from django.utils.crypto import constant_time_compare, get_random_string
from django.middleware.csrf import CSRF_KEY_LENGTH
from social_auth.utils import setting, log, model_to_ctype, ctype_to_model, \
clean_partial_pipeline
from social_auth.store import DjangoOpenIDStore
from social_auth.backends.exceptions import StopPipeline, AuthException, \
AuthFailed, AuthCanceled, \
AuthUnknownError, AuthTokenError, \
AuthMissingParameter, \
AuthForbidden
from social_auth.backends.utils import build_consumer_oauth_request
if setting('SOCIAL_AUTH_USER_MODEL'):
User = models.get_model(*setting('SOCIAL_AUTH_USER_MODEL').rsplit('.', 1))
else:
from django.contrib.auth.models import User
# OpenID configuration
OLD_AX_ATTRS = [
('http://schema.openid.net/contact/email', 'old_email'),
('http://schema.openid.net/namePerson', 'old_fullname'),
('http://schema.openid.net/namePerson/friendly', 'old_nickname')
]
AX_SCHEMA_ATTRS = [
# Request both the full name and first/last components since some
# providers offer one but not the other.
('http://axschema.org/contact/email', 'email'),
('http://axschema.org/namePerson', 'fullname'),
('http://axschema.org/namePerson/first', 'first_name'),
('http://axschema.org/namePerson/last', 'last_name'),
('http://axschema.org/namePerson/friendly', 'nickname'),
]
SREG_ATTR = [
('email', 'email'),
('fullname', 'fullname'),
('nickname', 'nickname')
]
OPENID_ID_FIELD = 'openid_identifier'
SESSION_NAME = 'openid'
# key for username in user details dict used around, see get_user_details
# method
USERNAME = 'username'
PIPELINE = setting('SOCIAL_AUTH_PIPELINE', (
'social_auth.backends.pipeline.social.social_auth_user',
'social_auth.backends.pipeline.associate.associate_by_email',
'social_auth.backends.pipeline.user.get_username',
'social_auth.backends.pipeline.user.create_user',
'social_auth.backends.pipeline.social.associate_user',
'social_auth.backends.pipeline.social.load_extra_data',
'social_auth.backends.pipeline.user.update_user_details',
))
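# A pipeline entry is any importable function; a minimal sketch of a custom
# entry (illustrative, not shipped with this module):
#
#     def require_email(backend, details, user=None, *args, **kwargs):
#         if user is None and not details.get('email'):
#             raise StopPipeline()
#         # A returned dict is merged into the kwargs of later entries.
#         return {'email_checked': True}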
class SocialAuthBackend(ModelBackend):
"""A django.contrib.auth backend that authenticates the user based on
a authentication provider response"""
name = '' # provider name, it's stored in database
def authenticate(self, *args, **kwargs):
"""Authenticate user using social credentials
Authentication is made if this is the correct backend, backend
verification is made by kwargs inspection for current backend
name presence.
"""
# Validate backend and arguments. Require that the Social Auth
# response be passed in as a keyword argument, to make sure we
# don't match the username/password calling conventions of
# authenticate.
if not (self.name and kwargs.get(self.name) and 'response' in kwargs):
return None
response = kwargs.get('response')
pipeline = PIPELINE
kwargs = kwargs.copy()
kwargs['backend'] = self
if 'pipeline_index' in kwargs:
pipeline = pipeline[kwargs['pipeline_index']:]
else:
kwargs['details'] = self.get_user_details(response)
kwargs['uid'] = self.get_user_id(kwargs['details'], response)
kwargs['is_new'] = False
out = self.pipeline(pipeline, *args, **kwargs)
if not isinstance(out, dict):
return out
social_user = out.get('social_user')
if social_user:
# define user.social_user attribute to track current social
# account
user = social_user.user
user.social_user = social_user
user.is_new = out.get('is_new')
return user
def pipeline(self, pipeline, *args, **kwargs):
"""Pipeline"""
out = kwargs.copy()
if 'pipeline_index' in kwargs:
base_index = int(kwargs['pipeline_index'])
else:
base_index = 0
for idx, name in enumerate(pipeline):
out['pipeline_index'] = base_index + idx
mod_name, func_name = name.rsplit('.', 1)
try:
mod = import_module(mod_name)
except ImportError:
log('exception', 'Error importing pipeline %s', name)
else:
func = getattr(mod, func_name, None)
if callable(func):
try:
result = func(*args, **out) or {}
except StopPipeline:
# Clean partial pipeline on stop
if 'request' in kwargs:
clean_partial_pipeline(kwargs['request'])
break
if isinstance(result, dict):
out.update(result)
else:
return result
return out
def extra_data(self, user, uid, response, details):
"""Return default blank user extra data"""
return {}
def get_user_id(self, details, response):
"""Must return a unique ID from values returned on details"""
raise NotImplementedError('Implement in subclass')
def get_user_details(self, response):
"""Must return user details in a know internal struct:
{USERNAME: <username if any>,
'email': <user email if any>,
'fullname': <user full name if any>,
'first_name': <user first name if any>,
'last_name': <user last name if any>}
"""
raise NotImplementedError('Implement in subclass')
@classmethod
def tokens(cls, instance):
"""Return the tokens needed to authenticate the access to any API the
service might provide. The return value will be a dictionary with the
token type name as key and the token value.
instance must be a UserSocialAuth instance.
"""
if instance.extra_data and 'access_token' in instance.extra_data:
return {
'access_token': instance.extra_data['access_token']
}
else:
return {}
def get_user(self, user_id):
"""
Return user with given ID from the User model used by this backend
"""
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
class OAuthBackend(SocialAuthBackend):
"""OAuth authentication backend base class.
    EXTRA_DATA defines a set of names that will be stored in the
    extra_data field. It must be a list of tuples with
name and alias.
Also settings will be inspected to get more values names that should be
stored on extra_data field. Setting name is created from current backend
name (all uppercase) plus _EXTRA_DATA.
access_token is always stored.
"""
EXTRA_DATA = None
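    # Shape sketch (names and values are illustrative):
    #   EXTRA_DATA = [
    #       ('id', 'id'),                  # (name, alias)
    #       ('expires', 'expires', True),  # (name, alias, discard-if-empty)
    #   ]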
def get_user_id(self, details, response):
"""OAuth providers return an unique user id in response"""
return response['id']
def extra_data(self, user, uid, response, details):
"""Return access_token and extra defined names to store in
extra_data field"""
data = {'access_token': response.get('access_token', '')}
name = self.name.replace('-', '_').upper()
names = (self.EXTRA_DATA or []) + setting(name + '_EXTRA_DATA', [])
for entry in names:
if len(entry) == 2:
(name, alias), discard = entry, False
elif len(entry) == 3:
name, alias, discard = entry
            elif len(entry) == 1:
                name = alias = entry[0]
                discard = False
            else:  # unsupported entry shape
                continue
value = response.get(name)
if discard and not value:
continue
data[alias] = value
return data
class OpenIDBackend(SocialAuthBackend):
"""Generic OpenID authentication backend"""
name = 'openid'
def get_user_id(self, details, response):
"""Return user unique id provided by service"""
return response.identity_url
def values_from_response(self, response, sreg_names=None, ax_names=None):
"""Return values from SimpleRegistration response or
AttributeExchange response if present.
        @sreg_names and @ax_names must be lists of names and aliases
        for those names. The alias will be used as the mapping key.
"""
values = {}
# Use Simple Registration attributes if provided
if sreg_names:
resp = sreg.SRegResponse.fromSuccessResponse(response)
if resp:
values.update((alias, resp.get(name) or '')
for name, alias in sreg_names)
# Use Attribute Exchange attributes if provided
if ax_names:
resp = ax.FetchResponse.fromSuccessResponse(response)
if resp:
for src, alias in ax_names:
name = alias.replace('old_', '')
values[name] = resp.getSingle(src, '') or values.get(name)
return values
def get_user_details(self, response):
"""Return user details from an OpenID request"""
values = {USERNAME: '', 'email': '', 'fullname': '',
'first_name': '', 'last_name': ''}
# update values using SimpleRegistration or AttributeExchange
# values
values.update(self.values_from_response(response,
SREG_ATTR,
OLD_AX_ATTRS + \
AX_SCHEMA_ATTRS))
fullname = values.get('fullname') or ''
first_name = values.get('first_name') or ''
last_name = values.get('last_name') or ''
if not fullname and first_name and last_name:
fullname = first_name + ' ' + last_name
elif fullname:
try: # Try to split name for django user storage
first_name, last_name = fullname.rsplit(' ', 1)
except ValueError:
last_name = fullname
values.update({'fullname': fullname, 'first_name': first_name,
'last_name': last_name,
USERNAME: values.get(USERNAME) or \
(first_name.title() + last_name.title())})
return values
def extra_data(self, user, uid, response, details):
"""Return defined extra data names to store in extra_data field.
Settings will be inspected to get more values names that should be
stored on extra_data field. Setting name is created from current
backend name (all uppercase) plus _SREG_EXTRA_DATA and
_AX_EXTRA_DATA because values can be returned by SimpleRegistration
or AttributeExchange schemas.
        Both lists must be value-name and alias mappings similar to
SREG_ATTR, OLD_AX_ATTRS or AX_SCHEMA_ATTRS
"""
name = self.name.replace('-', '_').upper()
sreg_names = setting(name + '_SREG_EXTRA_DATA')
ax_names = setting(name + '_AX_EXTRA_DATA')
data = self.values_from_response(response, sreg_names, ax_names)
return data
class BaseAuth(object):
"""Base authentication class, new authenticators should subclass
and implement needed methods.
AUTH_BACKEND Authorization backend related with this service
"""
AUTH_BACKEND = None
def __init__(self, request, redirect):
self.request = request
# Use request because some auth providers use POST urls with needed
# GET parameters on it
self.data = request.REQUEST
self.redirect = redirect
def auth_url(self):
"""Must return redirect URL to auth provider"""
raise NotImplementedError('Implement in subclass')
def auth_html(self):
"""Must return login HTML content returned by provider"""
raise NotImplementedError('Implement in subclass')
def auth_complete(self, *args, **kwargs):
"""Completes loging process, must return user instance"""
raise NotImplementedError('Implement in subclass')
def to_session_dict(self, next_idx, *args, **kwargs):
"""Returns dict to store on session for partial pipeline."""
return {
'next': next_idx,
'backend': self.AUTH_BACKEND.name,
'args': tuple(map(model_to_ctype, args)),
'kwargs': dict((key, model_to_ctype(val))
for key, val in kwargs.iteritems())
}
def from_session_dict(self, entry, *args, **kwargs):
"""Takes session saved entry to continue pipeline and merges with
any new extra argument needed. Returns tuple with next pipeline
index entry, arguments and keyword arguments to continue the
process."""
args = args[:] + tuple(map(ctype_to_model, entry['args']))
kwargs = kwargs.copy()
kwargs.update((key, ctype_to_model(val))
for key, val in entry['kwargs'].iteritems())
return (entry['next'], args, kwargs)
def continue_pipeline(self, *args, **kwargs):
"""Continue previous halted pipeline"""
kwargs.update({
'auth': self,
self.AUTH_BACKEND.name: True
})
return authenticate(*args, **kwargs)
def request_token_extra_arguments(self):
"""Return extra arguments needed on request-token process,
setting is per backend and defined by:
<backend name in uppercase>_REQUEST_TOKEN_EXTRA_ARGUMENTS.
"""
backend_name = self.AUTH_BACKEND.name.upper().replace('-', '_')
return setting(backend_name + '_REQUEST_TOKEN_EXTRA_ARGUMENTS', {})
def auth_extra_arguments(self):
"""Return extra arguments needed on auth process, setting is per
backend and defined by:
<backend name in uppercase>_AUTH_EXTRA_ARGUMENTS.
"""
backend_name = self.AUTH_BACKEND.name.upper().replace('-', '_')
return setting(backend_name + '_AUTH_EXTRA_ARGUMENTS', {})
@property
def uses_redirect(self):
"""Return True if this provider uses redirect url method,
otherwise return false."""
return True
@classmethod
def enabled(cls):
"""Return backend enabled status, all enabled by default"""
return True
def disconnect(self, user, association_id=None):
"""Deletes current backend from user if associated.
Override if extra operations are needed.
"""
if association_id:
user.social_auth.get(id=association_id).delete()
else:
user.social_auth.filter(provider=self.AUTH_BACKEND.name).delete()
def build_absolute_uri(self, path=None):
"""Build absolute URI for given path. Replace http:// schema with
https:// if SOCIAL_AUTH_REDIRECT_IS_HTTPS is defined.
"""
uri = self.request.build_absolute_uri(path)
if setting('SOCIAL_AUTH_REDIRECT_IS_HTTPS'):
uri = uri.replace('http://', 'https://')
return uri
class OpenIdAuth(BaseAuth):
"""OpenId process handling"""
AUTH_BACKEND = OpenIDBackend
def auth_url(self):
"""Return auth URL returned by service"""
openid_request = self.setup_request(self.auth_extra_arguments())
# Construct completion URL, including page we should redirect to
return_to = self.build_absolute_uri(self.redirect)
return openid_request.redirectURL(self.trust_root(), return_to)
def auth_html(self):
"""Return auth HTML returned by service"""
openid_request = self.setup_request(self.auth_extra_arguments())
return_to = self.build_absolute_uri(self.redirect)
form_tag = {'id': 'openid_message'}
return openid_request.htmlMarkup(self.trust_root(), return_to,
form_tag_attrs=form_tag)
def trust_root(self):
"""Return trust-root option"""
return setting('OPENID_TRUST_ROOT') or self.build_absolute_uri('/')
def continue_pipeline(self, *args, **kwargs):
"""Continue previous halted pipeline"""
response = self.consumer().complete(dict(self.data.items()),
self.build_absolute_uri())
kwargs.update({
'auth': self,
'response': response,
self.AUTH_BACKEND.name: True
})
return authenticate(*args, **kwargs)
def auth_complete(self, *args, **kwargs):
"""Complete auth process"""
response = self.consumer().complete(dict(self.data.items()),
self.build_absolute_uri())
if not response:
raise AuthException(self, 'OpenID relying party endpoint')
elif response.status == SUCCESS:
kwargs.update({
'auth': self,
'response': response,
self.AUTH_BACKEND.name: True
})
return authenticate(*args, **kwargs)
elif response.status == FAILURE:
raise AuthFailed(self, response.message)
elif response.status == CANCEL:
raise AuthCanceled(self)
else:
raise AuthUnknownError(self, response.status)
def setup_request(self, extra_params=None):
"""Setup request"""
openid_request = self.openid_request(extra_params)
# Request some user details. Use attribute exchange if provider
# advertises support.
if openid_request.endpoint.supportsType(ax.AXMessage.ns_uri):
fetch_request = ax.FetchRequest()
# Mark all attributes as required, Google ignores optional ones
for attr, alias in (AX_SCHEMA_ATTRS + OLD_AX_ATTRS):
fetch_request.add(ax.AttrInfo(attr, alias=alias,
required=True))
else:
fetch_request = sreg.SRegRequest(optional=dict(SREG_ATTR).keys())
openid_request.addExtension(fetch_request)
return openid_request
def consumer(self):
"""Create an OpenID Consumer object for the given Django request."""
return Consumer(self.request.session.setdefault(SESSION_NAME, {}),
DjangoOpenIDStore())
@property
def uses_redirect(self):
"""Return true if openid request will be handled with redirect or
HTML content will be returned.
"""
return self.openid_request().shouldSendRedirect()
def openid_request(self, extra_params=None):
"""Return openid request"""
openid_url = self.openid_url()
if extra_params:
query = urlsplit(openid_url).query
openid_url += (query and '&' or '?') + urlencode(extra_params)
try:
return self.consumer().begin(openid_url)
except DiscoveryFailure, err:
raise AuthException(self, 'OpenID discovery error: %s' % err)
def openid_url(self):
"""Return service provider URL.
        This base class is generic, accepting a POST parameter that
        specifies the provider URL."""
if OPENID_ID_FIELD not in self.data:
raise AuthMissingParameter(self, OPENID_ID_FIELD)
return self.data[OPENID_ID_FIELD]
class BaseOAuth(BaseAuth):
"""OAuth base class"""
SETTINGS_KEY_NAME = ''
SETTINGS_SECRET_NAME = ''
def __init__(self, request, redirect):
"""Init method"""
super(BaseOAuth, self).__init__(request, redirect)
self.redirect_uri = self.build_absolute_uri(self.redirect)
@classmethod
def get_key_and_secret(cls):
"""Return tuple with Consumer Key and Consumer Secret for current
service provider. Must return (key, secret), order *must* be respected.
"""
return setting(cls.SETTINGS_KEY_NAME), \
setting(cls.SETTINGS_SECRET_NAME)
@classmethod
def enabled(cls):
"""Return backend enabled status by checking basic settings"""
return setting(cls.SETTINGS_KEY_NAME) and \
setting(cls.SETTINGS_SECRET_NAME)
class ConsumerBasedOAuth(BaseOAuth):
"""Consumer based mechanism OAuth authentication, fill the needed
parameters to communicate properly with authentication service.
AUTHORIZATION_URL Authorization service url
REQUEST_TOKEN_URL Request token URL
ACCESS_TOKEN_URL Access token URL
SERVER_URL Authorization server URL
"""
AUTHORIZATION_URL = ''
REQUEST_TOKEN_URL = ''
ACCESS_TOKEN_URL = ''
SERVER_URL = ''
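    # Subclass sketch (URLs and names are illustrative placeholders):
    #   class MyServiceAuth(ConsumerBasedOAuth):
    #       AUTH_BACKEND = MyServiceBackend  # an OAuthBackend subclass
    #       AUTHORIZATION_URL = 'https://example.com/oauth/authorize'
    #       REQUEST_TOKEN_URL = 'https://example.com/oauth/request_token'
    #       ACCESS_TOKEN_URL = 'https://example.com/oauth/access_token'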
def auth_url(self):
"""Return redirect url"""
token = self.unauthorized_token()
name = self.AUTH_BACKEND.name + 'unauthorized_token_name'
self.request.session[name] = token.to_string()
return self.oauth_authorization_request(token).to_url()
def auth_complete(self, *args, **kwargs):
"""Return user, might be logged in"""
name = self.AUTH_BACKEND.name + 'unauthorized_token_name'
unauthed_token = self.request.session.get(name)
if not unauthed_token:
raise AuthTokenError('Missing unauthorized token')
token = Token.from_string(unauthed_token)
if token.key != self.data.get('oauth_token', 'no-token'):
raise AuthTokenError('Incorrect tokens')
try:
access_token = self.access_token(token)
except HTTPError, e:
if e.code == 400:
raise AuthCanceled(self)
else:
raise
data = self.user_data(access_token)
if data is not None:
data['access_token'] = access_token.to_string()
kwargs.update({
'auth': self,
'response': data,
self.AUTH_BACKEND.name: True
})
return authenticate(*args, **kwargs)
def unauthorized_token(self):
"""Return request for unauthorized token (first stage)"""
request = self.oauth_request(token=None, url=self.REQUEST_TOKEN_URL,
extra_params=self.request_token_extra_arguments())
response = self.fetch_response(request)
return Token.from_string(response)
def oauth_authorization_request(self, token):
"""Generate OAuth request to authorize token."""
return OAuthRequest.from_token_and_callback(token=token,
callback=self.redirect_uri,
http_url=self.AUTHORIZATION_URL,
parameters=self.auth_extra_arguments())
def oauth_request(self, token, url, extra_params=None):
"""Generate OAuth request, setups callback url"""
return build_consumer_oauth_request(self, token, url,
self.redirect_uri,
self.data.get('oauth_verifier'),
extra_params)
    def fetch_response(self, request):
        """Executes the request and fetches the service response"""
        # read() avoids re-joining lines that already carry their newlines
        return urlopen(request.to_url()).read()
def access_token(self, token):
"""Return request for access token value"""
request = self.oauth_request(token, self.ACCESS_TOKEN_URL)
return Token.from_string(self.fetch_response(request))
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
raise NotImplementedError('Implement in subclass')
@property
def consumer(self):
"""Setups consumer"""
return OAuthConsumer(*self.get_key_and_secret())
class BaseOAuth2(BaseOAuth):
"""Base class for OAuth2 providers.
OAuth2 draft details at:
http://tools.ietf.org/html/draft-ietf-oauth-v2-10
Attributes:
AUTHORIZATION_URL Authorization service url
ACCESS_TOKEN_URL Token URL
FORCE_STATE_CHECK Ensure state argument check (check issue #386
for further details)
"""
AUTHORIZATION_URL = None
ACCESS_TOKEN_URL = None
SCOPE_SEPARATOR = ' '
RESPONSE_TYPE = 'code'
SCOPE_VAR_NAME = None
DEFAULT_SCOPE = None
FORCE_STATE_CHECK = True
def csrf_token(self):
"""Generate csrf token to include as state parameter."""
return get_random_string(CSRF_KEY_LENGTH)
def auth_url(self):
"""Return redirect url"""
client_id, client_secret = self.get_key_and_secret()
args = {'client_id': client_id, 'redirect_uri': self.redirect_uri}
if self.FORCE_STATE_CHECK:
state = self.csrf_token()
args['state'] = state
self.request.session[self.AUTH_BACKEND.name + '_state'] = state
scope = self.get_scope()
if scope:
args['scope'] = self.SCOPE_SEPARATOR.join(self.get_scope())
if self.RESPONSE_TYPE:
args['response_type'] = self.RESPONSE_TYPE
args.update(self.auth_extra_arguments())
return self.AUTHORIZATION_URL + '?' + urlencode(args)
def auth_complete(self, *args, **kwargs):
"""Completes loging process, must return user instance"""
if self.data.get('error'):
error = self.data.get('error_description') or self.data['error']
raise AuthFailed(self, error)
if self.FORCE_STATE_CHECK:
if 'state' not in self.data:
raise AuthMissingParameter(self, 'state')
state = self.request.session[self.AUTH_BACKEND.name + '_state']
if not constant_time_compare(self.data['state'], state):
raise AuthForbidden(self)
client_id, client_secret = self.get_key_and_secret()
params = {'grant_type': 'authorization_code', # request auth code
'code': self.data.get('code', ''), # server response code
'client_id': client_id,
'client_secret': client_secret,
'redirect_uri': self.redirect_uri}
headers = {'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'}
request = Request(self.ACCESS_TOKEN_URL, data=urlencode(params),
headers=headers)
try:
response = simplejson.loads(urlopen(request).read())
except HTTPError, e:
if e.code == 400:
raise AuthCanceled(self)
else:
raise
except (ValueError, KeyError):
raise AuthUnknownError(self)
if response.get('error'):
error = response.get('error_description') or response.get('error')
raise AuthFailed(self, error)
else:
data = self.user_data(response['access_token'], response)
response.update(data or {})
kwargs.update({
'auth': self,
'response': response,
self.AUTH_BACKEND.name: True
})
return authenticate(*args, **kwargs)
def get_scope(self):
"""Return list with needed access scope"""
scope = self.DEFAULT_SCOPE or []
if self.SCOPE_VAR_NAME:
scope = scope + setting(self.SCOPE_VAR_NAME, [])
return scope
# Backend loading was previously performed via the
# SOCIAL_AUTH_IMPORT_BACKENDS setting - as it's no longer used,
# provide a deprecation warning.
if setting('SOCIAL_AUTH_IMPORT_BACKENDS'):
from warnings import warn
warn("SOCIAL_AUTH_IMPORT_SOURCES is deprecated")
# Cache for discovered backends.
BACKENDSCACHE = {}
def get_backends(force_load=False):
"""
Entry point to the BACKENDS cache. If BACKENDSCACHE hasn't been
populated, each of the modules referenced in
AUTHENTICATION_BACKENDS is imported and checked for a BACKENDS
definition and if enabled, added to the cache.
Previously all backends were attempted to be loaded at
import time of this module, which meant that backends that subclass
bases found in this module would not have the chance to be loaded
by the time they were added to this module's BACKENDS dict. See:
https://github.com/omab/django-social-auth/issues/204
This new approach ensures that backends are allowed to subclass from
bases in this module and still be picked up.
A force_load boolean arg is also provided so that get_backend
below can retry a requested backend that may not yet be discovered.
"""
if not BACKENDSCACHE or force_load:
for auth_backend in setting('AUTHENTICATION_BACKENDS'):
mod, cls_name = auth_backend.rsplit('.', 1)
module = import_module(mod)
backend = getattr(module, cls_name)
if issubclass(backend, SocialAuthBackend):
name = backend.name
backends = getattr(module, 'BACKENDS', {})
if name in backends and backends[name].enabled():
BACKENDSCACHE[name] = backends[name]
return BACKENDSCACHE
def get_backend(name, *args, **kwargs):
"""Returns a backend by name. Backends are stored in the BACKENDSCACHE
cache dict. If not found, each of the modules referenced in
AUTHENTICATION_BACKENDS is imported and checked for a BACKENDS
definition. If the named backend is found in the module's BACKENDS
definition, it's then stored in the cache for future access.
"""
try:
# Cached backend which has previously been discovered.
return BACKENDSCACHE[name](*args, **kwargs)
except KeyError:
# Force a reload of BACKENDS to ensure a missing
# backend hasn't been missed.
get_backends(force_load=True)
try:
return BACKENDSCACHE[name](*args, **kwargs)
except KeyError:
return None
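# Usage sketch: resolve a backend by the name used for URL matching (request
# and redirect are whatever the calling view has at hand):
#
#     backend = get_backend('openid', request, '/complete/openid/')
#     if backend is None:
#         pass  # unknown or disabled backend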
BACKENDS = {
'openid': OpenIdAuth
}
| {
"content_hash": "0f9de354d6569cb94fdf1c0ab9367187",
"timestamp": "",
"source": "github",
"line_count": 817,
"max_line_length": 79,
"avg_line_length": 38.421052631578945,
"alnum_prop": 0.6006371455877668,
"repo_name": "makinacorpus/django-social-auth",
"id": "c518288b936d98d2fef1d6ca08cf49b56240caa8",
"size": "31390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "social_auth/backends/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Test module configuration for application 'rule'.
"""
__author__ = "Ariel Gerardo Rios (ariel.gerardo.rios@gmail.com)"
from test_models import *
| {
"content_hash": "32bac91245add2583aa84b3026b0aea5",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 64,
"avg_line_length": 25,
"alnum_prop": 0.7133333333333334,
"repo_name": "ariel17/traffic",
"id": "6a941c4abdf15c43fb2d0e74c95b6948a2825596",
"size": "197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "traffic/rule/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16277"
},
{
"name": "Shell",
"bytes": "6461"
}
],
"symlink_target": ""
} |
from impacket.structure import Structure
from impacket.dcerpc import ndrutils
from impacket.dcerpc.samr import SAMR_RPC_SID_IDENTIFIER_AUTHORITY, SAMR_RPC_SID
from impacket.uuid import uuidtup_to_bin
from impacket.nt_errors import ERROR_MESSAGES
import random
from struct import pack, unpack
MSRPC_UUID_LSARPC = uuidtup_to_bin(('12345778-1234-ABCD-EF00-0123456789AB','0.0'))
# Constants
# POLICY_INFORMATION_CLASS
POLICY_AUDIT_LOG_INFORMATION = 1
POLICY_AUDIT_EVENTS_INFORMATION = 2
POLICY_PRIMARY_DOMAIN_INFORMATION = 3
POLICY_PD_ACCOUNT_INFORMATION = 4
POLICY_ACCOUNT_DOMAIN_INFORMATION = 5
POLICY_LSA_SERVER_ROLE_INFORMATION = 6
POLICY_REPLICA_SOURCE_INFORMATION = 7
POLICY_DEFAULT_QUOTA_INFORMATION = 8
POLICY_MODIFICATION_INFORMATION = 9
POLICY_AUDIT_FULL_SET_INFORMATION = 10
POLICY_AUDIT_FULL_QUERY_INFORMATION = 11
POLICY_DNS_DOMAIN_INFORMATION = 12
POLICY_DNS_DOMAIN_INFORMATION_INT = 13
POLICY_LOCAL_ACCOUNT_DOMAIN_INFORMATION = 14
POLICY_LAST_ENTRY = 15
# LSAP_LOOKUP_LEVEL ( [MS-LSAT] Section 2.2.16 )
LsapLookupWksta = 1
LsapLookupPDC = 2
LsapLookupTDL = 3
LsapLookupGC = 4
LsapLookupXForestReferral = 5
LsapLookupXForestResolve = 6
LsapLookupRODCReferralToFullDC = 7
# Structs
class LSAPR_CR_CIPHER_VALUE(Structure):
structure = (
('Length','<L=0'),
('MaximumLength','<L=0'),
('pBuffer',':', ndrutils.NDRPointerNew ),
('Buffer',':', ndrutils.NDRConformantVaryingArray),
)
class LSARPCQuerySecret(Structure):
opnum = 30
alignment = 4
structure = (
('SecretHandle','20s'),
('EncryptedCurrentValue','<Q=1'),
('pCurrentValueSetTime','<L=2'),
('CurrentValueSetTime','<Q=0'),
('EncryptedOldValue','L=0'),
('OldValueSetTime','<Q=0'),
)
class LSARPCQuerySecretResponse(Structure):
#alignment = 8
structure = (
('pEncryptedCurrentValue',':', ndrutils.NDRPointerNew),
('pEncryptedCurrentValue2',':', ndrutils.NDRPointerNew),
('EncryptedCurrentValue',':', LSAPR_CR_CIPHER_VALUE),
('pCurrentValueSetTime',':', ndrutils.NDRPointerNew),
('CurrentValueSetTime','<Q=0'),
)
class LSARPCRetrievePrivateData(Structure):
opnum = 43
alignment = 4
structure = (
('PolicyHandle','20s'),
('KeyName',':'),
('EncryptedData', '<L=0'),
)
class LSARPCRetrievePrivateDataResponse(Structure):
structure = (
('pEncryptedData', ':', ndrutils.NDRPointerNew),
('EncryptedData', ':', LSAPR_CR_CIPHER_VALUE),
)
class LSARPCOpenSecret(Structure):
opnum = 28
alignment = 4
structure = (
('PolicyHandle','20s'),
('SecretName',':'),
('DesiredAccess','<L=0'),
)
class LSARPCOpenSecretResponse(Structure):
structure = (
('SecretHandle', '20s'),
)
class LSARPCSetSystemAccessAccount(Structure):
opnum = 24
alignment = 4
structure = (
('AccountHandle','20s'),
('SystemAccess','<L=0'),
)
class LSARPCLookupNames2(Structure):
opnum = 58
alignment = 4
structure = (
('PolicyHandle','20s'),
('Count','<L=0'),
('SizeIs','<L=0'),
('Names',':'),
('TranslatedSids',':'),
('LookupLevel','<H=0'),
('MappedCount','<L=0'),
('LookupOptions','<L=0'),
('ClientRevision','<L=0'),
)
class RPC_SID(SAMR_RPC_SID):
commonHdr = (
('Count', '<L=0'),
)
def __init__(self, data = None, alignment = 0):
SAMR_RPC_SID.__init__(self, data)
def fromCanonical(self, canonical):
items = canonical.split('-')
self['Revision'] = int(items[1])
self['IdentifierAuthority'] = SAMR_RPC_SID_IDENTIFIER_AUTHORITY()
self['IdentifierAuthority']['Value'] = '\x00\x00\x00\x00\x00' + pack('B',int(items[2]))
self['SubAuthorityCount'] = len(items) - 3
self['Count'] = self['SubAuthorityCount']
ans = ''
for i in range(self['SubAuthorityCount']):
ans += pack('<L', int(items[i+3]))
self['SubAuthority'] = ans
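# fromCanonical() sketch (illustrative SID): parsing 'S-1-5-32-544'
# (BUILTIN\Administrators) yields Revision=1, IdentifierAuthority=5 and two
# sub-authorities (32, 544), each packed little-endian:
#   sid = RPC_SID()
#   sid.fromCanonical('S-1-5-32-544')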
class LSAPR_TRUST_INFORMATION(Structure):
structure = (
('pName',':', ndrutils.pRPC_UNICODE_STRING),
('pSid',':', ndrutils.NDRPointerNew),
('Name',':', ndrutils.RPC_UNICODE_STRING),
('Sid', ':', RPC_SID),
)
class LSAPR_REFERENCED_DOMAIN_LIST(Structure):
alignment = 4
structure = (
('Entries','<L=0'),
('pDomains','<L=0'),
('MaxEntries','<L=0'),
('Size', '<L=0'),
('Domains', ':', LSAPR_TRUST_INFORMATION),
)
class PLSAPR_REFERENCED_DOMAIN_LIST(LSAPR_REFERENCED_DOMAIN_LIST):
alignment = 4
commonHdr = (
('RefId','<L'),
)
def __init__(self, data = None, alignment = 0):
LSAPR_REFERENCED_DOMAIN_LIST.__init__(self,data, alignment)
self['RefId'] = random.randint(1,65535)
class LSAPR_TRANSLATED_SIDS_EX(Structure):
alignment = 4
structure = (
('Use', '<H=0'),
('RelativeId', '<L=0'),
('DomainIndex', '<L=0'),
('Flags', '<L=0'),
)
class LSARPCLookupNames2Response(Structure):
structure = (
('pReferencedDomains',':', PLSAPR_REFERENCED_DOMAIN_LIST),
('Entries', '<L=0'),
('pTranslatedSids', ':', ndrutils.NDRPointerNew),
('Size', '<L=0'),
('TranslatedSids',':', LSAPR_TRANSLATED_SIDS_EX),
('MappedCount','<L=0'),
)
class LSARPCDeleteObject(Structure):
opnum = 34
alignment = 4
structure = (
('ObjectHandle','20s'),
)
class LSARPCCreateAccount(Structure):
opnum = 10
alignment = 4
structure = (
('PolicyHandle','20s'),
('AccountSid',':', RPC_SID),
('DesiredAccess','<L=0'),
)
class LSARPCCreateAccountResponse(Structure):
structure = (
('AccountHandle', '20s'),
)
class LSARPCOpenAccount(Structure):
opnum = 17
alignment = 4
structure = (
('PolicyHandle','20s'),
('AccountSid',':', RPC_SID),
('DesiredAccess','<L=0'),
)
class LSARPCOpenAccountResponse(Structure):
structure = (
('AccountHandle', '20s'),
)
class LSARPCOpenPolicy2(Structure):
opnum = 44
alignment = 4
structure = (
('ServerName',':',ndrutils.NDRUniqueStringW),
('ObjectAttributes','24s'),
('AccessMask','<L'),
)
class LSARPCOpenPolicy2Response(Structure):
structure = (
('ContextHandle','20s'),
('ErrorCode','<L'),
)
class LSARPCClose(Structure):
opnum = 0
alignment = 4
structure = (
('ContextHandle','20s'),
)
class LSARPCCloseResponse(Structure):
structure = (
('ContextHandle','20s'),
('ErrorCode','<L'),
)
class LSARPCQueryInformationPolicy2(Structure):
opnum = 46
structure = (
('ContextHandle','20s'),
('InformationClass', '<H'),
)
class LSARPCQueryInformationPolicy2Response(Structure):
structure = (
('RefID','<L'),
('Info','<L'),
('BuffSize','_-pRespBuffer','len(self.rawData)-12'),
('pRespBuffer',':'),
('ErrorCode','<L')
)
class DOMAIN_INFORMATION(Structure):
structure = (
('Length','<H'),
('Size','<H'),
('pName','<L'),
('pSid','<L'),
('Data',':'),
)
def formatDict(self):
resp = {}
resp['name'] = None
resp['sid'] = None
data = self['Data']
if self['pName'] != 0:
name = ndrutils.NDRStringW(data)
data = data[name['ActualCount']*2+12:]
if name['ActualCount'] % 2 == 1:
data = data[2:]
resp['name'] = name['Data']
if self['pSid'] != 0:
resp['sid'] = SAMR_RPC_SID(data[4:])
return resp
class SAMR_RPC_SID_STRUCT(Structure):
structure = (
('Count','<L'),
('Sid',':',SAMR_RPC_SID),
)
class SIDS_BUFF(Structure):
structure = (
('NumSids','<L'),
('RefID','<L'),
('MaxCount','<L'),
('SidsLen','_-Sids','NumSids * len(SAMR_RPC_SID_STRUCT)'),
('Sids',':'),
)
class LSARPCLookupSids(Structure):
opnum = 15
structure = (
('ContextHandle','20s'),
('SidsBuff',':',SIDS_BUFF),
('TransNames', '8s'),
('LookupLevel', '<H'),
('MappedCount', '6s'),
)
class LSARPCLookupSids3(Structure):
opnum = 76
structure = (
# ('ContextHandle','20s'),
('SidsBuff',':',SIDS_BUFF),
('TransNames', '8s'),
('LookupLevel', '<H'),
('MappedCount', '6s'),
('LookupOptions', '<L=0'),
('ClientRevision', '<L=1'),
)
class LSARPCLookupSidsResponse(Structure):
structure = (
('BuffSize','_-pSidsRespBuffer','len(self.rawData)-8'),
('pSidsRespBuffer',':'),
('Count','4s'),
('ErrorCode','<L'),
)
def formatDict(self):
elem_len = []
names_size = []
l_dict = []
sids_resp = self['pSidsRespBuffer']
dom_count = unpack('<L',sids_resp[4:8])[0]
if dom_count == 0:
ptr = 8
else:
ptr = 20
for i in range(dom_count):
elem_len.append(unpack('<H',sids_resp[ptr:ptr+2])[0])
ptr += 12
for i in range(dom_count):
elem_length = elem_len[i]
ptr += 12
l_dict.append({'domain': unpack('%ss'%elem_length, sids_resp[ptr:ptr+elem_length])[0].decode('utf16')})
ptr += elem_length + 4 #for the SID Count
if (elem_length/2) % 2 == 1:
ptr += 2
entry = SAMR_RPC_SID(sids_resp[ptr:])
l_dict[i]['sid'] = entry
ptr += len(entry)
name_count = unpack('<L',sids_resp[ptr:ptr+4])[0]
ptr += 12
for i in range(name_count):
names_size.append([unpack('<H',sids_resp[ptr+4:ptr+6])[0], unpack('<H', sids_resp[ptr:ptr+2])[0], unpack('<L', sids_resp[ptr+12:ptr+16])[0]])
ptr += 16
for i in range(name_count):
elem_length = names_size[i][0]
sid_type = names_size[i][1]
if elem_length != 0:
act_count = unpack('<L', sids_resp[ptr+8:ptr+12])[0]
ptr += 12
name = unpack('%ss'%elem_length, sids_resp[ptr:ptr+elem_length])[0].decode('utf16')
else:
act_count = 0
name = ''
ret = l_dict[names_size[i][2]].setdefault('names', [name])
if ret != [name]:
l_dict[names_size[i][2]]['names'].append(name)
ret = l_dict[names_size[i][2]].setdefault('types', [sid_type])
if ret != [sid_type]:
l_dict[names_size[i][2]]['types'].append(sid_type)
ptr += elem_length
if act_count % 2 == 1:
ptr += 2 #Only for odd numbers
return l_dict
class LSARPCSessionError(Exception):
def __init__( self, error_code):
Exception.__init__(self)
self.error_code = error_code
def get_error_code( self ):
return self.error_code
    def __str__( self ):
        key = self.error_code
        # ERROR_MESSAGES maps codes to (name, description) tuples, which fills
        # both placeholders below; 'in' replaces the Python2-only has_key().
        if key in ERROR_MESSAGES:
            return 'LSARPC SessionError: %s(%s)' % ERROR_MESSAGES[key]
        else:
            return 'LSARPC SessionError: unknown error code: 0x%x' % self.error_code
class DCERPCLsarpc:
def __init__(self, dcerpc):
self._dcerpc = dcerpc
def doRequest(self, request, noAnswer = 0, checkReturn = 1):
self._dcerpc.call(request.opnum, request)
if noAnswer:
return
else:
answer = self._dcerpc.recv()
if checkReturn and answer[-4:] != '\x00\x00\x00\x00':
error_code = unpack('<L', answer[-4:])[0]
raise LSARPCSessionError(error_code)
return answer
def LsarOpenPolicy2( self, systemName, desiredAccess = 0x00020801):
"""
opens a context handle to the RPC server
:param string systemName: This parameter does not have any effect on message processing in any environment. It MUST be ignored on receipt.
:param int desiredAccess: An ACCESS_MASK value that specifies the requested access rights that MUST be granted on the returned PolicyHandle if the request is successful. Check [MS-DTYP], section 2.4.3
:return: a structure with a policy handle, call dump() to check its structure. Otherwise raises an error
"""
open_policy = LSARPCOpenPolicy2()
open_policy['ServerName'] = ndrutils.NDRUniqueStringW()
open_policy['ServerName']['Data'] = (systemName+'\x00').encode('utf-16le')
        #TODO: Implement the ObjectAttributes structure
        open_policy['ObjectAttributes'] = '\x00' * 24
open_policy['AccessMask'] = desiredAccess
data = self.doRequest(open_policy)
ans = LSARPCOpenPolicy2Response(data)
return ans
def LsarLookupSids( self, context_handle, sids):
"""
translates a batch of security principal SIDs to their name forms. It also returns the domains that these names are a part of.
:param HANDLE context_handle: OpenPolicy2 handle
:param list sids: list of sids to look information for ([S1, S2 ...])
:return: a structure with a list of translated sids, call dump() to see its contents. Otherwise it raises an error
"""
open_policy = LSARPCLookupSids()
open_policy['ContextHandle'] = context_handle
open_policy['SidsBuff'] = SIDS_BUFF()
open_policy['SidsBuff']['NumSids'] = len(sids)
open_policy['SidsBuff']['RefID'] = random.randint(1,65535)
open_policy['SidsBuff']['MaxCount'] = len(sids)
sids_str = ''
sid_items = 0
for sid_i in range(len(sids)):
sid_arr = sids[sid_i].split('-')
_sid = SAMR_RPC_SID_STRUCT()
sid_items += 1
_sid['Count'] = len(sid_arr) - 3
_sid['Sid'] = SAMR_RPC_SID()
_sid['Sid']['Revision'] = int(sid_arr[1])
_sid['Sid']['SubAuthorityCount'] =len(sid_arr) - 3
_sid['Sid']['IdentifierAuthority'] = SAMR_RPC_SID_IDENTIFIER_AUTHORITY()
_sid['Sid']['IdentifierAuthority']['Value'] = '\x00\x00\x00\x00\x00' + pack('B',int(sid_arr[2]))
sub_auth = ''
for elem in sid_arr[3:]:
sub_auth += pack('<L', int(elem))
_sid['Sid']['SubAuthority'] = sub_auth
sids_str += _sid.getData()
for i in range(0, sid_items):
sids_str = pack('<L',random.randint(1,65535)) + sids_str
open_policy['SidsBuff']['Sids'] = sids_str
        open_policy['TransNames'] = '\x00' * 8
        open_policy['LookupLevel'] = 1
        open_policy['MappedCount'] = '\x00' * 6
data = self.doRequest(open_policy, checkReturn = 0)
packet = LSARPCLookupSidsResponse(data)
return packet
def LsarLookupSids3( self, context_handle, sids):
"""
translates a batch of security principal SIDs to their name forms. It also returns the domains that these names are a part of.
:param HANDLE context_handle: OpenPolicy2 handle
:param list sids: list of sids to look information for ([S1, S2 ...])
:return: a structure with a list of translated sids, call dump() to see its contents. Otherwise it raises an error
"""
open_policy = LSARPCLookupSids3()
open_policy['ContextHandle'] = context_handle
open_policy['SidsBuff'] = SIDS_BUFF()
open_policy['SidsBuff']['NumSids'] = len(sids)
open_policy['SidsBuff']['RefID'] = random.randint(1,65535)
open_policy['SidsBuff']['MaxCount'] = len(sids)
sids_str = ''
sid_items = 0
for sid_i in range(len(sids)):
sid_arr = sids[sid_i].split('-')
_sid = SAMR_RPC_SID_STRUCT()
sid_items += 1
_sid['Count'] = len(sid_arr) - 3
_sid['Sid'] = SAMR_RPC_SID()
_sid['Sid']['Revision'] = int(sid_arr[1])
_sid['Sid']['SubAuthorityCount'] =len(sid_arr) - 3
_sid['Sid']['IdentifierAuthority'] = SAMR_RPC_SID_IDENTIFIER_AUTHORITY()
_sid['Sid']['IdentifierAuthority']['Value'] = '\x00\x00\x00\x00\x00' + pack('B',int(sid_arr[2]))
sub_auth = ''
for elem in sid_arr[3:]:
sub_auth += pack('<L', int(elem))
_sid['Sid']['SubAuthority'] = sub_auth
sids_str += _sid.getData()
for i in range(0, sid_items):
sids_str = pack('<L',random.randint(1,65535)) + sids_str
open_policy['SidsBuff']['Sids'] = sids_str
        open_policy['TransNames'] = '\x00' * 8
        open_policy['LookupLevel'] = 1
        open_policy['MappedCount'] = '\x00' * 6
data = self.doRequest(open_policy, checkReturn = 0)
packet = LSARPCLookupSidsResponse(data)
return packet
def LsarQueryInformationPolicy2(self, policyHandle, informationClass):
"""
query values that represent the server's security policy
:param HANDLE policyHandle: OpenPolicy2 handle
:param int informationClass: the information class type requests. Check [MS-LSAD], section 3.1.4.4.3. Currently supported POLICY_PRIMARY_DOMAIN_INFORMATION and POLICY_ACCOUNT_DOMAIN_INFORMATION.
:return: a structure with the requested information class. Call the dump() method to check its structure. Otherwise raises an error
"""
queryInfo = LSARPCQueryInformationPolicy2()
queryInfo['ContextHandle'] = policyHandle
queryInfo['InformationClass'] = informationClass
packet = self.doRequest(queryInfo)
data = LSARPCQueryInformationPolicy2Response(packet)
# For the answers we can parse, we return the structs, for the rest, just the data
if informationClass == POLICY_PRIMARY_DOMAIN_INFORMATION:
return DOMAIN_INFORMATION(data['pRespBuffer'])
elif informationClass == POLICY_ACCOUNT_DOMAIN_INFORMATION:
return DOMAIN_INFORMATION(data['pRespBuffer'])
else:
return data
def LsarClose( self, context_handle):
"""
frees the resources held by a context handle that was opened earlier
:param HANDLE context_handle: OpenPolicy2 handle
:return: NULL or raises an exception on error
"""
open_policy = LSARPCClose()
open_policy['ContextHandle'] = context_handle
data = self.doRequest(open_policy)
ans = LSARPCCloseResponse(data)
return ans
def LsarLookupNames2(self, policyHandle, names, lookupLevel=LsapLookupWksta, lookupOptions = 0x0, clientRevision = 0x1):
"""
translates a batch of security principal names to their SID form
:param HANDLE policyHandle: OpenPolicy2 handle
:param UNICODE names: contains the security principal names to translate (only supports one name)
:param int lookupLevel: Specifies what scopes are to be used during translation, as specified in section 2.2.16 [MS-LSAT]
:param int lookupOptions: flags that control the lookup operation. For possible values and their meanings, see section 3.1.4.5 [MS-LSAT]
:param int clientRevision: version of the client, which implies the client's capabilities. For possible values and their meanings, see section 3.1.4.5 [MS-LSAT]
:return: on successful return, call the dump() method to see its contents
"""
lookupNames2 = LSARPCLookupNames2()
lookupNames2['PolicyHandle'] = policyHandle
lookupNames2['Count'] = 1
lookupNames2['SizeIs'] = 1
rpcUnicodePtr = ndrutils.pRPC_UNICODE_STRING()
rpcUnicodePtr.setDataLen(names)
rpcUnicode = ndrutils.RPC_UNICODE_STRING()
rpcUnicode['Data'] = names
lookupNames2['Names'] = str(rpcUnicodePtr) + str(rpcUnicode)
lookupNames2['TranslatedSids'] = '\x00'*8
lookupNames2['LookupOptions'] = lookupOptions
lookupNames2['LookupLevel'] = lookupLevel
lookupNames2['MappedCount'] = 0
lookupNames2['ClientRevision'] = clientRevision
data = self.doRequest(lookupNames2)
ans = LSARPCLookupNames2Response(data)
return ans
def LsarOpenAccount(self, policyHandle, accountSid, desiredAccess=0x02000000):
"""
obtains a handle to an account object
:param HANDLE policyHandle: OpenPolicy2 handle
:param RPC_SID accountSid: A SID of the account to be opened
        :param int desiredAccess: An ACCESS_MASK value that specifies the requested access rights that MUST be granted on the returned AccountHandle if the request is successful. Check [MS-DTYP], section 2.4.3
:return: returns the AccountHandle for the opened Sid. Call dump() method to see the structure.
"""
openAccount = LSARPCOpenAccount()
openAccount['PolicyHandle'] = policyHandle
openAccount['AccountSid'] = accountSid
openAccount['DesiredAccess'] = desiredAccess
data = self.doRequest(openAccount)
ans = LSARPCOpenAccountResponse(data)
return ans
def LsarCreateAccount(self, policyHandle, accountSid, desiredAccess=0x02000000):
"""
creates a new account object in the server's database
:param HANDLE policyHandle: OpenPolicy2 handle
:param RPC_SID accountSid: A SID of the account to be opened
        :param int desiredAccess: An ACCESS_MASK value that specifies the requested access rights that MUST be granted on the returned AccountHandle if the request is successful. Check [MS-DTYP], section 2.4.3
:return: returns the AccountHandle for the created Sid. Call dump() method to see the structure.
"""
createAccount = LSARPCCreateAccount()
createAccount['PolicyHandle'] = policyHandle
createAccount['AccountSid'] = accountSid
createAccount['DesiredAccess'] = desiredAccess
data = self.doRequest(createAccount)
ans = LSARPCCreateAccountResponse(data)
return ans
def LsarDeleteObject(self, objectHandle):
"""
deletes an open account object, secret object, or trusted domain object.
:param HANDLE objectHandle: handle of the object to delete
:return: NULL or raises an exception on error
"""
deleteObject = LSARPCDeleteObject()
deleteObject['ObjectHandle'] = objectHandle
data = self.doRequest(deleteObject)
return data
def LsarSetSystemAccessAccount(self, accountHandle, systemAccess = 0x10):
"""
sets system access account flags for an account object.
:param HANDLE accountHandle: handle for a valid opened account
:param int systemAccess: a bitmask containing the account flags to be set on the account.
:return: NULL or raises an exception on error
"""
setSystemAccess = LSARPCSetSystemAccessAccount()
setSystemAccess['AccountHandle'] = accountHandle
setSystemAccess['SystemAccess'] = systemAccess
data = self.doRequest(setSystemAccess)
return data
def LsarOpenSecret(self, policyHandle, secretName, desiredAccess=0x02000000):
"""
        obtains a handle to an existing secret object.
        :param HANDLE policyHandle: OpenPolicy2 handle
        :param UNICODE secretName: the name of the secret to open
        :param int desiredAccess: An ACCESS_MASK value that specifies the requested access rights that MUST be granted on the returned SecretHandle if the request is successful. Check [MS-DTYP], section 2.4.3
:return: returns the SecretHandle for the opened secret. Call dump() method to see the structure.
"""
openSecret = LSARPCOpenSecret()
openSecret['PolicyHandle'] = policyHandle
rpcUnicodePtr = ndrutils.pRPC_UNICODE_STRING()
rpcUnicodePtr.setDataLen(secretName)
rpcUnicode = ndrutils.RPC_UNICODE_STRING()
rpcUnicode['Data'] = secretName
openSecret['SecretName'] = str(rpcUnicodePtr) + str(rpcUnicode)
openSecret['DesiredAccess'] = desiredAccess
data = self.doRequest(openSecret)
ans = LSARPCOpenSecretResponse(data)
return ans
def LsarRetrievePrivateData(self, policyHandle, keyName):
"""
retrieves a secret value.
:param HANDLE policyHandle: OpenPolicy2 handle
:param UNICODE keyName: the name of the secret to retrieve
:return: returns a structure with the secret. Call dump() method to see the structure. Raises an exception on error
You can decrypt the secrets using crypto.decryptSecret(). You will need the sessionKey from the SMBConnection as the key for decryption (getSessionKey()).
"""
retrievePrivateData = LSARPCRetrievePrivateData()
retrievePrivateData['PolicyHandle'] = policyHandle
rpcUnicodePtr = ndrutils.pRPC_UNICODE_STRING()
rpcUnicodePtr.setDataLen(keyName)
rpcUnicode = ndrutils.RPC_UNICODE_STRING()
rpcUnicode['Data'] = keyName
retrievePrivateData['KeyName'] = str(rpcUnicodePtr) + str(rpcUnicode)
data = self.doRequest(retrievePrivateData)
ans = LSARPCRetrievePrivateDataResponse(data)
return ans
def LsarQuerySecret(self, secretHandle):
"""
retrieves the current and old (or previous) value of the secret object.
:param HANDLE secretHandle: LsarOpenSecret handle
:return: returns a structure with the secret. Call dump() method to see the structure. Raises an exception on error
"""
querySecret = LSARPCQuerySecret()
querySecret['SecretHandle'] = secretHandle
data = self.doRequest(querySecret)
ans = LSARPCQuerySecretResponse(data)
ans.dump()
return ans
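# End-to-end usage sketch (assumes `dce` is an already-connected DCE/RPC
# transport bound to MSRPC_UUID_LSARPC; the host name and SID below are
# illustrative only):
#
#   lsarpc = DCERPCLsarpc(dce)
#   resp = lsarpc.LsarOpenPolicy2('\\\\TARGET')
#   policy_handle = resp['ContextHandle']
#   print lsarpc.LsarLookupSids(policy_handle, ['S-1-5-32-544']).formatDict()
#   lsarpc.LsarClose(policy_handle)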
| {
"content_hash": "859259d3049a56a517fcc481440c0866",
"timestamp": "",
"source": "github",
"line_count": 752,
"max_line_length": 208,
"avg_line_length": 34.82313829787234,
"alnum_prop": 0.6045366021308283,
"repo_name": "hecchi777/S3-SlaacSecuritySolution",
"id": "e3fe906eb25be5c5865c33e82382383d842cbe3d",
"size": "26578",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "impacket-0.9.11/impacket/dcerpc/lsarpc.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1234"
},
{
"name": "C++",
"bytes": "23499"
},
{
"name": "Perl",
"bytes": "6245"
},
{
"name": "Python",
"bytes": "3644642"
},
{
"name": "Shell",
"bytes": "839"
}
],
"symlink_target": ""
} |
"""
PlexPhoto: Photoalbum and Photo objects for the Plex API.
Attributes:
    NA: Sentinel (utils.NA) used for attribute values missing from the server response.
"""
from plexapi import media, utils
from plexapi.utils import PlexPartialObject
NA = utils.NA
@utils.register_libtype
class Photoalbum(PlexPartialObject):
"""Summary
Attributes:
addedAt (TYPE): Description
art (TYPE): Description
composite (TYPE): Description
guid (TYPE): Description
index (TYPE): Description
key (TYPE): Description
librarySectionID (TYPE): Description
listType (str): Description
ratingKey (TYPE): Description
summary (TYPE): Description
thumb (TYPE): Description
title (TYPE): Description
TYPE (str): Description
type (TYPE): Description
updatedAt (TYPE): Description
"""
TYPE = 'photoalbum'
def __init__(self, server, data, initpath):
"""Summary
Args:
server (TYPE): Description
data (TYPE): Description
initpath (TYPE): Description
"""
super(Photoalbum, self).__init__(data, initpath, server)
def _loadData(self, data):
"""Summary
Args:
data (TYPE): Description
Returns:
TYPE: Description
"""
self.listType = 'photo'
self.addedAt = utils.toDatetime(data.attrib.get('addedAt', NA))
self.art = data.attrib.get('art', NA)
self.composite = data.attrib.get('composite', NA)
self.guid = data.attrib.get('guid', NA)
self.index = utils.cast(int, data.attrib.get('index', NA))
self.key = data.attrib.get('key', NA)
self.librarySectionID = data.attrib.get('librarySectionID', NA)
self.ratingKey = data.attrib.get('ratingKey', NA)
self.summary = data.attrib.get('summary', NA)
self.thumb = data.attrib.get('thumb', NA)
self.title = data.attrib.get('title', NA)
self.type = data.attrib.get('type', NA)
self.updatedAt = utils.toDatetime(data.attrib.get('updatedAt', NA))
def photos(self):
"""Summary
Returns:
TYPE: Description
"""
path = '/library/metadata/%s/children' % self.ratingKey
return utils.listItems(self.server, path, Photo.TYPE)
def photo(self, title):
"""Summary
Args:
title (TYPE): Description
Returns:
TYPE: Description
"""
path = '/library/metadata/%s/children' % self.ratingKey
return utils.findItem(self.server, path, title)
def section(self):
"""Summary
Returns:
TYPE: Description
"""
return self.server.library.sectionByID(self.librarySectionID)
@utils.register_libtype
class Photo(PlexPartialObject):
"""Summary
Attributes:
addedAt (TYPE): Description
index (TYPE): Description
key (TYPE): Description
listType (str): Description
media (TYPE): Description
originallyAvailableAt (TYPE): Description
parentKey (TYPE): Description
parentRatingKey (TYPE): Description
ratingKey (TYPE): Description
summary (TYPE): Description
thumb (TYPE): Description
title (TYPE): Description
TYPE (str): Description
type (TYPE): Description
updatedAt (TYPE): Description
year (TYPE): Description
"""
TYPE = 'photo'
def __init__(self, server, data, initpath):
"""Summary
Args:
server (TYPE): Description
data (TYPE): Description
initpath (TYPE): Description
"""
super(Photo, self).__init__(data, initpath, server)
def _loadData(self, data):
"""Summary
Args:
data (TYPE): Description
Returns:
TYPE: Description
"""
self.listType = 'photo'
self.addedAt = utils.toDatetime(data.attrib.get('addedAt', NA))
self.index = utils.cast(int, data.attrib.get('index', NA))
self.key = data.attrib.get('key', NA)
self.originallyAvailableAt = utils.toDatetime(
data.attrib.get('originallyAvailableAt', NA), '%Y-%m-%d')
self.parentKey = data.attrib.get('parentKey', NA)
self.parentRatingKey = data.attrib.get('parentRatingKey', NA)
self.ratingKey = data.attrib.get('ratingKey', NA)
self.summary = data.attrib.get('summary', NA)
self.thumb = data.attrib.get('thumb', NA)
self.title = data.attrib.get('title', NA)
self.type = data.attrib.get('type', NA)
self.updatedAt = utils.toDatetime(data.attrib.get('updatedAt', NA))
self.year = utils.cast(int, data.attrib.get('year', NA))
if self.isFullObject():
self.media = [media.Media(self.server, e, self.initpath, self)
for e in data if e.tag == media.Media.TYPE]
def photoalbum(self):
"""Summary
Returns:
TYPE: Description
"""
return utils.listItems(self.server, self.parentKey)[0]
def section(self):
"""Summary
Returns:
TYPE: Description
"""
return self.server.library.sectionByID(self.photoalbum().librarySectionID)
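# Usage sketch (assumes a connected PlexServer instance `plex`; the section
# and album titles below are illustrative):
#
#   album = plex.library.section('Photos').get('Vacation')
#   for item in album.photos():
#       print(item.title, item.year)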
| {
"content_hash": "beb3d4e643196674076210feb5bb5c54",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 82,
"avg_line_length": 29.96,
"alnum_prop": 0.5824909403013542,
"repo_name": "Hellowlol/plexapi",
"id": "cb387ee430515d79ad2f56acbb113e2623106bb7",
"size": "5267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plexapi/photo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "169061"
}
],
"symlink_target": ""
} |
from flask import Flask, request
from simple_rec import run_rec
app = Flask(__name__)
@app.route('/rec', methods=['POST'])
def rec():
    # Save the uploaded image to disk and run face recognition on it.
    upload = request.files['file']  # renamed from 'file' to avoid shadowing the builtin
    upload.save('search.png')
    return str(run_rec())
if __name__ == "__main__":
    app.run(host='0.0.0.0')
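# Example request against a local instance (hypothetical host/port and file):
#   curl -F "file=@face.png" http://localhost:5000/rec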
| {
"content_hash": "3a699228cdd7854bba308169a2b3ab88",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 35,
"avg_line_length": 22.357142857142858,
"alnum_prop": 0.6389776357827476,
"repo_name": "revan/facerecserver",
"id": "b7d94cb5906efb0eb578108853365528d6c302cb",
"size": "313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "70350"
}
],
"symlink_target": ""
} |
from argparse import ArgumentParser
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pyfanova.fanova
import pyfanova.visualizer
def parameter_plotting(dataset, data_dir, plot_dir, pairwise=False):
plt.rcdefaults()
# IMO very sloppy way to do it
clear_name = lambda X: X.split(':')[-1] if X.split(':')[0] == 'classifier' else X.split(':')[0]
#Styles
sns.set_style('whitegrid', {'axes.linewidth':1.25, 'axes.edgecolor':'0.15',
'grid.linewidth':1.5, 'grid.color':'gray'})
sns.set_color_codes()
plt.rcParams['figure.figsize'] = (12.0, 9.0)
plt.rc('text', usetex=False)
plt.rc('font', size=13.0, family='serif')
preprocessor='DeepNetIterative'
# base_data_dir = '/mhome/mendozah/autonet_GPU/results/experiment/'
## Parameter importance table
state_run_dir = os.path.join(data_dir, dataset, preprocessor, dataset, 'state-run100')
# fanova_set = pyfanova.fanova.Fanova(state_run_dir, improvement_over='QUANTILE', quantile_to_compare=0.25)
fanova_set = pyfanova.fanova.Fanova(state_run_dir)
max_marginals = 7
cols_imp_df = ['marginal', 'parameter']
temp_df = pd.DataFrame(fanova_set.print_all_marginals(max_num=max_marginals, pairwise=pairwise), columns=cols_imp_df)
# flatex = '%d_marginal_table_for_%s_over_q1.tex' % (max_marginals, dataset)
flatex = '%d_marginal_table_for_%s_default.tex' % (max_marginals, dataset)
    # Raise the column-width limit so long cell values are not truncated to '...'
    pd.set_option('display.max_colwidth', -1)
temp_df.to_latex(os.path.join(plot_dir, 'tables', flatex))
print("Done printing latex")
pd.set_option('display.max_colwidth', 51)
    if pairwise:
        temp_df.loc[:, 'parameter'] = temp_df.parameter.str.split(' x ')
    else:
        # Wrap single parameter names in a list so the loop below can index
        # p[0] uniformly for both the single and the pairwise case.
        temp_df.loc[:, 'parameter'] = temp_df.parameter.apply(lambda name: [name])
## Plot now the marginals
viz_set = pyfanova.visualizer.Visualizer(fanova_set)
categorical_params = fanova_set.get_config_space().get_categorical_parameters()
for p in temp_df.parameter:
fig_hyper, ax_hyper = plt.subplots(1,1)
if len(p) == 1:
label = clear_name(p[0])
if p[0] not in categorical_params:
viz_set.plot_marginal(p[0], ax=ax_hyper)
else:
viz_set.plot_categorical_marginal(p[0], ax=ax_hyper)
ax_hyper.set_xlabel(label)
else:
label = clear_name(p[0]) +'_X_'+clear_name(p[1])
if p[0] in categorical_params:
if p[1] not in categorical_params:
viz_set.plot_categorical_pairwise(p[0], p[1], ax=ax_hyper)
ax_hyper.set_xlabel(clear_name(p[1]))
ax_hyper.legend(loc='best', title=clear_name(p[0]))
else:
continue
else:
if p[1] not in categorical_params:
viz_set.plot_pairwise_marginal(p[0], p[1], ax=ax_hyper)
ax_hyper.set_xlabel(clear_name(p[0]))
ax_hyper.set_ylabel(clear_name(p[1]))
else:
viz_set.plot_categorical_pairwise(p[1], p[0], ax=ax_hyper)
ax_hyper.set_xlabel(clear_name(p[0]))
ax_hyper.legend(loc='best', title=clear_name(p[1]))
plt.tight_layout()
fig_hyper.savefig(os.path.join(plot_dir, '%s_for_%s_default.pdf' % (label, dataset)))
# fig_hyper.savefig(os.path.join(plot_dir, '%s_for_%s_over_q1.pdf' % (label, dataset)))
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("experiment_dir")
parser.add_argument("dataset")
parser.add_argument("output_path")
parser.add_argument("pairwise")
args = parser.parse_args()
parameter_plotting(args.dataset, args.experiment_dir, args.output_path, args.pairwise)
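# Example invocation (all paths and the dataset name are illustrative):
#   python fanova_analysis_autonet_gpu.py /data/experiments 38_bac /tmp/plots false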
| {
"content_hash": "a5288d8fb671c1180493216c9d8c02c4",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 121,
"avg_line_length": 43.71264367816092,
"alnum_prop": 0.6058374967131213,
"repo_name": "hmendozap/master-arbeit-projects",
"id": "df1ceaeaaa823e831a52906876381e4f629f6068",
"size": "3819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plotting_param_distros/fanova_analysis_autonet_gpu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "337835"
},
{
"name": "Python",
"bytes": "184045"
},
{
"name": "Shell",
"bytes": "3147"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
import unittest
import set_test_path
from weave_wdm_next_test_base import weave_wdm_next_test_base
import WeaveUtilities
class test_weave_wdm_next_mutual_subscribe_19(weave_wdm_next_test_base):
def test_weave_wdm_next_mutual_subscribe_19(self):
wdm_next_args = {}
wdm_next_args['wdm_option'] = "mutual_subscribe"
wdm_next_args['total_client_count'] = 2
wdm_next_args['final_client_status'] = 2
wdm_next_args['timer_client_period'] = 5000
wdm_next_args['test_client_iterations'] = 5
wdm_next_args['test_client_delay'] = 35000
wdm_next_args['enable_client_flip'] = 1
wdm_next_args['total_server_count'] = 2
wdm_next_args['final_server_status'] = 4
wdm_next_args['timer_server_period'] = 4000
wdm_next_args['enable_server_flip'] = 1
wdm_next_args['client_clear_state_between_iterations'] = True
wdm_next_args['server_clear_state_between_iterations'] = True
wdm_next_args['client_log_check'] = [('Handler\[0\] \[(ALIVE|CONFM)\] bound mutual subscription is going away', wdm_next_args['test_client_iterations']),
('Client\[0\] \[(ALIVE|CONFM)\] TerminateSubscription ', wdm_next_args['test_client_iterations']),
('Client\[0\] moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations']),
('Handler\[0\] Moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations'])]
wdm_next_args['server_log_check'] = [('TimerEventHandler Ref\(\d+\) Timeout', wdm_next_args['test_client_iterations']),
('Client->kEvent_OnNotificationProcessed', wdm_next_args['test_client_iterations'] * (wdm_next_args['total_client_count'] + 1)),
('bound mutual subscription is going away', wdm_next_args['test_client_iterations']),
('Handler\[0\] \[(ALIVE|CONFM)\] TerminateSubscription ', wdm_next_args['test_client_iterations']),
('Client\[0\] moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations']),
('Handler\[0\] Moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations'])]
wdm_next_args['test_tag'] = self.__class__.__name__[19:].upper()
wdm_next_args['test_case_name'] = ['M19: Stress Mutual Subscribe: Root path. Null Version. Mutate data in initiator and responder. Client in initiator aborts']
print('test file: ' + self.__class__.__name__)
print("weave-wdm-next test F15 and M19")
super(test_weave_wdm_next_mutual_subscribe_19, self).weave_wdm_next_test_base(wdm_next_args)
if __name__ == "__main__":
WeaveUtilities.run_unittest()
| {
"content_hash": "1975ccbece874335055cc90bdb00f95f",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 173,
"avg_line_length": 58.431372549019606,
"alnum_prop": 0.5802013422818791,
"repo_name": "openweave/openweave-core",
"id": "d707a2207f2f167a1690cabcdd4d4bdf1c40645d",
"size": "3979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test-apps/happy/tests/standalone/wdmNext/test_weave_wdm_next_mutual_subscribe_19.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "664311"
},
{
"name": "C++",
"bytes": "13369518"
},
{
"name": "Java",
"bytes": "300780"
},
{
"name": "M4",
"bytes": "115889"
},
{
"name": "Makefile",
"bytes": "354863"
},
{
"name": "Objective-C",
"bytes": "126850"
},
{
"name": "Objective-C++",
"bytes": "302756"
},
{
"name": "Perl",
"bytes": "12136"
},
{
"name": "Python",
"bytes": "2029596"
},
{
"name": "Shell",
"bytes": "122005"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import yaml
from ray.autoscaler.autoscaler import fillout_defaults, validate_config
from ray.tests.utils import recursive_fnmatch
RAY_PATH = os.path.abspath(os.path.join(__file__, "../../"))
CONFIG_PATHS = recursive_fnmatch(
os.path.join(RAY_PATH, "autoscaler"), "*.yaml")
CONFIG_PATHS += recursive_fnmatch(
os.path.join(RAY_PATH, "tune/examples/"), "*.yaml")
class AutoscalingConfigTest(unittest.TestCase):
def testValidateDefaultConfig(self):
for config_path in CONFIG_PATHS:
with open(config_path) as f:
config = yaml.safe_load(f)
config = fillout_defaults(config)
            try:
                validate_config(config)
            except Exception:
                self.fail(
                    "Config %s did not pass validation!" % config_path)
if __name__ == "__main__":
unittest.main(verbosity=2)
| {
"content_hash": "39e9c59c98442613e762290bd0c3026c",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 71,
"avg_line_length": 28.91176470588235,
"alnum_prop": 0.6541200406917599,
"repo_name": "ujvl/ray-ng",
"id": "ac7d6657b6906eedabdb0f1590a721d12fb5693a",
"size": "983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/tests/test_autoscaler_yaml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "349753"
},
{
"name": "C++",
"bytes": "6547"
},
{
"name": "CMake",
"bytes": "4927"
},
{
"name": "Makefile",
"bytes": "5285"
},
{
"name": "Python",
"bytes": "260095"
},
{
"name": "Shell",
"bytes": "6666"
}
],
"symlink_target": ""
} |
'''OpenGL extension OES.rgb8_rgba8
This module customises the behaviour of the
OpenGL.raw.GLES1.OES.rgb8_rgba8 to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/rgb8_rgba8.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.OES.rgb8_rgba8 import *
from OpenGL.raw.GLES1.OES.rgb8_rgba8 import _EXTENSION_NAME
def glInitRgb8Rgba8OES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | {
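# Availability-check sketch (requires a current GLES1 context; the extension
# only guarantees RGB8/RGBA8 renderbuffer storage, so there is nothing else
# to call):
#   if glInitRgb8Rgba8OES():
#       pass  # GL_RGB8_OES / GL_RGBA8_OES renderbuffer formats may be used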
"content_hash": "4e60efddccd463f60fa6a7a88d307959",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 32.34782608695652,
"alnum_prop": 0.7876344086021505,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "90e8678f0abc7b25dbbf0a02a257d0a8162d1c89",
"size": "744",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GLES1/OES/rgb8_rgba8.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class TransferSweep(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'id': (str,), # noqa: E501
'created': (datetime,), # noqa: E501
'amount': (str,), # noqa: E501
'iso_currency_code': (str,), # noqa: E501
'settled': (date, none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'created': 'created', # noqa: E501
'amount': 'amount', # noqa: E501
'iso_currency_code': 'iso_currency_code', # noqa: E501
'settled': 'settled', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, id, created, amount, iso_currency_code, settled, *args, **kwargs): # noqa: E501
"""TransferSweep - a model defined in OpenAPI
Args:
id (str): Identifier of the sweep.
created (datetime): The datetime when the sweep occurred, in RFC 3339 format.
amount (str): Signed decimal amount of the sweep as it appears on your sweep account ledger (e.g. \"-10.00\") If amount is not present, the sweep was net-settled to zero and outstanding debits and credits between the sweep account and Plaid are balanced.
iso_currency_code (str): The currency of the sweep, e.g. \"USD\".
settled (date, none_type): The date when the sweep settled, in the YYYY-MM-DD format.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.id = id
self.created = created
self.amount = amount
self.iso_currency_code = iso_currency_code
self.settled = settled
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
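# Construction sketch (field values are illustrative, not real Plaid data;
# `date` and `datetime` are re-exported by plaid.model_utils imported above):
#
#   sweep = TransferSweep(
#       id='sweep-123',
#       created=datetime(2021, 1, 1, 12, 0, 0),
#       amount='-10.00',
#       iso_currency_code='USD',
#       settled=date(2021, 1, 2),
#   )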
| {
"content_hash": "fdebe28fee0cc4d31f3aaeec49e80f8f",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 267,
"avg_line_length": 41.439153439153436,
"alnum_prop": 0.5615423901940756,
"repo_name": "plaid/plaid-python",
"id": "c9af0928df17de362b6f789aaf455b7c16fa6ac0",
"size": "7832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaid/model/transfer_sweep.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "622"
},
{
"name": "Mustache",
"bytes": "125163"
},
{
"name": "Python",
"bytes": "9342874"
}
],
"symlink_target": ""
} |
### Add your list of permitted repositories here
ALLOWED_REPOS = ['https://github.com/USER1/PROJECT1', 'https://github.com/USER2/PROJECT2']
### Alternatively, add your list of permitted organisations here
ALLOWED_ORGS = ['ORG1', 'ORG2']
### Add your sender email. Remember, senders need to be admins on your AppEngine account.
EMAIL_FROM = "Me <me@company.com>"
## Robots. Ignore commits by robots.
ROBOTS = ["robot@company.com"]
| {
"content_hash": "467e80078b0800cdd5346473501d68bf",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 91,
"avg_line_length": 54.714285714285715,
"alnum_prop": 0.7232375979112271,
"repo_name": "maniksurtani/github-emailhook",
"id": "1e86a9b3861da381118be9e7a9f89d9021b673b7",
"size": "433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3644"
}
],
"symlink_target": ""
} |
import tensorflow  # imported only to verify the packaged library loads in the Lambda runtime
import cv2  # likewise, a packaging smoke test for OpenCV
def handler(event, context):
    # Minimal AWS Lambda entry point; returning 0 means both imports resolved.
    return 0
"content_hash": "c1a4285a9f3ed7c78ba9dc22ff89e35e",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 28,
"avg_line_length": 12,
"alnum_prop": 0.75,
"repo_name": "ryfeus/lambda-packs",
"id": "3fd1d7840af5b36d1f62b2dbd3f74b1c42a3dfa1",
"size": "72",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tensorflow_OpenCV_Nightly/source/index.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
import datetime
from django.test import TestCase, tag
from django.urls import reverse
from .models import *
from .helpers import get_quarter_verbose
from real_estate.test_helper import (
create_mock_image_file,
create_neighbourhood,
create_street,
)
NULL_PRICE = ''
NULL_DATE_OF_CONSTRUCTION = ''
def create_builder():
_objects = Builder.objects.all()
if not _objects:
return Builder.objects.create(name='Снегири')
return _objects[0]
def create_type_of_complex():
_objects = TypeOfComplex.objects.all()
if not _objects:
return TypeOfComplex.objects.create(name='Жилой комплекс')
return _objects[0]
def create_RC(name='RC', description='smthng', is_active=True, **kwargs):
""" Creates Residental Complex.
Adds builder, neighbourhood and type_of_complex objs automatically
if its not provides"""
builder = kwargs.pop('builder', create_builder())
neighbourhood = kwargs.pop('neighbourhood', create_neighbourhood())
type_of_complex = kwargs.pop('type_of_complex', create_type_of_complex())
return ResidentalComplex.objects.create(name=name,
description=description,
is_active=is_active,
builder=builder,
neighbourhood=neighbourhood,
type_of_complex=type_of_complex,
**kwargs
)
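# e.g. create_RC(name='RC 1') auto-creates the Builder, Neighbourhood and
# TypeOfComplex dependencies; pass builder=..., neighbourhood=... or
# type_of_complex=... to reuse existing objects instead.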
def create_apartment(rc, price, total_area=38, **kwargs):
return NewApartment.objects.create(
residental_complex=rc,
total_area=total_area,
price=price,
layout=create_mock_image_file(),
**kwargs
)
def create_building(rc, building_number=1, **kwargs):
date_of_start_of_construction = kwargs.pop(
'date_of_start_of_construction', datetime.datetime.now())
street = kwargs.pop('street', create_street())
return NewBuilding.objects.create(
residental_complex=rc,
date_of_start_of_construction=date_of_start_of_construction,
street=street,
building=building_number,
**kwargs
)
def buildings_factory(rc, buildings_amount, no_active_buildings_amount=0):
""" Create set of NewBuilding objs"""
buildings = []
for i in range(buildings_amount):
buildings.append(
create_building(
rc,
name=str(i),
building_number=i,
date_of_construction=datetime.datetime.now(),
)
)
for i in range(buildings_amount,
buildings_amount + no_active_buildings_amount):
date = datetime.datetime.now() - datetime.timedelta(days=180)
buildings.append(
create_building(
rc,
name=str(i),
building_number=i,
date_of_construction=date,
is_active=False,
)
)
return buildings
class RCBaseTest(TestCase):
def setUp(self):
self.RC = create_RC()
@tag('get_lowest_price')
class ResidentalComplexOnSaveSignalsTests(RCBaseTest):
def test_get_lowest_price_without_apartment(self):
"""
        test_get_lowest_price() should return the minimal price among
        apartments that can be shown on the site (is_active=True) and that
        are present in an appropriate building
        (i.e. have an m2m link to a building with is_active=True)
"""
price = self.RC.get_lowest_price()
self.assertEqual(price, NULL_PRICE)
def test_get_lowest_price_with_apartment_with_building(self):
apartment_price = 111
building = create_building(self.RC)
apartment = create_apartment(self.RC, apartment_price)
apartment.buildings = [building]
price = self.RC.get_lowest_price()
self.assertEqual(price, apartment_price)
def test_get_lowest_price_with_apartment_without_building(self):
create_apartment(self.RC, 222)
price = self.RC.get_lowest_price()
self.assertEqual(price, NULL_PRICE)
def test_get_lowest_price_with_apartment_with_building_noactive(self):
apartment_price = 333
building = create_building(self.RC, is_active=False)
apartment = create_apartment(self.RC, apartment_price)
apartment.buildings = [building]
price = self.RC.get_lowest_price()
self.assertEqual(price, NULL_PRICE)
def test_get_lowest_price_with_apartment_noactive_with_building(self):
apartment_price = 444
building = create_building(self.RC)
apartment = create_apartment(self.RC, apartment_price, is_active=False)
apartment.buildings = [building]
price = self.RC.get_lowest_price()
self.assertEqual(price, NULL_PRICE)
@tag('slow')
def test_get_lowest_price_with_few_apartments_in_one_builing(self):
lowest_price = 555
prices = [lowest_price, lowest_price + 5, lowest_price + 10]
building = create_building(self.RC)
for apartment_price in prices:
apartment = create_apartment(self.RC, apartment_price)
apartment.buildings = [building]
price = self.RC.get_lowest_price()
self.assertEqual(price, lowest_price)
@tag('slow')
def test_get_lowest_price_with_mixed_apartments_and_buildings(self):
[
building1,
building2,
building3,
building_no_active
] = buildings_factory(self.RC, 3, 1)
apartment1 = create_apartment(self.RC, 666)
apartment1.buildings = [building1, building3, building_no_active]
apartment2 = create_apartment(self.RC, 667)
apartment2.buildings = [building2, building_no_active]
apartment3 = create_apartment(self.RC, 668)
apartment3.buildings = [building1,
building2,
building3,
building_no_active]
apartment_no_active = create_apartment(self.RC, 660, is_active=False)
apartment_no_active.buildings = [building1,
building2,
building3,
building_no_active]
apartment_with_no_active_building = create_apartment(self.RC, 650)
apartment_with_no_active_building.buildings = [building_no_active]
price = self.RC.get_lowest_price()
self.assertEqual(price, 666)
def test_not_change_slug_on_every_save(self):
first_time_slug = self.RC.slug
self.RC.save()
second_time_slug = self.RC.slug
self.assertEqual(first_time_slug, second_time_slug)
@tag('get_date_of_construction')
class ResidentalComplexNearestDatesMethodTests(RCBaseTest):
def test_get_date_of_construction_without_buildings(self):
date_of_construction = self.RC.get_date_of_construction()
self.assertEqual(date_of_construction, NULL_DATE_OF_CONSTRUCTION)
def test_get_date_of_construction_with_building(self):
date_of_construction_of_building = datetime.datetime.now()
create_building(
self.RC,
date_of_construction=date_of_construction_of_building
)
date_of_construction = self.RC.get_date_of_construction()
self.assertEqual(
date_of_construction,
get_quarter_verbose(date_of_construction_of_building)
)
def test_get_date_of_construction_with_building_no_active(self):
create_building(
self.RC,
date_of_construction=datetime.datetime.now(),
is_active=False,
)
date_of_construction = self.RC.get_date_of_construction()
self.assertEqual(date_of_construction, NULL_DATE_OF_CONSTRUCTION)
@tag('slow')
def test_get_date_of_construction_with_mixed_buildings(self):
[
building1,
building2,
building3,
building_no_active1,
building_no_active2,
] = buildings_factory(self.RC, 3, 2)
nearest_date = building1.date_of_construction
date_of_construction = self.RC.get_date_of_construction()
self.assertEqual(
date_of_construction,
get_quarter_verbose(nearest_date),
)
@tag('slow', 'view')
class ResidentalComplexListViewSearchFilterTests(TestCase):
def RCWithBuildingWithApartmentFactory(
self, rc_numbers, rc_name_prefix='RC',
rc_kwargs={}, building_kwargs={}, apartment_kwargs={}):
all_rc = []
for rc_number in range(rc_numbers):
rc = create_RC(
name=rc_name_prefix + " %s" % rc_number,
**rc_kwargs
)
all_rc.append(rc)
building = create_building(rc, **building_kwargs)
price = apartment_kwargs.pop('price', 190000)
apartment = create_apartment(rc, price, **apartment_kwargs)
apartment.buildings = [building]
return all_rc
def test_search_by_any_text_neigbourhood(self):
target_neigbourhood = create_neighbourhood('МЫС')
non_target_neigbourhood = create_neighbourhood('Дом Обороны')
number_of_rc_with_target_neigbourhood = 6
# target ResidentalComplexes
self.RCWithBuildingWithApartmentFactory(
number_of_rc_with_target_neigbourhood,
"Target RC",
rc_kwargs={'neighbourhood': target_neigbourhood}
)
# non target ResidentalComplexes
self.RCWithBuildingWithApartmentFactory(
10,
"Non-Target RC",
rc_kwargs={'neighbourhood': non_target_neigbourhood}
) + self.RCWithBuildingWithApartmentFactory(
10,
"no-active RC with appropriate neigbourhood",
rc_kwargs={
'is_active': False,
'neighbourhood': target_neigbourhood,
}
)
resp = self.client.get(
reverse('new_buildings:residental-complex-list'),
{'any_text': target_neigbourhood.name}
)
self.assertEqual(resp.status_code, 200)
self.assertTrue(len(resp.context['residental_complexes']) ==
number_of_rc_with_target_neigbourhood)
| {
"content_hash": "7d592fe89fd92fc3c012defc7cf5546f",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 79,
"avg_line_length": 34.62126245847176,
"alnum_prop": 0.5978313021782938,
"repo_name": "Dybov/real_estate_agency",
"id": "2c0c1920b64e1f066f60eb503ad239b1d12f3c5a",
"size": "10454",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "real_estate_agency/new_buildings/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "102329"
},
{
"name": "HTML",
"bytes": "104357"
},
{
"name": "JavaScript",
"bytes": "86459"
},
{
"name": "Python",
"bytes": "259967"
}
],
"symlink_target": ""
} |
"""RNN models of BasisNet with LSTM cells.
It implements a RNN wrapper with specialized LSTM cell with bases
for the kernels.
"""
import functools
from typing import Optional
import tensorflow as tf
CLIENT_SZIE = 500000
class BasisRNNLayer(tf.keras.layers.Layer):
"""A RNN wrapper using LSTM cell with Basis kernels."""
def __init__(self,
cell,
num_units,
num_basis,
recurrent_initializer,
kernel_initializer,
return_sequences=False):
super().__init__()
self.rnn_cell = cell(
num_units=num_units,
num_basis=num_basis,
recurrent_initializer=recurrent_initializer,
kernel_initializer=kernel_initializer)
self.rnn = tf.keras.layers.RNN(
self.rnn_cell, return_sequences=return_sequences)
def call(self, input_tensor):
return self.rnn(input_tensor)
class BasisLSTMCell(tf.keras.layers.Layer):
"""A LSTM cell with Basis kernels."""
def __init__(self,
num_units,
num_basis,
kernel_initializer,
recurrent_initializer,
word_emb_size=96,
use_bias=True,
activation=None,
):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
num_basis: The number of bases to learn.
kernel_initializer: The initializer of the input/output kernels.
recurrent_initializer: The initializer of the recurrent kernels.
word_emb_size: The word embedding size.
use_bias: Add bias or not.
activation: Activation function of the inner states. Default: `tanh`.
"""
super().__init__()
self._num_basis = num_basis
self.kernel_initializer = kernel_initializer
self.recurrent_initializer = recurrent_initializer
self._num_units = num_units
self.word_emb_size = word_emb_size
self.activation = activation or tf.tanh
self.recurrent_activation = tf.sigmoid
self.use_bias = use_bias
def build(self, input_shape):
# the basis embedding is concatenated to the input embedding,
# then split out in call().
weight_shape = [self.word_emb_size, self._num_basis, 4 * self._num_units]
self.basis_kernel = self.add_weight(
shape=weight_shape,
name='kernel',
initializer=self.kernel_initializer,)
self.basis_recurrent_kernel = self.add_weight(
shape=(self._num_units, self._num_basis, self._num_units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
)
self.bias = tf.Variable([0.0]*weight_shape[-1], name='bias')
@property
def state_size(self):
return tf.compat.v1.nn.rnn_cell.LSTMStateTuple(self._num_units,
self._num_units)
@property
def output_size(self):
return self._num_units
def compose_basis(self, c_prob):
"""Compose bases into a kernel."""
composed_kernel = tf.keras.backend.sum(
tf.expand_dims(self.basis_kernel, 0) * c_prob, axis=2)
composed_recurrent_kernel = tf.keras.backend.sum(
tf.expand_dims(self.basis_recurrent_kernel, 0) * c_prob, axis=2)
return composed_kernel, composed_recurrent_kernel
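  # A sketch of the reduction above (not from the original source): with
  # c_prob reshaped to [batch, num_basis], compose_basis is equivalent to
  #   tf.einsum('bn,wnu->bwu', probs, self.basis_kernel)
  #   tf.einsum('bn,hnu->bhu', probs, self.basis_recurrent_kernel)
  # i.e. each example gets its own convex combination of the basis kernels.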
def _compute_carry_and_output_fused(self, z, c_tm1):
"""Computes carry and output using fused kernels."""
z0, z1, z2, z3 = z
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
return c, o
def call(self, inputs, states):
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
inputs, c_prob = tf.split(inputs, [self.word_emb_size, self._num_basis], -1)
c_prob = tf.reshape(c_prob, [-1, 1, self._num_basis, 1])
composed_kernel, composed_recurrent_kernel = self.compose_basis(c_prob)
# inputs:
# [batch_size, 1, self.word_emb_size]
# composed_kernel:
# [batch_size, self.word_emb_size, self._num_units]
# outputs (need to be squeezed):
# [batch_size, 1, self._num_units]
z = tf.matmul(tf.expand_dims(inputs, 1), composed_kernel)
z += tf.matmul(tf.expand_dims(h_tm1, 1), composed_recurrent_kernel)
if self.use_bias:
z = tf.keras.backend.bias_add(z, self.bias)
z = tf.squeeze(z)
z = tf.split(z, num_or_size_splits=4, axis=1)
c, o = self._compute_carry_and_output_fused(z, c_tm1)
h = o * self.activation(c)
return h, [h, c]
class TransposableEmbedding(tf.keras.layers.Layer):
"""A Keras layer implements a transposed projection for output."""
def __init__(self, embedding_layer):
super().__init__()
self.embeddings = embedding_layer.embeddings
# Placing `tf.matmul` under the `call` method is important for backpropagating
# the gradients of `self.embeddings` in graph mode.
def call(self, inputs):
return tf.matmul(inputs, self.embeddings, transpose_b=True)
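# A minimal usage sketch of the tied projection (illustrative only; the
# layer sizes below are assumptions, not part of this module):
#
#   emb = tf.keras.layers.Embedding(input_dim=100, output_dim=8)
#   emb.build((None,))                       # materializes emb.embeddings
#   tied = TransposableEmbedding(emb)
#   logits = tied(tf.zeros([4, 8]))          # -> shape [4, 100]
#
# Tying the output projection to the input embedding removes one
# vocabulary-sized weight matrix from the model.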
def create_basis_recurrent_model(vocab_size = 10000,
num_oov_buckets = 1,
embedding_size = 96,
latent_size = 670,
num_basis = 1,
                                 sequence_length = 20,
name = 'rnn',
shared_embedding = False,
global_embedding_only = False,
seed = 0):
"""Constructs zero-padded keras model with the given parameters and cell.
Args:
vocab_size: Size of vocabulary to use.
num_oov_buckets: Number of out of vocabulary buckets.
embedding_size: The size of the embedding.
latent_size: The size of the recurrent state.
num_basis: The number of bases to learn.
    sequence_length: The sequence length of an input.
name: (Optional) string to name the returned `tf.keras.Model`.
shared_embedding: (Optional) Whether to tie the input and output
embeddings.
global_embedding_only: use the global embedding only or not.
seed: A random seed governing the model initialization and layer randomness.
      If set to `None`, no random seed is used.
Returns:
`tf.keras.Model`.
"""
extended_vocab_size = vocab_size + 3 + num_oov_buckets # For pad/bos/eos/oov.
input_x = tf.keras.layers.Input(shape=(None,), name='input_x')
input_id = tf.keras.layers.Input(shape=(1,), dtype=tf.int64, name='input_id')
input_embedding = tf.keras.layers.Embedding(
input_dim=extended_vocab_size,
output_dim=embedding_size,
mask_zero=True,
embeddings_initializer=tf.keras.initializers.RandomUniform(seed=seed),
)
embedded = input_embedding(input_x)
projected = embedded
  # Note: if the vocabulary size is set too small, no out-of-range error is
  # reported and the model still appears to train correctly.
  basis_embeddings = tf.keras.layers.Embedding(
      CLIENT_SIZE, num_basis, name='client_embedding')
if global_embedding_only:
# using id = 0 for the global embedding
basis_vec = basis_embeddings(tf.zeros_like(input_id))
else:
basis_vec = basis_embeddings(input_id)
# [batch_size, 1, num_basis]
basis_vec = tf.reshape(basis_vec, shape=[-1, 1, num_basis])
basis_prob = tf.keras.layers.Softmax()(basis_vec)
basis_tensor = tf.tile(
basis_prob,
      tf.constant([1, sequence_length, 1], tf.int32))
projected = tf.concat([projected, basis_tensor], -1)
recurrent_initializer = tf.keras.initializers.Orthogonal(seed=seed)
kernel_initializer = tf.keras.initializers.HeNormal(seed=seed)
lstm_layer_builder = functools.partial(
BasisRNNLayer,
cell=BasisLSTMCell,
num_units=latent_size,
num_basis=num_basis,
recurrent_initializer=recurrent_initializer,
kernel_initializer=kernel_initializer,
return_sequences=True,)
dense_layer_builder = functools.partial(
tf.keras.layers.Dense,
kernel_initializer=tf.keras.initializers.GlorotNormal(seed=seed))
layer = lstm_layer_builder()
processed = layer(projected)
# A projection changes dimension from rnn_layer_size to input_embedding_size
dense_layer = dense_layer_builder(units=embedding_size)
projected = dense_layer(processed)
projected = tf.concat([projected, basis_tensor], -1)
if shared_embedding:
transposed_embedding = TransposableEmbedding(input_embedding)
logits = transposed_embedding(projected)
else:
final_dense_layer = dense_layer_builder(
units=extended_vocab_size, activation=None)
logits = final_dense_layer(projected)
return tf.keras.Model(inputs=[input_x, input_id], outputs=logits, name=name)
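# Forward-pass smoke test for the basis model (a sketch; batch size and
# token values are arbitrary assumptions):
#
#   model = create_basis_recurrent_model(num_basis=4, sequence_length=20)
#   tokens = tf.zeros([8, 20], dtype=tf.int32)      # input_x
#   client_ids = tf.zeros([8, 1], dtype=tf.int64)   # input_id
#   logits = model([tokens, client_ids])
#   # -> [8, 20, vocab_size + 3 + num_oov_buckets]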
def create_recurrent_model(vocab_size = 10000,
num_oov_buckets = 1,
embedding_size = 96,
latent_size = 670,
num_layers = 1,
name = 'rnn',
shared_embedding = False,
seed = 0):
"""Constructs zero-padded keras model with the given parameters and cell.
Args:
vocab_size: Size of vocabulary to use.
num_oov_buckets: Number of out of vocabulary buckets.
embedding_size: The size of the embedding.
latent_size: The size of the recurrent state.
num_layers: The number of layers.
name: (Optional) string to name the returned `tf.keras.Model`.
shared_embedding: (Optional) Whether to tie the input and output
embeddings.
seed: A random seed governing the model initialization and layer randomness.
      If set to `None`, no random seed is used.
Returns:
`tf.keras.Model`.
"""
extended_vocab_size = vocab_size + 3 + num_oov_buckets # For pad/bos/eos/oov.
input_x = tf.keras.layers.Input(shape=(None,), name='input_x')
# To be consistent with BasisNet pipeline, not using client id
input_id = tf.keras.layers.Input(shape=(1,), dtype=tf.int64, name='input_id')
input_embedding = tf.keras.layers.Embedding(
input_dim=extended_vocab_size,
output_dim=embedding_size,
mask_zero=True,
embeddings_initializer=tf.keras.initializers.RandomUniform(seed=seed),
)
embedded = input_embedding(input_x)
projected = embedded
lstm_layer_builder = functools.partial(
tf.keras.layers.LSTM,
units=latent_size,
return_sequences=True,
recurrent_initializer=tf.keras.initializers.Orthogonal(seed=seed),
kernel_initializer=tf.keras.initializers.HeNormal(seed=seed))
dense_layer_builder = functools.partial(
tf.keras.layers.Dense,
kernel_initializer=tf.keras.initializers.GlorotNormal(seed=seed))
for _ in range(num_layers):
layer = lstm_layer_builder()
processed = layer(projected)
# A projection changes dimension from rnn_layer_size to input_embedding_size
dense_layer = dense_layer_builder(units=embedding_size)
projected = dense_layer(processed)
if shared_embedding:
transposed_embedding = TransposableEmbedding(input_embedding)
logits = transposed_embedding(projected)
else:
final_dense_layer = dense_layer_builder(
units=extended_vocab_size, activation=None)
logits = final_dense_layer(projected)
return tf.keras.Model(inputs=[input_x, input_id], outputs=logits, name=name)
| {
"content_hash": "fd0af53bb33690d2c6a28ab1fee3193c",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 80,
"avg_line_length": 35.316770186335404,
"alnum_prop": 0.6475553992261696,
"repo_name": "google-research/google-research",
"id": "5cfb9e66584ba8f1b4bd8bad08a1ba507744b7d6",
"size": "11980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basisnet/personalization/centralized_so_nwp/stackoverflow_basis_models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
import datetime
import re
import simplejson
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db.models import signals
from oembed.constants import DEFAULT_OEMBED_TTL, MIN_OEMBED_TTL, RESOURCE_TYPES
from oembed.exceptions import AlreadyRegistered, NotRegistered, OEmbedMissingEndpoint, OEmbedException
from oembed.models import StoredOEmbed, StoredProvider
from oembed.providers import BaseProvider, DjangoProvider
from oembed.resources import OEmbedResource
from oembed.utils import fetch_url, relative_to_full
class ProviderSite(object):
def __init__(self):
self.clear()
def invalidate_providers(self):
self._populated = False
def clear(self):
self._registry = {}
self._registered_providers = []
self.invalidate_providers()
def register(self, provider_class):
"""
Registers a provider with the site.
"""
if not issubclass(provider_class, BaseProvider):
raise TypeError('%s is not a subclass of BaseProvider' % provider_class.__name__)
if provider_class in self._registered_providers:
raise AlreadyRegistered('%s is already registered' % provider_class.__name__)
if issubclass(provider_class, DjangoProvider):
# set up signal handler for cache invalidation
signals.post_save.connect(
self.invalidate_stored_oembeds,
sender=provider_class._meta.model
)
# don't build the regex yet - if not all urlconfs have been loaded
# and processed at this point, the DjangoProvider instances will fail
# when attempting to reverse urlpatterns that haven't been created.
# Rather, the regex-list will be populated once, on-demand.
self._registered_providers.append(provider_class)
# flag for re-population
self.invalidate_providers()
def unregister(self, provider_class):
"""
Unregisters a provider from the site.
"""
if not issubclass(provider_class, BaseProvider):
raise TypeError('%s must be a subclass of BaseProvider' % provider_class.__name__)
if provider_class not in self._registered_providers:
raise NotRegistered('%s is not registered' % provider_class.__name__)
self._registered_providers.remove(provider_class)
# flag for repopulation
self.invalidate_providers()
def populate(self):
"""
Populate the internal registry's dictionary with the regexes for each
provider instance
"""
self._registry = {}
for provider_class in self._registered_providers:
instance = provider_class()
self._registry[instance] = instance.regex
for stored_provider in StoredProvider.objects.active():
self._registry[stored_provider] = stored_provider.regex
self._populated = True
def ensure_populated(self):
"""
Ensure not only that the internal registry of Python-class providers is
populated, but also make sure the cached queryset of database-providers
is up-to-date
"""
if not self._populated:
self.populate()
def get_registry(self):
"""
Return a dictionary of {provider_instance: regex}
"""
self.ensure_populated()
return self._registry
def get_providers(self):
"""Provide a list of all oembed providers that are being used."""
return self.get_registry().keys()
def provider_for_url(self, url):
"""
Find the right provider for a URL
"""
for provider, regex in self.get_registry().items():
if re.match(regex, url) is not None:
return provider
raise OEmbedMissingEndpoint('No endpoint matches URL: %s' % url)
def invalidate_stored_oembeds(self, sender, instance, created, **kwargs):
"""
A hook for django-based oembed providers to delete any stored oembeds
"""
ctype = ContentType.objects.get_for_model(instance)
StoredOEmbed.objects.filter(
object_id=instance.pk,
content_type=ctype).delete()
def embed(self, url, **kwargs):
"""
The heart of the matter
"""
try:
# first figure out the provider
provider = self.provider_for_url(url)
except OEmbedMissingEndpoint:
raise
else:
try:
                # check the database for a cached response; because of
                # race conditions that exist with get_or_create(), do a
                # filter lookup and just grab the first item
stored_match = StoredOEmbed.objects.filter(
match=url,
maxwidth=kwargs.get('maxwidth', None),
maxheight=kwargs.get('maxheight', None),
date_expires__gte=datetime.datetime.now())[0]
return OEmbedResource.create_json(stored_match.response_json)
except IndexError:
# query the endpoint and cache response in db
# prevent None from being passed in as a GET param
params = dict([(k, v) for k, v in kwargs.items() if v])
# request an oembed resource for the url
resource = provider.request_resource(url, **params)
try:
cache_age = int(resource.cache_age)
if cache_age < MIN_OEMBED_TTL:
cache_age = MIN_OEMBED_TTL
                except (AttributeError, TypeError, ValueError):
cache_age = DEFAULT_OEMBED_TTL
date_expires = datetime.datetime.now() + datetime.timedelta(seconds=cache_age)
stored_oembed, created = StoredOEmbed.objects.get_or_create(
match=url,
maxwidth=kwargs.get('maxwidth', None),
maxheight=kwargs.get('maxheight', None))
stored_oembed.response_json = resource.json
stored_oembed.resource_type = resource.type
stored_oembed.date_expires = date_expires
if resource.content_object:
stored_oembed.content_object = resource.content_object
stored_oembed.save()
return resource
def autodiscover(self, url):
"""
Load up StoredProviders from url if it is an oembed scheme
"""
headers, response = fetch_url(url)
if headers['content-type'] in ('application/json', 'text/javascript'):
provider_data = simplejson.loads(response)
return self.store_providers(provider_data)
def store_providers(self, provider_data):
"""
Iterate over the returned json and try to sort out any new providers
"""
if not hasattr(provider_data, '__iter__'):
raise OEmbedException('Autodiscovered response not iterable')
provider_pks = []
for provider in provider_data:
if 'endpoint' not in provider or \
'matches' not in provider:
continue
resource_type = provider.get('type')
if resource_type not in RESOURCE_TYPES:
continue
stored_provider, created = StoredProvider.objects.get_or_create(
wildcard_regex=provider['matches']
)
if created:
stored_provider.endpoint_url = relative_to_full(
provider['endpoint'],
provider['matches']
)
stored_provider.resource_type = resource_type
stored_provider.save()
provider_pks.append(stored_provider.pk)
return StoredProvider.objects.filter(pk__in=provider_pks)
# just like django admin
site = ProviderSite()
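# Typical usage mirrors django.contrib.admin (a sketch; MyProvider is a
# hypothetical provider, not part of this module):
#
#   class MyProvider(BaseProvider):
#       regex = r'^https?://example\.com/.+$'
#       ...  # implement request_resource(url, **params)
#
#   site.register(MyProvider)
#   resource = site.embed('http://example.com/video/1', maxwidth=640)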
| {
"content_hash": "5c1bd3e54643c9cb77e4a40ced9f14a3",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 102,
"avg_line_length": 37.542986425339365,
"alnum_prop": 0.5751476437266482,
"repo_name": "ericholscher/djangoembed",
"id": "bb1969b43b9ce1c9f2c6565860f2a1d20ba3ee15",
"size": "8297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oembed/sites.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "49353"
},
{
"name": "Python",
"bytes": "129032"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import json
import functools
import re
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
import tornado.web
import tornado.auth
from tornado import httpclient
from tornado.options import options
from celery.utils.imports import instantiate
from ..views import BaseHandler
class GoogleAuth2LoginHandler(BaseHandler, tornado.auth.GoogleOAuth2Mixin):
_OAUTH_SETTINGS_KEY = 'oauth'
@tornado.web.asynchronous
def get(self):
redirect_uri = self.settings[self._OAUTH_SETTINGS_KEY]['redirect_uri']
if self.get_argument('code', False):
self.get_authenticated_user(
redirect_uri=redirect_uri,
code=self.get_argument('code'),
callback=self._on_auth,
)
else:
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.settings[self._OAUTH_SETTINGS_KEY]['key'],
scope=['profile', 'email'],
response_type='code',
extra_params={'approval_prompt': 'auto'}
)
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(403, 'Google auth failed')
access_token = user['access_token']
try:
response = httpclient.HTTPClient().fetch(
'https://www.googleapis.com/plus/v1/people/me',
headers={'Authorization': 'Bearer %s' % access_token})
except Exception as e:
raise tornado.web.HTTPError(403, 'Google auth failed: %s' % e)
email = json.loads(response.body.decode('utf-8'))['emails'][0]['value']
if not re.match(self.application.options.auth, email):
message = (
"Access denied to '{email}'. Please use another account or "
"ask your admin to add your email to flower --auth."
).format(email=email)
raise tornado.web.HTTPError(403, message)
self.set_secure_cookie("user", str(email))
        next_ = self.get_argument('next', '/')
        self.redirect(next_)
class LoginHandler(BaseHandler):
def __new__(cls, *args, **kwargs):
return instantiate(options.auth_provider, *args, **kwargs)
class GithubLoginHandler(BaseHandler, tornado.auth.OAuth2Mixin):
_OAUTH_AUTHORIZE_URL = "https://github.com/login/oauth/authorize"
_OAUTH_ACCESS_TOKEN_URL = "https://github.com/login/oauth/access_token"
_OAUTH_NO_CALLBACKS = False
_OAUTH_SETTINGS_KEY = 'oauth'
@tornado.auth._auth_return_future
def get_authenticated_user(self, redirect_uri, code, callback):
http = self.get_auth_http_client()
body = urlencode({
"redirect_uri": redirect_uri,
"code": code,
"client_id": self.settings[self._OAUTH_SETTINGS_KEY]['key'],
"client_secret": self.settings[self._OAUTH_SETTINGS_KEY]['secret'],
"grant_type": "authorization_code",
})
http.fetch(
self._OAUTH_ACCESS_TOKEN_URL,
functools.partial(self._on_access_token, callback),
method="POST",
headers={'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'}, body=body)
@tornado.web.asynchronous
def _on_access_token(self, future, response):
if response.error:
future.set_exception(tornado.auth.AuthError(
'OAuth authentication error: %s' % str(response)))
return
future.set_result(json.loads(response.body.decode('utf-8')))
def get_auth_http_client(self):
return httpclient.AsyncHTTPClient()
@tornado.web.asynchronous
def get(self):
redirect_uri = self.settings[self._OAUTH_SETTINGS_KEY]['redirect_uri']
if self.get_argument('code', False):
self.get_authenticated_user(
redirect_uri=redirect_uri,
code=self.get_argument('code'),
callback=self._on_auth,
)
else:
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.settings[self._OAUTH_SETTINGS_KEY]['key'],
scope=['user:email'],
response_type='code',
extra_params={'approval_prompt': 'auto'}
)
@tornado.web.asynchronous
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, 'OAuth authentication failed')
access_token = user['access_token']
req = httpclient.HTTPRequest(
'https://api.github.com/user/emails',
headers={'Authorization': 'token ' + access_token,
'User-agent': 'Tornado auth'})
response = httpclient.HTTPClient().fetch(req)
emails = [email['email'].lower() for email in json.loads(response.body.decode('utf-8'))
if email['verified'] and re.match(self.application.options.auth, email['email'])]
if not emails:
message = (
"Access denied. Please use another account or "
"ask your admin to add your email to flower --auth."
)
raise tornado.web.HTTPError(403, message)
self.set_secure_cookie("user", str(emails.pop()))
next_ = self.get_argument('next', '/')
self.redirect(next_)
class LogoutHandler(BaseHandler):
def get(self):
self.clear_cookie('user')
self.render('404.html', message='Successfully logged out!')
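# Wiring sketch: the handlers above are selected through flower's options
# (flag spellings below are assumptions drawn from the settings keys used
# in this module; check `flower --help` for the exact names):
#
#   flower --auth='.*@example\.com' \
#          --auth_provider=flower.views.auth.GithubLoginHandler \
#          --oauth2_key=<client-id> \
#          --oauth2_secret=<client-secret> \
#          --oauth2_redirect_uri=http://localhost:5555/login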
| {
"content_hash": "0da1eb34df5256eb61114d7951f96a70",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 99,
"avg_line_length": 35.0125,
"alnum_prop": 0.5915744377008212,
"repo_name": "asmodehn/flower",
"id": "cbfb92a13cbf7a0ba3b3f8af8a8ab9096ed1222f",
"size": "5602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flower/views/auth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "20860"
},
{
"name": "HTML",
"bytes": "32785"
},
{
"name": "JavaScript",
"bytes": "28302"
},
{
"name": "Python",
"bytes": "150289"
}
],
"symlink_target": ""
} |
from types import LambdaType
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import nlopt
import numpy as np
from meep import Block, EigenModeSource, MaterialGrid, Simulation, Vector3, Volume
from meep.adjoint import DesignRegion, EigenmodeCoefficient, OptimizationProblem
from meep.visualization import get_2D_dimensions
from numpy import ndarray
import gdsfactory as gf
from gdsfactory import Component
from gdsfactory.simulation.gmeep import get_simulation
from gdsfactory.tech import LayerStack
from gdsfactory.types import Layer
def get_meep_adjoint_optimizer(
component: Component,
objective_function: Callable,
design_regions: List[DesignRegion],
design_variables: List[MaterialGrid],
design_update: np.ndarray,
TE_mode_number: int = 1,
resolution: int = 30,
cell_size: Optional[Tuple] = None,
extend_ports_length: Optional[float] = 10.0,
layer_stack: Optional[LayerStack] = None,
zmargin_top: float = 3.0,
zmargin_bot: float = 3.0,
tpml: float = 1.5,
clad_material: str = "SiO2",
is_3d: bool = False,
wavelength_start: float = 1.5,
wavelength_stop: float = 1.6,
wavelength_points: int = 50,
dfcen: float = 0.2,
port_source_name: str = "o1",
port_margin: float = 3,
distance_source_to_monitors: float = 0.2,
port_source_offset: float = 0,
port_monitor_offset: float = 0,
dispersive: bool = False,
material_name_to_meep: Optional[Dict[str, Union[str, float]]] = None,
**settings,
):
"""Return a Meep `OptimizationProblem` object.
Args:
component: gdsfactory component.
objective_function: functions must be composed of "field functions" that transform the recorded fields.
design_regions: list of DesignRegion objects.
design_variables: list of MaterialGrid objects.
        design_update: ndarray to initialize the optimization.
TE_mode_number: TE mode number.
resolution: in pixels/um (20: for coarse, 120: for fine).
cell_size: tuple of Simulation object dimensions in um.
extend_ports_length: to extend ports beyond the PML.
layer_stack: contains layer to thickness, zmin and material.
Defaults to active pdk.layer_stack.
zmargin_top: thickness for cladding above core.
zmargin_bot: thickness for cladding below core.
tpml: PML thickness (um).
clad_material: material for cladding.
is_3d: if True runs in 3D.
wavelength_start: wavelength min (um).
wavelength_stop: wavelength max (um).
wavelength_points: wavelength steps.
dfcen: delta frequency.
port_source_name: input port name.
port_margin: margin on each side of the port.
        distance_source_to_monitors: distance (um) from the source to the
            monitors; the source is placed before the monitors.
port_source_offset: offset between source GDS port and source MEEP port.
port_monitor_offset: offset between monitor GDS port and monitor MEEP port.
dispersive: use dispersive material models (requires higher resolution).
material_name_to_meep: map layer_stack names with meep material database name
or refractive index. dispersive materials have a wavelength dependent index.
Keyword Args:
settings: extra simulation settings (resolution, symmetries, etc.)
Returns:
opt: OptimizationProblem object
"""
sim_dict = get_simulation(
component,
resolution=resolution,
extend_ports_length=extend_ports_length,
layer_stack=layer_stack,
zmargin_top=zmargin_top,
zmargin_bot=zmargin_bot,
tpml=tpml,
clad_material=clad_material,
is_3d=is_3d,
wavelength_start=wavelength_start,
wavelength_stop=wavelength_stop,
wavelength_points=wavelength_points,
dfcen=dfcen,
port_source_name=port_source_name,
port_margin=port_margin,
distance_source_to_monitors=distance_source_to_monitors,
port_source_offset=port_source_offset,
port_monitor_offset=port_monitor_offset,
dispersive=dispersive,
material_name_to_meep=material_name_to_meep,
**settings,
)
sim = sim_dict["sim"]
design_regions_geoms = [
Block(
center=design_region.center,
size=design_region.size,
material=design_variable,
)
for design_region, design_variable in zip(design_regions, design_variables)
]
for design_region_geom in design_regions_geoms:
sim.geometry.append(design_region_geom)
cell_thickness = sim.cell_size[2]
monitors = sim_dict["monitors"]
ob_list = [
EigenmodeCoefficient(
sim,
Volume(
center=monitor.regions[0].center,
size=monitor.regions[0].size,
),
TE_mode_number,
)
for monitor in monitors.values()
]
c = component.copy()
for design_region, design_variable in zip(design_regions, design_variables):
sim.geometry.append(
Block(design_region.size, design_region.center, material=design_variable)
)
block = c << gf.components.rectangle(
(design_region.size[0], design_region.size[1])
)
block.center = (design_region.center[0], design_region.center[1])
sim.cell_size = (
Vector3(*cell_size)
if cell_size
else Vector3(
c.xsize + 2 * sim.boundary_layers[0].thickness,
c.ysize + 2 * sim.boundary_layers[0].thickness,
cell_thickness,
)
)
source = [
EigenModeSource(
sim.sources[0].src,
eig_band=1,
direction=sim.sources[0].direction,
eig_kpoint=Vector3(1, 0, 0),
size=sim.sources[0].size,
center=sim.sources[0].center,
)
]
sim.sources = source
opt = OptimizationProblem(
simulation=sim,
objective_functions=[objective_function],
objective_arguments=ob_list,
design_regions=design_regions,
frequencies=sim_dict["freqs"],
decay_by=settings.get("decay_by", 1e-5),
)
opt.update_design([design_update])
opt.plot2D(True)
return opt
def run_meep_adjoint_optimizer(
number_of_params: int,
cost_function: LambdaType,
update_variable: np.ndarray,
maximize_cost_function: bool = True,
algorithm: int = nlopt.LD_MMA,
lower_bound: Any = 0,
upper_bound: Any = 1,
maxeval: int = 10,
get_optimized_component: bool = False,
opt: OptimizationProblem = None,
**kwargs,
) -> Union[ndarray, Component]:
"""Run adjoint optimization using Meep.
Args:
number_of_params: number of parameters to optimize (usually resolution_in_x * resolution_in_y).
cost_function: cost function to optimize.
update_variable: variable to update the optimization with.
maximize_cost_function: if True, maximize the cost function, else minimize it.
algorithm: nlopt algorithm to use (default: nlopt.LD_MMA).
lower_bound: lower bound for the optimization.
upper_bound: upper bound for the optimization.
maxeval: maximum number of evaluations.
get_optimized_component: if True, returns the optimized gdsfactory Component.
            If this is True, the OptimizationProblem object used for the
            optimization must be passed as an argument.
opt: OptimizationProblem object used for the optimization. Used only if get_optimized_component is True.
Keyword Args:
fcen: center frequency of the source.
upscale_factor: upscale factor for the optimization's grid.
threshold_offset_from_max: threshold offset from max eps value.
layer: layer to apply to the optimized component.
"""
solver = nlopt.opt(algorithm, number_of_params)
solver.set_lower_bounds(lower_bound)
solver.set_upper_bounds(upper_bound)
if maximize_cost_function:
solver.set_max_objective(cost_function)
else:
solver.set_min_objective(cost_function)
solver.set_maxeval(maxeval)
update_variable[:] = solver.optimize(update_variable)
if get_optimized_component:
fcen = kwargs.get("fcen", 1 / 1.55)
upscale_factor = kwargs.get("upscale_factor", 2)
threshold_offset_from_max = kwargs.get("threshold_offset_from_max", 0.01)
layer = kwargs.get("layer", (1, 0))
return get_component_from_sim(
opt.sim, fcen, upscale_factor, threshold_offset_from_max, layer
)
return update_variable
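# The `cost_function` passed in must follow nlopt's gradient-based
# signature (a sketch; `opt` is an OptimizationProblem built by
# get_meep_adjoint_optimizer above):
#
#   def cost_function(x, gradient):
#       f0, dJ_du = opt([x])                 # forward + adjoint simulation
#       if gradient.size > 0:
#           gradient[:] = dJ_du.flatten()    # nlopt expects in-place update
#       return float(np.real(f0))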
def get_component_from_sim(
sim: Simulation,
fcen: float = 1 / 1.55,
upscale_factor: int = 2,
threshold_offset_from_max: float = 2.0,
layer: Layer = (1, 0),
) -> Component:
"""Get gdsfactory Component from Meep Simulation object.
Args:
sim: Meep Simulation object.
fcen: center frequency of the source.
upscale_factor: upscale factor for the optimization's grid.
threshold_offset_from_max: threshold offset from max eps value.
layer: layer to apply to the optimized component.
Returns:
gdsfactory Component.
"""
grid_resolution = upscale_factor * sim.resolution
sim_center, sim_size = get_2D_dimensions(sim, output_plane=None)
xmin = sim_center.x - sim_size.x / 2
xmax = sim_center.x + sim_size.x / 2
ymin = sim_center.y - sim_size.y / 2
ymax = sim_center.y + sim_size.y / 2
Nx = int((xmax - xmin) * grid_resolution + 1)
Ny = int((ymax - ymin) * grid_resolution + 1)
xtics = np.linspace(xmin, xmax, Nx)
ytics = np.linspace(ymin, ymax, Ny)
ztics = np.array([sim_center.z])
eps_data = np.real(sim.get_epsilon_grid(xtics, ytics, ztics, frequency=fcen))
return gf.read.from_np(
eps_data,
nm_per_pixel=1e3 / grid_resolution,
layer=layer,
threshold=np.max(eps_data) - threshold_offset_from_max,
)
def _example_optim_geometry() -> Component:
"""Dummy example of a component to optimize."""
from meep import Medium
design_region_width = 5
design_region_height = 4
resolution = 20
design_region_resolution = int(5 * resolution)
Nx = int(design_region_resolution * design_region_width)
Ny = int(design_region_resolution * design_region_height)
pml_size = 1.0
waveguide_length = 0.5
Sx = 2 * pml_size + 2 * waveguide_length + design_region_width
SiO2 = Medium(index=1.44)
Si = Medium(index=3.4)
design_variables = MaterialGrid(Vector3(Nx, Ny), SiO2, Si, grid_type="U_MEAN")
design_region = DesignRegion(
design_variables,
volume=Volume(
center=Vector3(),
size=Vector3(design_region_width, design_region_height, 0),
),
)
c = Component("mmi1x2")
arm_separation = 1.0
straight1 = c << gf.components.straight(Sx / 2 + 1)
straight1.move(straight1.ports["o2"], (-design_region_width / 2.0, 0))
straight2 = c << gf.components.straight(Sx / 2 + 1)
straight2.move(
straight2.ports["o1"], (design_region_width / 2.0, (arm_separation + 1.0) / 2.0)
)
straight3 = c << gf.components.straight(Sx / 2 + 1)
straight3.move(
straight3.ports["o1"],
(design_region_width / 2.0, (-arm_separation - 1.0) / 2.0),
)
c.add_port("o1", port=straight1.ports["o1"])
c.add_port("o2", port=straight2.ports["o2"])
c.add_port("o3", port=straight3.ports["o2"])
return design_region, design_variables, c, Nx, Ny
if __name__ == "__main__":
import autograd.numpy as npa
eta_i = 0.5
design_region, design_variables, c, Nx, Ny = _example_optim_geometry()
seed = 240
np.random.seed(seed)
x0 = np.random.rand(
Nx * Ny,
)
def J(source, top, bottom):
power = npa.abs(top / source) ** 2 + npa.abs(bottom / source) ** 2
return npa.mean(power)
opt = get_meep_adjoint_optimizer(
c,
J,
[design_region],
[design_variables],
x0,
cell_size=(15, 8),
extend_ports_length=0,
port_margin=0.75,
port_source_offset=-3.5,
port_monitor_offset=-3.5,
)
opt.plot2D(True)
| {
"content_hash": "6eaa6f104088591ce416f5033f3ae171",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 112,
"avg_line_length": 33.95567867036011,
"alnum_prop": 0.637379670419318,
"repo_name": "gdsfactory/gdsfactory",
"id": "be27df166ed4bdcbd25c49a739dc718476f55b95",
"size": "12258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gdsfactory/simulation/gmeep/meep_adjoint_optimization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "605"
},
{
"name": "Dockerfile",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "4572"
},
{
"name": "Python",
"bytes": "2471982"
},
{
"name": "Shell",
"bytes": "671"
},
{
"name": "XS",
"bytes": "10045"
}
],
"symlink_target": ""
} |
import os
import pickle
import numpy as np
import h5py
from sklearn.externals import joblib
from gala import imio, agglo, features, classify
fman = features.default.snemi3d()
def train(index):
outfn = 'training-data-%i.h5' % index
if os.path.exists(outfn):
data, labels = classify.load_training_data_from_disk(outfn,
names=['data',
'labels'])
else:
ws_tr = imio.read_image_stack('watershed-%i.lzf.h5' % index)
pr_tr = imio.read_image_stack('probabilities-inv-norm-%i.lzf.h5' % index)
gt_tr = imio.read_image_stack('ground-truth-%i.lzf.h5' % index)
g = agglo.Rag(ws_tr, pr_tr,
feature_manager=fman)
data, labels = g.learn_agglomerate(gt_tr, fman, min_num_epochs=4)[0][:2]
classify.save_training_data_to_disk([data, labels],
fn=outfn,
names=['data', 'labels'])
print('total training data:', data.shape)
print('size in MB:', data.size * data.itemsize / 1e6)
rf = classify.DefaultRandomForest(n_jobs=6)
rf.fit(data, labels[:, 0])
policy = agglo.classifier_probability(fman, rf)
return policy
def test(index, policy):
ws = imio.read_image_stack('watershed-%i.lzf.h5' % index)
pr = imio.read_image_stack('probabilities-inv-norm-%i.lzf.h5' % index)
g = agglo.Rag(ws, pr, merge_priority_function=policy,
feature_manager=fman)
g.agglomerate(np.inf)
return g.tree
def train_test_pair(training_index, testing_index):
print('training %i' % training_index)
policy = train(training_index)
print('testing %i' % testing_index)
tree = test(testing_index, policy)
with open('results-%i-tr%i.pickle' % (testing_index, training_index),
'wb') as fout:
pickle.dump(tree, fout, protocol=-1)
return tree
def write_saalfeld(fn, raw, labels, res=np.array([12., 1, 1])):
imio.write_h5_stack(raw, fn, group='raw')
imio.write_h5_stack(labels, fn, group='labels')
f = h5py.File(fn, 'a')
f['/raw'].attrs['resolution'] = res
f['/labels'].attrs['resolution'] = res
f.close()
if __name__ == '__main__':
index_pairs = [(3 - ts, ts) for ts in range(4)]
trees = joblib.Parallel(n_jobs=4)(joblib.delayed(train_test_pair)(*p)
for p in index_pairs)
trees = dict(zip(index_pairs, trees))
with open('results.pickle', 'wb') as fout:
pickle.dump(trees, fout, protocol=-1)
images = imio.read_image_stack('/groups/saalfeld/saalfeldlab/concha/sample_A/crop/raw/*.tiff')
wss = [imio.read_image_stack('watershed-%i.lzf.h5' % i) for i in range(4)]
maps = [t.get_map(0.5) for t in trees]
segs = [m[ws] for m, ws in zip(maps, wss)]
seg = np.zeros(images.shape, dtype=np.uint64)
seg[:, :625, :625] = segs[0]
seg[:, :625, 625:] = segs[1] + np.max(segs[0])
seg[:, 625:, :625] = segs[2] + np.max(segs[0]) + np.max(segs[1])
seg[:, 625:, 625:] = segs[3] + np.max(segs[0]) + np.max(segs[1]) + np.max(segs[2])
write_saalfeld('/groups/saalfeld/saalfeldlab/concha/sample_A/juan/corners-segments2.h5',
images, seg)
| {
"content_hash": "e8a21aaa32a1a20896fd2a61213a3877",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 98,
"avg_line_length": 40.06024096385542,
"alnum_prop": 0.5759398496240602,
"repo_name": "jni/gala-scripts",
"id": "e7fc79d3dd97a795e1c596ec0460d8cbeea3d96a",
"size": "3325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corners.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12253"
}
],
"symlink_target": ""
} |
import re
import subprocess
class ArpScraper(object):
# this entire class is obviously full of peril and shame
# ... only tested on Ubuntu 14.04...
# A cursory search for python arp libraries returned immature
# libraries focused on crafting arp packets rather than querying
# known system data, so here we are.
    ip_re = re.compile(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
mac_re = re.compile('..:..:..:..:..:..')
def __init__(self):
pass
def ip_lookup(self, mac_addresses=[]):
arp_dict = self.parse_system_arp()
addresses = []
for mac in mac_addresses:
addresses.append((mac, arp_dict.get(mac, "")))
return addresses
def parse_system_arp(self):
arp_dict = {}
arp_out = subprocess.check_output(['arp', '-a'])
for line in arp_out.split('\n'):
try:
ip = self.ip_re.search(line).group(0)
mac = self.mac_re.search(line).group(0)
arp_dict[mac] = ip
            except AttributeError:
                # re.search returned None: line has no IP/MAC pair
                pass
return arp_dict
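# Usage sketch (the MAC address below is made up):
#
#   scraper = ArpScraper()
#   scraper.ip_lookup(['aa:bb:cc:dd:ee:ff'])
#   # -> [('aa:bb:cc:dd:ee:ff', '192.168.1.10')] when the entry is in the
#   #    ARP cache, [('aa:bb:cc:dd:ee:ff', '')] otherwise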
| {
"content_hash": "127ce52e78dff63a718266377575aa2f",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 69,
"avg_line_length": 32.3235294117647,
"alnum_prop": 0.545950864422202,
"repo_name": "timfreund/ceph-libvirt-clusterer",
"id": "a4280a2daf7666ddb59d7859c8aa6e15e1db3509",
"size": "1099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cephlvc/network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12095"
}
],
"symlink_target": ""
} |
# [START recaptchaenterprise_v1_generated_RecaptchaEnterpriseService_RetrieveLegacySecretKey_async]
from google.cloud import recaptchaenterprise_v1
async def sample_retrieve_legacy_secret_key():
# Create a client
client = recaptchaenterprise_v1.RecaptchaEnterpriseServiceAsyncClient()
# Initialize request argument(s)
request = recaptchaenterprise_v1.RetrieveLegacySecretKeyRequest(
key="key_value",
)
# Make the request
response = await client.retrieve_legacy_secret_key(request=request)
# Handle the response
print(response)
# [END recaptchaenterprise_v1_generated_RecaptchaEnterpriseService_RetrieveLegacySecretKey_async]
| {
"content_hash": "27549db01e16dc2aae2e14f1f506772c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 97,
"avg_line_length": 30.263157894736842,
"alnum_prop": 0.7617391304347826,
"repo_name": "googleapis/python-recaptcha-enterprise",
"id": "23e9e9a24dfde6058504f21d0c153ea914b4cb8e",
"size": "2019",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/recaptchaenterprise_v1_generated_recaptcha_enterprise_service_retrieve_legacy_secret_key_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4074"
},
{
"name": "Python",
"bytes": "503467"
},
{
"name": "Shell",
"bytes": "30702"
}
],
"symlink_target": ""
} |
import httplib
import collections
import types
import time
from urllib import urlencode
from .signatures import generate_hmac_signature, HttpRequest
from .responses import *
class PayPalAPInterface(object):
def __init__(self, config=None, **kwargs):
if config:
self.config = config
else:
self.config = PayPalConfig(**kwargs)
def _encode_utf8(self, kwargs):
unencoded_pairs = kwargs
for i in unencoded_pairs.keys():
if isinstance(unencoded_pairs[i], types.UnicodeType):
unencoded_pairs[i] = unencoded_pairs[i].encode('utf-8')
return unencoded_pairs
def generate_preapproval_redirect_url(self, preapproval_key):
url_vars = (self.config.PAYPAL_URL_BASE, preapproval_key)
return '%s?cmd=_ap-preapproval&preapprovalkey=%s' % url_vars
def generate_permissions_redirect_url(self, request_token):
url_vars = (self.config.PAYPAL_URL_BASE, request_token)
return '%s?cmd=_grant-permission&request_token=%s' % url_vars
def generate_payment_redirect_url(self, pay_key):
url_vars = (self.config.PAYPAL_URL_BASE, pay_key)
return '%s?cmd=_ap-payment&paykey=%s' % url_vars
def _call(self, method, part, xppheader=None, **kwargs):
headers = {
'X-PAYPAL-APPLICATION-ID': self.config.API_APPLICATION_ID,
'X-PAYPAL-REQUEST-DATA-FORMAT': 'NV',
'X-PAYPAL-RESPONSE-DATA-FORMAT': 'NV'
}
if xppheader is None:
headers.update({
'X-PAYPAL-SECURITY-USERID': self.config.API_USERID,
'X-PAYPAL-SECURITY-PASSWORD': self.config.API_PASSWORD,
'X-PAYPAL-SECURITY-SIGNATURE': self.config.API_SIGNATURE,
})
else:
headers.update({
'X-PAYPAL-AUTHORIZATION': xppheader
})
params = collections.OrderedDict()
for key in sorted(kwargs.iterkeys()): # Ordering is important!
params[key] = kwargs[key]
params.update({
'requestEnvelope.errorLanguage': 'en_US',
'requestEnvelope.detailLevel': 'ReturnAll'
})
enc_params = urlencode(self._encode_utf8(params))
if part == '/AdaptivePayments/':
api_endpoint = self.config.API_ENDPOINT['AP']
elif part == '/Permissions/':
api_endpoint = self.config.API_ENDPOINT['AP']
conn = httplib.HTTPSConnection(api_endpoint,
timeout=self.config.HTTP_TIMEOUT)
conn.request('POST', part + method, enc_params, headers)
response = conn.getresponse()
response = PayPalAPResponse(response.read(), self.config)
conn.close()
return response
def callAP(self, method, **kwargs):
return self._call(method, '/AdaptivePayments/', **kwargs)
def callPermissions(self, method, **kwargs):
return self._call(method, '/Permissions/', **kwargs)
def callPermissionsOnBehalf(self, method, access_token=None,
secret_token=None, **kwargs):
timestamp = int(time.time())
        uri = 'https://%s/Permissions/GetBasicPersonalData' % \
            self.config.API_ENDPOINT['AP']
http_request = HttpRequest(uri=uri, method='POST')
signature = generate_hmac_signature(
http_request, self.config.API_USERID, self.config.API_PASSWORD,
timestamp, '1.0', access_token, secret_token)
xppheader = 'timestamp=%d,token=%s,signature=%s' % \
(timestamp, access_token, signature)
return self._call(method, '/Permissions/', xppheader=xppheader, **kwargs)
def pay(self, receivers, **kwargs):
for index, receiver in enumerate(receivers):
for key, value in receiver.iteritems():
arg_name = 'receiverList.receiver(%d).%s' % (index, key)
kwargs[arg_name] = receiver[key]
return self.callAP('Pay', **kwargs)
def refund(self, receivers, **kwargs):
for index, receiver in enumerate(receivers):
for key, value in receiver.iteritems():
arg_name = 'receiverList.receiver(%d).%s' % (index, key)
kwargs[arg_name] = receiver[key]
return self.callAP('Refund', **kwargs)
def get_payment_details(self, pay_key):
return self.callAP('PaymentDetails', payKey=pay_key)
def set_payment_options(self, pay_key):
return self.callAP('SetPaymentOptions', payKey=pay_key)
def execute_payment(self, pay_key):
return self.callAP('ExecutePayment', payKey=pay_key)
class PayPalECInterface(object):
def __init__(self , config=None, **kwargs):
if config:
self.config = config
else:
self.config = PayPalConfig(**kwargs)
def _encode_utf8(self, kwargs):
unencoded_pairs = kwargs
for i in unencoded_pairs.keys():
if isinstance(unencoded_pairs[i], types.UnicodeType):
unencoded_pairs[i] = unencoded_pairs[i].encode('utf-8')
return unencoded_pairs
def generate_express_checkout_redirect_url(self, token):
url_vars = (self.config.PAYPAL_URL_BASE, token)
return '%s?cmd=_express-checkout&token=%s' % url_vars
def _call(self, method, **kwargs):
        params = collections.OrderedDict([
            ('USER', self.config.API_USERID),
            ('PWD', self.config.API_PASSWORD),
            ('SIGNATURE', self.config.API_SIGNATURE),
            ('VERSION', '85.0'),
            ('METHOD', method)
        ])  # built from pairs: a dict literal would lose the ordering
for key in sorted(kwargs.iterkeys()): # Ordering is important!
params[key] = kwargs[key]
enc_params = urlencode(self._encode_utf8(params))
conn = httplib.HTTPSConnection(self.config.API_ENDPOINT['EC'],
timeout=self.config.HTTP_TIMEOUT)
conn.request('POST', '/nvp/', enc_params)
response = conn.getresponse()
response = PayPalECResponse(response.read(), self.config)
conn.close()
return response
def set_express_checkout(self, receivers, **kwargs):
for index, receiver in enumerate(receivers):
for key, value in receiver.iteritems():
kwargs['PAYMENTREQUEST_%d_%s' % (index, key)] = receiver[key]
return self._call('SetExpressCheckout', **kwargs)
def get_express_checkout_details(self, token, **kwargs):
kwargs['TOKEN'] = token
return self._call('GetExpressCheckoutDetails', **kwargs)
def do_express_checkout_payment(self, **kwargs):
return self._call('DoExpressCheckoutPayment', **kwargs)
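# Usage sketch for the Express Checkout flow (configuration, amounts and
# URLs are placeholders; attribute access on the response is an assumption
# about PayPalECResponse's API):
#
#   ec = PayPalECInterface(config=paypal_config)
#   resp = ec.set_express_checkout(
#       receivers=[{'AMT': '10.00', 'CURRENCYCODE': 'USD'}],
#       RETURNURL='https://example.com/return',
#       CANCELURL='https://example.com/cancel',
#   )
#   redirect_url = ec.generate_express_checkout_redirect_url(resp.TOKEN)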
| {
"content_hash": "f9864a3eeab9adff81b56ef6b39fa3b0",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 81,
"avg_line_length": 38.20454545454545,
"alnum_prop": 0.6006841165972635,
"repo_name": "softak/webfaction_demo",
"id": "b146a40f767337fc6ab0effee589786203753b1e",
"size": "6724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paypal/interfaces.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "33283"
},
{
"name": "JavaScript",
"bytes": "984889"
},
{
"name": "Python",
"bytes": "8055804"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
} |
from google.appengine.ext import ndb
import server.seed
import datetime
from datetime import timedelta
class Aire(ndb.Model):
id = ndb.StringProperty(indexed=False)
timestamp = ndb.DateTimeProperty()
parameter = ndb.IntegerProperty()
tecnic = ndb.IntegerProperty(indexed=False)
period = ndb.IntegerProperty(indexed=False)
value = ndb.FloatProperty(indexed=False)
ce01 = ndb.IntegerProperty(indexed=False)
ce02 = ndb.IntegerProperty(indexed=False)
ce03 = ndb.IntegerProperty(indexed=False)
station = ndb.StructuredProperty(server.seed.Station)
def AllAire(parameters, year, month, day, hour):
tempTime = datetime.datetime(int(year), int(month), int(day), int(hour), 0)
return Aire.query(Aire.parameter.IN(parameters), Aire.timestamp == tempTime)
def UpdateAire(id, timestamp, parameter, tecnic, period, value, ce01, ce02, ce03, station):
aire = Aire(id=id, timestamp=timestamp, parameter=parameter, tecnic=tecnic, period=period, value=value, ce01=ce01, ce02=ce02, ce03=ce03, station=station)
aire.put()
return aire
def InsertAire(id, timestamp, parameter, tecnic, period, value, ce01, ce02, ce03):
aire = Aire(id=id, timestamp=timestamp, parameter=parameter, tecnic=tecnic, period=period, value=value, ce01=ce01, ce02=ce02, ce03=ce03)
aire.put()
    return aire
| {
"content_hash": "2cc0840b4fd52ee849541d4bf986488c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 155,
"avg_line_length": 44,
"alnum_prop": 0.7454545454545455,
"repo_name": "emarinizquierdo/breathe-better",
"id": "2d393ecbdeabdab4127af11c5b5fe5f0b8a7b346",
"size": "1320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "74378"
},
{
"name": "HTML",
"bytes": "839891"
},
{
"name": "JavaScript",
"bytes": "100158"
},
{
"name": "Python",
"bytes": "15024"
}
],
"symlink_target": ""
} |
import pandas
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from datetime import timedelta
from random import randint
import datetime
from sklearn.cluster import KMeans
#Read data
dateparse = lambda dates: pandas.datetime.strptime(dates, '%Y-%m')
#input_data = pandas.read_csv("input.csv")
input_data = pandas.read_csv("damnm.csv", parse_dates='date', index_col='date', date_parser=dateparse)
print input_data.index
print input_data.shape
print input_data.columns
plt.hist(input_data['pperhour'])
#plt.show()
#Remove any rows without price
input_data = input_data[input_data["pperhour"] > 0]
input_data = input_data.dropna(axis = 0)
print input_data.corr()["pperhour"]
# Get all the columns from the DataFrame
columns = input_data.columns.tolist()
# Filter out the columns that we don't want
columns = [c for c in columns if c not in ["job", "pperhour"]]
target = "pperhour"
# Generate test and training sets
train = input_data.sample(frac=0.8, random_state=1)
test = input_data.loc[~input_data.index.isin(train.index)]
print train.shape
print test.shape
model = LinearRegression()
# Fit the model to the training data.
model.fit(train[columns], train[target])
# Generate our predictions for the test set.
#x = np.array(train[target][:, 1].A1)
predictions = model.predict(test[columns])
#print test
#print predictions
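# Evaluation sketch using the metric already imported above (a natural
# next step, not part of the original script):
#
#   mse = mean_squared_error(test[target], predictions)
#   print "test MSE:", mse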
| {
"content_hash": "ae689f38b1dc5946881f272c240451a7",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 102,
"avg_line_length": 27.140350877192983,
"alnum_prop": 0.7634130575307045,
"repo_name": "sudikrt/costproML",
"id": "dc82aea06da0c53d35d120952e51bd5debaae772",
"size": "1547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cost.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "85031"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import os
from fabric.api import hide, get
from fabric.contrib.files import upload_template, contains
from fabric.context_managers import lcd
from utils import FabricTest, eq_contents
from server import server
class TestContrib(FabricTest):
# Make sure it knows / is a directory.
# This is in lieu of starting down the "actual honest to god fake operating
# system" road...:(
@server(responses={'test -d "$(echo /)"': ""})
def test_upload_template_uses_correct_remote_filename(self):
"""
upload_template() shouldn't munge final remote filename
"""
template = self.mkfile('template.txt', 'text')
with hide('everything'):
upload_template(template, '/')
assert self.exists_remotely('/template.txt')
@server()
def test_upload_template_handles_file_destination(self):
"""
upload_template() should work OK with file and directory destinations
"""
template = self.mkfile('template.txt', '%(varname)s')
local = self.path('result.txt')
remote = '/configfile.txt'
var = 'foobar'
with hide('everything'):
upload_template(template, remote, {'varname': var})
get(remote, local)
eq_contents(local, var)
@server()
def test_upload_template_handles_template_dir(self):
"""
upload_template() should work OK with template dir
"""
template = self.mkfile('template.txt', '%(varname)s')
template_dir = os.path.dirname(template)
local = self.path('result.txt')
remote = '/configfile.txt'
var = 'foobar'
with hide('everything'):
upload_template(
'template.txt', remote, {'varname': var},
template_dir=template_dir
)
get(remote, local)
eq_contents(local, var)
@server(responses={
'egrep "text" "/file.txt"': (
"sudo: unable to resolve host fabric",
"",
1
)}
)
def test_contains_checks_only_succeeded_flag(self):
"""
contains() should return False on bad grep even if stdout isn't empty
"""
with hide('everything'):
result = contains('/file.txt', 'text', use_sudo=True)
assert result == False
@server(responses={
r'egrep "Include other\\.conf" "$(echo /etc/apache2/apache2.conf)"': "Include other.conf"
})
def test_contains_performs_case_sensitive_search(self):
"""
contains() should perform a case-sensitive search by default.
"""
with hide('everything'):
result = contains('/etc/apache2/apache2.conf', 'Include other.conf',
use_sudo=True)
assert result == True
@server(responses={
r'egrep -i "include Other\\.CONF" "$(echo /etc/apache2/apache2.conf)"': "Include other.conf"
})
def test_contains_performs_case_insensitive_search(self):
"""
contains() should perform a case-insensitive search when passed `case_sensitive=False`
"""
with hide('everything'):
result = contains('/etc/apache2/apache2.conf',
'include Other.CONF',
use_sudo=True,
case_sensitive=False)
assert result == True
@server()
def test_upload_template_handles_jinja_template(self):
"""
upload_template() should work OK with Jinja2 template
"""
template = self.mkfile('template_jinja2.txt', '{{ first_name }}')
template_name = os.path.basename(template)
template_dir = os.path.dirname(template)
local = self.path('result.txt')
remote = '/configfile.txt'
first_name = u'S\u00E9bastien'
with hide('everything'):
upload_template(template_name, remote, {'first_name': first_name},
use_jinja=True, template_dir=template_dir)
get(remote, local)
eq_contents(local, first_name.encode('utf-8'))
@server()
def test_upload_template_jinja_and_no_template_dir(self):
# Crummy doesn't-die test
fname = "foo.tpl"
try:
with hide('everything'):
with open(fname, 'w+') as fd:
fd.write('whatever')
upload_template(fname, '/configfile.txt', {}, use_jinja=True)
finally:
os.remove(fname)
def test_upload_template_obeys_lcd(self):
for jinja in (True, False):
for mirror in (True, False):
self._upload_template_obeys_lcd(jinja=jinja, mirror=mirror)
@server()
def _upload_template_obeys_lcd(self, jinja, mirror):
template_content = {True: '{{ varname }}s', False: '%(varname)s'}
template_dir = 'template_dir'
template_name = 'template.txt'
if not self.exists_locally(self.path(template_dir)):
os.mkdir(self.path(template_dir))
self.mkfile(
os.path.join(template_dir, template_name), template_content[jinja]
)
remote = '/configfile.txt'
var = 'foobar'
with hide('everything'):
with lcd(self.path(template_dir)):
upload_template(
template_name, remote, {'varname': var},
mirror_local_mode=mirror
)
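# For reference, the full call shape exercised by these tests (a sketch;
# paths and context values are placeholders):
#
#   upload_template('nginx.conf.tpl', '/etc/nginx/nginx.conf',
#                   context={'server_name': 'example.com'},
#                   use_jinja=True, template_dir='templates/',
#                   use_sudo=True, mirror_local_mode=True)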
| {
"content_hash": "df116b96c7012abe7f753f2d67292141",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 100,
"avg_line_length": 35.38709677419355,
"alnum_prop": 0.5666362807657247,
"repo_name": "cmattoon/fabric",
"id": "1143a618a2b437e3ab62e67a4c5b645b790476f5",
"size": "5509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_contrib.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "469441"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import os
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Callable, ClassVar, Iterator, Optional, cast
from typing_extensions import final
from pants.backend.docker.registries import ALL_DEFAULT_REGISTRIES
from pants.base.build_environment import get_buildroot
from pants.core.goals.run import RestartableField
from pants.engine.addresses import Address
from pants.engine.collection import Collection
from pants.engine.environment import EnvironmentName
from pants.engine.fs import GlobMatchErrorBehavior
from pants.engine.rules import collect_rules, rule
from pants.engine.target import (
COMMON_TARGET_FIELDS,
AllTargets,
AsyncFieldMixin,
BoolField,
Dependencies,
DictStringToStringField,
Field,
InvalidFieldException,
OptionalSingleSourceField,
StringField,
StringSequenceField,
Target,
Targets,
)
from pants.engine.unions import union
from pants.util.docutil import bin_name, doc_url
from pants.util.strutil import softwrap
# Common help text to be applied to each field that supports value interpolation.
_interpolation_help = (
"{kind} may use placeholders in curly braces to be interpolated. The placeholders are derived "
"from various sources, such as the Dockerfile instructions and build args."
)
class DockerImageBuildArgsField(StringSequenceField):
alias = "extra_build_args"
default = ()
help = softwrap(
"""
Build arguments (`--build-arg`) to use when building this image.
Entries are either strings in the form `ARG_NAME=value` to set an explicit value;
or just `ARG_NAME` to copy the value from Pants's own environment.
Use `[docker].build_args` to set default build args for all images.
"""
)
class DockerImageContextRootField(StringField):
alias = "context_root"
help = softwrap(
"""
Specify which directory to use as the Docker build context root. This affects the file
paths to use for the `COPY` and `ADD` instructions. For example, whether
`COPY files/f.txt` should look for the file relative to the build root:
`<build root>/files/f.txt` vs relative to the BUILD file:
`<build root>/path_to_build_file/files/f.txt`.
Specify the `context_root` path as `files` for relative to build root, or as `./files`
for relative to the BUILD file.
If `context_root` is not specified, it defaults to `[docker].default_context_root`.
"""
)
@classmethod
def compute_value(cls, raw_value: Optional[str], address: Address) -> Optional[str]:
value_or_default = super().compute_value(raw_value, address=address)
if isinstance(value_or_default, str) and value_or_default.startswith("/"):
val = value_or_default.strip("/")
raise InvalidFieldException(
softwrap(
f"""
The `{cls.alias}` field in target {address} must be a relative path, but was
{value_or_default!r}. Use {val!r} for a path relative to the build root, or
{'./' + val!r} for a path relative to the BUILD file
(i.e. {os.path.join(address.spec_path, val)!r}).
"""
)
)
return value_or_default
class DockerImageSourceField(OptionalSingleSourceField):
default = "Dockerfile"
# When the default glob value is in effect, we don't want the normal glob match error behavior
# to kick in for a missing Dockerfile, in case there are `instructions` provided, in which case
# we generate the Dockerfile instead. If there are no `instructions`, or there are both
# `instructions` and a Dockerfile hydrated from the `source` glob, we error out with a message
# to the user.
default_glob_match_error_behavior = GlobMatchErrorBehavior.ignore
help = softwrap(
"""
The Dockerfile to use when building the Docker image.
Use the `instructions` field instead if you prefer not having the Dockerfile in your
source tree.
"""
)
class DockerImageInstructionsField(StringSequenceField):
alias = "instructions"
required = False
help = softwrap(
"""
The `Dockerfile` content, typically one instruction per list item.
Use the `source` field instead if you prefer having the Dockerfile in your source tree.
Example:
# example/BUILD
docker_image(
instructions=[
"FROM base/image:1.0",
"RUN echo example",
],
)
"""
)
class DockerImageTagsField(StringSequenceField):
alias = "image_tags"
default = ("latest",)
help = softwrap(
f"""
Any tags to apply to the Docker image name (the version is usually applied as a tag).
{_interpolation_help.format(kind="tag")}
See {doc_url('tagging-docker-images')}.
"""
)
class DockerImageTargetStageField(StringField):
alias = "target_stage"
help = softwrap(
"""
Specify target build stage, rather than building the entire `Dockerfile`.
When using multi-stage build, you may name your stages, and can target them when building
to only selectively build a certain stage. See also the `--docker-build-target-stage`
option.
Read more about [multi-stage Docker builds](https://docs.docker.com/develop/develop-images/multistage-build/#stop-at-a-specific-build-stage)
"""
)
class DockerImageDependenciesField(Dependencies):
supports_transitive_excludes = True
class DockerImageRegistriesField(StringSequenceField):
alias = "registries"
default = (ALL_DEFAULT_REGISTRIES,)
help = softwrap(
"""
List of addresses or configured aliases to any Docker registries to use for the
built image.
The address is a domain name with optional port for your registry, and any registry
aliases are prefixed with `@` for addresses in the [docker].registries configuration
section.
By default, all configured registries with `default = true` are used.
Example:
# pants.toml
[docker.registries.my-registry-alias]
address = "myregistrydomain:port"
default = false # optional
# example/BUILD
docker_image(
registries = [
"@my-registry-alias",
"myregistrydomain:port",
],
)
The above example shows two valid `registry` options: using an alias to a configured
registry and the address to a registry verbatim in the BUILD file.
"""
)
class DockerImageRepositoryField(StringField):
alias = "repository"
help = softwrap(
f"""
The repository name for the Docker image. e.g. "<repository>/<name>".
It uses the `[docker].default_repository` by default.
{_interpolation_help.format(kind="repository")}
Additional placeholders for the repository field are: `name`, `directory`,
`parent_directory`, and `default_repository`.
Registries may also configure the repository value for specific registries.
See the documentation for `[docker].default_repository` for more information.
"""
)
class DockerImageSkipPushField(BoolField):
alias = "skip_push"
default = False
help = (
f"If set to true, do not push this image to registries when running `{bin_name()} publish`."
)
OptionValueFormatter = Callable[[str], str]
class DockerBuildOptionFieldMixin(ABC):
"""Inherit this mixin class to provide options to `docker build`."""
docker_build_option: ClassVar[str]
@abstractmethod
def option_values(self, *, value_formatter: OptionValueFormatter) -> Iterator[str]:
"""Subclasses must implement this, to turn their `self.value` into none, one or more option
values."""
@final
def options(self, value_formatter: OptionValueFormatter) -> Iterator[str]:
for value in self.option_values(value_formatter=value_formatter):
yield from (self.docker_build_option, value)
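# For example (hypothetical field): a subclass with
# `docker_build_option = "--label"` whose `option_values` yields "stage=prod"
# makes `options()` produce the argument pair ("--label", "stage=prod") for
# the `docker build` command line.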
class DockerImageBuildImageLabelsOptionField(DockerBuildOptionFieldMixin, DictStringToStringField):
alias = "image_labels"
help = softwrap(
f"""
Provide image metadata.
{_interpolation_help.format(kind="label value")}
See [Docker labels](https://docs.docker.com/config/labels-custom-metadata/#manage-labels-on-objects)
for more information.
"""
)
docker_build_option = "--label"
def option_values(self, value_formatter: OptionValueFormatter) -> Iterator[str]:
for label, value in (self.value or {}).items():
yield f"{label}={value_formatter(value)}"
class DockerImageBuildSecretsOptionField(
AsyncFieldMixin, DockerBuildOptionFieldMixin, DictStringToStringField
):
alias = "secrets"
help = softwrap(
"""
Secret files to expose to the build (only if BuildKit enabled).
Secrets may use absolute paths, or paths relative to your build root, or the BUILD file
if prefixed with `./`. The id should be valid as used by the Docker build `--secret`
option. See [Docker secrets](https://docs.docker.com/engine/swarm/secrets/) for more
information.
Example:
docker_image(
secrets={
"mysecret": "/var/secrets/some-secret",
"repo-secret": "src/proj/secrets/some-secret",
"target-secret": "./secrets/some-secret",
}
)
"""
)
docker_build_option = "--secret"
def option_values(self, **kwargs) -> Iterator[str]:
# os.path.join() discards preceding parts if encountering an abs path, e.g. if the secret
# `path` is an absolute path, the `buildroot` and `spec_path` will not be considered. Also,
# an empty path part is ignored.
for secret, path in (self.value or {}).items():
full_path = os.path.join(
get_buildroot(),
self.address.spec_path if re.match(r"\.{1,2}/", path) else "",
path,
)
yield f"id={secret},src={os.path.normpath(full_path)}"
class DockerImageBuildSSHOptionField(DockerBuildOptionFieldMixin, StringSequenceField):
alias = "ssh"
default = ()
help = softwrap(
"""
SSH agent socket or keys to expose to the build (only if BuildKit enabled)
(format: default|<id>[=<socket>|<key>[,<key>]])
The exposed agent and/or keys can then be used in your `Dockerfile` by mounting them in
your `RUN` instructions:
RUN --mount=type=ssh ...
See [Docker documentation](https://docs.docker.com/develop/develop-images/build_enhancements/#using-ssh-to-access-private-data-in-builds)
for more information.
"""
)
docker_build_option = "--ssh"
def option_values(self, **kwargs) -> Iterator[str]:
yield from cast("tuple[str]", self.value)
class DockerBuildOptionFieldValueMixin(Field):
"""Inherit this mixin class to provide unary options (i.e. option in the form of `--flag=value`)
to `docker build`."""
docker_build_option: ClassVar[str]
@final
def options(self) -> Iterator[str]:
yield f"{self.docker_build_option}={self.value}"
class DockerImageBuildPullOptionField(DockerBuildOptionFieldValueMixin, BoolField):
alias = "pull"
default = True
help = softwrap(
"""
If true, then docker will always attempt to pull a newer version of the image.
Useful to disable it when building images from other intermediate goals.
"""
)
docker_build_option = "--pull"
class DockerBuildOptionFlagFieldMixin(BoolField, ABC):
"""Inherit this mixin class to provide optional flags (i.e. add `--flag` only when the value is
`True`) to `docker build`."""
docker_build_option: ClassVar[str]
@final
def options(self) -> Iterator[str]:
if self.value:
yield f"{self.docker_build_option}"
class DockerImageBuildSquashOptionField(DockerBuildOptionFlagFieldMixin):
alias = "squash"
default = False
help = softwrap(
"""
If true, then docker will squash newly built layers into a single new layer.
Note that this option is only supported on a Docker daemon with experimental features enabled.
"""
)
docker_build_option = "--squash"
class DockerImageTarget(Target):
alias = "docker_image"
core_fields = (
*COMMON_TARGET_FIELDS,
DockerImageBuildArgsField,
DockerImageDependenciesField,
DockerImageSourceField,
DockerImageInstructionsField,
DockerImageContextRootField,
DockerImageTagsField,
DockerImageRegistriesField,
DockerImageRepositoryField,
DockerImageBuildImageLabelsOptionField,
DockerImageBuildSecretsOptionField,
DockerImageBuildSSHOptionField,
DockerImageSkipPushField,
DockerImageTargetStageField,
DockerImageBuildPullOptionField,
DockerImageBuildSquashOptionField,
RestartableField,
)
help = softwrap(
"""
The `docker_image` target describes how to build and tag a Docker image.
Any dependencies, as inferred or explicitly specified, will be included in the Docker
build context, after being packaged if applicable.
By default, will use a Dockerfile from the same directory as the BUILD file this target
is defined in. Point at another file with the `source` field, or use the `instructions`
field to have the Dockerfile contents verbatim directly in the BUILD file.
Dependencies on upstream/base images defined by another `docker_image` are inferred if
referenced by a build argument with a default value of the target address.
Example:
# src/docker/downstream/Dockerfile
ARG BASE=src/docker/upstream:image
FROM $BASE
...
"""
)
@union(in_scope_types=[EnvironmentName])
@dataclass(frozen=True)
class DockerImageTagsRequest:
"""A request to provide additional image tags."""
target: Target
@classmethod
def is_applicable(cls, target: Target) -> bool:
"""Whether to provide additional tags for this target or not."""
return True
class DockerImageTags(Collection[str]):
"""Additional image tags to apply to built Docker images."""
class AllDockerImageTargets(Targets):
pass
@rule
def all_docker_targets(all_targets: AllTargets) -> AllDockerImageTargets:
return AllDockerImageTargets(
[tgt for tgt in all_targets if tgt.has_field(DockerImageSourceField)]
)
def rules():
return collect_rules()
| {
"content_hash": "ed2e7c7be65cf4838fd8748ce47c44d2",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 148,
"avg_line_length": 32.869281045751634,
"alnum_prop": 0.6535427851792934,
"repo_name": "benjyw/pants",
"id": "28653c0958d6ea5f0afbc4d48471e2a3fbdfc100",
"size": "15219",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/docker/target_types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "10690"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3595"
},
{
"name": "Python",
"bytes": "7135320"
},
{
"name": "Rust",
"bytes": "1601736"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31723"
},
{
"name": "Starlark",
"bytes": "72809"
}
],
"symlink_target": ""
} |
try:
from msrest.exceptions import (
ClientException,
SerializationError,
DeserializationError,
TokenExpiredError,
ClientRequestError,
AuthenticationError,
HttpOperationError,
)
except ImportError:
raise ImportError("You need to install 'msrest' to use this feature")
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
raise ImportError("You need to install 'msrestazure' to use this feature")
| {
"content_hash": "efa799915a406e77f62de0f0a6dedf87",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.6782945736434108,
"repo_name": "dstrockis/outlook-autocategories",
"id": "4db6a136b512ada1569351839279121cedf15700",
"size": "1309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/azure/common/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39286"
},
{
"name": "CSS",
"bytes": "6267"
},
{
"name": "HTML",
"bytes": "449"
},
{
"name": "JavaScript",
"bytes": "6187"
},
{
"name": "Jupyter Notebook",
"bytes": "163002"
},
{
"name": "Python",
"bytes": "11957653"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django
from django.contrib.contenttypes.models import ContentType
from django.db import IntegrityError, models, transaction
from django.db.models.query import QuerySet
from django.template.defaultfilters import slugify as default_slugify
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from taggit.utils import _get_field
try:
from unidecode import unidecode
except ImportError:
unidecode = lambda tag: tag
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError: # django < 1.7
from django.contrib.contenttypes.generic import GenericForeignKey
try:
atomic = transaction.atomic
except AttributeError:
from contextlib import contextmanager
@contextmanager
def atomic(using=None):
sid = transaction.savepoint(using=using)
try:
yield
except IntegrityError:
transaction.savepoint_rollback(sid, using=using)
raise
else:
transaction.savepoint_commit(sid, using=using)
@python_2_unicode_compatible
class TagBase(models.Model):
name = models.CharField(verbose_name=_('Name'), unique=True, max_length=100)
slug = models.SlugField(verbose_name=_('Slug'), unique=True, max_length=100)
def __str__(self):
return self.name
class Meta:
abstract = True
def save(self, *args, **kwargs):
if not self.pk and not self.slug:
self.slug = self.slugify(self.name)
from django.db import router
using = kwargs.get("using") or router.db_for_write(
type(self), instance=self)
# Make sure we write to the same db for all attempted writes,
# with a multi-master setup, theoretically we could try to
# write and rollback on different DBs
kwargs["using"] = using
        # Be opportunistic and try to save the tag; this should work for
        # most cases ;)
try:
with atomic(using=using):
res = super(TagBase, self).save(*args, **kwargs)
return res
except IntegrityError:
pass
# Now try to find existing slugs with similar names
slugs = set(
self.__class__._default_manager
.filter(slug__startswith=self.slug)
.values_list('slug', flat=True)
)
i = 1
while True:
slug = self.slugify(self.name, i)
if slug not in slugs:
self.slug = slug
                # We purposely ignore concurrency issues here for now.
# (That is, till we found a nice solution...)
return super(TagBase, self).save(*args, **kwargs)
i += 1
else:
return super(TagBase, self).save(*args, **kwargs)
def slugify(self, tag, i=None):
slug = default_slugify(unidecode(tag))
if i is not None:
slug += "_%d" % i
return slug
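    # Illustrative collision handling (hypothetical tags): saving two tags
    # that both slugify to "python" stores the slugs "python" and "python_1",
    # since save() retries slugify(name, i) with i = 1, 2, ... until an
    # unused slug is found.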
class Tag(TagBase):
class Meta:
verbose_name = _("Tag")
verbose_name_plural = _("Tags")
@python_2_unicode_compatible
class ItemBase(models.Model):
def __str__(self):
return ugettext("%(object)s tagged with %(tag)s") % {
"object": self.content_object,
"tag": self.tag
}
class Meta:
abstract = True
@classmethod
def tag_model(cls):
return _get_field(cls, 'tag').rel.to
@classmethod
def tag_relname(cls):
return _get_field(cls, 'tag').rel.related_name
@classmethod
def lookup_kwargs(cls, instance):
return {
'content_object': instance
}
@classmethod
def bulk_lookup_kwargs(cls, instances):
return {
"content_object__in": instances,
}
class TaggedItemBase(ItemBase):
tag = models.ForeignKey(Tag, related_name="%(app_label)s_%(class)s_items")
class Meta:
abstract = True
@classmethod
def tags_for(cls, model, instance=None, **extra_filters):
kwargs = extra_filters or {}
if instance is not None:
kwargs.update({
'%s__content_object' % cls.tag_relname(): instance
})
return cls.tag_model().objects.filter(**kwargs)
kwargs.update({
'%s__content_object__isnull' % cls.tag_relname(): False
})
return cls.tag_model().objects.filter(**kwargs).distinct()
class GenericTaggedItemBase(ItemBase):
object_id = models.CharField(verbose_name=_('Object id'), db_index=True, max_length=100)
content_type = models.ForeignKey(
ContentType,
verbose_name=_('Content type'),
related_name="%(app_label)s_%(class)s_tagged_items"
)
content_object = GenericForeignKey()
class Meta:
abstract = True
@classmethod
def lookup_kwargs(cls, instance):
return {
'object_id': instance.pk,
'content_type': ContentType.objects.get_for_model(instance)
}
@classmethod
def bulk_lookup_kwargs(cls, instances):
if isinstance(instances, QuerySet):
# Can do a real object_id IN (SELECT ..) query.
return {
"object_id__in": instances,
"content_type": ContentType.objects.get_for_model(instances.model),
}
else:
# TODO: instances[0], can we assume there are instances.
return {
"object_id__in": [instance.pk for instance in instances],
"content_type": ContentType.objects.get_for_model(instances[0]),
}
@classmethod
def tags_for(cls, model, instance=None, **extra_filters):
ct = ContentType.objects.get_for_model(model)
kwargs = {
"%s__content_type" % cls.tag_relname(): ct
}
if instance is not None:
kwargs["%s__object_id" % cls.tag_relname()] = instance.pk
if extra_filters:
kwargs.update(extra_filters)
return cls.tag_model().objects.filter(**kwargs).distinct()
class TaggedItem(GenericTaggedItemBase, TaggedItemBase):
class Meta:
verbose_name = _("Tagged Item")
verbose_name_plural = _("Tagged Items")
if django.VERSION >= (1, 5):
index_together = [
["content_type", "object_id"],
]
| {
"content_hash": "cac499ed9771523265cff3088ad1dfbd",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 92,
"avg_line_length": 31.66826923076923,
"alnum_prop": 0.5873690602702293,
"repo_name": "7kfpun/django-taggit",
"id": "0418beb7dbb3517ceefd14aee10312c6f62bc47d",
"size": "6587",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "taggit/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "91654"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounting', '0017_nonnullable_char_fields'),
]
operations = [
migrations.AlterField(
model_name='subscription',
name='no_invoice_reason',
field=models.CharField(blank=True, default='', max_length=256),
preserve_default=False,
),
migrations.AlterField(
model_name='subscription',
name='salesforce_contract_id',
field=models.CharField(blank=True, default='', max_length=80),
preserve_default=False,
),
]
| {
"content_hash": "aa014dc9ca13c30a7fe5b3ab511bb0c9",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 75,
"avg_line_length": 28.608695652173914,
"alnum_prop": 0.5835866261398176,
"repo_name": "dimagi/commcare-hq",
"id": "14e2328b0f63b94342c02528c7fa92b5b768015a",
"size": "709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/accounting/migrations/0018_alter_nonnullable_char_fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
"""
gurl.py
Created by Greg Neagle on 2013-11-21.
Modified in Feb 2016 to add support for NSURLSession.
Updated June 2019 for compatibility with Python 3 and PyObjC 5.1.2+
curl replacement using NSURLConnection and friends
Tested with PyObjC 2.5.1 (included with macOS)
and with PyObjC 5.2b1. Should also work with PyObjC 5.1.2.
May fail with other versions of PyObjC due to issues with completion handler
signatures.
"""
from __future__ import absolute_import, print_function
import os
import xattr
try:
# Python 2
from urlparse import urlparse
except ImportError:
# Python 3
from urllib.parse import urlparse
# builtin super doesn't work with Cocoa classes in recent PyObjC releases.
# pylint: disable=redefined-builtin,no-name-in-module
from objc import super
# pylint: enable=redefined-builtin,no-name-in-module
# PyLint cannot properly find names inside Cocoa libraries, so issues bogus
# No name 'Foo' in module 'Bar' warnings. Disable them.
# pylint: disable=E0611
from Foundation import (NSBundle, NSRunLoop, NSData, NSDate,
NSObject, NSURL, NSURLConnection,
NSMutableURLRequest,
NSURLRequestReloadIgnoringLocalCacheData,
NSURLResponseUnknownLength,
NSLog,
NSURLCredential, NSURLCredentialPersistenceNone,
NSPropertyListSerialization,
NSPropertyListMutableContainersAndLeaves,
NSPropertyListXMLFormat_v1_0)
try:
from Foundation import NSURLSession, NSURLSessionConfiguration
from CFNetwork import (kCFNetworkProxiesHTTPSEnable,
kCFNetworkProxiesHTTPEnable)
NSURLSESSION_AVAILABLE = True
except ImportError:
NSURLSESSION_AVAILABLE = False
# Disable PyLint complaining about 'invalid' names
# pylint: disable=C0103
if NSURLSESSION_AVAILABLE:
# NSURLSessionAuthChallengeDisposition enum constants
NSURLSessionAuthChallengeUseCredential = 0
NSURLSessionAuthChallengePerformDefaultHandling = 1
NSURLSessionAuthChallengeCancelAuthenticationChallenge = 2
NSURLSessionAuthChallengeRejectProtectionSpace = 3
# NSURLSessionResponseDisposition enum constants
NSURLSessionResponseCancel = 0
NSURLSessionResponseAllow = 1
NSURLSessionResponseBecomeDownload = 2
# TLS/SSLProtocol enum constants
kSSLProtocolUnknown = 0
kSSLProtocol3 = 2
kTLSProtocol1 = 4
kTLSProtocol11 = 7
kTLSProtocol12 = 8
kDTLSProtocol1 = 9
# define a helper function for block callbacks
import ctypes
import objc
CALLBACK_HELPER_AVAILABLE = True
try:
_objc_so = ctypes.cdll.LoadLibrary(
os.path.join(objc.__path__[0], '_objc.so'))
except OSError:
# could not load _objc.so
CALLBACK_HELPER_AVAILABLE = False
else:
PyObjCMethodSignature_WithMetaData = (
_objc_so.PyObjCMethodSignature_WithMetaData)
PyObjCMethodSignature_WithMetaData.restype = ctypes.py_object
def objc_method_signature(signature_str):
'''Return a PyObjCMethodSignature given a call signature in string
format'''
return PyObjCMethodSignature_WithMetaData(
ctypes.create_string_buffer(signature_str), None, False)
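        # The signature strings passed to this helper use Objective-C type
        # encodings: e.g. b'v@i' describes a block returning void ('v') that
        # takes an object ('@') and an int ('i'), matching the completion
        # handler blocks used below.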
# pylint: enable=E0611
# disturbing hack warning!
# this works around an issue with App Transport Security on 10.11
bundle = NSBundle.mainBundle()
info = bundle.localizedInfoDictionary() or bundle.infoDictionary()
info['NSAppTransportSecurity'] = {'NSAllowsArbitraryLoads': True}
def NSLogWrapper(message):
'''A wrapper function for NSLog to prevent format string errors'''
NSLog('%@', message)
ssl_error_codes = {
-9800: u'SSL protocol error',
-9801: u'Cipher Suite negotiation failure',
-9802: u'Fatal alert',
-9803: u'I/O would block (not fatal)',
-9804: u'Attempt to restore an unknown session',
-9805: u'Connection closed gracefully',
-9806: u'Connection closed via error',
-9807: u'Invalid certificate chain',
-9808: u'Bad certificate format',
-9809: u'Underlying cryptographic error',
-9810: u'Internal error',
-9811: u'Module attach failure',
-9812: u'Valid cert chain, untrusted root',
-9813: u'Cert chain not verified by root',
-9814: u'Chain had an expired cert',
-9815: u'Chain had a cert not yet valid',
-9816: u'Server closed session with no notification',
-9817: u'Insufficient buffer provided',
-9818: u'Bad SSLCipherSuite',
-9819: u'Unexpected message received',
-9820: u'Bad MAC',
-9821: u'Decryption failed',
-9822: u'Record overflow',
-9823: u'Decompression failure',
-9824: u'Handshake failure',
-9825: u'Misc. bad certificate',
-9826: u'Bad unsupported cert format',
-9827: u'Certificate revoked',
-9828: u'Certificate expired',
-9829: u'Unknown certificate',
-9830: u'Illegal parameter',
-9831: u'Unknown Cert Authority',
-9832: u'Access denied',
-9833: u'Decoding error',
-9834: u'Decryption error',
-9835: u'Export restriction',
-9836: u'Bad protocol version',
-9837: u'Insufficient security',
-9838: u'Internal error',
-9839: u'User canceled',
-9840: u'No renegotiation allowed',
-9841: u'Peer cert is valid, or was ignored if verification disabled',
-9842: u'Server has requested a client cert',
-9843: u'Peer host name mismatch',
-9844: u'Peer dropped connection before responding',
-9845: u'Decryption failure',
-9846: u'Bad MAC',
-9847: u'Record overflow',
-9848: u'Configuration error',
-9849: u'Unexpected (skipped) record in DTLS'}
class Gurl(NSObject):
'''A class for getting content from a URL
using NSURLConnection/NSURLSession and friends'''
# since we inherit from NSObject, PyLint issues a few bogus warnings
# pylint: disable=W0232,E1002
# Don't want to define the attributes twice that are initialized in
# initWithOptions_(), so:
# pylint: disable=E1101,W0201
GURL_XATTR = 'com.googlecode.munki.downloadData'
def initWithOptions_(self, options):
'''Set up our Gurl object'''
self = super(Gurl, self).init()
if not self:
return None
self.follow_redirects = options.get('follow_redirects', False)
self.ignore_system_proxy = options.get('ignore_system_proxy', False)
self.destination_path = options.get('file')
self.can_resume = options.get('can_resume', False)
self.url = options.get('url')
self.additional_headers = options.get('additional_headers', {})
self.username = options.get('username')
self.password = options.get('password')
self.download_only_if_changed = options.get(
'download_only_if_changed', False)
self.cache_data = options.get('cache_data')
self.connection_timeout = options.get('connection_timeout', 60)
if NSURLSESSION_AVAILABLE:
self.minimum_tls_protocol = options.get(
'minimum_tls_protocol', kTLSProtocol1)
self.log = options.get('logging_function', NSLogWrapper)
self.resume = False
self.response = None
self.headers = None
self.status = None
self.error = None
self.SSLerror = None
self.done = False
self.redirection = []
self.destination = None
self.bytesReceived = 0
self.expectedLength = -1
self.percentComplete = 0
self.connection = None
self.session = None
self.task = None
return self
def start(self):
'''Start the connection'''
if not self.destination_path:
self.log('No output file specified.')
self.done = True
return
url = NSURL.URLWithString_(self.url)
request = (
NSMutableURLRequest.requestWithURL_cachePolicy_timeoutInterval_(
url, NSURLRequestReloadIgnoringLocalCacheData,
self.connection_timeout))
if self.additional_headers:
for header, value in self.additional_headers.items():
request.setValue_forHTTPHeaderField_(value, header)
# does the file already exist? See if we can resume a partial download
if os.path.isfile(self.destination_path):
stored_data = self.getStoredHeaders()
if (self.can_resume and 'expected-length' in stored_data and
('last-modified' in stored_data or 'etag' in stored_data)):
# we have a partial file and we're allowed to resume
self.resume = True
local_filesize = os.path.getsize(self.destination_path)
byte_range = 'bytes=%s-' % local_filesize
request.setValue_forHTTPHeaderField_(byte_range, 'Range')
if self.download_only_if_changed and not self.resume:
stored_data = self.cache_data or self.getStoredHeaders()
if 'last-modified' in stored_data:
request.setValue_forHTTPHeaderField_(
stored_data['last-modified'], 'if-modified-since')
if 'etag' in stored_data:
request.setValue_forHTTPHeaderField_(
stored_data['etag'], 'if-none-match')
if NSURLSESSION_AVAILABLE:
configuration = (
NSURLSessionConfiguration.defaultSessionConfiguration())
# optional: ignore system http/https proxies (10.9+ only)
if self.ignore_system_proxy is True:
configuration.setConnectionProxyDictionary_(
{kCFNetworkProxiesHTTPEnable: False,
kCFNetworkProxiesHTTPSEnable: False})
# set minimum supported TLS protocol (defaults to TLS1)
configuration.setTLSMinimumSupportedProtocol_(
self.minimum_tls_protocol)
self.session = (
NSURLSession.sessionWithConfiguration_delegate_delegateQueue_(
configuration, self, None))
self.task = self.session.dataTaskWithRequest_(request)
self.task.resume()
else:
self.connection = NSURLConnection.alloc().initWithRequest_delegate_(
request, self)
def cancel(self):
'''Cancel the connection'''
        if NSURLSESSION_AVAILABLE:
            # start() only sets self.session/self.task in the NSURLSession
            # case, so cancel via the session rather than self.connection
            if self.session:
                self.session.invalidateAndCancel()
                self.done = True
        elif self.connection:
            self.connection.cancel()
            self.done = True
def isDone(self):
'''Check if the connection request is complete. As a side effect,
allow the delegates to work by letting the run loop run for a bit'''
if self.done:
return self.done
# let the delegates do their thing
NSRunLoop.currentRunLoop().runUntilDate_(
NSDate.dateWithTimeIntervalSinceNow_(.1))
return self.done
def getStoredHeaders(self):
'''Returns any stored headers for self.destination_path'''
# try to read stored headers
try:
stored_plist_bytestr = xattr.getxattr(
self.destination_path, self.GURL_XATTR)
except (KeyError, IOError):
return {}
data = NSData.dataWithBytes_length_(
stored_plist_bytestr, len(stored_plist_bytestr))
dataObject, _plistFormat, error = (
NSPropertyListSerialization.
propertyListFromData_mutabilityOption_format_errorDescription_(
data, NSPropertyListMutableContainersAndLeaves, None, None))
if error:
return {}
return dataObject
def storeHeaders_(self, headers):
'''Store dictionary data as an xattr for self.destination_path'''
plistData, error = (
NSPropertyListSerialization.
dataFromPropertyList_format_errorDescription_(
headers, NSPropertyListXMLFormat_v1_0, None))
if error:
byte_string = b''
else:
try:
byte_string = bytes(plistData)
except NameError:
byte_string = str(plistData)
try:
xattr.setxattr(self.destination_path, self.GURL_XATTR, byte_string)
except IOError as err:
self.log('Could not store metadata to %s: %s'
% (self.destination_path, err))
def normalizeHeaderDict_(self, a_dict):
'''Since HTTP header names are not case-sensitive, we normalize a
dictionary of HTTP headers by converting all the key names to
lower case'''
# yes, we don't use 'self'!
# pylint: disable=R0201
new_dict = {}
for key, value in a_dict.items():
new_dict[key.lower()] = value
return new_dict
def recordError_(self, error):
'''Record any error info from completed connection/session'''
self.error = error
# If this was an SSL error, try to extract the SSL error code.
if 'NSUnderlyingError' in error.userInfo():
ssl_code = error.userInfo()['NSUnderlyingError'].userInfo().get(
'_kCFNetworkCFStreamSSLErrorOriginalValue', None)
if ssl_code:
self.SSLerror = (ssl_code, ssl_error_codes.get(
ssl_code, 'Unknown SSL error'))
def removeExpectedSizeFromStoredHeaders(self):
        '''If the transfer was successful, clear the expected size so we
        don't attempt to resume the download next time'''
if str(self.status).startswith('2'):
# remove the expected-size from the stored headers
headers = self.getStoredHeaders()
if 'expected-length' in headers:
del headers['expected-length']
self.storeHeaders_(headers)
def URLSession_task_didCompleteWithError_(self, _session, _task, error):
'''NSURLSessionTaskDelegate method.'''
if self.destination and self.destination_path:
self.destination.close()
self.removeExpectedSizeFromStoredHeaders()
if error:
self.recordError_(error)
self.done = True
def connection_didFailWithError_(self, _connection, error):
'''NSURLConnectionDelegate method
Sent when a connection fails to load its request successfully.'''
self.recordError_(error)
self.done = True
if self.destination and self.destination_path:
self.destination.close()
def connectionDidFinishLoading_(self, _connection):
'''NSURLConnectionDataDelegate method
Sent when a connection has finished loading successfully.'''
self.done = True
if self.destination and self.destination_path:
self.destination.close()
self.removeExpectedSizeFromStoredHeaders()
def handleResponse_withCompletionHandler_(
self, response, completionHandler):
'''Handle the response to the connection'''
self.response = response
self.bytesReceived = 0
self.percentComplete = -1
self.expectedLength = response.expectedContentLength()
download_data = {}
if response.className() == u'NSHTTPURLResponse':
# Headers and status code only available for HTTP/S transfers
self.status = response.statusCode()
self.headers = dict(response.allHeaderFields())
normalized_headers = self.normalizeHeaderDict_(self.headers)
if 'last-modified' in normalized_headers:
download_data['last-modified'] = normalized_headers[
'last-modified']
if 'etag' in normalized_headers:
download_data['etag'] = normalized_headers['etag']
download_data['expected-length'] = self.expectedLength
# self.destination is defined in initWithOptions_
# pylint: disable=E0203
if not self.destination and self.destination_path:
if self.status == 206 and self.resume:
# 206 is Partial Content response
stored_data = self.getStoredHeaders()
if (not stored_data or
stored_data.get('etag') != download_data.get('etag') or
stored_data.get('last-modified') != download_data.get(
'last-modified')):
# file on server is different than the one
# we have a partial for
self.log(
'Can\'t resume download; file on server has changed.')
if completionHandler:
# tell the session task to cancel
completionHandler(NSURLSessionResponseCancel)
else:
# cancel the connection
self.connection.cancel()
                    self.log('Removing %s' % self.destination_path)
                    os.unlink(self.destination_path)
                    # restart and attempt to download the entire file
                    self.log(
                        'Restarting download of %s' % self.destination_path)
                    self.start()
                    return
return
# try to resume
self.log('Resuming download for %s' % self.destination_path)
# add existing file size to bytesReceived so far
local_filesize = os.path.getsize(self.destination_path)
self.bytesReceived = local_filesize
self.expectedLength += local_filesize
# open file for append
self.destination = open(self.destination_path, 'ab')
elif str(self.status).startswith('2'):
# not resuming, just open the file for writing
self.destination = open(self.destination_path, 'wb')
# store some headers with the file for use if we need to resume
# the download and for future checking if the file on the server
# has changed
self.storeHeaders_(download_data)
if completionHandler:
# tell the session task to continue
completionHandler(NSURLSessionResponseAllow)
def URLSession_dataTask_didReceiveResponse_completionHandler_(
self, _session, _task, response, completionHandler):
'''NSURLSessionDataDelegate method'''
if CALLBACK_HELPER_AVAILABLE:
completionHandler.__block_signature__ = objc_method_signature(b'v@i')
self.handleResponse_withCompletionHandler_(response, completionHandler)
def connection_didReceiveResponse_(self, _connection, response):
'''NSURLConnectionDataDelegate delegate method
Sent when the connection has received sufficient data to construct the
URL response for its request.'''
self.handleResponse_withCompletionHandler_(response, None)
def handleRedirect_newRequest_withCompletionHandler_(
self, response, request, completionHandler):
'''Handle the redirect request'''
def allowRedirect():
'''Allow the redirect'''
if completionHandler:
completionHandler(request)
return None
return request
def denyRedirect():
'''Deny the redirect'''
if completionHandler:
completionHandler(None)
return None
newURL = request.URL().absoluteString()
if response is None:
# the request has changed the NSURLRequest in order to standardize
# its format, for example, changing a request for
# http://www.apple.com to http://www.apple.com/. This occurs because
# the standardized, or canonical, version of the request is used for
# cache management. Pass the request back as-is
# (it appears that at some point Apple also defined a redirect like
# http://developer.apple.com to https://developer.apple.com to be
# 'merely' a change in the canonical URL.)
# Further -- it appears that this delegate method isn't called at
# all in this scenario, unlike NSConnectionDelegate method
# connection:willSendRequest:redirectResponse:
# we'll leave this here anyway in case we're wrong about that
self.log('Allowing redirect to: %s' % newURL)
return allowRedirect()
# If we get here, it appears to be a real redirect attempt
# Annoyingly, we apparently can't get access to the headers from the
# site that told us to redirect. All we know is that we were told
# to redirect and where the new location is.
self.redirection.append([newURL, dict(response.allHeaderFields())])
newParsedURL = urlparse(newURL)
# This code was largely based on the work of Andreas Fuchs
# (https://github.com/munki/munki/pull/465)
if self.follow_redirects is True or self.follow_redirects == 'all':
# Allow the redirect
self.log('Allowing redirect to: %s' % newURL)
return allowRedirect()
elif (self.follow_redirects == 'https'
and newParsedURL.scheme == 'https'):
# Once again, allow the redirect
self.log('Allowing redirect to: %s' % newURL)
return allowRedirect()
# If we're down here either the preference was set to 'none',
# the url we're forwarding on to isn't https or follow_redirects
# was explicitly set to False
self.log('Denying redirect to: %s' % newURL)
return denyRedirect()
# we don't control the API, so
# pylint: disable=too-many-arguments
def URLSession_task_willPerformHTTPRedirection_newRequest_completionHandler_(
self, _session, _task, response, request, completionHandler):
'''NSURLSessionTaskDelegate method'''
self.log(
'URLSession_task_willPerformHTTPRedirection_newRequest_'
'completionHandler_')
if CALLBACK_HELPER_AVAILABLE:
completionHandler.__block_signature__ = objc_method_signature(b'v@@')
self.handleRedirect_newRequest_withCompletionHandler_(
response, request, completionHandler)
# pylint: enable=too-many-arguments
def connection_willSendRequest_redirectResponse_(
self, _connection, request, response):
'''NSURLConnectionDataDelegate method
Sent when the connection determines that it must change URLs in order
to continue loading a request.'''
self.log('connection_willSendRequest_redirectResponse_')
return self.handleRedirect_newRequest_withCompletionHandler_(
response, request, None)
def connection_canAuthenticateAgainstProtectionSpace_(
self, _connection, protectionSpace):
'''NSURLConnection delegate method
Sent to determine whether the delegate is able to respond to a
protection space’s form of authentication.
Deprecated in 10.10'''
# this is not called in 10.5.x.
self.log('connection_canAuthenticateAgainstProtectionSpace_')
if protectionSpace:
host = protectionSpace.host()
realm = protectionSpace.realm()
authenticationMethod = protectionSpace.authenticationMethod()
self.log('Protection space found. Host: %s Realm: %s AuthMethod: %s'
% (host, realm, authenticationMethod))
if self.username and self.password and authenticationMethod in [
'NSURLAuthenticationMethodDefault',
'NSURLAuthenticationMethodHTTPBasic',
'NSURLAuthenticationMethodHTTPDigest']:
# we know how to handle this
self.log('Can handle this authentication request')
return True
# we don't know how to handle this; let the OS try
self.log('Allowing OS to handle authentication request')
return False
def handleChallenge_withCompletionHandler_(
self, challenge, completionHandler):
'''Handle an authentication challenge'''
protectionSpace = challenge.protectionSpace()
host = protectionSpace.host()
realm = protectionSpace.realm()
authenticationMethod = protectionSpace.authenticationMethod()
self.log(
'Authentication challenge for Host: %s Realm: %s AuthMethod: %s'
% (host, realm, authenticationMethod))
        if challenge.previousFailureCount() > 0:
            # we have the wrong credentials. just fail
            self.log('Previous authentication attempt failed.')
            if completionHandler:
                completionHandler(
                    NSURLSessionAuthChallengeCancelAuthenticationChallenge,
                    None)
            else:
                challenge.sender().cancelAuthenticationChallenge_(challenge)
            # don't fall through and retry the same credentials
            return
if self.username and self.password and authenticationMethod in [
'NSURLAuthenticationMethodDefault',
'NSURLAuthenticationMethodHTTPBasic',
'NSURLAuthenticationMethodHTTPDigest']:
self.log('Will attempt to authenticate.')
self.log('Username: %s Password: %s'
% (self.username, ('*' * len(self.password or ''))))
credential = (
NSURLCredential.credentialWithUser_password_persistence_(
self.username, self.password,
NSURLCredentialPersistenceNone))
if completionHandler:
completionHandler(
NSURLSessionAuthChallengeUseCredential, credential)
else:
challenge.sender().useCredential_forAuthenticationChallenge_(
credential, challenge)
else:
# fall back to system-provided default behavior
self.log('Allowing OS to handle authentication request')
if completionHandler:
completionHandler(
NSURLSessionAuthChallengePerformDefaultHandling, None)
else:
if (challenge.sender().respondsToSelector_(
'performDefaultHandlingForAuthenticationChallenge:')):
self.log('Allowing OS to handle authentication request')
challenge.sender(
).performDefaultHandlingForAuthenticationChallenge_(
challenge)
else:
# Mac OS X 10.6 doesn't support
# performDefaultHandlingForAuthenticationChallenge:
self.log('Continuing without credential.')
challenge.sender(
).continueWithoutCredentialForAuthenticationChallenge_(
challenge)
def connection_willSendRequestForAuthenticationChallenge_(
self, _connection, challenge):
'''NSURLConnection delegate method
Tells the delegate that the connection will send a request for an
authentication challenge. New in 10.7.'''
self.log('connection_willSendRequestForAuthenticationChallenge_')
self.handleChallenge_withCompletionHandler_(challenge, None)
def URLSession_task_didReceiveChallenge_completionHandler_(
self, _session, _task, challenge, completionHandler):
'''NSURLSessionTaskDelegate method'''
if CALLBACK_HELPER_AVAILABLE:
completionHandler.__block_signature__ = objc_method_signature(b'v@i@')
self.log('URLSession_task_didReceiveChallenge_completionHandler_')
self.handleChallenge_withCompletionHandler_(
challenge, completionHandler)
def connection_didReceiveAuthenticationChallenge_(
self, _connection, challenge):
'''NSURLConnection delegate method
Sent when a connection must authenticate a challenge in order to
download its request. Deprecated in 10.10'''
self.log('connection_didReceiveAuthenticationChallenge_')
self.handleChallenge_withCompletionHandler_(challenge, None)
def handleReceivedData_(self, data):
'''Handle received data'''
if self.destination:
self.destination.write(data)
else:
try:
self.log(str(data))
except Exception:
pass
self.bytesReceived += len(data)
if self.expectedLength != NSURLResponseUnknownLength:
# pylint: disable=old-division
self.percentComplete = int(
float(self.bytesReceived)/float(self.expectedLength) * 100.0)
# pylint: enable=old-division
def URLSession_dataTask_didReceiveData_(self, _session, _task, data):
'''NSURLSessionDataDelegate method'''
self.handleReceivedData_(data)
def connection_didReceiveData_(self, _connection, data):
'''NSURLConnectionDataDelegate method
Sent as a connection loads data incrementally'''
self.handleReceivedData_(data)
if __name__ == '__main__':
print('This is a library of support tools for the Munki Suite.')
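    # Typical polling pattern for this class (a sketch; the URL and file
    # path are hypothetical):
    #
    #   options = {'url': 'https://example.com/file.dmg',
    #              'file': '/tmp/file.dmg'}
    #   connection = Gurl.alloc().initWithOptions_(options)
    #   connection.start()
    #   while not connection.isDone():
    #       pass  # check connection.percentComplete for progress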
| {
"content_hash": "f0ba7a6423a5a7b08cedc451b03aa46b",
"timestamp": "",
"source": "github",
"line_count": 688,
"max_line_length": 82,
"avg_line_length": 42.80523255813954,
"alnum_prop": 0.6253989813242784,
"repo_name": "erikng/installapplications",
"id": "effac34577f72a792f7295d193c1da19998bfd44",
"size": "30055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "payload/Library/installapplications/gurl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "66698"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
man = 1
wives = man * 7
madai = wives * 7
cats = madai * 7
kitties = cats * 7
total = man + wives + madai + cats + kitties
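# 1 + 7 + 49 + 343 + 2401 = 2801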
print(total)
| {
"content_hash": "2c660b90ea90c0542873acf738460d99",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 44,
"avg_line_length": 19.428571428571427,
"alnum_prop": 0.625,
"repo_name": "1ta/study_python",
"id": "b860c3a59cb8de8dc50176bb798930cb302a5671",
"size": "136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "files/sun/chapter_1/6.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3777"
},
{
"name": "JavaScript",
"bytes": "2974"
},
{
"name": "Python",
"bytes": "27948"
},
{
"name": "Shell",
"bytes": "37"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import sys, os
version = '0.1'
setup(name='dish',
version=version,
description="Distributed shared memory tools",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='distributed shared memory quorum',
author='Matthew Desmarais',
author_email='matthew.desmarais@gmail.com',
url='',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'eventlet',
],
entry_points="""
# -*- Entry points: -*-
""",
)
| {
"content_hash": "60a2ea14480854ca4be4b53d7a487e47",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 95,
"avg_line_length": 27.96153846153846,
"alnum_prop": 0.6052269601100413,
"repo_name": "desmaj/dish",
"id": "7f06a8b4eb35b3a51b320bb55734e80c60a70f83",
"size": "727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9572"
}
],
"symlink_target": ""
} |
import pytest
import numpy as np
import sympy as sp
import numpy.testing as npt
from simupy.systems.symbolic import DynamicalSystem, dynamicsymbols
from simupy.systems import (need_state_equation_function_msg,
zero_dim_output_msg)
from simupy.array import Array, r_
x = x1, x2 = Array(dynamicsymbols('x1:3'))
mu = sp.symbols('mu')
state_equation = r_[x2, -x1+mu*(1-x1**2)*x2]
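# the Van der Pol oscillator, written as a first-order system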
output_equation = r_[x1**2 + x2**2, sp.atan2(x2, x1)]
constants = {mu: 5}
def test_dim_output_0():
with pytest.raises(ValueError, match=zero_dim_output_msg):
DynamicalSystem(input_=x, constants_values=constants)
def test_state_equation_kwarg():
with pytest.raises(ValueError, match=need_state_equation_function_msg):
DynamicalSystem(state=x, constants_values=constants)
sys = DynamicalSystem(state=x,
state_equation=state_equation,
constants_values=constants)
args = np.random.rand(len(x)+1)
npt.assert_allclose(
sys.state_equation_function(args[0], args[1:]).squeeze(),
np.r_[args[2], -args[1]+constants[mu]*(1-args[1]**2)*args[2]]
)
def test_output_equation_function_kwarg():
with pytest.raises(ValueError, match=zero_dim_output_msg):
DynamicalSystem(input_=x)
args = np.random.rand(len(x)+1)
sys = DynamicalSystem(state=x,
state_equation=state_equation,
constants_values=constants)
npt.assert_allclose(
sys.output_equation_function(args[0], args[1:]).squeeze(),
args[1:]
)
sys = DynamicalSystem(state=x,
state_equation=state_equation,
output_equation=output_equation,
constants_values=constants)
npt.assert_allclose(
sys.output_equation_function(args[0], args[1:]).squeeze(),
np.r_[args[1]**2 + args[2]**2, np.arctan2(args[2], args[1])]
)
| {
"content_hash": "267929a9cdafdd78f465f173b90cc8f9",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 75,
"avg_line_length": 37.0377358490566,
"alnum_prop": 0.6174223127865512,
"repo_name": "simupy/simupy",
"id": "190b4e2beb8995c8954c8057b04bd167a5c9bfd7",
"size": "1963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_symbolic_system.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "140544"
},
{
"name": "TeX",
"bytes": "1003"
}
],
"symlink_target": ""
} |
"""This file contains the descriptions and settings for all modules. Also it contains functions to add modules and so on"""
try:
import json
except ImportError:
import simplejson as json
from flask import jsonify, render_template, request
from maraschino.database import db_session
import maraschino
import copy
from Maraschino import app
from maraschino.tools import *
from maraschino.database import *
from maraschino.models import Module, NewznabSite
# name, label, description, and static are not user-editable and are taken from here
# poll and delay are user-editable and saved in the database - the values here are the defaults
# settings are also taken from the database - the values here are defaults
# if static = True then poll and delay are ignored
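# A minimal (hypothetical) entry illustrating the schema described above:
#
#     {
#         'name': 'example',
#         'label': 'Example Module',
#         'description': 'An example module',
#         'static': False,
#         'poll': 60,   # seconds between refreshes (default; user-editable)
#         'delay': 0,   # seconds before first load (default; user-editable)
#         'settings': [
#             {'key': 'example_host', 'value': '', 'description': 'Hostname'},
#         ],
#     }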
AVAILABLE_MODULES = [
{
'name': 'couchpotato',
'label': 'Manager - CouchPotato',
'description': 'Manage CouchPotato from within Maraschino',
'static': True,
'poll': 0,
'delay': 0,
'settings': [
{
'key': 'couchpotato_api',
'value': '',
'description': 'CouchPotato API Key',
},
{
'key': 'couchpotato_user',
'value': '',
'description': 'CouchPotato Username',
},
{
'key': 'couchpotato_password',
'value': '',
'description': 'CouchPotato Password',
},
{
'key': 'couchpotato_ip',
'value': '',
'description': 'CouchPotato Hostname',
},
{
'key': 'couchpotato_port',
'value': '',
'description': 'CouchPotato Port',
},
{
'key': 'couchpotato_webroot',
'value': '',
'description': 'CouchPotato Webroot',
},
{
'key': 'couchpotato_https',
'value': '0',
'description': 'Use HTTPS',
'type': 'bool',
},
{
'key': 'couchpotato_compact',
'value': '0',
'description': 'Compact view',
'type': 'bool',
},
]
},
{
'name': 'headphones',
'label': 'Manager - Headphones',
'description': 'Manage Headphones from within Maraschino',
'static': True,
'poll': 0,
'delay': 0,
'settings': [
{
'key': 'headphones_host',
'value': '',
'description': 'Headphones Hostname',
},
{
'key': 'headphones_port',
'value': '',
'description': 'Headphones Port',
},
{
'key': 'headphones_webroot',
'value': '',
'description': 'Headphones Webroot',
},
{
'key': 'headphones_user',
'value': '',
'description': 'Headphones Username',
},
{
'key': 'headphones_password',
'value': '',
'description': 'Headphones Password',
},
{
'key': 'headphones_api',
'value': '',
'description': 'Headphones API Key',
},
{
'key': 'headphones_https',
'value': '0',
'description': 'Use HTTPS',
'type': 'bool',
},
{
'key': 'headphones_compact',
'value': '0',
'description': 'Compact view',
'type': 'bool',
},
]
},
{
'name': 'sickbeard',
'label': 'Manager - Sickbeard',
'description': 'Manage Sickbeard from within Maraschino',
'static': True,
'poll': 0,
'delay': 0,
'settings': [
{
'key': 'sickbeard_api',
'value': '',
'description': 'Sickbeard API Key',
},
{
'key': 'sickbeard_user',
'value': '',
'description': 'Sickbeard Username',
},
{
'key': 'sickbeard_password',
'value': '',
'description': 'Sickbeard Password',
},
{
'key': 'sickbeard_ip',
'value': '',
'description': 'Sickbeard Hostname',
},
{
'key': 'sickbeard_port',
'value': '',
'description': 'Sickbeard Port',
},
{
'key': 'sickbeard_webroot',
'value': '',
'description': 'Sickbeard Webroot',
},
{
'key': 'sickbeard_https',
'value': '0',
'description': 'Use HTTPS',
'type': 'bool',
},
{
'key': 'sickbeard_compact',
'value': '0',
'description': 'Compact view',
'type': 'bool',
},
{
'key': 'sickbeard_airdate',
'value': '0',
'description': 'Show air date',
'type': 'bool',
},
]
},
{
'name': 'sickrage',
'label': 'Manager - Sickrage',
'description': 'Manage Sickrage from within Maraschino',
'static': True,
'poll': 0,
'delay': 0,
'settings': [
{
'key': 'sickrage_api',
'value': '',
'description': 'Sickrage API Key',
},
{
'key': 'sickrage_user',
'value': '',
'description': 'Sickrage Username',
},
{
'key': 'sickrage_password',
'value': '',
'description': 'Sickrage Password',
},
{
'key': 'sickrage_ip',
'value': '',
'description': 'Sickrage Hostname',
},
{
'key': 'sickrage_port',
'value': '',
'description': 'Sickrage Port',
},
{
'key': 'sickrage_webroot',
'value': '',
'description': 'Sickrage Webroot',
},
{
'key': 'sickrage_https',
'value': '0',
'description': 'Use HTTPS',
'type': 'bool',
},
{
'key': 'sickrage_compact',
'value': '0',
'description': 'Compact view',
'type': 'bool',
},
{
'key': 'sickrage_airdate',
'value': '0',
'description': 'Show air date',
'type': 'bool',
},
]
},
{
'name': 'plex',
'label': 'Plex Module',
'description': 'Check your Plex Server content - OnDeck and Library',
'static': True,
'poll': 0,
'delay': 0,
},
{
'name': 'plex_recent_movies',
'label': 'Plex - Recently Added Movies',
'description': 'See the latest Movies from your Plex Library',
'static': False,
'poll': 360,
'delay': 0,
},
{
'name': 'plex_recent_episodes',
'label': 'Plex - Recently Added Episodes',
'description': 'See the latest Episodes from your Plex Library',
'static': False,
'poll': 360,
'delay': 0,
},
{
'name': 'plex_recent_albums',
'label': 'Plex - Recently Added Albums',
'description': 'See the latest Albums from your Plex Library',
'static': False,
'poll': 360,
'delay': 0,
},
{
'name': 'plex_recent_photos',
'label': 'Plex - Recently Added Photos',
'description': 'See the latest Photos from your Plex Library',
'static': False,
'poll': 360,
'delay': 0,
},
{
'name': 'nzbget',
'label': 'Usenet - NZBGet',
'description': 'Shows you information about your NZBGet downloads.',
'static': False,
'poll': 10,
'delay': 0,
'settings': [
{
'key': 'nzbget_host',
'value': '',
'description': 'Hostname',
},
{
'key': 'nzbget_port',
'value': '',
'description': 'Port',
},
{
'key': 'nzbget_password',
'value': '',
'description': 'Password',
},
{
'key': 'nzbget_https',
'value': '0',
'description': 'Use HTTPS',
'type': 'bool',
},
]
},
{
'name': 'sabnzbd',
'label': 'Usenet - SABnzbd+',
'description': 'Shows you information about your SABnzbd+ downloads.',
'static': False,
'poll': 10,
'delay': 0,
'settings': [
{
'key': 'sabnzbd_host',
'value': '',
'description': 'Hostname',
},
{
'key': 'sabnzbd_port',
'value': '',
'description': 'Port',
},
{
'key': 'sabnzbd_webroot',
'value': '',
'description': 'Webroot',
},
{
'key': 'sabnzbd_api',
'value': '',
'description': 'API Key',
},
{
'key': 'sabnzbd_https',
'value': '0',
'description': 'Use HTTPS',
'type': 'bool',
},
{
'key': 'sabnzbd_show_empty',
'value': '1',
'description': 'Show module when queue is empty',
'type': 'bool',
},
]
},
{
'name': 'transmission',
'label': 'Torrent - Transmission',
'description': 'Shows you information about your Transmission downloads.',
'static': False,
'poll': 10,
'delay': 0,
'settings': [
{
'key': 'transmission_ip',
'value': '',
'description': 'Transmission Hostname',
},
{
'key': 'transmission_port',
'value': '9091',
'description': 'Transmission Port',
},
{
'key': 'transmission_user',
'value': '',
'description': 'Transmission Username',
},
{
'key': 'transmission_password',
'value': '',
'description': 'Transmission Password',
},
{
'key': 'transmission_webroot',
'value': '',
'description': 'Transmission Webroot (Optional)',
},
{
'key': 'transmission_show_empty',
'value': '1',
'description': 'Show module with no active torrents',
'type': 'bool',
},
]
},
{
'name': 'utorrent',
'label': 'Torrent - uTorrent',
'description': 'Shows information about uTorrent downloads',
'static': False,
'poll': 10,
'delay': 0,
'settings': [
{
'key': 'utorrent_ip',
'value': '',
'description': 'uTorrent Hostname',
},
{
'key': 'utorrent_port',
'value': '8080',
'description': 'uTorrent Port',
},
{
'key': 'utorrent_user',
'value': '',
'description': 'uTorrent Username',
},
{
'key': 'utorrent_password',
'value': '',
'description': 'uTorrent Password',
},
]
},
{
'name': 'rtorrentdl',
'label': 'Torrent - rTorrent',
'description': 'Shows information about rTorrent downloads',
'static': False,
'poll': 30,
'delay': 0,
'settings': [
{
'key': 'rtorrent_proto',
'value': 'http',
'description': 'rTorrent protocol',
'type': 'select',
'options': [
{'value': 'http', 'label': 'http'},
{'value': 'https', 'label': 'https'},
{'value': 'scgi', 'label': 'scgi'}
]
},
{
'key': 'rtorrent_host',
'value': 'example.com/RPC2',
'description': 'rTorrent hostname/location',
},
{
'key': 'rtorrent_port',
'value': '',
'description': 'rTorrent port (optional for http/https)',
},
{
'key': 'rtorrent_user',
'value': '',
'description': 'rTorrent username (http/https only)',
},
{
'key': 'rtorrent_password',
'value': '',
'description': 'rTorrent password (http/https only)',
},
{
'key': 'rtorrent_list_scroll',
'value': '1',
'description': 'Use fixed torrent list size',
'type': 'bool'
}
]
},
{
'name': 'traktplus',
'label': 'Trakt.TV',
'description': 'trakt.tv module',
'static': False,
'poll': 0,
'delay': 10,
'settings': [
{
'key': 'trakt_api_key',
'value': '',
'description': 'Trakt API Key',
'link': 'http://trakt.tv/settings/api',
},
{
'key': 'trakt_username',
'value': '',
'description': 'Trakt Username',
},
{
'key': 'trakt_password',
'value': '',
'description': 'Trakt Password',
},
{
'key': 'trakt_default_view',
                'value': 'trending_shows',
'description': 'Default view',
'type': 'select',
'options': [
{'value': 'trending_shows', 'label': 'Trending (TV Shows)'},
{'value': 'trending_movies', 'label': 'Trending (Movies)'},
{'value': 'activity_friends', 'label': 'Activity (Friends)'},
{'value': 'activity_community', 'label': 'Activity (Community)'},
{'value': 'friends', 'label': 'Friends'},
                    {'value': 'calendar', 'label': 'Calendar'},
                    {'value': 'recommendations_shows', 'label': 'Recommendations (TV Shows)'},
                    {'value': 'recommendations_movies', 'label': 'Recommendations (Movies)'},
                    {'value': 'profile', 'label': 'My profile'},
]
},
{
'key': 'trakt_default_media',
'value': 'shows',
'description': 'Default media type',
'type': 'select',
'options': [
{'value': 'shows', 'label': 'Shows'},
{'value': 'movies', 'label': 'Movies'},
]
},
{
'key': 'trakt_trending_limit',
'value': '20',
'description': 'How many trending items to display',
'type': 'select',
'options': [
{'value': '20', 'label': '20'},
{'value': '40', 'label': '40'},
{'value': '60', 'label': '60'},
]
},
]
},
{
'name': 'trakt',
'label': 'Trakt.TV - Shouts',
'description': 'Shows you what people are saying about what you are watching and allows you to add your own comments.',
'static': True,
'poll': 0,
'delay': 0,
'settings': [
{
'key': 'trakt_api_key',
'value': '',
'description': 'Trakt API Key',
'link': 'http://trakt.tv/settings/api',
},
{
'key': 'trakt_username',
'value': '',
'description': 'Trakt Username',
},
{
'key': 'trakt_password',
'value': '',
'description': 'Trakt Password',
},
]
},
{
'name': 'applications',
'label': 'Utility - Applications',
'description': 'Allows you to link to whatever applications you want (SabNZBd, SickBeard, etc.)',
'static': True,
'poll': 0,
'delay': 0,
'settings': [
{
'key': 'app_new_tab',
'value': '0',
'description': 'Open application in new tab.',
'type': 'bool',
},
]
},
{
'name': 'diskspace',
'label': 'Utility - Disk space',
'description': 'Shows you available disk space on your various drives.',
'static': False,
'poll': 350,
'delay': 0,
'settings': [
{
'key': 'show_grouped_disks',
'value': '0',
'description': 'Show grouped disks outside of group.',
'type': 'bool',
},
{
'key': 'use_binary_units',
'value': '1',
                'description': 'Use binary storage units (e.g. GiB rather than GB)',
'type': 'bool',
},
]
},
{
        'name': 'ipcamera',
        'label': 'Utility - IP Camera',
        'description': 'Show and control your IP camera',
'static': False,
'poll': 0,
'delay': 0,
'settings': [
{
'key': 'ipcamera_ip',
'value': '',
                'description': 'IP address',
},
{
'key': 'ipcamera_port',
'value': '',
'description': 'Port',
},
{
'key': 'ipcamera_username',
'value': '',
'description': 'Username',
},
{
'key': 'ipcamera_password',
'value': '',
'description': 'Password',
},
{
'key': 'ipcamera_type',
                'value': 'foscammjeg',
'description': 'Pick your camera',
'type': 'select',
'options': [
                {'value': 'foscammjeg', 'label': 'Foscam MJPEG'},
                {'value': 'foscammp4', 'label': 'Foscam MP4'},
]
},
]
},
{
'name': 'script_launcher',
'label': 'Utility - Script Launcher',
        'description': 'Runs scripts on the same system where Maraschino is located.',
'static': False,
'poll': 350,
'delay': 0,
},
{
'name': 'weather',
'label': 'Utility - Weather',
'description': 'Weather details.',
'static': False,
'poll': 350,
'delay': 0,
'settings': [
{
'key': 'weather_location',
'value': '',
'description': 'weather.com area ID',
'link': 'http://edg3.co.uk/snippets/weather-location-codes/',
},
{
'key': 'weather_use_celcius',
'value': '0',
'description': 'Temperature in C',
'type': 'bool',
},
{
'key': 'weather_use_kilometers',
'value': '0',
                'description': 'Wind speed in km',
'type': 'bool',
},
{
'key': 'weather_time',
'value': '0',
'description': '24 hour time',
'type': 'bool',
},
{
'key': 'weather_compact',
'value': '0',
'description': 'Compact view',
'type': 'bool',
},
]
},
]
MISC_SETTINGS = [
{
'key': 'random_backgrounds',
'value': '0',
'description': 'Use a random background when not watching media',
'type': 'bool',
},
{
'key': 'num_columns',
'value': '3',
'description': 'Number of columns',
'type': 'select',
'options': [
{'value': '3', 'label': '3'},
{'value': '4', 'label': '4'},
{'value': '5', 'label': '5'},
]
},
{
'key': 'title_color',
'value': 'EEE',
'description': 'Module title color (hexadecimal)',
},
]
SERVER_SETTINGS = [
{
'key': 'maraschino_username',
'value': '',
'description': 'Maraschino username',
},
{
'key': 'maraschino_password',
'value': '',
'description': 'Maraschino password',
},
{
'key': 'maraschino_port',
'value': '7000',
'description': 'Maraschino port',
},
{
'key': 'maraschino_webroot',
'value': '',
'description': 'Maraschino webroot',
},
]
SEARCH_SETTINGS = [
{
'key': 'search',
'value': '0',
'description': 'Enable search feature',
'type': 'bool',
},
{
'key': 'search_retention',
'value': '',
'description': 'Usenet retention',
},
{
'key': 'search_ssl',
'value': '0',
'description': 'Prefer SSL',
'type': 'bool',
},
{
'key': 'search_english',
'value': '0',
'description': 'Prefer English only',
'type': 'bool',
},
]
PLEX = [
{
'key': 'myPlex_username',
'value': '',
'description': 'myPlex username',
},
{
'key': 'myPlex_password',
'value': '',
'description': 'myPlex password',
},
]
@app.route('/xhr/add_module_dialog')
@requires_auth
def add_module_dialog():
"""Dialog to add a new module to Maraschino"""
modules_on_page = Module.query.all()
available_modules = copy.copy(AVAILABLE_MODULES)
    # remove modules that are already on the page from the available list
for module_on_page in modules_on_page:
for available_module in available_modules:
if module_on_page.name == available_module['name']:
available_modules.remove(available_module)
break
return render_template('dialogs/add_module_dialog.html',
available_modules = available_modules,
)
@app.route('/xhr/add_module', methods=['POST'])
@requires_auth
def add_module():
"""Add a new module to Maraschino"""
try:
module_id = request.form['module_id']
column = request.form['column']
position = request.form['position']
# make sure that it's a valid module
module_info = get_module_info(module_id)
if not module_info:
raise Exception
    except Exception:
return jsonify({ 'status': 'error' })
module = Module(
module_info['name'],
column,
position,
module_info['poll'],
module_info['delay'],
)
db_session.add(module)
# if module template has extra settings then create them in the database
# with default values if they don't already exist
if 'settings' in module_info:
for s in module_info['settings']:
setting = get_setting(s['key'])
if not setting:
setting = Setting(s['key'], s['value'])
db_session.add(setting)
db_session.commit()
module_info['template'] = '%s.html' % (module_info['name'])
# if the module is static and doesn't have any extra settings, return
# the rendered module
    if module_info['static'] and 'settings' not in module_info:
return render_template('placeholder_template.html',
module = module_info
)
# otherwise return the rendered module settings dialog
else:
return module_settings_dialog(module_info['name'])
@app.route('/xhr/rearrange_modules', methods=['POST'])
@requires_auth
def rearrange_modules():
"""Rearrange a module on the page"""
try:
modules = json.JSONDecoder().decode(request.form['modules'])
    except Exception:
return jsonify({ 'status': 'error' })
for module in modules:
try:
m = Module.query.filter(Module.name == module['name']).first()
m.column = module['column']
m.position = module['position']
db_session.add(m)
        except Exception:
pass
db_session.commit()
return jsonify({ 'status': 'success' })
@app.route('/xhr/remove_module/<name>', methods=['POST'])
@requires_auth
def remove_module(name):
"""Remove module from the page"""
module = Module.query.filter(Module.name == name).first()
db_session.delete(module)
db_session.commit()
return jsonify({ 'status': 'success' })
@app.route('/xhr/module_settings_dialog/<name>')
@requires_auth
def module_settings_dialog(name):
"""show settings dialog for module"""
module_info = get_module_info(name)
module_db = get_module(name)
if module_info and module_db:
# look at the module template so we know what settings to look up
module = copy.copy(module_info)
# look up poll and delay from the database
module['poll'] = module_db.poll
module['delay'] = module_db.delay
# iterate through possible settings and get values from database
if 'settings' in module:
for s in module['settings']:
setting = get_setting(s['key'])
if setting:
s['value'] = setting.value
if 'plex_servers' in s:
s['options'] = module_get_plex_servers()
return render_template('dialogs/module_settings_dialog.html',
module = module,
)
return jsonify({ 'status': 'error' })
@app.route('/xhr/module_settings_cancel/<name>')
@requires_auth
def module_settings_cancel(name):
"""Cancel the settings dialog"""
module = get_module_info(name)
if module:
module['template'] = '%s.html' % (module['name'])
return render_template('placeholder_template.html',
module = module,
)
return jsonify({ 'status': 'error' })
@app.route('/xhr/module_settings_save/<name>', methods=['POST'])
@requires_auth
def module_settings_save(name):
"""Save options in settings dialog"""
try:
settings = json.JSONDecoder().decode(request.form['settings'])
    except Exception:
return jsonify({ 'status': 'error' })
for s in settings:
# poll and delay are stored in the modules tables
if s['name'] == 'poll' or s['name'] == 'delay':
module = get_module(name)
if s['name'] == 'poll':
module.poll = int(s['value'])
if s['name'] == 'delay':
module.delay = int(s['value'])
db_session.add(module)
# other settings are stored in the settings table
else:
setting = get_setting(s['name'])
if not setting:
setting = Setting(s['name'])
setting.value = s['value']
db_session.add(setting)
if s['name'] == 'maraschino_username':
maraschino.AUTH['username'] = s['value'] if s['value'] != '' else None
if s['name'] == 'maraschino_password':
maraschino.AUTH['password'] = s['value'] if s['value'] != '' else None
db_session.commit()
# you can't cancel server settings - instead, return an updated template
# with 'Settings saved' text on the button
if name == 'server_settings':
return extra_settings_dialog(dialog_type='server_settings', updated=True)
elif name == 'plex_login':
from maraschino.noneditable import json_login
try:
return json_login()
        except Exception:
logger.log('Plex :: Failed to populate servers with new credentials', 'ERROR')
# for everything else, return the rendered module
return module_settings_cancel(name)
@app.route('/xhr/extra_settings_dialog/<dialog_type>')
@requires_auth
def extra_settings_dialog(dialog_type, updated=False):
"""
Extra settings dialog (search settings, misc settings, etc).
"""
dialog_text = None
dialog_extra = None
if dialog_type == 'search_settings':
settings = copy.copy(SEARCH_SETTINGS)
dialog_title = 'Search settings'
dialog_text = 'N.B. With search enabled, you can press \'ALT-s\' to display the search module.'
dialog_extra = NewznabSite.query.order_by(NewznabSite.id)
elif dialog_type == 'misc_settings':
settings = copy.copy(MISC_SETTINGS)
dialog_title = 'Misc. settings'
elif dialog_type == 'server_settings':
settings = copy.copy(SERVER_SETTINGS)
dialog_title = 'Server settings'
elif dialog_type == 'plex_login':
settings = copy.copy(PLEX)
dialog_text = 'Credentials to log into http://plex.tv.'
dialog_title = 'myPlex Credentials'
else:
return jsonify({ 'status': 'error' })
for s in settings:
setting = get_setting(s['key'])
if setting:
s['value'] = setting.value
return render_template('dialogs/extra_settings_dialog.html',
dialog_title=dialog_title,
dialog_text=dialog_text,
dialog_type=dialog_type,
dialog_extra=dialog_extra,
settings=settings,
updated=updated,
)
def get_module(name):
"""helper method which returns a module record from the database"""
try:
return Module.query.filter(Module.name == name).first()
    except Exception:
return None
def get_module_info(name):
"""helper method which returns a module template"""
for available_module in AVAILABLE_MODULES:
if name == available_module['name']:
return available_module
return None
| {
"content_hash": "8be446e44044ffa9fd1c703b6043e492",
"timestamp": "",
"source": "github",
"line_count": 1063,
"max_line_length": 127,
"avg_line_length": 29.454374412041393,
"alnum_prop": 0.43688917278824657,
"repo_name": "runjmc/maraschino",
"id": "c78cd89be7df0e96af5bad53a2cffafb87366669",
"size": "31334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maraschino/modules.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1449"
},
{
"name": "CSS",
"bytes": "119821"
},
{
"name": "HTML",
"bytes": "220937"
},
{
"name": "JavaScript",
"bytes": "129299"
},
{
"name": "Python",
"bytes": "5482676"
},
{
"name": "Shell",
"bytes": "3965"
}
],
"symlink_target": ""
} |
import sys
from fcgi import WSGIServer
import os
import atexit
import cherrypy
import cherrypy._cpwsgi
import turbogears
turbogears.update_config(configfile="C:\\workspace\\eCRM\\prod.cfg", modulename="ecrm.config")
from ecrm.controllers import Root
cherrypy.root = Root()
if cherrypy.server.state == 0:
atexit.register(cherrypy.server.stop)
cherrypy.server.start(init_only=True, server_class=None)
def application(environ, start_response):
if environ['SCRIPT_NAME']:
environ['PATH_INFO'] = environ['SCRIPT_NAME'][1:]
environ['SCRIPT_NAME'] = '/'
return cherrypy._cpwsgi.wsgiApp(environ, start_response)
WSGIServer(application, bindAddress=("192.168.1.183", 888)).run()
| {
"content_hash": "39ce27fb7041ee4268620dc3921a9182",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 94,
"avg_line_length": 26.444444444444443,
"alnum_prop": 0.726890756302521,
"repo_name": "LamCiuLoeng/bossini",
"id": "6b9d185c8b53b40d1e75f5884d3f6fa54804fede",
"size": "714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "114853"
},
{
"name": "JavaScript",
"bytes": "487563"
},
{
"name": "Python",
"bytes": "444365"
},
{
"name": "SQL",
"bytes": "334001"
}
],
"symlink_target": ""
} |
import subprocess, os, base64
from cantools import config
from cantools.util import log, error, read, write, mkdir
# jsmin was removed from the cantools requirements to reduce
# the maintenance burden for deployments on older systems
# (often with some degree of broken package management) that
# don't need to compile anything. so we check here instead.
try:
from jsmin import jsmin
BUILDER_READY = True
except ImportError:
BUILDER_READY = False
pcfg = config.build.prod
pwrap = {
"standard": "<script>%s</script>",
"closure": "<script>(function(){%s})();</script>"
}[pcfg.closure and "closure" or "standard"]
iwraps = {
"standard": "%s = %s || {}",
"closure": 'if (typeof %s == "undefined") %s = {}',
"closvar": 'if (typeof %s == "undefined") var %s = {}'
}
iwrap = iwraps[pcfg.closure and "closure" or "standard"]
bwrap = "Function(atob(\"%s\"))();"
def pwrapper(stxt):
if pcfg.b64:
stxt = bwrap%(base64.b64encode(stxt.encode()).decode(),)
return pwrap%(stxt,)
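# As an illustration (assuming prod.b64 and prod.closure are both disabled),
# pwrapper("x=1;") yields '<script>x=1;</script>'; with b64 enabled the body
# is base64-encoded and rehydrated in the page via Function(atob(...))().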
def iwrapper(mod):
if config.build.prod.closure:
parts = mod.split(".")
if len(parts) == 2: # good enough? ....
return iwraps["closvar"]%(parts[1], parts[1])
return iwrap%(mod, mod)
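# For instance, with closure disabled iwrapper("window.CT.net") yields
# 'window.CT.net = window.CT.net || {}', while with closure enabled a
# two-part module such as "window.CT" instead becomes
# 'if (typeof CT == "undefined") var CT = {}'.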
def nextQuote(text, lastIndex=0):
z = i = text.find('"', lastIndex)
while z > 0 and text[z-1] == "\\":
z -= 1
return ((i-z)%2 == 0) and i or nextQuote(text, i+1)
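# For example, nextQuote('a"b') returns 1, while nextQuote('a\\"b"c') skips
# the backslash-escaped quote and returns 4 (the index of the closing quote).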
def encodestrings(text):
start = nextQuote(text) + 1
while start != 0:
end = nextQuote(text, start)
if end == -1:
error("parse",
"unclosed quote: character %s"%(start-1,),
"this quote: %s"%(text[start:start+config.parse_error_segment_length],),
text)
word = ''.join(["\\%s"%(oct(ord(ch))[1:],) for ch in list(text[start:end])])
if "\\134" not in word: # don't re-escape!
text = text[:start] + word + text[end:]
start = nextQuote(text, start + len(word) + 1) + 1
return text
def processhtml(html):
    html = html.replace("{", "&#123;").replace("}", "&#125;").replace("</body>", "%s</body>"%(config.noscript,))
start = 0
while not start or html[start + config.js.offset + 1] == "/":
firststart = start = end = html.find(config.js.flag, start + 1)
js = []
while start != -1:
start += config.js.offset
end = html.find('"', start)
if end == -1:
error("no closing quote in this file: %s"%(html,))
js.append(html[start:end].strip("/"))
start = html.find(config.js.flag, end)
log("js: %s"%(js,), 1)
if start == end:
return html, ""
return html[:firststart] + "{jsspot}" + html[end+config.js.endoffset:], js
def compress(html):
log("compressing html", 1)
newhtml = html.replace("\n", " ").replace("\t", " ")
while " " in newhtml:
newhtml = newhtml.replace(" ", " ")
newhtml = newhtml.replace("> <", "><")
log("orig: %s. new: %s"%(len(html), len(newhtml)), 2)
return newhtml
def bfiles(dirname, fnames):
return [fname for fname in fnames if os.path.isfile(os.path.join(dirname, fname)) and fname != ".svn" and not fname.endswith("~") and not "_old." in fname]
def tryinit(iline, inits, prefixes):
if iline not in inits:
inits.add(iline)
prefixes.append(iline)
fragz = set()
def frag(path):
if '"' in path:
path = path.split('"')[0]
for root, dirs, files in os.walk(path, followlinks=True):
for sub in files:
if sub.endswith(".js"):
fp = os.path.join(root, sub)
log("fragment identified: %s"%(fp,), 1)
fragz.add(fp)
else:
fragz.add(path)
def require(line, jspaths, block, inits, admin_ct_path=None):
dynamic = False
if line.startswith("CT.scriptImport("):
rline = line[17:].split(line[16])[0]
if line[16] not in "'\"" or rline.startswith("http"):
log("skipping scriptImport: %s"%(line,), important=True)
return block
dynamic = True
else:
rline = line.split('require(')[1][1:].split(");")[0].strip(")")
if rline.endswith('"skip"'):
log("skipping require: %s"%(line,), important=True)
return block
elif rline.endswith(", true"):
dynamic = True
rline = rline.split(", ")[0]
rline = rline[:-1]
rsplit = rline.split(".")
log("module %s"%(rline,), important=True)
jspath = os.path.join(config.js.path, *rsplit) + ".js"
log("path %s"%(jspath,))
log("dynamic %s"%(dynamic,))
if jspath not in jspaths:
if dynamic:
frag(jspath)
else:
prefixes = []
fullp = "window"
for rword in rsplit:
if rword[0].isalpha():
fullp = ".".join([fullp, rword])
else:
fullp = "%s[%s]"%(fullp, rword)
tryinit(iwrapper(fullp), inits, prefixes)
pblock = ";".join(prefixes)
if pblock:
if config.build.prod.closure:
pblock = pblock.replace("window.", "")
jspaths.append(pblock)
block = block.replace(line, "%s;%s"%(pblock,
processjs(jspath, jspaths, inits, admin_ct_path)), 1)
return block
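# A sketch of what require() rewrites (the module name is hypothetical): a
# source line such as 'CT.require("CT.net");' is replaced in-place with an
# init prefix like 'if (typeof CT == "undefined") var CT = {}' (closure mode)
# followed by the processed contents of js/CT/net.js.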
def processjs(path, jspaths=[], inits=set(), admin_ct_path=None):
log("processing js: %s"%(path,), 1)
p = path.split("#")[0] # potentially modded to locate file for prod (path remains the same for static)
if admin_ct_path: # admin pages
if path.startswith(config.js.path): # such as /js/CT/ct.js
p = admin_ct_path + path[len(config.js.path):]
else: # regular admin pages (/memcache/mc.js)
p = os.path.join(os.path.abspath(os.curdir), "dynamic", path)
block = read(p)
for line in [bit for bit in block.split("\n") if not bit.startswith("//")]:
for flag in ["CT.require(", "CT.scriptImport("]:
if flag in line and line[line.index(flag) - 1] != "'":
rline = line.strip().split(flag)[1]
if rline[0] != "\\" and rline[1] not in ".,": # require() is embedded in text. dot is weird.
block = require("%s%s"%(flag, rline),
jspaths, block, inits, admin_ct_path)
jspaths.append(path)
return "%s;\n"%(block,)
def compilejs(js, admin_ct_path=None):
jsblock = ""
jspaths = []
inits = set([iwrapper("window.CT")]) # already initialized
for p in js:
jsblock += processjs(p, jspaths, inits, admin_ct_path)
return jspaths, jsblock
def checkdir(p, recursive=False):
if not os.path.isdir(p):
mkdir(p, recursive)
def remerge(txt, js):
    return txt.format(jsspot=js).replace("&#123;", "{").replace("&#125;", "}")
def build_frags(mode="web", admin_ct_path=None):
log("Compiling Dynamically-Referenced Fragments", important=True)
base = config.build[mode].compiled.production
if config.build.include:
log("Including Config-Specified Modules")
for p in config.build.include:
log("include: %s"%(p,), 1)
fragz.add(p)
fragged = set()
def build_frag(frag):
block = processjs(frag, admin_ct_path=admin_ct_path)
path = os.path.join(base, frag[len(config.js.path)+1:])
checkdir(path.rsplit("/", 1)[0], True)
if frag in config.build.exclude:
log("path excluded -- skipping compression/obfuscation", 2)
else:
log("mangling", 2)
block = jsmin(block)
write(block, path)
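    # build_frag may discover new dynamic imports (via frag()) and grow fragz
    # while we sweep, so repeat until every fragment has been processed.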
    while len(fragged) != len(fragz):
fcopy = list(filter(lambda f : f not in fragged, fragz))
fragged.update(fcopy)
list(map(build_frag, fcopy))
def build(admin_ct_path, dirname, fnames):
"""
This parses an html file, squishes together the javascript, scans
through for dynamic imports (CT.require statements), injects modules
wherever necessary, and sticks the result in a big <script> tag.
"""
conf = admin_ct_path and config.build.admin or config.build.web
todir_stat = dirname.replace(conf.dynamic, conf.compiled.static)
todir_prod = dirname.replace(conf.dynamic, conf.compiled.production)
log("Target Static Directory: %s"%(todir_stat,), important=True)
log("Target Production Directory: %s"%(todir_prod,))
checkdir(todir_stat)
checkdir(todir_prod)
for fname in bfiles(dirname, fnames):
frompath = os.path.join(dirname, fname)
topath_stat = os.path.join(todir_stat, fname)
topath_prod = os.path.join(todir_prod, fname)
data = read(frompath)
if "fonts" in dirname or not fname.endswith(".html"):
log('copying non-html file (%s)'%(fname,), 1)
else:
log("processing html: %s"%(frompath,))
txt, js = processhtml(data)
if js:
jspaths, jsblock = compilejs(js, admin_ct_path)
log('writing static: %s -> %s'%(frompath, topath_stat), important=True)
write(remerge(txt, '\n'.join([p.split("#")[0].endswith("js") and '<script src="/%s"></script>'%(p,) or '<script>%s</script>'%(p,) for p in jspaths])), topath_stat)
log('writing production: %s -> %s'%(frompath, topath_prod))
jsb = jsblock.replace('"_encode": false,', '"_encode": true,').replace("CT.log._silent = false;", "CT.log._silent = true;")
if config.customscrambler:
jsb += '; CT.net.setScrambler("%s");'%(config.scrambler,)
write(remerge(compress(txt), pwrapper(jsmin(jsb))), topath_prod)
continue
else:
log('copying to prod/stat unmodified (%s)'%(fname,), important=True)
write(data, topath_stat)
write(data, topath_prod)
for fname in [f for f in fnames if os.path.isdir(os.path.join(dirname, f))]:
for dname, dirnames, filenames in os.walk(os.path.join(dirname, fname)):
build(admin_ct_path, dname, filenames + dirnames)
def silence_warnings():
from ply import yacc, lex
def quiet(*args, **kwargs):
pass
yacc.PlyLogger.warning = quiet
lex.PlyLogger.warning = quiet
def build_all(mode="web", admin_ct_path=None):
if not BUILDER_READY:
error("can't build - please install jsmin >= 2.2.2")
#silence_warnings() # is this unnecessary now.... maybe...
for dirname, dirnames, filenames in os.walk(config.build[mode].dynamic):
build(admin_ct_path, dirname, filenames + dirnames)
build_frags(mode, admin_ct_path)
if __name__ == "__main__":
    build_all()
| {
"content_hash": "ef1cad1d58b5fe0110fc56a32468322c",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 179,
"avg_line_length": 40.149812734082396,
"alnum_prop": 0.5728544776119403,
"repo_name": "bubbleboy14/cantools",
"id": "a157e6aa06941e23ee2d0dd64dfab2950946c72d",
"size": "10720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cantools/scripts/builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "32807"
},
{
"name": "HTML",
"bytes": "496346"
},
{
"name": "JavaScript",
"bytes": "377843"
},
{
"name": "Python",
"bytes": "202009"
},
{
"name": "Shell",
"bytes": "2292"
}
],
"symlink_target": ""
} |
import mock
import webob
from jacket.api.compute.openstack.compute.legacy_v2.contrib import server_external_events \
as server_external_events_v2
from jacket.api.compute.openstack.compute import server_external_events \
as server_external_events_v21
from jacket.compute import exception
from jacket.objects import compute
from jacket.compute import test
from jacket.tests.compute.unit.api.openstack import fakes
fake_instances = {
'00000000-0000-0000-0000-000000000001': compute.Instance(
uuid='00000000-0000-0000-0000-000000000001', host='host1'),
'00000000-0000-0000-0000-000000000002': compute.Instance(
uuid='00000000-0000-0000-0000-000000000002', host='host1'),
'00000000-0000-0000-0000-000000000003': compute.Instance(
uuid='00000000-0000-0000-0000-000000000003', host='host2'),
'00000000-0000-0000-0000-000000000004': compute.Instance(
uuid='00000000-0000-0000-0000-000000000004', host=None),
}
fake_instance_uuids = sorted(fake_instances.keys())
MISSING_UUID = '00000000-0000-0000-0000-000000000005'
@classmethod
def fake_get_by_uuid(cls, context, uuid):
try:
return fake_instances[uuid]
except KeyError:
raise exception.InstanceNotFound(instance_id=uuid)
@mock.patch('compute.compute.instance.Instance.get_by_uuid', fake_get_by_uuid)
class ServerExternalEventsTestV21(test.NoDBTestCase):
server_external_events = server_external_events_v21
invalid_error = exception.ValidationError
def setUp(self):
super(ServerExternalEventsTestV21, self).setUp()
self.api = \
self.server_external_events.ServerExternalEventsController()
self.event_1 = {'name': 'network-vif-plugged',
'tag': 'foo',
'server_uuid': fake_instance_uuids[0],
'status': 'completed'}
self.event_2 = {'name': 'network-changed',
'server_uuid': fake_instance_uuids[1]}
self.default_body = {'events': [self.event_1, self.event_2]}
self.resp_event_1 = dict(self.event_1)
self.resp_event_1['code'] = 200
self.resp_event_2 = dict(self.event_2)
self.resp_event_2['code'] = 200
self.resp_event_2['status'] = 'completed'
self.default_resp_body = {'events': [self.resp_event_1,
self.resp_event_2]}
self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
def _assert_call(self, body, expected_uuids, expected_events):
with mock.patch.object(self.api.compute_api,
'external_instance_event') as api_method:
response = self.api.create(self.req, body=body)
result = response.obj
code = response._code
self.assertEqual(1, api_method.call_count)
for inst in api_method.call_args_list[0][0][1]:
expected_uuids.remove(inst.uuid)
self.assertEqual([], expected_uuids)
for event in api_method.call_args_list[0][0][2]:
expected_events.remove(event.name)
self.assertEqual([], expected_events)
return result, code
def test_create(self):
result, code = self._assert_call(self.default_body,
fake_instance_uuids[:2],
['network-vif-plugged',
'network-changed'])
self.assertEqual(self.default_resp_body, result)
self.assertEqual(200, code)
def test_create_one_bad_instance(self):
body = self.default_body
body['events'][1]['server_uuid'] = MISSING_UUID
result, code = self._assert_call(body, [fake_instance_uuids[0]],
['network-vif-plugged'])
self.assertEqual('failed', result['events'][1]['status'])
self.assertEqual(200, result['events'][0]['code'])
self.assertEqual(404, result['events'][1]['code'])
self.assertEqual(207, code)
def test_create_event_instance_has_no_host(self):
body = self.default_body
body['events'][0]['server_uuid'] = fake_instance_uuids[-1]
# the instance without host should not be passed to the compute layer
result, code = self._assert_call(body,
[fake_instance_uuids[1]],
['network-changed'])
self.assertEqual(422, result['events'][0]['code'])
self.assertEqual('failed', result['events'][0]['status'])
self.assertEqual(200, result['events'][1]['code'])
self.assertEqual(207, code)
def test_create_no_good_instances(self):
body = self.default_body
body['events'][0]['server_uuid'] = MISSING_UUID
body['events'][1]['server_uuid'] = MISSING_UUID
self.assertRaises(webob.exc.HTTPNotFound,
self.api.create, self.req, body=body)
def test_create_bad_status(self):
body = self.default_body
body['events'][1]['status'] = 'foo'
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
def test_create_extra_gorp(self):
body = self.default_body
body['events'][0]['foobar'] = 'bad stuff'
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
def test_create_bad_events(self):
body = {'events': 'foo'}
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
def test_create_bad_body(self):
body = {'foo': 'bar'}
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
    def test_create_unknown_events(self):
        self.event_1['name'] = 'unknown_event'
body = {'events': self.event_1}
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
@mock.patch('compute.compute.instance.Instance.get_by_uuid', fake_get_by_uuid)
class ServerExternalEventsTestV2(ServerExternalEventsTestV21):
server_external_events = server_external_events_v2
invalid_error = webob.exc.HTTPBadRequest
| {
"content_hash": "53570ae5d1ab80416876840040fb5ab8",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 91,
"avg_line_length": 43.486301369863014,
"alnum_prop": 0.5986769570011026,
"repo_name": "HybridF5/jacket",
"id": "76e535182f42462a5ea2f38acddec01f3054f60d",
"size": "6958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacket/tests/compute/unit/api/openstack/compute/test_server_external_events.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.test import Client
from django.core.urlresolvers import reverse
from bot.testing.utils import LogosTestCase
from .bot_plugin import MemosPlugin
# Create your tests here.
from .models import Memo, Folder
class MemoTestCase(TestCase):
def setUp(self):
# create test users
# https://docs.djangoproject.com/en/1.8/topics/auth/default/#creating-users
self.u1 = fred = User.objects.create_user("fred", "fred@nowhere.com", "pass123")
self.u2 = john = User.objects.create_user("john", "john@nowhere.com", "pass456")
def test_memos(self):
fred = authenticate(username='fred', password='pass123')
self.assertIsNotNone(fred)
john = authenticate(username='john', password='pass456')
self.assertIsNotNone(john)
Memo.send_memo(john, fred, "This is a test", "Some body text")
memo_in_inbox = Memo.objects.filter(folder__name='inbox', to_user = fred)
self.assertIsNotNone(memo_in_inbox)
memo_in_outbox = Memo.objects.filter(folder__name='outbox', from_user = john)
self.assertIsNotNone(memo_in_outbox)
# Memos should be cloned, not just referenced!
self.assertNotEqual(memo_in_inbox, memo_in_outbox)
# These folder tests should fail
with self.assertRaises(Memo.DoesNotExist):
Memo.objects.get(folder__name='inbox',to_user__username="john")
with self.assertRaises(Memo.DoesNotExist):
Memo.objects.get(folder__name='outbox',from_user__username="fred")
def test_views(self):
c = Client()
response = c.post('/accounts/login/', {'username': 'john', 'password':'pass456'},
follow=True )
self.assertEqual(response.status_code, 200)
response = c.post('/memos/new/', {'recipient': 'fred',
'subject':'Hi there!',
'message':'How art thou?'},
follow=True )
self.assertEqual(response.status_code, 200)
response = c.get('/memos/outbox/')
# Test memo reachability
memo = response.context['memos'][0]
response = c.get('/memos/preview/'+str(memo.id))
self.assertEqual(response.status_code, 200)
def test_memo_to_self_deleted(self):
"""Test what happens if a memo is sent to self and subsequently deleted
from outbox. Does it also incorrectly delete from inbox?"""
c = Client()
response = c.post('/accounts/login/', {'username': 'john', 'password':'pass456'},
follow=True )
self.assertEqual(response.status_code, 200)
response = c.get('/memos/outbox/')
self.assertFalse (response.context['memos'])
response = c.get('/memos/inbox/')
self.assertFalse (response.context['memos'])
response = c.post('/memos/new/', {'recipient': 'john',
'subject':'Hi there!',
'message':'How art thou?'},
follow=True )
self.assertEqual(response.status_code, 200)
response = c.get('/memos/inbox/')
self.assertTrue (response.context['memos'])
response = c.get('/memos/outbox/')
self.assertTrue (response.context['memos'])
memo_id = response.context['memos'][0].id
response = c.get('/memos/preview/'+str(memo_id))
self.assertEqual(response.status_code, 200)
response = c.get('/memos/trash_memo/'+str(memo_id))
self.assertEqual(response.status_code, 200)
response = c.post('/memos/trash_memo/'+str(memo_id), {'yes': 'Yes'},
follow=True )
self.assertEqual(response.status_code, 200)
response = c.get('/memos/outbox/')
self.assertFalse (response.context['memos'])
response = c.get('/memos/inbox/')
self.assertTrue (response.context['memos'])
def tearDown(self):
self.u1.delete()
self.u2.delete()
class TestMemos(LogosTestCase):
# set plugin_class to the actual class
# of the plugin you wish to test
plugin_class = MemosPlugin
def setUp(self):
# create test users
# https://docs.djangoproject.com/en/1.8/topics/auth/default/#creating-users
self.u1 = fred = User.objects.create_user("fred", "fred@nowhere.com", "pass123")
self.u2 = john = User.objects.create_user("john", "john@nowhere.com", "pass456")
self.u3 = mary = User.objects.create_user("mary", "mary@nowhere.com", "Pass789")
def testMemoSend(self):
self.set_nick("fred")
self.login('pass123')
output = self.plugin.send_command("send john Hi, How are you")
self.assertIn('Memo sent', output)
self.set_nick("john")
self.login("pass456")
output = self.plugin.send_command("check")
self.assertIn('1 unread', output)
output = self.plugin.send_command("read 0")
self.assertIn('How are you', output)
output = self.plugin.send_command("check")
self.assertIn('no unread', output)
output = self.plugin.send_command("delete 0")
self.assertIn('deleted', output)
output = self.plugin.send_command("list")
self.assertIn('No memos found', output)
output = self.plugin.send_command("send Mary Hi, How are you")
self.assertIn('Memo sent', output)
self.set_nick("Mary")
self.login("Pass789")
output = self.plugin.send_command("check")
self.assertIn("You have 1 unread memos", output)
def tearDown(self):
print ("deleting test users.")
self.u1.delete()
self.u2.delete()
self.u3.delete()
| {
"content_hash": "c52cdd4a51ec4374bfe18d982473fda2",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 90,
"avg_line_length": 36.558282208588956,
"alnum_prop": 0.59456284611512,
"repo_name": "kiwiheretic/logos-v2",
"id": "80c19988db562e4e96491467b70b150a92107ca4",
"size": "5959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloud_memos/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "39336"
},
{
"name": "HTML",
"bytes": "90623"
},
{
"name": "JavaScript",
"bytes": "2169514"
},
{
"name": "Less",
"bytes": "78481"
},
{
"name": "Python",
"bytes": "610582"
},
{
"name": "SCSS",
"bytes": "79489"
},
{
"name": "Shell",
"bytes": "5552"
}
],
"symlink_target": ""
} |
"""
pybufrkit.decoder
~~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import functools
import logging
# noinspection PyUnresolvedReferences
from six.moves import range
from pybufrkit.constants import (BITPOS_START,
MESSAGE_START_SIGNATURE,
NBITS_FOR_NBITS_DIFF,
NBITS_PER_BYTE,
NUMERIC_MISSING_VALUES,
PARAMETER_TYPE_TEMPLATE_DATA,
PARAMETER_TYPE_UNEXPANDED_DESCRIPTORS)
from pybufrkit.errors import PyBufrKitError
from pybufrkit.bitops import get_bit_reader
from pybufrkit.bufr import BufrMessage
from pybufrkit.tables import TableGroupCacheManager
from pybufrkit.templatedata import TemplateData
from pybufrkit.coder import Coder, CoderState
from pybufrkit.dataprocessor import BufrTableDefinitionProcessor
from pybufrkit.templatecompiler import CompiledTemplateManager, process_compiled_template
from pybufrkit.script import ScriptRunner
__all__ = ['Decoder', 'generate_bufr_message']
log = logging.getLogger(__file__)
# noinspection PyUnusedLocal,PyAttributeOutsideInit
class Decoder(Coder):
"""
The decoder takes a bytes type string and decode it to a BUFR Message object.
"""
def __init__(self,
definitions_dir=None,
tables_root_dir=None,
compiled_template_cache_max=None):
super(Decoder, self).__init__(definitions_dir, tables_root_dir)
# Only enable template compilation if cache is requested
if compiled_template_cache_max is not None:
self.compiled_template_manager = CompiledTemplateManager(compiled_template_cache_max)
log.debug('Template compilation enabled with cache size of {}'.format(compiled_template_cache_max))
else:
self.compiled_template_manager = None
def process(self, s, file_path='<string>',
start_signature=MESSAGE_START_SIGNATURE,
info_only=False,
ignore_value_expectation=False,
wire_template_data=True):
"""
Decoding the given message string.
:param s: Message string that contains the BUFR Message
:param file_path: The file where this string is read from.
:param start_signature: Locate the starting position of the message
string with the given signature.
:param info_only: Only show information up to template data (exclusive)
:param ignore_value_expectation: Do not validate the expected value
:param wire_template_data: Whether to wire the template data to construct
a fully hierarchical structure from the flat lists. Only takes effect
when it is NOT info_only.
:return: A BufrMessage object that contains the decoded information.
"""
idx = s.find(start_signature) if start_signature is not None else 0
if idx == -1:
raise PyBufrKitError('Cannot find start signature: {}'.format(start_signature))
s = s[idx:]
bit_reader = get_bit_reader(s)
bufr_message = BufrMessage(file_path)
configuration_transformers = (self.section_configurer.info_configuration,) if info_only else ()
if ignore_value_expectation:
configuration_transformers += (self.section_configurer.ignore_value_expectation,)
nbits_decoded = 0
section_index = 0 # Always start decoding from section 0
while True:
section = self.section_configurer.configure_section(bufr_message, section_index,
configuration_transformers)
section_index += 1
if section is None: # when optional section is not present
continue
nbits_decoded += self.process_section(bufr_message, bit_reader, section)
if section.end_of_message:
break
# The exact bytes that have been decoded
bufr_message.serialized_bytes = s[:nbits_decoded // NBITS_PER_BYTE]
if not info_only and wire_template_data:
bufr_message.wire()
return bufr_message
def process_section(self, bufr_message, bit_reader, section):
"""
Decode the given configured Section.
:param bufr_message: The BUFR message object.
:param section: The BUFR section object.
:param bit_reader:
:return: Number of bits decoded for this section.
"""
section.set_metadata(BITPOS_START, bit_reader.get_pos())
for parameter in section:
if parameter.type == PARAMETER_TYPE_UNEXPANDED_DESCRIPTORS:
parameter.value = self.process_unexpanded_descriptors(bit_reader, section)
elif parameter.type == PARAMETER_TYPE_TEMPLATE_DATA:
parameter.value = self.process_template_data(bufr_message, bit_reader)
elif parameter.nbits == 0:
# Zero number of bits means to read all bits till the end of the section
parameter.value = bit_reader.read(
parameter.type,
section.section_length.value * NBITS_PER_BYTE -
(bit_reader.get_pos() - section.get_metadata(BITPOS_START))
)
else:
parameter.value = bit_reader.read(parameter.type, parameter.nbits)
log.debug('{} = {!r}'.format(parameter.name, parameter.value))
# Make available as a property of the overall message object
if parameter.as_property:
setattr(bufr_message, parameter.name, parameter)
if parameter.expected is not None:
assert parameter.value == parameter.expected, 'Value ({!r}) not as expected ({!r})'.format(
parameter.value, parameter.expected
)
# TODO: option to ignore the declared length?
# TODO: this depends on a specific parameter name, need change to parameter type?
if 'section_length' in section:
nbits_read = bit_reader.get_pos() - section.get_metadata(BITPOS_START)
nbits_unread = section.section_length.value * NBITS_PER_BYTE - nbits_read
if nbits_unread > 0:
log.debug('Skipping {} bits to end of the section'.format(nbits_unread))
bit_reader.read_bin(nbits_unread)
elif nbits_unread < 0:
raise PyBufrKitError('Read exceeds declared section {} length: {} by {} bits'.format(
section.get_metadata('index'), section.section_length.value, -nbits_unread))
return bit_reader.get_pos() - section.get_metadata(BITPOS_START)
def process_unexpanded_descriptors(self, bit_reader, section):
"""
Decode for the list of unexpanded descriptors.
:param section: The BUFR section object.
:param bit_reader:
:return: The unexpanded descriptors as a list.
"""
unexpanded_descriptors = []
nbytes_read = (bit_reader.get_pos() - section.get_metadata(BITPOS_START)) // NBITS_PER_BYTE
for _ in range((section.section_length.value - nbytes_read) // 2):
f = bit_reader.read_uint(2)
x = bit_reader.read_uint(6)
y = bit_reader.read_uint(8)
unexpanded_descriptors.append(f * 100000 + x * 1000 + y)
return unexpanded_descriptors
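    # For instance, the 2/6/8-bit triplet f=3, x=1, y=1 read above decodes to
    # descriptor 301001 (f * 100000 + x * 1000 + y).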
def process_template_data(self, bufr_message, bit_reader):
"""
Decode data described by the template.
:param bufr_message: The BUFR message object.
:param bit_reader:
:return: TemplateData decoded from the bit stream.
"""
# TODO: Parametrise the "normalize" argument
bufr_template, table_group = bufr_message.build_template(self.tables_root_dir, normalize=1)
state = CoderState(bufr_message.is_compressed.value, bufr_message.n_subsets.value)
if self.compiled_template_manager:
template_to_process = self.compiled_template_manager.get_or_compile(bufr_template, table_group)
template_processing_func = functools.partial(process_compiled_template, self)
else:
template_to_process = bufr_template
template_processing_func = self.process_template
# For uncompressed data, the processing has to be repeated for number of times
# equals to number of subsets. For compressed data, only a single processing
# is needed as all subsets are taken care each time a value is processed.
if bufr_message.is_compressed.value:
template_processing_func(state, bit_reader, template_to_process)
else:
for idx_subset in range(bufr_message.n_subsets.value):
state.switch_subset_context(idx_subset)
template_processing_func(state, bit_reader, template_to_process)
return TemplateData(bufr_template,
bufr_message.is_compressed.value,
state.decoded_descriptors_all_subsets,
state.decoded_values_all_subsets,
state.bitmap_links_all_subsets)
def get_value_for_delayed_replication_factor(self, state):
return state.get_value_for_delayed_replication_factor(-1)
def define_bitmap(self, state, reuse):
"""
For compressed data, bitmap and back referenced descriptors must be
identical Otherwise it makes no sense in compressing different bitmapped
descriptors into one slot.
:param state:
:param reuse: Is this bitmap for reuse?
:return: The bitmap as a list of 0 and 1.
"""
# First get all the bit values for the bitmap
if state.is_compressed:
bitmap = state.decoded_values_all_subsets[0][-state.n_031031:]
else:
bitmap = state.decoded_values[-state.n_031031:]
if reuse:
state.bitmap = bitmap
state.build_bitmapped_descriptors(bitmap)
return bitmap
def process_numeric(self, state, bit_reader, descriptor, nbits, scale_powered, refval):
(self.process_numeric_compressed if state.is_compressed else
self.process_numeric_uncompressed)(state, bit_reader, descriptor, nbits, scale_powered, refval)
def process_numeric_uncompressed(self, state, bit_reader, descriptor, nbits, scale_powered, refval):
state.decoded_descriptors.append(descriptor)
value = bit_reader.read_uint_or_none(nbits)
if value is not None:
if refval:
value += refval
if scale_powered != 1:
value /= scale_powered
state.decoded_values.append(value)
def process_numeric_compressed(self, state, bit_reader, descriptor, nbits_min_value, scale_powered, refval):
state.decoded_descriptors.append(descriptor)
min_value = bit_reader.read_uint_or_none(nbits_min_value)
nbits_diff = bit_reader.read_uint(NBITS_FOR_NBITS_DIFF)
# special cases: all missing or all equals
if min_value is None:
assert nbits_diff == 0, ('{}: nbits_diff must be zero for compressed '
'values that are all missing or equal'.format(descriptor))
for decoded_values in state.decoded_values_all_subsets:
decoded_values.append(None)
elif nbits_diff == 0:
value = min_value
if refval:
value += refval
if scale_powered != 1:
value /= scale_powered
for decoded_values in state.decoded_values_all_subsets:
decoded_values.append(value)
else:
for decoded_values in state.decoded_values_all_subsets:
diff = bit_reader.read_uint_or_none(nbits_diff)
if diff is None:
value = None
else:
value = min_value + diff
if refval:
value += refval
if scale_powered != 1:
value /= scale_powered
decoded_values.append(value)
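    # Illustrative sketch of the compressed-numeric layout handled above
    # (hypothetical values): with min_value=10, nbits_diff=4 and per-subset
    # increments of 0, 3 and "missing", the decoded values become 10, 13 and
    # None for the three subsets (before any reference value or scale factor
    # is applied).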
def process_string(self, state, bit_reader, descriptor, nbytes):
(self.process_string_compressed if state.is_compressed else
self.process_string_uncompressed)(state, bit_reader, descriptor, nbytes)
def process_string_uncompressed(self, state, bit_reader, descriptor, nbytes):
state.decoded_descriptors.append(descriptor)
state.decoded_values.append(bit_reader.read_bytes(nbytes))
def process_string_compressed(self, state, bit_reader, descriptor, nbytes_min_value):
state.decoded_descriptors.append(descriptor)
min_value = bit_reader.read_bytes(nbytes_min_value)
nbits_diff = bit_reader.read_uint(NBITS_FOR_NBITS_DIFF)
        if min_value in (b'\0' * nbytes_min_value, b'\xff' * nbytes_min_value):
min_value = b''
# special cases: all missing or all equals
if min_value is None or nbits_diff == 0:
assert nbits_diff == 0, ('{}: nbits_diff must be zero for compressed '
'values that are all missing or equal'.format(descriptor))
for decoded_values in state.decoded_values_all_subsets:
decoded_values.append(min_value)
else:
for decoded_values in state.decoded_values_all_subsets:
diff_value = bit_reader.read_bytes(nbits_diff)
decoded_values.append(min_value + diff_value)
def process_codeflag(self, state, bit_reader, descriptor, nbits):
(self.process_codeflag_compressed if state.is_compressed else
self.process_codeflag_uncompressed)(state, bit_reader, descriptor, nbits)
def process_codeflag_uncompressed(self, state, bit_reader, descriptor, nbits):
state.decoded_descriptors.append(descriptor)
state.decoded_values.append(bit_reader.read_uint_or_none(nbits))
def process_codeflag_compressed(self, state, bit_reader, descriptor, nbits_min_value):
state.decoded_descriptors.append(descriptor)
min_value = bit_reader.read_uint_or_none(nbits_min_value)
nbits_diff = bit_reader.read_uint(NBITS_FOR_NBITS_DIFF)
# special cases: all missing or all equals
if min_value is None or nbits_diff == 0:
assert nbits_diff == 0, ('{}: nbits_diff must be zero for compressed '
'values that are all missing or equal'.format(descriptor))
for decoded_values in state.decoded_values_all_subsets:
decoded_values.append(min_value)
else:
for decoded_values in state.decoded_values_all_subsets:
diff = bit_reader.read_uint_or_none(nbits_diff)
if diff is None:
value = None
else:
value = min_value + diff
# Still need to check for missing values, e.g. 4 bits code with a value of 15
# is actually a missing value
if descriptor.nbits > 1 and value == NUMERIC_MISSING_VALUES[descriptor.nbits]:
value = None
decoded_values.append(value)
def process_new_refval(self, state, bit_reader, descriptor, nbits):
(self.process_new_refval_compressed if state.is_compressed else
self.process_new_refval_uncompressed)(state, bit_reader, descriptor, nbits)
def process_new_refval_uncompressed(self, state, bit_reader, descriptor, nbits):
state.decoded_descriptors.append(descriptor)
# NOTE read_int NOT read_uint
state.new_refvals[descriptor.id] = value = bit_reader.read_int(nbits)
# TODO: new descriptor type for new refval
state.decoded_values.append(value)
def process_new_refval_compressed(self, state, bit_reader, descriptor, nbits_min_value):
state.decoded_descriptors.append(descriptor)
min_value = bit_reader.read_int(nbits_min_value)
nbits_diff = bit_reader.read_uint(NBITS_FOR_NBITS_DIFF)
assert nbits_diff == 0, ('{}: New reference values must be identical '
'for all subsets for compressed data'.format(descriptor))
for decoded_values in state.decoded_values_all_subsets:
decoded_values.append(min_value)
state.new_refvals[descriptor.id] = min_value
# TODO: new descriptor type for new refval
# TODO: this method can be removed if we don't use compiled template.
def process_numeric_of_new_refval(self, state, bit_reader,
descriptor, nbits, scale_powered,
refval_factor):
self.process_numeric(state, bit_reader, descriptor, nbits, scale_powered,
state.new_refvals[descriptor.id] * refval_factor)
def process_constant(self, state, bit_reader, descriptor, value):
(self.process_constant_compressed if state.is_compressed else
self.process_constant_uncompressed)(state, bit_reader, descriptor, value)
def process_constant_uncompressed(self, state, bit_reader, descriptor, value):
state.decoded_descriptors.append(descriptor)
state.decoded_values.append(value)
def process_constant_compressed(self, state, bit_reader, descriptor, value):
state.decoded_descriptors.append(descriptor)
for decoded_values in state.decoded_values_all_subsets:
decoded_values.append(value)
DATA_CATEGORY_DEFINE_BUFR_TABLES = 11
def generate_bufr_message(decoder, s, info_only=False, continue_on_error=False, filter_expr=None,
*args, **kwargs):
"""
This is a generator function that processes the given string for one
or more BufrMessage till it is exhausted.
:param Decoder decoder: Decoder to use
:param bytes s: String to decode for messages
:return: BufrMessage object
"""
sr = ScriptRunner(filter_expr, mode='eval') if filter_expr is not None else None
idx_start = 0
while idx_start < len(s):
idx_start = s.find(MESSAGE_START_SIGNATURE, idx_start)
if idx_start < 0:
return
try:
matched = True
if filter_expr:
bufr_message = decoder.process(
s[idx_start:], start_signature=None, info_only=True, *args, **kwargs
)
matched = sr.run(bufr_message)
if matched and not info_only:
bufr_message = decoder.process(
s[idx_start:], start_signature=None, info_only=False, *args, **kwargs
)
else:
bufr_message = decoder.process(
s[idx_start:], start_signature=None, info_only=info_only, *args, **kwargs
)
# If data section is not decoded, we rely on the declared length for the message length
if info_only:
bufr_message.serialized_bytes = s[idx_start: idx_start + bufr_message.length.value]
else:
if (bufr_message.data_category.value == DATA_CATEGORY_DEFINE_BUFR_TABLES
and bufr_message.n_subsets.value > 0):
_, b_entries, d_entries = BufrTableDefinitionProcessor().process(bufr_message)
TableGroupCacheManager.invalidate()
TableGroupCacheManager.add_extra_entries(b_entries, d_entries)
idx_start += len(bufr_message.serialized_bytes)
if matched:
yield bufr_message
except PyBufrKitError as e:
if not continue_on_error:
raise e
print('Continuing on next message and ignoring error: {}'.format(e), file=sys.stderr)
if info_only:
idx_start += 1
else:
try:
bufr_message = decoder.process(
s[idx_start:], start_signature=None, info_only=True, *args, **kwargs)
idx_start += bufr_message.length.value
except PyBufrKitError:
idx_start += 1
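# A minimal usage sketch (assumes 'example.bufr' is an existing file holding
# one or more BUFR messages; error handling omitted):
#
#     decoder = Decoder()
#     with open('example.bufr', 'rb') as ins:
#         for bufr_message in generate_bufr_message(decoder, ins.read()):
#             print(bufr_message.length.value)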
| {
"content_hash": "708c8ddac5e7255d9eea9619281e6798",
"timestamp": "",
"source": "github",
"line_count": 451,
"max_line_length": 112,
"avg_line_length": 45.330376940133036,
"alnum_prop": 0.6136274701623948,
"repo_name": "ywangd/pybufrkit",
"id": "044e9ef8699f91378768a52d4c79e41da1944e12",
"size": "20444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybufrkit/decoder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "142"
},
{
"name": "Python",
"bytes": "320418"
}
],
"symlink_target": ""
} |
import copy
from tempest.lib.api_schema.response.compute.v2_26 import servers as servers226
create_backup = {
'status_code': [202],
'response_body': {
'type': 'object',
'properties': {
'image_id': {'type': 'string', 'format': 'uuid'}
},
'additionalProperties': False,
'required': ['image_id']
}
}
# NOTE(gmann): Below are the unchanged schema in this microversion. We
# need to keep this schema in this file to have the generic way to select the
# right schema based on self.schema_versions_info mapping in service client.
# ****** Schemas unchanged since microversion 2.26 ***
get_server = copy.deepcopy(servers226.get_server)
list_servers_detail = copy.deepcopy(servers226.list_servers_detail)
update_server = copy.deepcopy(servers226.update_server)
rebuild_server = copy.deepcopy(servers226.rebuild_server)
rebuild_server_with_admin_pass = copy.deepcopy(
servers226.rebuild_server_with_admin_pass)
show_server_diagnostics = copy.deepcopy(servers226.show_server_diagnostics)
get_remote_consoles = copy.deepcopy(servers226.get_remote_consoles)
list_tags = copy.deepcopy(servers226.list_tags)
update_all_tags = copy.deepcopy(servers226.update_all_tags)
delete_all_tags = copy.deepcopy(servers226.delete_all_tags)
check_tag_existence = copy.deepcopy(servers226.check_tag_existence)
update_tag = copy.deepcopy(servers226.update_tag)
delete_tag = copy.deepcopy(servers226.delete_tag)
list_servers = copy.deepcopy(servers226.list_servers)
attach_volume = copy.deepcopy(servers226.attach_volume)
show_volume_attachment = copy.deepcopy(servers226.show_volume_attachment)
list_volume_attachments = copy.deepcopy(servers226.list_volume_attachments)
| {
"content_hash": "5ac1a0d189b22c027a26c2053a6d5c77",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 79,
"avg_line_length": 46.24324324324324,
"alnum_prop": 0.7504383401519579,
"repo_name": "openstack/tempest",
"id": "cb0fc134652368c59c5cafe5a0a2eeea9c148210",
"size": "2284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/lib/api_schema/response/compute/v2_45/servers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5364077"
},
{
"name": "Shell",
"bytes": "8684"
}
],
"symlink_target": ""
} |
"""Automatically format references in a LaTeX file."""
import argparse
from multiprocessing import Pool
from reference_utils import Reference, extract_bibtex_items
from latex_utils import read_latex_file, write_latex_file
class ReferenceFormatter:
def __init__(self, add_arxiv):
self.add_arxiv = add_arxiv
def get_reference(self, bibtex_entry):
"""Wrapper for multithreading."""
reference = Reference(bibtex_entry.rstrip(), self.add_arxiv)
reference.main()
return reference.bibitem_data, reference.bibitem_identifier, reference.reformatted_original_reference, reference.formatted_reference
def format_references(self, latex_source):
"""Format all references in the given LaTeX source."""
bibtex_entries = extract_bibtex_items(latex_source)
# Parallelising the reference lookup gives a 15x speedup.
# Values larger than 15 for the poolsize do not give a further speedup.
with Pool(15) as pool:
res = pool.map(self.get_reference, bibtex_entries)
for r in res:
bibitem_data, bibitem_identifier, reformatted_original_reference, formatted_reference = r
latex_source = latex_source.replace(bibitem_data, f"\\bibitem{{{bibitem_identifier}}} \\textcolor{{red}}{{TODO}}\n{reformatted_original_reference}\n\n%{formatted_reference}\n\n\n")
return latex_source
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('latex_file')
parser.add_argument('--add_arxiv', action="store_true")
args = parser.parse_args()
latex_source = read_latex_file(args.latex_file)
print("Processing references...")
reference_formatter = ReferenceFormatter(args.add_arxiv)
latex_source = reference_formatter.format_references(latex_source)
write_latex_file(args.latex_file, latex_source)
| {
"content_hash": "127b6fe567d1998184a5de13e233d5a1",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 192,
"avg_line_length": 44.523809523809526,
"alnum_prop": 0.7,
"repo_name": "teunzwart/latex-production-tools",
"id": "f670021d80d6cae2498ea3274e3932292a4b1902",
"size": "1870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reference_formatter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "63"
},
{
"name": "Python",
"bytes": "48712"
}
],
"symlink_target": ""
} |
'''
Created on Jun 4, 2014
@author: sstober
'''
# import socket;
import logging;
log = logging.getLogger(__name__);
import argparse;
import random;
from deepthought.util.config_util import init_logging, load_config_file, empty_config;
from deepthought.util.yaml_util import load_yaml_template, load_yaml;
from deepthought.util.class_util import load_class;
if __name__ == '__main__':
init_logging(pylearn2_loglevel=logging.INFO);
# print args
# parse arguments using optparse or argparse or what have you
parser = argparse.ArgumentParser(prog='run_train',
description='run a train algorithm as specified by a YAML file');
# global options
parser.add_argument('yaml', default='train.yaml', help='path of the YAML file to run');
parser.add_argument("-c", "--config", #type=str,
help="specify a config file");
parser.add_argument("-l", "--localizer", #type=str,
help="specify a custom localizer");
args = parser.parse_args();
train_yaml = load_yaml_template(args.yaml);
# load optional settings
if args.config is not None:
config = load_config_file(args.config);
else:
config = empty_config();
if not hasattr(config, 'random_seed'):
random_seed = random.randint(0, 100);
config.random_seed = random_seed;
log.debug('using random seed {}'.format(random_seed))
# load optional localizer
if args.localizer is not None:
localizer_class = args.localizer;
else:
localizer_class = config.get('localizer_class',
'deepthought.datasets.rwanda2013rhythms.PathLocalizer'); # for compatibility with old code
localizer = load_class(localizer_class);
# localize settings
config = localizer.localize_config(config);
# apply settings
train_yaml = train_yaml % config;
# localize YAML
train_yaml = localizer.localize_yaml(train_yaml);
train, _ = load_yaml(train_yaml);
    train.main_loop();
| {
"content_hash": "ac18b09299ff28022d02f6a520fb628a",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 132,
"avg_line_length": 33.44776119402985,
"alnum_prop": 0.5899152164212406,
"repo_name": "sstober/deepthought",
"id": "c5fb60f8704d46a5630dd30914749de28a7891b2",
"size": "2263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepthought/experiments/nips2014/scripts/run_train.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "56147125"
},
{
"name": "PureBasic",
"bytes": "2515"
},
{
"name": "Python",
"bytes": "446130"
},
{
"name": "Shell",
"bytes": "4966"
}
],
"symlink_target": ""
} |
from werkzeug import check_password_hash, generate_password_hash
from antiques import db
from antiques.core.date import utc_now
from antiques.core.mixins import ActiveRecordMixin
class User(ActiveRecordMixin, db.Model):
""" regular user """
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True, unique=True)
email = db.Column(db.String(255), nullable=False, unique=True)
password = db.Column(db.String(255), nullable=False)
first_name = db.Column(db.String(255))
last_name = db.Column(db.String(255))
created_at = db.Column(db.DateTime(timezone=True), default=utc_now)
active = db.Column(db.Boolean, default=True)
staff = db.Column(db.Boolean, default=False)
superuser = db.Column(db.Boolean, default=False)
@property
def full_name(self):
return u'{} {}'.format(self.first_name, self.last_name)
def __repr__(self):
return u'<User %r>' % self.email
def __unicode__(self):
return self.full_name
# Methods for Flask-Login
def is_authenticated(self):
return True
def is_active(self):
return self.active
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
# End Flask-Login methods
def set_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password, password)
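# Illustrative usage sketch (hypothetical, assumes an active database session):
#
#     user = User(email='jane@example.com', first_name='Jane', last_name='Doe')
#     user.set_password('s3cret')      # stores a salted hash, never the plaintext
#     user.check_password('s3cret')    # -> True
#     user.check_password('wrong')     # -> False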
| {
"content_hash": "8c2ccfed31d1ce49e32a86477a4b1f7a",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 71,
"avg_line_length": 27.22222222222222,
"alnum_prop": 0.6653061224489796,
"repo_name": "vgamula/breakyourantiques",
"id": "d11c9b899cb9b0c41873f888358281b23cd0a183",
"size": "1470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "antiques/auth/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45755"
},
{
"name": "HTML",
"bytes": "24058"
},
{
"name": "JavaScript",
"bytes": "78"
},
{
"name": "Makefile",
"bytes": "103"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "12066"
}
],
"symlink_target": ""
} |
from collections import defaultdict, namedtuple
from . import globalvars
try:
basestring
except NameError: # python 3
basestring = str
def stringArray(strings):
return "[NSArray arrayWithObjects:%s,nil]" % ','.join(('@"%s"' % s) for s in strings)
def wrapString(s):
s = s.replace('\n', '\\n').replace('"', '\\"')
return '@"%s"' % s
def convertValueToObjc(value, requireNSObject=False):
if value is None:
return 'nil'
elif isinstance(value, KeyValueId):
return value._objcAccessor()
elif hasattr(value, 'objcValue'):
return value.objcValue()
elif isinstance(value, basestring):
result = wrapString(value)
# '-' is the string we use for menu separators and we don't want to localize these.
if value and value != '-' and globalvars.globalLocalizationTable:
result = 'NSLocalizedStringFromTable(%s, @"%s", @"")' % (result, globalvars.globalLocalizationTable)
return result
elif isinstance(value, bool):
result = 'YES' if value else 'NO'
if requireNSObject:
result = '[NSNumber numberWithBool:{}]'.format(result)
return result
elif isinstance(value, (int, float)):
result = str(value)
if requireNSObject:
if isinstance(value, int):
method = '[NSNumber numberWithInteger:{}]'
else:
method = '[NSNumber numberWithDouble:{}]'
result = method.format(result)
return result
else:
raise TypeError("Can't figure out the property's type")
def generateDictionary(source):
elems = []
for key, value in source.items():
elems.append(convertValueToObjc(value, requireNSObject=True))
elems.append(convertValueToObjc(key))
elems.append('nil')
return '[NSDictionary dictionaryWithObjectsAndKeys:{}]'.format(','.join(elems))
class KeyValueId(object):
# When we set a KeyValueId attribute in our source file, there's no convenient way of saying,
# at the codegen phase, "this is exactly when this value was set, so I'll insert code to assign
# this value here." What we can do, however, is keep a dictionary of all keys a certain value
# was assigned to, and when we create the code for that value, we insert assignments right after.
VALUE2KEYS = defaultdict(set)
def __init__(self, parent, name):
self._parent = parent
self._name = name
self._children = {}
def __repr__(self):
return '<KeyValueId %s>' % self._objcAccessor()
def __getattr__(self, name):
if name.startswith('_'):
return object.__getattribute__(self, name)
if name in self._children:
result = self._children[name]
else:
result = KeyValueId(self, name)
self._children[name] = result
return result
def __setattr__(self, name, value):
if name.startswith('_'):
object.__setattr__(self, name, value)
return
key = getattr(self, name)
KeyValueId.VALUE2KEYS[value].add(key)
# the methods below aren't actually private, it's just that we prepend them with underscores to
# avoid name clashes.
def _objcAccessor(self):
if self._parent:
if self._parent._name == 'nil':
return 'nil'
else:
return '[%s %s]' % (self._parent._objcAccessor(), self._name)
else:
return self._name
def _callMethod(self, methodname, argument=None, endline=True):
# For now, this method only supports call to methods of zero or one argument.
if argument is None:
result = getattr(self, methodname)._objcAccessor()
else:
result = '[%s %s:%s]' % (self._objcAccessor(), methodname, convertValueToObjc(argument))
if endline:
result += ';\n'
return result
def _clear(self):
for child in self._children.values():
child._clear()
self._children.clear()
for keys in KeyValueId.VALUE2KEYS.values():
keys.discard(self)
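# Sketch of how attribute access maps to Objective-C message sends (names are
# illustrative):
#
#     owner = KeyValueId(None, 'NSApp')
#     owner.mainMenu._objcAccessor()   # -> '[NSApp mainMenu]'
#     owner.delegate = someTarget      # records the key in KeyValueId.VALUE2KEYS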
class ConstGenerator(object):
def __getattr__(self, name):
return Literal(name)
Action = namedtuple('Action', 'target selector')
# Use this in properties when you need it to be generated as-is, and not wrapped as a normal string
class Literal(object):
def __init__(self, value):
self.value = value
def __repr__(self):
return "<Literal %r>" % self.value
def __or__(self, other):
return Flags([self]) | other
def __eq__(self, other):
if not isinstance(other, Literal):
return False
return self.value == other.value
def __hash__(self):
return hash(self.value)
def objcValue(self):
return self.value
# Use this for strings that shouldn't be wrapped in NSLocalizedStringFromTable
class NonLocalizableString(object):
def __init__(self, value):
self.value = value
def objcValue(self):
return wrapString(self.value)
NLSTR = NonLocalizableString # The full class name can be pretty long sometimes...
# Use this for flags-based properties. Will be converted into a "|" joined literal
class Flags(set):
def __or__(self, other):
assert isinstance(other, Literal)
result = Flags(self)
result.add(other)
return result
def objcValue(self):
elems = ((e.value if isinstance(e, Literal) else e) for e in self)
return '|'.join(elems)
Binding = namedtuple('Binding', 'name target keyPath options')
| {
"content_hash": "a47c035458ec6b5641ef02cd611546a1",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 112,
"avg_line_length": 34.43636363636364,
"alnum_prop": 0.6078845476944738,
"repo_name": "hsoft/xibless",
"id": "a9949901b3a5fb07b05fc55c37a1e98a53951044",
"size": "5682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xibless/types.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Objective-C",
"bytes": "5994"
},
{
"name": "Python",
"bytes": "113224"
}
],
"symlink_target": ""
} |
"""TFDS dataset for SKAI."""
import dataclasses
import tensorflow as tf
import tensorflow_datasets as tfds
@dataclasses.dataclass
class SkaiDatasetConfig(tfds.core.BuilderConfig):
"""Configuration for SKAI datasets.
Any of the attributes can be left blank if they don't exist.
Attributes:
labeled_train_pattern: Pattern for labeled training examples tfrecords.
labeled_test_pattern: Pattern for labeled test examples tfrecords.
unlabeled_pattern: Pattern for unlabeled examples tfrecords.
"""
labeled_train_pattern: str = ''
labeled_test_pattern: str = ''
unlabeled_pattern: str = ''
class SkaiDataset(tfds.core.GeneratorBasedBuilder):
"""TFDS dataset for SKAI.
Example usage:
import tensorflow_datasets.public_api as tfds
from skai import dataset
ds = tfds.load('skai_dataset', builder_kwargs={
'config': SkaiDatasetConfig(
name='example',
labeled_train_pattern='gs://path/to/train_labeled_examples.tfrecord',
labeled_test_pattern='gs://path/to/test_labeled_examples.tfrecord',
unlabeled_pattern='gs://path/to/unlabeled_examples-*.tfrecord')
})
labeled_train_dataset = ds['labeled_train']
labeled_test_dataset = ds['labeled_test']
unlabeled_test_dataset = ds['unlabeled']
"""
VERSION = tfds.core.Version('1.0.0')
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description='Skai',
features=tfds.features.FeaturesDict({
'coordinates':
tfds.features.Tensor(shape=(2,), dtype=tf.float32),
'encoded_coordinates':
tfds.features.Tensor(shape=(), dtype=tf.string),
'pre_image_png':
tfds.features.Tensor(shape=(64, 64, 3), dtype=tf.uint8),
'post_image_png':
tfds.features.Tensor(shape=(64, 64, 3), dtype=tf.uint8),
'label':
tfds.features.Tensor(shape=(), dtype=tf.float32)
}))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
splits = {}
if self.builder_config.labeled_train_pattern:
splits['labeled_train'] = self._generate_examples(
self.builder_config.labeled_train_pattern)
if self.builder_config.labeled_test_pattern:
splits['labeled_test'] = self._generate_examples(
self.builder_config.labeled_test_pattern)
if self.builder_config.unlabeled_pattern:
splits['unlabeled'] = self._generate_examples(
self.builder_config.unlabeled_pattern)
return splits
def _decode_record(self, record_bytes):
features = tf.io.parse_single_example(
record_bytes,
{
'coordinates': tf.io.FixedLenFeature([2], dtype=tf.float32),
'encoded_coordinates': tf.io.FixedLenFeature([], dtype=tf.string),
'pre_image_png': tf.io.FixedLenFeature([], dtype=tf.string),
'post_image_png': tf.io.FixedLenFeature([], dtype=tf.string),
'label': tf.io.FixedLenFeature([], dtype=tf.float32),
})
example_id = features['encoded_coordinates']
features['pre_image_png'] = tf.io.decode_image(features['pre_image_png'])
features['post_image_png'] = tf.io.decode_image(features['post_image_png'])
return example_id, features
def _generate_examples(self, pattern: str):
if not pattern:
return
paths = tf.io.gfile.glob(pattern)
ds = tf.data.TFRecordDataset(paths).map(self._decode_record)
return ds.as_numpy_iterator()
| {
"content_hash": "7da530eace0dd78d7a94b2038505458a",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 79,
"avg_line_length": 36.88421052631579,
"alnum_prop": 0.6532534246575342,
"repo_name": "google-research/skai",
"id": "fa0ad068be60786582cf100596b61cd18ebf314e",
"size": "4080",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/skai/dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "368064"
},
{
"name": "Shell",
"bytes": "1252"
}
],
"symlink_target": ""
} |
from django import forms
from django.forms import Form
from django.template import loader
from django.core.mail import EmailMultiAlternatives
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User,Group
from django.contrib.auth.forms import UserCreationForm,SetPasswordForm,PasswordResetForm
# internal and external apps
from empleadosapp.models import Empleado
from cuentas_usuarioapp.models import UsuarioEmpleado
UserModel = get_user_model()
# .filter(estadoEmpleado='A') ... add estadoEmpleado to Empleado
class UsuarioForm(Form):
codigoEmpleado = forms.ModelChoiceField(widget=forms.Select(attrs={'name':'empleado','class':'selectpicker','data-live-search':'true'}),queryset=Empleado.objects.exclude(codigoEmpleado__in=UsuarioEmpleado.objects.all().values_list('codigoEmpleado_id')),label="Empleado",help_text="(*)")
email = forms.EmailField(widget=forms.EmailInput(attrs={'name':'email','class':'form-control','id':'email','maxlength':'30'}),label="Email",help_text="(*)")
grupos = forms.ModelMultipleChoiceField(widget=forms.CheckboxSelectMultiple(attrs={'name':'grupos[]','class':'checkbox-inline'}),queryset=Group.objects.all())
#password = forms.CharField(widget=forms.TextInput(attrs={'name':'clave','maxlength':'16','class':'form-control','value':'{{pass}}'}),label="Clave",help_text="(*)")
"""
class CrearUsuario(UserCreationForm):
username = forms.CharField(widget=forms.TextInput(attrs={'name':'usuario','maxlength':'50','class':'form-control'}),label="Usuario",help_text="(*)")
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'name':'contra1','maxlength':'50','class':'form-control'}),label="Contrasena",help_text="(*)")
password2 = forms.CharField(widget=forms.PasswordInput(attrs={'name':'contra2','maxlength':'50','class':'form-control'}),label="Contrasena",help_text="(*)")
email = forms.EmailField(widget=forms.EmailInput(attrs={'name':'correo','class':'form-control'}),label="correo",required=False)
nombres = forms.CharField(widget=forms.TextInput(attrs={'name':'nombres','maxlength':'50','class':'form-control'}),label="Nombres",help_text="(*)")
apellidos = forms.CharField(widget=forms.TextInput(attrs={'name':'apellidos','maxlength':'50','class':'form-control'}),label="Apellidos",help_text="(*)")
grupos = forms.ModelMultipleChoiceField(widget=forms.CheckboxSelectMultiple(attrs={'name':'grupos','class':'checkbox-inline'}),queryset=Group.objects.all())
class Meta:
model = User
fields = ('username','password1','password2','email','nombres','apellidos','grupos')
def save(self,commit=True):
user = super(UserCreationForm,self).save(commit=False)
user.username = self.cleaned_data["username"]
user.password1 = self.cleaned_data["password1"]
user.password2 = self.cleaned_data["password2"]
user.email = self.cleaned_data["email"]
user.first_name = self.cleaned_data["nombres"]
user.last_name = self.cleaned_data["apellidos"]
#user.groups = self.cleaned_data["grupos"]
if commit:
user.save()
return user """
class CambiarPassword(forms.Form):
pass1 = forms.CharField(label="Nuevo Password",widget=forms.PasswordInput(attrs={"class":'form-control','name':'pass1'}))
pass2 = forms.CharField(label="Confirmar Password",widget=forms.PasswordInput(attrs={"class":'form-control','name':'pass2'}))
class ResetPasswordForm(forms.Form):
email = forms.EmailField(widget=forms.EmailInput(attrs={'name':'correo','class':'form-control'}),label="Correo Electronico",max_length=254)
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email, html_email_template_name=None):
"""
Sends a django.core.mail.EmailMultiAlternatives to `to_email`.
"""
subject = loader.render_to_string(subject_template_name, context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
body = loader.render_to_string(email_template_name, context)
email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
if html_email_template_name is not None:
html_email = loader.render_to_string(html_email_template_name, context)
email_message.attach_alternative(html_email, 'text/html')
email_message.send()
def get_users(self, email):
"""Given an email, return matching user(s) who should receive a reset.
This allows subclasses to more easily customize the default policies
that prevent inactive users and users with unusable passwords from
resetting their password.
"""
active_users = UserModel._default_manager.filter(**{
'email__iexact': email,
'is_active': True,
})
return (u for u in active_users if u.has_usable_password())
def save(self, domain_override=None,
subject_template_name='registration/password_reset_subject.txt',
email_template_name='registration/password_reset_email.html',
use_https=False, token_generator=default_token_generator,
from_email=None, request=None, html_email_template_name=None,
extra_email_context=None):
"""
Generates a one-use only link for resetting password and sends to the
user.
"""
email = self.cleaned_data["email"]
for user in self.get_users(email):
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
context = {
'email': email,
'domain': domain,
'site_name': site_name,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'user': user,
'token': token_generator.make_token(user),
'protocol': 'https' if use_https else 'http',
}
if extra_email_context is not None:
context.update(extra_email_context)
self.send_mail(
subject_template_name, email_template_name, context, from_email,
email, html_email_template_name=html_email_template_name,
)
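# Illustrative view-side usage (hypothetical; the view plumbing is not part of
# this module):
#
#     form = ResetPasswordForm(request.POST)
#     if form.is_valid():
#         form.save(request=request, use_https=request.is_secure())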
| {
"content_hash": "14ee9b138e9d113fb4f8637decbe0f43",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 287,
"avg_line_length": 54.78688524590164,
"alnum_prop": 0.6666666666666666,
"repo_name": "anderson7ru/bienestarues",
"id": "834af450898f631f3e7834810ec0eba404d2dc7f",
"size": "6708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cuentas_usuarioapp/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4870"
},
{
"name": "HTML",
"bytes": "644516"
},
{
"name": "JavaScript",
"bytes": "445466"
},
{
"name": "Python",
"bytes": "446004"
}
],
"symlink_target": ""
} |
from gapic.schema import wrappers
MIXINS_MAP = {
'DeleteOperation': wrappers.MixinMethod(
'DeleteOperation',
request_type='operations_pb2.DeleteOperationRequest',
response_type='None'
),
'WaitOperation': wrappers.MixinMethod(
'WaitOperation',
request_type='operations_pb2.WaitOperationRequest',
response_type='operations_pb2.Operation'
),
'ListOperations': wrappers.MixinMethod(
'ListOperations',
request_type='operations_pb2.ListOperationsRequest',
response_type='operations_pb2.ListOperationsResponse'
),
'CancelOperation': wrappers.MixinMethod(
'CancelOperation',
request_type='operations_pb2.CancelOperationRequest',
response_type='None'
),
'GetOperation': wrappers.MixinMethod(
'GetOperation',
request_type='operations_pb2.GetOperationRequest',
response_type='operations_pb2.Operation'
),
'TestIamPermissions': wrappers.MixinMethod(
'TestIamPermissions',
request_type='iam_policy_pb2.TestIamPermissionsRequest',
response_type='iam_policy_pb2.TestIamPermissionsResponse'
),
'GetIamPolicy': wrappers.MixinMethod(
'GetIamPolicy',
request_type='iam_policy_pb2.GetIamPolicyRequest',
response_type='policy_pb2.Policy'
),
'SetIamPolicy': wrappers.MixinMethod(
'SetIamPolicy',
request_type='iam_policy_pb2.SetIamPolicyRequest',
response_type='policy_pb2.Policy'
),
'ListLocations': wrappers.MixinMethod(
'ListLocations',
request_type='locations_pb2.ListLocationsRequest',
response_type='locations_pb2.ListLocationsResponse'
),
'GetLocation': wrappers.MixinMethod(
'GetLocation',
request_type='locations_pb2.GetLocationRequest',
response_type='locations_pb2.Location'
)
}
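# Illustrative lookup: recover the wrapped request/response types for an RPC.
#
#     method = MIXINS_MAP['GetOperation']
#     method.request_type    # -> 'operations_pb2.GetOperationRequest'
#     method.response_type   # -> 'operations_pb2.Operation'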
| {
"content_hash": "0abbd3597806123eb6101dcc03cb3e4c",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 65,
"avg_line_length": 34.98148148148148,
"alnum_prop": 0.6701958708311275,
"repo_name": "googleapis/gapic-generator-python",
"id": "89fccdd573dc4a7d9029adf2b8d8aa5a09b757c3",
"size": "2465",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "gapic/schema/mixins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2702"
},
{
"name": "Jinja",
"bytes": "767902"
},
{
"name": "Python",
"bytes": "4802905"
},
{
"name": "Shell",
"bytes": "31013"
},
{
"name": "Starlark",
"bytes": "26281"
}
],
"symlink_target": ""
} |
import re
import string
import ConfigParser
import os
import sys
import socket
import fcntl
import struct
import base64
import zlib
import json
import time
import subprocess
import urllib
import urllib2
from core.utils import Utils
class Colors(object):
N = '\033[m' # native
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
class ProgressBar():
def __init__(self, end=100, width=10, title="", display=None):
self.end = end
self.width = width
self.title = title
self.display = display
self.progress = float(0)
self.bar_format = '[%(fill)s>%(blank)s] %(progress)s%% - %(title)s'
self.rotate_format = '[Processing: %(mark)s] %(title)s'
self.markers='|/-\\'
self.curmark = -1
self.completed = False
self.reset()
def reset(self, end=None, width=None, title=""):
self.progress = float(0)
self.completed = False
if (end):
self.end = end
if (width):
self.width = width
self.curmark = -1
self.title = title
def inc(self, num=1):
if (not self.completed):
self.progress += num
cur_width = (self.progress / self.end) * self.width
fill = int(cur_width) * "-"
blank = (self.width - int(cur_width)) * " "
percentage = int((self.progress / self.end) * 100)
if (self.display):
self.display.verbose(self.bar_format % {'title': self.title, 'fill': fill, 'blank': blank, 'progress': percentage}, rewrite=True, end="", flush=True)
else:
sys.stdout.write('\r' + self.bar_format % {'title': self.title, 'fill': fill, 'blank': blank, 'progress': percentage})
sys.stdout.flush()
if (self.progress == self.end):
self.done()
return self.completed
def done(self):
print
self.completed = True
def rotate(self):
if (not self.completed):
self.curmark = (self.curmark + 1) % len(self.markers)
if (self.display):
self.display.verbose(self.rotate_format % {'title': self.title, 'mark': self.markers[self.curmark]}, rewrite=True, end="", flush=True)
else:
sys.stdout.write('\r' + self.rotate_format % {'title': self.title, 'mark': self.markers[self.curmark]})
sys.stdout.flush()
return self.completed
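# Illustrative usage (assumes a plain terminal; the display argument is optional):
#
#     bar = ProgressBar(end=50, width=20, title="scanning")
#     for _ in range(50):
#         bar.inc()        # renders e.g. [---------->          ] 50% - scanning
#
#     spinner = ProgressBar(title="waiting")
#     spinner.rotate()     # renders [Processing: |] waiting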
class Display():
def __init__(self, verbose=False, debug=False, logpath=None):
self.VERBOSE = verbose
self.DEBUG = debug
self.logpath = logpath
self.ruler = '-'
def setLogPath(self, logpath):
self.logpath = logpath
def enableVerbose(self):
self.VERBOSE = True
def enableDebug(self):
self.DEBUG = True
def log(self, s, filename="processlog.txt"):
if (self.logpath is not None):
fullfilename = self.logpath + filename
if not os.path.exists(os.path.dirname(fullfilename)):
os.makedirs(os.path.dirname(fullfilename))
fp = open(fullfilename, "a")
if (filename == "processlog.txt"):
fp.write(time.strftime("%Y.%m.%d-%H.%M.%S") + " - " + s + "\n")
else:
fp.write(s)
fp.close()
def _display(self, line, end="\n", flush=True, rewrite=False):
if (rewrite):
line = '\r' + line
sys.stdout.write(line + end)
if (flush):
sys.stdout.flush()
self.log(line)
def error(self, line, end="\n", flush=True, rewrite=False):
'''Formats and presents errors.'''
line = line[:1].upper() + line[1:]
s = '%s[!] %s%s' % (Colors.R, Utils.to_unicode(line), Colors.N)
self._display(s, end=end, flush=flush, rewrite=rewrite)
def output(self, line, end="\n", flush=True, rewrite=False):
'''Formats and presents normal output.'''
s = '%s[*]%s %s' % (Colors.B, Colors.N, Utils.to_unicode(line))
self._display(s, end=end, flush=flush, rewrite=rewrite)
def alert(self, line, end="\n", flush=True, rewrite=False):
'''Formats and presents important output.'''
s = '%s[*]%s %s' % (Colors.G, Colors.N, Utils.to_unicode(line))
self._display(s, end=end, flush=flush, rewrite=rewrite)
def verbose(self, line, end="\n", flush=True, rewrite=False):
'''Formats and presents output if in verbose mode.'''
if self.VERBOSE:
self.output("[VERBOSE] " + line, end=end, flush=True, rewrite=rewrite)
def debug(self, line, end="\n", flush=True, rewrite=False):
'''Formats and presents output if in debug mode (very verbose).'''
if self.DEBUG:
self.output("[DEBUG] " + line, end=end, flush=True, rewrite=rewrite)
def yn(self, line, default=None):
valid = {"yes": True, "y": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif (default.lower() == "yes") or (default.lower() == "y"):
prompt = " [Y/n] "
elif (default.lower() == "no") or (default.lower() == "n"):
prompt = " [y/N] "
else:
print "ERROR: Please provide a valid default value: no, n, yes, y, or None"
while True:
choice = self.input(line + prompt)
if default is not None and choice == '':
return valid[default.lower()]
elif choice.lower() in valid:
return valid[choice.lower()]
else:
self.alert("Please respond with 'yes/no' or 'y/n'.")
def selectlist(self, line, input_list):
answers = []
if input_list != []:
i = 1
for item in input_list:
self.output(str(i) + ": " + str(item))
i = i + 1
else:
return answers
choice = self.input(line)
if not choice:
return answers
answers = (choice.replace(' ', '')).split(',')
return answers
def input(self, line):
'''Formats and presents an input request to the user'''
s = '%s[?]%s %s' % (Colors.O, Colors.N, Utils.to_unicode(line))
answer = raw_input(s)
return answer
def heading(self, line):
'''Formats and presents styled header text'''
line = Utils.to_unicode(line)
self.output(self.ruler*len(line))
self.output(line.upper())
self.output(self.ruler*len(line))
def print_list(self, title, _list):
self.heading(title)
if _list != []:
for item in _list:
self.output(item)
else:
self.output("None")
| {
"content_hash": "ab2f585f03b4f12ab83c51110418a0d3",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 165,
"avg_line_length": 33.58128078817734,
"alnum_prop": 0.5386533665835411,
"repo_name": "ru-faraon/SPF",
"id": "2f12874500ebaf469f395c47aff0b28aa23b261e",
"size": "6817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spf/core/display.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "104751"
},
{
"name": "JavaScript",
"bytes": "95383"
},
{
"name": "Python",
"bytes": "143549"
}
],
"symlink_target": ""
} |
from example.api.controllers.v1 import controller
Controller = controller.Controller
__all__ = ('Controller',)
| {
"content_hash": "2b9ad4039a180c8b77f6176efce25736",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 49,
"avg_line_length": 22,
"alnum_prop": 0.7818181818181819,
"repo_name": "JimJiangX/BoneDragon",
"id": "44e8ef72c80ee9b9700b24be7d957c3a50022832",
"size": "259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/api/controllers/v1/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "74985"
},
{
"name": "Shell",
"bytes": "1030"
}
],
"symlink_target": ""
} |
import bigtempo.processors.dataframe_task as task
def sample_datasource_factory(cls):
return cls()
datasource_factory = sample_datasource_factory
task_factory = task.factory
| {
"content_hash": "2d3c735db7b18044e67bcd384335e5f0",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 49,
"avg_line_length": 20.22222222222222,
"alnum_prop": 0.7857142857142857,
"repo_name": "rhlobo/bigtempo",
"id": "5bb6a659a5e2b9649bce24279d4ae54092580034",
"size": "208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bigtempo/defaults.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "97880"
},
{
"name": "Shell",
"bytes": "4896"
}
],
"symlink_target": ""
} |
"""Contains calculation of various derived indices."""
import numpy as np
from .thermo import mixing_ratio, saturation_vapor_pressure
from .tools import _remove_nans, get_layer
from .. import constants as mpconsts
from ..package_tools import Exporter
from ..units import check_units, concatenate, units
from ..xarray import preprocess_xarray
exporter = Exporter(globals())
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', bottom='[pressure]', top='[pressure]')
def precipitable_water(pressure, dewpoint, *, bottom=None, top=None):
r"""Calculate precipitable water through the depth of a sounding.
Formula used is:
.. math:: -\frac{1}{\rho_l g} \int\limits_{p_\text{bottom}}^{p_\text{top}} r dp
from [Salby1996]_, p. 28.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
dewpoint : `pint.Quantity`
Atmospheric dewpoint profile
bottom: `pint.Quantity`, optional
Bottom of the layer, specified in pressure. Defaults to None (highest pressure).
top: `pint.Quantity`, optional
The top of the layer, specified in pressure. Defaults to None (lowest pressure).
Returns
-------
`pint.Quantity`
The precipitable water in the layer
Examples
--------
>>> pressure = np.array([1000, 950, 900]) * units.hPa
>>> dewpoint = np.array([20, 15, 10]) * units.degC
>>> pw = precipitable_water(pressure, dewpoint)
"""
# Sort pressure and dewpoint to be in decreasing pressure order (increasing height)
sort_inds = np.argsort(pressure)[::-1]
pressure = pressure[sort_inds]
dewpoint = dewpoint[sort_inds]
pressure, dewpoint = _remove_nans(pressure, dewpoint)
if top is None:
top = np.nanmin(pressure.magnitude) * pressure.units
if bottom is None:
bottom = np.nanmax(pressure.magnitude) * pressure.units
pres_layer, dewpoint_layer = get_layer(pressure, dewpoint, bottom=bottom,
depth=bottom - top)
w = mixing_ratio(saturation_vapor_pressure(dewpoint_layer), pres_layer)
# Since pressure is in decreasing order, pw will be the opposite sign of that expected.
pw = -1. * (np.trapz(w.magnitude, pres_layer.magnitude) * (w.units * pres_layer.units)
/ (mpconsts.g * mpconsts.rho_l))
return pw.to('millimeters')
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def mean_pressure_weighted(pressure, *args, height=None, bottom=None, depth=None):
r"""Calculate pressure-weighted mean of an arbitrary variable through a layer.
Layer top and bottom specified in height or pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
args : `pint.Quantity`
Parameters for which the pressure-weighted mean is to be calculated.
height : `pint.Quantity`, optional
Heights from sounding. Standard atmosphere heights assumed (if needed)
if no heights are given.
bottom: `pint.Quantity`, optional
The bottom of the layer in either the provided height coordinate
or in pressure. Don't provide in meters AGL unless the provided
height coordinate is meters AGL. Default is the first observation,
assumed to be the surface.
depth: `pint.Quantity`, optional
The depth of the layer in meters or hPa.
Returns
-------
list of `pint.Quantity`
Pressure-weighted mean of each variable in `args`, in the order given.
"""
ret = [] # Returned variable means in layer
layer_arg = get_layer(pressure, *args, height=height,
bottom=bottom, depth=depth)
layer_p = layer_arg[0]
layer_arg = layer_arg[1:]
# Taking the integral of the weights (pressure) to feed into the weighting
# function. Said integral works out to this function:
pres_int = 0.5 * (layer_p[-1].magnitude**2 - layer_p[0].magnitude**2)
for i, datavar in enumerate(args):
arg_mean = np.trapz((layer_arg[i] * layer_p).magnitude,
x=layer_p.magnitude) / pres_int
ret.append(arg_mean * datavar.units)
return ret
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[speed]', '[speed]', '[length]')
def bunkers_storm_motion(pressure, u, v, height):
r"""Calculate the Bunkers right-mover and left-mover storm motions and sfc-6km mean flow.
Uses the storm motion calculation from [Bunkers2000]_.
Parameters
----------
pressure : `pint.Quantity`
Pressure from sounding
u : `pint.Quantity`
U component of the wind
v : `pint.Quantity`
V component of the wind
height : `pint.Quantity`
Height from sounding
Returns
-------
right_mover: `pint.Quantity`
U and v component of Bunkers RM storm motion
left_mover: `pint.Quantity`
U and v component of Bunkers LM storm motion
wind_mean: `pint.Quantity`
U and v component of sfc-6km mean flow
"""
# mean wind from sfc-6km
wind_mean = concatenate(mean_pressure_weighted(pressure, u, v, height=height,
depth=6000 * units('meter')))
# mean wind from sfc-500m
wind_500m = concatenate(mean_pressure_weighted(pressure, u, v, height=height,
depth=500 * units('meter')))
# mean wind from 5.5-6km
wind_5500m = concatenate(mean_pressure_weighted(pressure, u, v, height=height,
depth=500 * units('meter'),
bottom=height[0] + 5500 * units('meter')))
# Calculate the shear vector from sfc-500m to 5.5-6km
shear = wind_5500m - wind_500m
# Take the cross product of the wind shear and k, and divide by the vector magnitude and
# multiply by the deviation empirically calculated in Bunkers (2000) (7.5 m/s)
shear_cross = concatenate([shear[1], -shear[0]])
shear_mag = np.hypot(*(arg.magnitude for arg in shear)) * shear.units
rdev = shear_cross * (7.5 * units('m/s').to(u.units) / shear_mag)
# Add the deviations to the layer average wind to get the RM motion
right_mover = wind_mean + rdev
# Subtract the deviations to get the LM motion
left_mover = wind_mean - rdev
return right_mover, left_mover, wind_mean
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[speed]', '[speed]')
def bulk_shear(pressure, u, v, height=None, bottom=None, depth=None):
r"""Calculate bulk shear through a layer.
Layer top and bottom specified in meters or pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
u : `pint.Quantity`
U-component of wind.
v : `pint.Quantity`
V-component of wind.
height : `pint.Quantity`, optional
Heights from sounding
depth: `pint.Quantity`, optional
The depth of the layer in meters or hPa. Defaults to 100 hPa.
bottom: `pint.Quantity`, optional
The bottom of the layer in height or pressure coordinates.
If using a height, it must be in the same coordinates as the given
heights (i.e., don't use meters AGL unless given heights
are in meters AGL.) Defaults to the highest pressure or lowest height given.
Returns
-------
u_shr: `pint.Quantity`
u-component of layer bulk shear
v_shr: `pint.Quantity`
v-component of layer bulk shear
"""
_, u_layer, v_layer = get_layer(pressure, u, v, height=height,
bottom=bottom, depth=depth)
u_shr = u_layer[-1] - u_layer[0]
v_shr = v_layer[-1] - v_layer[0]
return u_shr, v_shr
@exporter.export
@preprocess_xarray
@check_units('[energy] / [mass]', '[speed] * [speed]', '[speed]')
def supercell_composite(mucape, effective_storm_helicity, effective_shear):
r"""Calculate the supercell composite parameter.
The supercell composite parameter is designed to identify
environments favorable for the development of supercells,
and is calculated using the formula developed by
[Thompson2004]_:
.. math:: \text{SCP} = \frac{\text{MUCAPE}}{1000 \text{J/kg}} *
\frac{\text{Effective SRH}}{50 \text{m}^2/\text{s}^2} *
\frac{\text{Effective Shear}}{20 \text{m/s}}
The effective_shear term is set to zero below 10 m/s and
capped at 1 when effective_shear exceeds 20 m/s.
Parameters
----------
mucape : `pint.Quantity`
Most-unstable CAPE
effective_storm_helicity : `pint.Quantity`
Effective-layer storm-relative helicity
effective_shear : `pint.Quantity`
Effective bulk shear
Returns
-------
`pint.Quantity`
supercell composite
"""
effective_shear = np.clip(np.atleast_1d(effective_shear), None, 20 * units('m/s'))
effective_shear[effective_shear < 10 * units('m/s')] = 0 * units('m/s')
effective_shear = effective_shear / (20 * units('m/s'))
return ((mucape / (1000 * units('J/kg')))
* (effective_storm_helicity / (50 * units('m^2/s^2')))
* effective_shear).to('dimensionless')
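# Worked example (illustrative): mucape = 1000 J/kg, effective SRH = 50 m^2/s^2
# and effective shear = 20 m/s give 1.0 * 1.0 * 1.0 = 1.0 (dimensionless).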
@exporter.export
@preprocess_xarray
@check_units('[energy] / [mass]', '[length]', '[speed] * [speed]', '[speed]')
def significant_tornado(sbcape, surface_based_lcl_height, storm_helicity_1km, shear_6km):
r"""Calculate the significant tornado parameter (fixed layer).
The significant tornado parameter is designed to identify
environments favorable for the production of significant
tornadoes contingent upon the development of supercells.
It's calculated according to the formula used on the SPC
mesoanalysis page, updated in [Thompson2004]_:
.. math:: \text{SIGTOR} = \frac{\text{SBCAPE}}{1500 \text{J/kg}} * \frac{(2000 \text{m} -
\text{LCL}_\text{SB})}{1000 \text{m}} *
\frac{SRH_{\text{1km}}}{150 \text{m}^2/\text{s}^2} *
\frac{\text{Shear}_\text{6km}}{20 \text{m/s}}
The lcl height is set to zero when the lcl is above 2000m and
capped at 1 when below 1000m, and the shr6 term is set to 0
when shr6 is below 12.5 m/s and maxed out at 1.5 when shr6
exceeds 30 m/s.
Parameters
----------
sbcape : `pint.Quantity`
Surface-based CAPE
surface_based_lcl_height : `pint.Quantity`
Surface-based lifted condensation level
storm_helicity_1km : `pint.Quantity`
Surface-1km storm-relative helicity
shear_6km : `pint.Quantity`
Surface-6km bulk shear
Returns
-------
`pint.Quantity`
significant tornado parameter
"""
surface_based_lcl_height = np.clip(np.atleast_1d(surface_based_lcl_height),
1000 * units.m, 2000 * units.m)
surface_based_lcl_height[surface_based_lcl_height > 2000 * units.m] = 0 * units.m
surface_based_lcl_height = ((2000. * units.m - surface_based_lcl_height)
/ (1000. * units.m))
shear_6km = np.clip(np.atleast_1d(shear_6km), None, 30 * units('m/s'))
shear_6km[shear_6km < 12.5 * units('m/s')] = 0 * units('m/s')
shear_6km /= 20 * units('m/s')
return ((sbcape / (1500. * units('J/kg')))
* surface_based_lcl_height
* (storm_helicity_1km / (150. * units('m^2/s^2')))
* shear_6km)
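# Worked example (illustrative): sbcape = 1500 J/kg, LCL height = 1000 m,
# 1-km SRH = 150 m^2/s^2 and 6-km shear = 20 m/s give 1 * 1 * 1 * 1 = 1.0.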
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[speed]', '[speed]', '[length]', '[speed]', '[speed]')
def critical_angle(pressure, u, v, height, u_storm, v_storm):
r"""Calculate the critical angle.
The critical angle is the angle between the 10m storm-relative inflow vector
and the 10m-500m shear vector. A critical angle near 90 degrees indicates
that a storm in this environment on the indicated storm motion vector
is likely ingesting purely streamwise vorticity into its updraft, and [Esterheld2008]_
showed that significantly tornadic supercells tend to occur in environments
with critical angles near 90 degrees.
Parameters
----------
pressure : `pint.Quantity`
Pressures from sounding.
u : `pint.Quantity`
U-component of sounding winds.
v : `pint.Quantity`
V-component of sounding winds.
height : `pint.Quantity`
Heights from sounding.
u_storm : `pint.Quantity`
U-component of storm motion.
v_storm : `pint.Quantity`
V-component of storm motion.
Returns
-------
`pint.Quantity`
critical angle in degrees
"""
# Convert everything to m/s
u = u.to('m/s')
v = v.to('m/s')
u_storm = u_storm.to('m/s')
v_storm = v_storm.to('m/s')
# Sort into decreasing-pressure order (matches precipitable_water above)
sort_inds = np.argsort(pressure)[::-1]
pressure = pressure[sort_inds]
height = height[sort_inds]
u = u[sort_inds]
v = v[sort_inds]
# Calculate sfc-500m shear vector
shr5 = bulk_shear(pressure, u, v, height=height, depth=500 * units('meter'))
# Make everything relative to the sfc wind orientation
umn = u_storm - u[0]
vmn = v_storm - v[0]
vshr = np.asarray([shr5[0].magnitude, shr5[1].magnitude])
vsm = np.asarray([umn.magnitude, vmn.magnitude])
angle_c = np.dot(vshr, vsm) / (np.linalg.norm(vshr) * np.linalg.norm(vsm))
critical_angle = np.arccos(angle_c) * units('radian')
return critical_angle.to('degrees')
| {
"content_hash": "209bbeb4b42e96a7e9593452edc6a780",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 94,
"avg_line_length": 35.635883905013195,
"alnum_prop": 0.6302384125573819,
"repo_name": "ShawnMurd/MetPy",
"id": "f297e25ceb1dd328df07af954f00ea296e74f1b7",
"size": "13649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/metpy/calc/indices.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "989941"
},
{
"name": "Python",
"bytes": "572079"
}
],
"symlink_target": ""
} |
from spa.models.mix import Mix
from spa.models.basemodel import BaseModel
from django.db import models
class Tracklist(BaseModel):
mix = models.ForeignKey(Mix, related_name='tracklist')
index = models.SmallIntegerField()
timeindex = models.TimeField(null=True)
description = models.CharField(max_length=255)
artist = models.CharField(max_length=255)
title = models.CharField(max_length=255)
remixer = models.CharField(max_length=255)
label = models.CharField(max_length=255)
| {
"content_hash": "a2a9b46c982e2a6b2fc83b5bc00ba333",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 58,
"avg_line_length": 40.07692307692308,
"alnum_prop": 0.7293666026871402,
"repo_name": "fergalmoran/dss",
"id": "fbd50658e21e7d692cc78d7f87ec57c5ffdb0f77",
"size": "521",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "spa/models/tracklist.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1335630"
},
{
"name": "CoffeeScript",
"bytes": "91082"
},
{
"name": "JavaScript",
"bytes": "3576558"
},
{
"name": "Python",
"bytes": "1543569"
}
],
"symlink_target": ""
} |
import fileio
from reg import fetch, assign, CONT
from stats import goto_stats
INSTR = 'INSTR'
NO_INSTR = '"???"'
DONE = 'DONE'
@goto_stats
def goto(label):
fileio.write_file(INSTR, _convert_label(label))
def curr_instr():
return fileio.read_file(INSTR, NO_INSTR)
# these aren't strictly necessary, but they're very convenient.
# conceptually, we can imagine that the CONT register has some
# specialized physical connection to INSTR register
# (or PC, or whatever it really is)
def goto_continue():
goto(fetch(CONT))
def set_continue(label):
assign(CONT, _convert_label(label))
def _convert_label(label):
try:
return label.__name__
except AttributeError:
return label
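# Illustrative: both forms resolve to the same label string (names hypothetical).
#
#     def eval_dispatch(): ...
#     _convert_label(eval_dispatch)     # -> 'eval_dispatch'
#     _convert_label('eval_dispatch')   # -> 'eval_dispatch'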
| {
"content_hash": "44703b7f28460b91ba078c6a231f9e17",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 63,
"avg_line_length": 20.571428571428573,
"alnum_prop": 0.6972222222222222,
"repo_name": "nickdrozd/ecio-lisp",
"id": "ec34f92688bd44762db2ffc3684ac3c5fe4ac013",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "instr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43316"
},
{
"name": "Shell",
"bytes": "349"
}
],
"symlink_target": ""
} |
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.sites.models import get_current_site
from django.http import HttpResponse
from django.shortcuts import render
from content_edit.models import CmsContent
@staff_member_required
def ajax_save_content(request):
""" Save front end edited content """
site = get_current_site(request)
content_name = request.POST['content_name']
cms_content = CmsContent.objects.get(site=site, name=content_name)
cms_content.content = request.POST['content']
cms_content.save()
return HttpResponse('SUCCESS')
@staff_member_required
def sample_content_edit(request):
""" Just a test and demo view """
return render(request, 'content_edit/sample_content_edit.html')
| {
"content_hash": "b2cc961b0f9f73e9a61870a217e32078",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 71,
"avg_line_length": 38.55,
"alnum_prop": 0.7535667963683528,
"repo_name": "avinashdevicode/django-content-edit-master",
"id": "e1634b244776cdff6561c1ba823c050f983e0d41",
"size": "771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "content_edit/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10703"
},
{
"name": "JavaScript",
"bytes": "931188"
},
{
"name": "PHP",
"bytes": "2199"
},
{
"name": "Python",
"bytes": "8364"
}
],
"symlink_target": ""
} |
from enum import Enum
from functools import wraps
import panoply.datasource
from .exceptions import DataSourceException
ERROR_CODES_REGISTRY = {
400: 'Bad request',
401: 'Unauthorized',
403: 'Permissions error',
404: 'Resource is not found',
408: 'Timeout',
422: 'Parsing error',
429: 'Rate limit',
500: 'Internal exception',
501: 'Unclassified error',
506: 'SDK error in processing an error'
}
EXCEPTIONS_REGISTRY = {}
class Phase(Enum):
COLLECT = 'collect'
CONFIG = 'config'
def set_internal_code(code: int) -> callable:
""" A decorator is used on exception classes to register mapping
of error and internal code from ERROR_CODES_REGISTRY.
Parameters
----------
code : int
Error code from ERROR_CODES_REGISTRY.
"""
def decorator(exception_cls: type) -> type:
if not issubclass(exception_cls, BaseException):
raise RuntimeError('Unable to set error code for non-Exception class')
EXCEPTIONS_REGISTRY[exception_cls] = code
return exception_cls
return decorator
def wrap_errors(phase: Phase) -> callable:
""" A decorator is used to normalize raised exceptions.
Parameters
----------
phase : Phase
Equals to CONFIG / COLLECT.
"""
def _wrap_errors(func: callable) -> callable:
@wraps(func)
def wrapper(*args, **kwargs) -> list:
try:
return func(*args, **kwargs)
except DataSourceException as e:
# In case of nested error wrapper we should keep the original
# error but with the phase value of the last error wrapper
e.phase = phase.value
raise e
except Exception as e:
# source object can be:
# 1. a first param in dynamic params methods (e.g. definition(source, options))
# 2. an attribute of the DataSource class (e.g. definition(self, params) -> source = self.source)
source_config = args[0]
if isinstance(source_config, panoply.datasource.DataSource):
source_config = getattr(source_config, 'source', None)
code = EXCEPTIONS_REGISTRY.get(type(e))
if code is None:
code = 501
if code not in ERROR_CODES_REGISTRY:
code = 506
details = {
'message': str(e),
'code': code,
'exception_cls': f'{e.__class__.__module__}.{e.__class__.__name__}',
'phase': phase.value,
'source_type': source_config['type'],
'source_id': source_config['id'],
'database_id': source_config['database']
}
normalized = DataSourceException(**details)
raise normalized
return wrapper
return _wrap_errors
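# Illustrative wiring (names below are hypothetical, not part of this module):
#
#     @set_internal_code(429)
#     class RateLimitError(Exception):
#         pass
#
#     @wrap_errors(Phase.COLLECT)
#     def collect(source, options):
#         raise RateLimitError('slow down')  # surfaces as DataSourceException, code 429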
| {
"content_hash": "f2f0f562bf3e438011dd832ff0a13bdb",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 113,
"avg_line_length": 32.04255319148936,
"alnum_prop": 0.5537848605577689,
"repo_name": "panoplyio/panoply-python-sdk",
"id": "da671757b49c3a06221c7bce88031234c463856f",
"size": "3012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "panoply/errors/error_normalization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29578"
}
],
"symlink_target": ""
} |
"""
Commands for setting temporary breakpoints on the next
instruction of some type (call, branch, etc.)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import capstone
import gdb
import pwndbg.disasm
import pwndbg.events
import pwndbg.proc
import pwndbg.regs
from pwndbg.color import message
jumps = set((
capstone.CS_GRP_CALL,
capstone.CS_GRP_JUMP,
capstone.CS_GRP_RET,
capstone.CS_GRP_IRET
))
interrupts = set((capstone.CS_GRP_INT,))
@pwndbg.events.exit
def clear_temp_breaks():
if not pwndbg.proc.alive:
breakpoints = gdb.breakpoints()
if breakpoints:
for bp in breakpoints:
if bp.temporary and not bp.visible: #visible is used instead of internal because older gdb's don't support internal
bp.delete()
def next_int(address=None):
"""
If there is a syscall in the current basic block,
return the instruction closest to $PC.
Otherwise, return None.
"""
if address is None:
ins = pwndbg.disasm.one(pwndbg.regs.pc)
if not ins:
return None
address = ins.next
ins = pwndbg.disasm.one(address)
while ins:
if set(ins.groups) & jumps:
return None
if set(ins.groups) & interrupts:
return ins
ins = pwndbg.disasm.one(ins.next)
return None
def next_branch(address=None):
if address is None:
ins = pwndbg.disasm.one(pwndbg.regs.pc)
if not ins:
return None
address = ins.next
ins = pwndbg.disasm.one(address)
while ins:
if set(ins.groups) & jumps:
return ins
ins = pwndbg.disasm.one(ins.next)
return None
def break_next_branch(address=None):
ins = next_branch(address)
if ins:
gdb.Breakpoint("*%#x" % ins.address, internal=True, temporary=True)
gdb.execute('continue', from_tty=False, to_string=True)
return ins
def break_next_interrupt(address=None):
ins = next_int(address)
if ins:
gdb.Breakpoint("*%#x" % ins.address, internal=True, temporary=True)
gdb.execute('continue', from_tty=False, to_string=True)
return ins
def break_next_call(symbol_regex=None):
while pwndbg.proc.alive:
ins = break_next_branch()
if not ins:
break
# continue if not a call
if capstone.CS_GRP_CALL not in ins.groups:
continue
# return call if we don't search for a symbol
if not symbol_regex:
return ins
# return call if we match target address
if ins.target_const and re.match('%s$' % symbol_regex, hex(ins.target)):
return ins
# return call if we match symbol name
if ins.symbol and re.match('%s$' % symbol_regex, ins.symbol):
return ins
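# Illustrative calls from a pwndbg-enabled GDB session (regex is hypothetical):
#
#     break_next_branch()          # temp-break on the next jump/call/ret, then continue
#     break_next_call('malloc')    # step through branches until a call to malloc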
def break_next_ret(address=None):
while pwndbg.proc.alive:
ins = break_next_branch(address)
if not ins:
break
if capstone.CS_GRP_RET in ins.groups:
return ins
def break_on_program_code():
"""
Breaks on next instruction that belongs to process' objfile code.
:return: True for success, False when process ended or when pc is at the code.
"""
mp = pwndbg.proc.mem_page
start = mp.start
end = mp.end
if start <= pwndbg.regs.pc < end:
print(message.error('The pc is already at the binary objfile code. Not stepping.'))
return False
while pwndbg.proc.alive:
gdb.execute('si', from_tty=False, to_string=False)
addr = pwndbg.regs.pc
if start <= addr < end:
return True
return False
def break_on_next(address=None):
address = address or pwndbg.regs.pc
ins = pwndbg.disasm.one(address)
gdb.Breakpoint("*%#x" % (ins.address + ins.size), temporary=True)
gdb.execute('continue', from_tty=False, to_string=True)
| {
"content_hash": "4bf33408b18e93ce74ee9b6ea26c198b",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 132,
"avg_line_length": 25.240506329113924,
"alnum_prop": 0.6191073219658977,
"repo_name": "cebrusfs/217gdb",
"id": "9e86f75a07a8cfbed4087345d5c2ad4f1eecf830",
"size": "4034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pwndbg/next.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "584"
},
{
"name": "C",
"bytes": "113"
},
{
"name": "Go",
"bytes": "58"
},
{
"name": "Makefile",
"bytes": "1302"
},
{
"name": "Python",
"bytes": "1824522"
},
{
"name": "Shell",
"bytes": "6068"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ShapeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="shape", parent_name="scatterternary.line", **kwargs
):
super(ShapeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["linear", "spline"]),
**kwargs
)
| {
"content_hash": "d81bca216b0b16b62c079001a98e0704",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 78,
"avg_line_length": 35.266666666666666,
"alnum_prop": 0.5879017013232514,
"repo_name": "plotly/python-api",
"id": "74dcf83e26aef5ea8f144dc4f8b472bc6c10a01d",
"size": "529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterternary/line/_shape.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
VERSION = '0.3.10'
DEFAULT_SWAGGER_SETTINGS = {
'exclude_url_names': [],
'exclude_namespaces': [],
'api_version': '',
'api_path': '/',
'api_key': '',
'relative_paths': False,
'token_type': 'Token',
'enabled_methods': ['get', 'post', 'put', 'patch', 'delete'],
'is_authenticated': False,
'is_superuser': False,
'unauthenticated_user': 'django.contrib.auth.models.AnonymousUser',
'permission_denied_handler': None,
'resource_access_handler': None,
'template_path': 'rest_framework_swagger/index.html',
'doc_expansion': 'none',
}
try:
from django.conf import settings
from django.test.signals import setting_changed
def load_settings(provided_settings):
global SWAGGER_SETTINGS
SWAGGER_SETTINGS = provided_settings
for key, value in DEFAULT_SWAGGER_SETTINGS.items():
if key not in SWAGGER_SETTINGS:
SWAGGER_SETTINGS[key] = value
def reload_settings(*args, **kwargs):
setting, value = kwargs['setting'], kwargs['value']
if setting == 'SWAGGER_SETTINGS':
load_settings(value)
load_settings(getattr(settings,
'SWAGGER_SETTINGS',
DEFAULT_SWAGGER_SETTINGS))
setting_changed.connect(reload_settings)
except Exception:
# Django is unavailable or not configured; fall back to the defaults
SWAGGER_SETTINGS = DEFAULT_SWAGGER_SETTINGS
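# Illustrative project-level override (hypothetical Django settings.py):
#
#     SWAGGER_SETTINGS = {
#         'api_version': '1.0',
#         'enabled_methods': ['get', 'post'],
#     }
#
# Missing keys are back-filled from DEFAULT_SWAGGER_SETTINGS by load_settings.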
| {
"content_hash": "541e04a28e3ac8970c5fe611a601c656",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 71,
"avg_line_length": 31.136363636363637,
"alnum_prop": 0.6160583941605839,
"repo_name": "visasq/django-rest-swagger",
"id": "813de1e665624c4ba9d0fcb7a49a2caae8a4b73c",
"size": "1370",
"binary": false,
"copies": "1",
"ref": "refs/heads/latest",
"path": "rest_framework_swagger/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "40557"
},
{
"name": "HTML",
"bytes": "10644"
},
{
"name": "JavaScript",
"bytes": "306924"
},
{
"name": "Python",
"bytes": "228314"
}
],
"symlink_target": ""
} |
"""
This module allows you to analyse OCaml source code, autocomplete,
and infer types while writing.
"""
import functools
import sublime
import sublime_plugin
import re
import os
import sys
if sys.version_info < (3, 0):
from merlin.process import MerlinProcess
from merlin.helpers import merlin_pos, only_ocaml, clean_whitespace
else:
from .merlin.process import MerlinProcess
from .merlin.helpers import merlin_pos, only_ocaml, clean_whitespace
running_process = None
def merlin_process(name):
global running_process
if running_process is None:
running_process = MerlinProcess()
return running_process.acquire(name)
class MerlinLoadPackage(sublime_plugin.WindowCommand):
"""
Command to find packages and load them into the current view.
"""
def run(self):
view = self.window.active_view()
self.process = merlin_process(view.file_name())
self.modules = self.process.find_list()
self.window.show_quick_panel(self.modules, self.on_done)
def on_done(self, index):
if index != -1:
self.process.find_use(self.modules[index])
class MerlinAddBuildPath(sublime_plugin.WindowCommand):
"""
Command to add a directory to the build path (for completion, typechecking, etc).
"""
def run(self):
view = self.window.active_view()
file_name = view.file_name()
self.process = merlin_process(file_name)
if file_name:
wd = os.path.dirname(os.path.abspath(file_name))
else:
wd = os.getcwd()
self.window.show_input_panel("Add build path", wd, self.on_done, None, None)
def on_done(self, directory):
self.process.add_build_path(directory)
class MerlinAddSourcePath(sublime_plugin.WindowCommand):
"""
Command to add a directory to the source path (for jumping to definition).
"""
def run(self):
view = self.window.active_view()
file_name = view.file_name()
self.process = merlin_process(file_name)
if file_name:
wd = os.path.dirname(os.path.abspath(file_name))
else:
wd = os.getcwd()
self.window.show_input_panel("Add source path", wd, self.on_done, None, None)
def on_done(self, directory):
self.process.add_source_path(directory)
class MerlinRemoveBuildPath(sublime_plugin.WindowCommand):
"""
Command to remove a directory from the build path.
"""
def run(self):
view = self.window.active_view()
self.process = merlin_process(view.file_name())
self.directories = self.process.list_build_path()
self.window.show_quick_panel(self.directories, self.on_done)
def on_done(self, index):
if index != -1:
self.process.remove_build_path(self.directories[index])
class MerlinRemoveSourcePath(sublime_plugin.WindowCommand):
"""
Command to remove a directory from the source path.
"""
def run(self):
view = self.window.active_view()
self.process = merlin_process(view.file_name())
self.directories = self.process.list_source_path()
self.window.show_quick_panel(self.directories, self.on_done)
def on_done(self, index):
if index != -1:
self.process.remove_source_path(self.directories[index])
class MerlinEnableExtension(sublime_plugin.WindowCommand):
"""
Enable syntax extension
"""
def run(self):
view = self.window.active_view()
self.process = merlin_process(view.file_name())
self.extensions = self.process.extension_list('disabled')
self.window.show_quick_panel(self.extensions, self.on_done)
def on_done(self, index):
if index != -1:
self.process.extension_enable([self.extensions[index]])
class MerlinDisableExtension(sublime_plugin.WindowCommand):
"""
Disable syntax extension
"""
def run(self):
view = self.window.active_view()
self.process = merlin_process(view.file_name())
self.extensions = self.process.extension_list('enabled')
self.window.show_quick_panel(self.extensions, self.on_done)
def on_done(self, index):
if index != -1:
self.process.extension_disable([self.extensions[index]])
class MerlinTypeEnclosing:
"""
Return type information around cursor.
"""
def __init__(self, view):
process = merlin_process(view.file_name())
process.sync_buffer_to_cursor(view)
pos = view.sel()
line, col = view.rowcol(pos[0].begin())
# FIXME: proper integration into sublime-text
# enclosing is a list of json objects of the form:
# { 'type': string;
# 'tail': "no"|"position"|"call" // tailcall information
# 'start', 'end': {'line': int, 'col': int}
# }
self.enclosing = process.type_enclosing(line + 1, col)
self.view = view
def _item_region(self, item):
start = merlin_pos(self.view, item['start'])
end = merlin_pos(self.view, item['end'])
return sublime.Region(start, end)
def _item_format(self, item):
text = item['type']
if item['tail'] == 'position':
text += " (*tail-position*)"
if item['tail'] == 'call':
text += " (*tail-call*)"
return clean_whitespace(text)
def _items(self):
return list(map(self._item_format, self.enclosing))
def show_panel(self):
self.view.window().show_quick_panel(self._items(), self.on_done, sublime.MONOSPACE_FONT)
def show_menu(self):
self.view.show_popup_menu(self._items(), self.on_done, sublime.MONOSPACE_FONT)
def on_done(self, index):
if index > -1:
sel = self.view.sel()
sel.clear()
sel.add(self._item_region(self.enclosing[index]))
class MerlinTypeCommand(sublime_plugin.WindowCommand):
"""
Return type information around cursor.
"""
def run(self):
        enclosing = MerlinTypeEnclosing(self.window.active_view())
enclosing.show_panel()
class MerlinTypeMenu(sublime_plugin.TextCommand):
"""
Display type information in context menu
"""
def run(self, edit):
enclosing = MerlinTypeEnclosing(self.view)
enclosing.show_menu()
def merlin_locate_result(result, window):
if isinstance(result, dict):
pos = result['pos']
if 'file' in result:
filename = "%s:%d:%d" % (result['file'], pos['line'] - 1, pos['col'] + 1)
window.open_file(filename, sublime.ENCODED_POSITION | sublime.TRANSIENT)
else:
view = window.active_view()
sel = view.sel()
sel.clear()
pos = merlin_pos(view, pos)
sel.add(sublime.Region(pos, pos))
view.show_at_center(pos)
else:
sublime.message_dialog(result)
class MerlinLocateMli(sublime_plugin.WindowCommand):
"""
Locate definition under cursor
"""
def run(self):
view = self.window.active_view()
process = merlin_process(view.file_name())
process.sync_buffer_to_cursor(view)
pos = view.sel()
line, col = view.rowcol(pos[0].begin())
merlin_locate_result(process.locate(line + 1, col, kind=self.kind()), self.window)
def kind(self):
return "mli"
class MerlinLocateNameMli(sublime_plugin.WindowCommand):
"""
Locate definition by name
"""
    def run(self):
self.window.show_input_panel("Enter name", "", self.on_done, None, None)
def kind(self):
return "mli"
def on_done(self, name):
view = self.window.active_view()
process = merlin_process(view.file_name())
process.sync_buffer_to_cursor(view)
pos = view.sel()
line, col = view.rowcol(pos[0].begin())
merlin_locate_result(process.locate(line + 1, col, ident=name), self.window)
class MerlinLocateMl(MerlinLocateMli):
def kind(self):
return "ml"
class MerlinLocateNameMl(MerlinLocateNameMli):
def kind(self):
return "ml"
class MerlinWhich(sublime_plugin.WindowCommand):
"""
Abstract command to quickly find a file.
"""
def extensions(self):
return []
def run(self):
view = self.window.active_view()
self.process = merlin_process(view.file_name())
self.files = self.process.which_with_ext(self.extensions())
self.window.show_quick_panel(self.files, self.on_done)
def on_done(self, index):
if index != -1:
module_name = self.files[index]
modules = map(lambda ext: module_name + ext, self.extensions())
self.window.open_file(self.process.which_path(list(modules)))
class MerlinFindMl(MerlinWhich):
"""
Command to quickly find an ML file.
"""
def extensions(self):
return [".ml", ".mli"]
class MerlinFindMli(MerlinWhich):
"""
Command to quickly find an MLI file.
"""
def extensions(self):
return [".mli", ".ml"]
class Autocomplete(sublime_plugin.EventListener):
"""
Sublime Text autocompletion integration
"""
completions = []
cplns_ready = None
@only_ocaml
def on_query_completions(self, view, prefix, locations):
""" Sublime autocomplete event handler. """
# Expand the prefix with dots
l = locations[0]
line = view.substr(sublime.Region(view.line(l).a, l))
try:
prefix = re.findall(r"(([\w.]|->)+)", line)[-1][0]
except IndexError:
prefix = ""
process = merlin_process(view.file_name())
process.sync_buffer_to_cursor(view)
default_return = ([], sublime.INHIBIT_WORD_COMPLETIONS)
if self.cplns_ready:
self.cplns_ready = None
if self.completions:
cplns, self.completions = self.completions, []
return cplns
return default_return
if self.cplns_ready is None:
self.cplns_ready = False
line, col = view.rowcol(locations[0])
result = process.complete_cursor(prefix, line + 1, col)
self.cplns = [(clean_whitespace(r['name'] + '\t' + r['desc']), r['name']) for r in result['entries']]
self.show_completions(view, self.cplns)
return default_return
@only_ocaml
def show_completions(self, view, completions):
self.cplns_ready = True
if completions:
self.completions = completions
view.run_command("hide_auto_complete")
sublime.set_timeout(functools.partial(self.show, view), 0)
@only_ocaml
def show(self, view):
view.run_command("auto_complete", {
'disable_auto_insert': True,
'api_completions_only': True,
'next_completion_if_showing': False,
'auto_complete_commit_on_tab': True,
})
# Error panel stuff derived from SublimeClang under zlib license;
# see https://github.com/quarnster/SublimeClang#license.
class MerlinErrorPanelFlush(sublime_plugin.TextCommand):
def run(self, edit, data):
self.view.erase(edit, sublime.Region(0, self.view.size()))
self.view.insert(edit, 0, data)
class MerlinErrorPanel(object):
def __init__(self):
self.view = None
self.data = ""
def set_data(self, data):
self.data = data
if self.is_visible():
self.flush()
def is_visible(self, window=None):
ret = (self.view is not None) and (self.view.window() is not None)
if ret and window:
ret = self.view.window().id() == window.id()
return ret
def flush(self):
self.view.set_read_only(False)
self.view.set_scratch(True)
self.view.run_command("merlin_error_panel_flush", {"data": self.data})
self.view.set_read_only(True)
def open(self, window=None):
if window is None:
window = sublime.active_window()
if not self.is_visible(window):
self.view = window.get_output_panel("merlin")
self.view.settings().set("result_file_regex", "^(.+):([0-9]+):([0-9]+)")
self.flush()
window.run_command("show_panel", {"panel": "output.merlin"})
def close(self):
sublime.active_window().run_command("hide_panel", {
"panel": "output.merlin"
})
merlin_error_panel = MerlinErrorPanel()
class MerlinBuffer(sublime_plugin.EventListener):
"""
Synchronize the current buffer with Merlin and:
    - autocomplete words with type information;
- display errors in the gutter.
"""
_process = None
error_messages = []
def process(self, view):
if not self._process:
self._process = merlin_process(view.file_name())
return self._process
@only_ocaml
def on_post_save(self, view):
"""
        Sync the buffer with Merlin after each save.
"""
self.process(view).sync_buffer(view) # Dummy sync with the whole file
self.display_in_error_panel(view)
self.show_errors(view)
@only_ocaml
def on_modified(self, view):
view.erase_regions('ocaml-underlines-errors')
def _plugin_dir(self):
path = os.path.realpath(__file__)
root = os.path.split(os.path.dirname(path))[1]
return os.path.splitext(root)[0]
def gutter_icon_path(self):
try:
resource = sublime.load_binary_resource("gutter-icon.png")
cache_path = os.path.join(sublime.cache_path(), "Merlin",
"gutter-icon.png")
if not os.path.isfile(cache_path):
if not os.path.isdir(os.path.dirname(cache_path)):
os.makedirs(os.path.dirname(cache_path))
with open(cache_path, "wb") as f:
f.write(resource)
return "Cache/Merlin/gutter-icon.png"
except IOError:
return "Packages/" + self._plugin_dir() + "/gutter-icon.png"
def show_errors(self, view):
"""
Show a simple gutter icon for each parsing error.
"""
view.erase_regions('ocaml-underlines-errors')
errors = self.process(view).report_errors()
error_messages = []
underlines = []
for e in errors:
if 'start' in e and 'end' in e:
pos_start = e['start']
pos_stop = e['end']
pnt_start = merlin_pos(view, pos_start)
pnt_stop = merlin_pos(view, pos_stop)
r = sublime.Region(pnt_start, pnt_stop)
line_r = view.full_line(r)
line_r = sublime.Region(line_r.a - 1, line_r.b)
underlines.append(r)
# Remove line and character number
message = e['message']
error_messages.append((line_r, message))
self.error_messages = error_messages
flag = sublime.DRAW_OUTLINED
# add_regions(key, regions, scope, icon, flags)
view.add_regions('ocaml-underlines-errors', underlines, 'invalid',
self.gutter_icon_path(), flag)
@only_ocaml
def on_selection_modified(self, view):
self.display_in_error_panel(view)
def display_in_error_panel(self, view):
"""
        Display the error message in the error panel when the selection intersects
with errors in the current view.
"""
caret_region = view.sel()[0]
for message_region, message_text in self.error_messages:
if message_region.intersects(caret_region):
merlin_error_panel.open()
merlin_error_panel.set_data(message_text)
| {
"content_hash": "8334fa94c5129921a3969348e16b9e6c",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 113,
"avg_line_length": 29.300556586270872,
"alnum_prop": 0.5981764072690432,
"repo_name": "frantic/sublime-text-merlin",
"id": "514407871bbaa4b38727b843b2a0c806706efd45",
"size": "15793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sublime-text-merlin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3606"
},
{
"name": "Python",
"bytes": "26071"
}
],
"symlink_target": ""
} |
"""
Unit Tests for ml2 rpc
"""
import collections
import mock
from sqlalchemy.orm import exc
from neutron.agent import rpc as agent_rpc
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.openstack.common import context
from neutron.plugins.ml2.drivers import type_tunnel
from neutron.plugins.ml2 import rpc as plugin_rpc
from neutron.tests import base
class RpcCallbacksTestCase(base.BaseTestCase):
def setUp(self):
super(RpcCallbacksTestCase, self).setUp()
self.callbacks = plugin_rpc.RpcCallbacks(mock.Mock(), mock.Mock())
self.manager = mock.patch.object(
plugin_rpc.manager, 'NeutronManager').start()
self.l3plugin = mock.Mock()
self.manager.get_service_plugins.return_value = {
'L3_ROUTER_NAT': self.l3plugin
}
self.plugin = self.manager.get_plugin()
def _test_update_device_up(self, extensions, kwargs):
with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin'
'._device_to_port_id'):
type(self.l3plugin).supported_extension_aliases = (
mock.PropertyMock(return_value=extensions))
self.callbacks.update_device_up(mock.ANY, **kwargs)
def test_update_device_up_without_dvr(self):
kwargs = {
'agent_id': 'foo_agent',
'device': 'foo_device'
}
self._test_update_device_up(['router'], kwargs)
self.assertFalse(self.l3plugin.dvr_vmarp_table_update.call_count)
def test_update_device_up_with_dvr(self):
kwargs = {
'agent_id': 'foo_agent',
'device': 'foo_device'
}
self._test_update_device_up(['router', 'dvr'], kwargs)
self.l3plugin.dvr_vmarp_table_update.assert_called_once_with(
mock.ANY, mock.ANY, 'add')
def test_update_device_up_with_dvr_when_port_not_found(self):
kwargs = {
'agent_id': 'foo_agent',
'device': 'foo_device'
}
self.l3plugin.dvr_vmarp_table_update.side_effect = (
exceptions.PortNotFound(port_id='foo_port_id'))
self._test_update_device_up(['router', 'dvr'], kwargs)
self.assertTrue(self.l3plugin.dvr_vmarp_table_update.call_count)
def test_get_device_details_without_port_context(self):
self.plugin.get_bound_port_context.return_value = None
self.assertEqual(
{'device': 'fake_device'},
self.callbacks.get_device_details('fake_context',
device='fake_device'))
def test_get_device_details_port_context_without_bounded_segment(self):
self.plugin.get_bound_port_context().bound_segment = None
self.assertEqual(
{'device': 'fake_device'},
self.callbacks.get_device_details('fake_context',
device='fake_device'))
def test_get_device_details_port_status_equal_new_status(self):
port = collections.defaultdict(lambda: 'fake')
self.plugin.get_bound_port_context().current = port
self.plugin.port_bound_to_host = mock.MagicMock(return_value=True)
for admin_state_up in (True, False):
new_status = (constants.PORT_STATUS_BUILD if admin_state_up
else constants.PORT_STATUS_DOWN)
for status in (constants.PORT_STATUS_ACTIVE,
constants.PORT_STATUS_BUILD,
constants.PORT_STATUS_DOWN,
constants.PORT_STATUS_ERROR):
port['admin_state_up'] = admin_state_up
port['status'] = status
self.plugin.update_port_status.reset_mock()
self.callbacks.get_device_details('fake_context')
self.assertEqual(status == new_status,
not self.plugin.update_port_status.called)
def test_get_device_details_wrong_host(self):
port = collections.defaultdict(lambda: 'fake')
port_context = self.plugin.get_bound_port_context()
port_context.current = port
port_context.host = 'fake'
self.plugin.update_port_status.reset_mock()
self.callbacks.get_device_details('fake_context',
host='fake_host')
self.assertFalse(self.plugin.update_port_status.called)
def test_get_device_details_port_no_host(self):
port = collections.defaultdict(lambda: 'fake')
port_context = self.plugin.get_bound_port_context()
port_context.current = port
self.plugin.update_port_status.reset_mock()
self.callbacks.get_device_details('fake_context')
self.assertTrue(self.plugin.update_port_status.called)
def test_get_devices_details_list(self):
devices = [1, 2, 3, 4, 5]
kwargs = {'host': 'fake_host', 'agent_id': 'fake_agent_id'}
with mock.patch.object(self.callbacks, 'get_device_details',
side_effect=devices) as f:
res = self.callbacks.get_devices_details_list('fake_context',
devices=devices,
**kwargs)
self.assertEqual(devices, res)
self.assertEqual(len(devices), f.call_count)
calls = [mock.call('fake_context', device=i, **kwargs)
for i in devices]
f.assert_has_calls(calls)
def test_get_devices_details_list_with_empty_devices(self):
with mock.patch.object(self.callbacks, 'get_device_details') as f:
res = self.callbacks.get_devices_details_list('fake_context')
self.assertFalse(f.called)
self.assertEqual([], res)
def _test_update_device_not_bound_to_host(self, func):
self.plugin.port_bound_to_host.return_value = False
self.plugin._device_to_port_id.return_value = 'fake_port_id'
res = func('fake_context', device='fake_device', host='fake_host')
self.plugin.port_bound_to_host.assert_called_once_with('fake_context',
'fake_port_id',
'fake_host')
return res
def test_update_device_up_with_device_not_bound_to_host(self):
self.assertIsNone(self._test_update_device_not_bound_to_host(
self.callbacks.update_device_up))
def test_update_device_down_with_device_not_bound_to_host(self):
self.assertEqual(
{'device': 'fake_device', 'exists': True},
self._test_update_device_not_bound_to_host(
self.callbacks.update_device_down))
def test_update_device_down_call_update_port_status(self):
self.plugin.update_port_status.return_value = False
self.plugin._device_to_port_id.return_value = 'fake_port_id'
self.assertEqual(
{'device': 'fake_device', 'exists': False},
self.callbacks.update_device_down('fake_context',
device='fake_device',
host='fake_host'))
self.plugin.update_port_status.assert_called_once_with(
'fake_context', 'fake_port_id', constants.PORT_STATUS_DOWN,
'fake_host')
def test_update_device_down_call_update_port_status_failed(self):
self.plugin.update_port_status.side_effect = exc.StaleDataError
self.assertEqual({'device': 'fake_device', 'exists': False},
self.callbacks.update_device_down(
'fake_context', device='fake_device'))
class RpcApiTestCase(base.BaseTestCase):
def _test_rpc_api(self, rpcapi, topic, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
        expected_retval = 'foo' if rpc_method == 'call' else None
expected_version = kwargs.pop('version', None)
expected_msg = rpcapi.make_msg(method, **kwargs)
if rpc_method == 'cast' and method == 'run_instance':
kwargs['call'] = False
rpc = n_rpc.RpcProxy
with mock.patch.object(rpc, rpc_method) as rpc_method_mock:
rpc_method_mock.return_value = expected_retval
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, expected_retval)
additional_args = {}
if topic:
additional_args['topic'] = topic
if expected_version:
additional_args['version'] = expected_version
expected = [
mock.call(ctxt, expected_msg, **additional_args)
]
rpc_method_mock.assert_has_calls(expected)
def test_delete_network(self):
rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
self._test_rpc_api(rpcapi,
topics.get_topic_name(topics.AGENT,
topics.NETWORK,
topics.DELETE),
'network_delete', rpc_method='fanout_cast',
network_id='fake_request_spec')
def test_port_update(self):
rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
self._test_rpc_api(rpcapi,
topics.get_topic_name(topics.AGENT,
topics.PORT,
topics.UPDATE),
'port_update', rpc_method='fanout_cast',
port='fake_port',
network_type='fake_network_type',
segmentation_id='fake_segmentation_id',
physical_network='fake_physical_network')
def test_tunnel_update(self):
rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
self._test_rpc_api(rpcapi,
topics.get_topic_name(topics.AGENT,
type_tunnel.TUNNEL,
topics.UPDATE),
'tunnel_update', rpc_method='fanout_cast',
tunnel_ip='fake_ip', tunnel_type='gre')
def test_device_details(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_rpc_api(rpcapi, None,
'get_device_details', rpc_method='call',
device='fake_device',
agent_id='fake_agent_id',
host='fake_host')
def test_devices_details_list(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_rpc_api(rpcapi, None,
'get_devices_details_list', rpc_method='call',
devices=['fake_device1', 'fake_device2'],
agent_id='fake_agent_id', host='fake_host',
version='1.3')
def test_update_device_down(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_rpc_api(rpcapi, None,
'update_device_down', rpc_method='call',
device='fake_device',
agent_id='fake_agent_id',
host='fake_host')
def test_tunnel_sync(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_rpc_api(rpcapi, None,
'tunnel_sync', rpc_method='call',
tunnel_ip='fake_tunnel_ip',
tunnel_type=None)
def test_update_device_up(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_rpc_api(rpcapi, None,
'update_device_up', rpc_method='call',
device='fake_device',
agent_id='fake_agent_id',
host='fake_host')
| {
"content_hash": "950ea4407609ae8a4a5efa12b4356ccc",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 78,
"avg_line_length": 45.066914498141266,
"alnum_prop": 0.5520085787346366,
"repo_name": "redhat-openstack/neutron",
"id": "0d51a9e2736f3c53e8852fd59a4c3c98f9eb59bc",
"size": "12763",
"binary": false,
"copies": "1",
"ref": "refs/heads/f22-patches",
"path": "neutron/tests/unit/ml2/test_rpcapi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "10433756"
},
{
"name": "Shell",
"bytes": "11069"
}
],
"symlink_target": ""
} |
from gevent import monkey
monkey.patch_all()  # patch the stdlib before other imports create sockets or threads
from kafkaka.gevent_patch import KafkaClient
from gevent import spawn
from gevent.server import StreamServer
import gevent
import time
def test(c):
i=0
while 1:
i += 1
for j in xrange(0, 500):
c.send_message('im-msg', u'你好'.encode('utf8')+" " + str(time.time()) + " " + str(i))
c.send_message('im-msg', 'hi'.encode('utf8')+" " + str(time.time()) + " " + str(i))
gevent.sleep(5)
if __name__ == "__main__":
c = KafkaClient("localhost:9092",
topic_names=['im-msg'],
pool_size=3 # the number of max parallel connections.
)
spawn(test, c)
def handle(socket, address):
socket.send("Hello from a telnet!\n")
for i in range(5):
socket.send(str(i) + '\n')
socket.close()
server = StreamServer(('127.0.0.1', 5000), handle)
server.serve_forever()
| {
"content_hash": "b5aff214cb4711b1a13e321fcaa10d6f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 96,
"avg_line_length": 30.09375,
"alnum_prop": 0.5607476635514018,
"repo_name": "wesdu/kafkaka",
"id": "ef0f3bffb42ca121ec7ac4bd97c061e5435deddb",
"size": "983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_gevent_pool_size.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "72818"
}
],
"symlink_target": ""
} |
"""Create a new site."""
import datetime
import io
import json
import os
import shutil
import textwrap
import unidecode
from urllib.parse import urlsplit, urlunsplit
import dateutil.tz
import dateutil.zoneinfo
from mako.template import Template
from pkg_resources import resource_filename
import nikola
from nikola.nikola import DEFAULT_INDEX_READ_MORE_LINK, DEFAULT_FEED_READ_MORE_LINK, LEGAL_VALUES
from nikola.plugin_categories import Command
from nikola.utils import ask, ask_yesno, get_logger, makedirs, load_messages
from nikola.packages.tzlocal import get_localzone
LOGGER = get_logger('init')
SAMPLE_CONF = {
'BLOG_AUTHOR': "Your Name",
'BLOG_TITLE': "Demo Site",
'SITE_URL': "https://example.com/",
'BLOG_EMAIL': "joe@demo.site",
'BLOG_DESCRIPTION': "This is a demo site for Nikola.",
'PRETTY_URLS': True,
'STRIP_INDEXES': True,
'DEFAULT_LANG': "en",
'TRANSLATIONS': """{
DEFAULT_LANG: "",
# Example for another language:
# "es": "./es",
}""",
'THEME': LEGAL_VALUES['DEFAULT_THEME'],
'TIMEZONE': 'UTC',
'COMMENT_SYSTEM': 'disqus',
'COMMENT_SYSTEM_ID': 'nikolademo',
'CATEGORY_ALLOW_HIERARCHIES': False,
'CATEGORY_OUTPUT_FLAT_HIERARCHY': False,
'INDEX_READ_MORE_LINK': DEFAULT_INDEX_READ_MORE_LINK,
'FEED_READ_MORE_LINK': DEFAULT_FEED_READ_MORE_LINK,
'POSTS': """(
("posts/*.rst", "posts", "post.tmpl"),
("posts/*.md", "posts", "post.tmpl"),
("posts/*.txt", "posts", "post.tmpl"),
("posts/*.html", "posts", "post.tmpl"),
)""",
'PAGES': """(
("pages/*.rst", "pages", "page.tmpl"),
("pages/*.md", "pages", "page.tmpl"),
("pages/*.txt", "pages", "page.tmpl"),
("pages/*.html", "pages", "page.tmpl"),
)""",
'COMPILERS': """{
"rest": ['.rst', '.txt'],
"markdown": ['.md', '.mdown', '.markdown'],
"textile": ['.textile'],
"txt2tags": ['.t2t'],
"bbcode": ['.bb'],
"wiki": ['.wiki'],
"ipynb": ['.ipynb'],
"html": ['.html', '.htm'],
# PHP files are rendered the usual way (i.e. with the full templates).
# The resulting files have .php extensions, making it possible to run
# them without reconfiguring your server to recognize them.
"php": ['.php'],
# Pandoc detects the input from the source filename
# but is disabled by default as it would conflict
# with many of the others.
# "pandoc": ['.rst', '.md', '.txt'],
}""",
'NAVIGATION_LINKS': """{
DEFAULT_LANG: (
("/archive.html", "Archives"),
("/categories/index.html", "Tags"),
("/rss.xml", "RSS feed"),
),
}""",
'REDIRECTIONS': [],
'_METADATA_MAPPING_FORMATS': ', '.join(LEGAL_VALUES['METADATA_MAPPING'])
}
# Generate a list of supported languages here.
# Ugly code follows.
_suplang = {}
_sllength = 0
for k, v in LEGAL_VALUES['TRANSLATIONS'].items():
if not isinstance(k, tuple):
main = k
_suplang[main] = v
else:
main = k[0]
k = k[1:]
bad = []
good = []
for i in k:
if i.startswith('!'):
bad.append(i[1:])
else:
good.append(i)
different = ''
if good or bad:
different += ' ['
if good:
different += 'ALTERNATIVELY ' + ', '.join(good)
if bad:
if good:
different += '; '
different += 'NOT ' + ', '.join(bad)
if good or bad:
different += ']'
_suplang[main] = v + different
if len(main) > _sllength:
_sllength = len(main)
_sllength = str(_sllength)
suplang = (u'# {0:<' + _sllength + u'} {1}\n').format('en', 'English')
del _suplang['en']
for k, v in sorted(_suplang.items()):
suplang += (u'# {0:<' + _sllength + u'} {1}\n').format(k, v)
SAMPLE_CONF['_SUPPORTED_LANGUAGES'] = suplang.strip()
# Generate a list of supported comment systems here.
SAMPLE_CONF['_SUPPORTED_COMMENT_SYSTEMS'] = '\n'.join(textwrap.wrap(
u', '.join(LEGAL_VALUES['COMMENT_SYSTEM']),
initial_indent=u'# ', subsequent_indent=u'# ', width=79))
def format_default_translations_config(additional_languages):
"""Adapt TRANSLATIONS setting for all additional languages."""
if not additional_languages:
return SAMPLE_CONF["TRANSLATIONS"]
lang_paths = [' DEFAULT_LANG: "",']
for lang in sorted(additional_languages):
lang_paths.append(' "{0}": "./{0}",'.format(lang))
return "{{\n{0}\n}}".format("\n".join(lang_paths))
def get_default_translations_dict(default_lang, additional_languages):
"""Generate a TRANSLATIONS dict matching the config from 'format_default_translations_config'."""
tr = {default_lang: ''}
for l in additional_languages:
tr[l] = './' + l
return tr
def format_navigation_links(additional_languages, default_lang, messages, strip_indexes=False):
"""Return the string to configure NAVIGATION_LINKS."""
f = u"""\
{0}: (
("{1}/archive.html", "{2[Archive]}"),
("{1}/categories/{3}", "{2[Tags]}"),
("{1}/rss.xml", "{2[RSS feed]}"),
),"""
pairs = []
def get_msg(lang):
"""Generate a smaller messages dict with fallback."""
fmsg = {}
for i in (u'Archive', u'Tags', u'RSS feed'):
if messages[lang][i]:
fmsg[i] = messages[lang][i]
else:
fmsg[i] = i
return fmsg
if strip_indexes:
index_html = ''
else:
index_html = 'index.html'
# handle the default language
pairs.append(f.format('DEFAULT_LANG', '', get_msg(default_lang), index_html))
for l in additional_languages:
pairs.append(f.format(json.dumps(l, ensure_ascii=False), '/' + l, get_msg(l), index_html))
return u'{{\n{0}\n}}'.format('\n\n'.join(pairs))
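# Example (hypothetical, assuming the message catalogue falls back to the
# English keys): with default_lang='en', no additional languages and
# strip_indexes=False, the rendered setting looks like
# {
#     DEFAULT_LANG: (
#         ("/archive.html", "Archive"),
#         ("/categories/index.html", "Tags"),
#         ("/rss.xml", "RSS feed"),
#     ),
# }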
# In order to ensure proper escaping, all variables but the pre-formatted ones
# are handled by json.dumps().
def prepare_config(config):
"""Parse sample config with JSON."""
p = config.copy()
p.update({k: json.dumps(v, ensure_ascii=False) for k, v in p.items()
if k not in ('POSTS', 'PAGES', 'COMPILERS', 'TRANSLATIONS', 'NAVIGATION_LINKS', '_SUPPORTED_LANGUAGES', '_SUPPORTED_COMMENT_SYSTEMS', 'INDEX_READ_MORE_LINK', 'FEED_READ_MORE_LINK', '_METADATA_MAPPING_FORMATS')})
# READ_MORE_LINKs require some special treatment.
p['INDEX_READ_MORE_LINK'] = "'" + p['INDEX_READ_MORE_LINK'].replace("'", "\\'") + "'"
p['FEED_READ_MORE_LINK'] = "'" + p['FEED_READ_MORE_LINK'].replace("'", "\\'") + "'"
# fix booleans and None
p.update({k: str(v) for k, v in config.items() if isinstance(v, bool) or v is None})
return p
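# Example (hypothetical): within SAMPLE_CONF, 'BLOG_TITLE': "Demo Site"
# becomes '"Demo Site"' (JSON-quoted) while 'PRETTY_URLS': True becomes 'True'.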
def test_destination(destination, demo=False):
"""Check if the destination already exists, which can break demo site creation."""
# Issue #2214
if demo and os.path.exists(destination):
LOGGER.warning("The directory {0} already exists, and a new demo site cannot be initialized in an existing directory.".format(destination))
LOGGER.warning("Please remove the directory and try again, or use another directory.")
LOGGER.info("Hint: If you want to initialize a git repository in this directory, run `git init` in the directory after creating a Nikola site.")
return False
else:
return True
class CommandInit(Command):
"""Create a new site."""
name = "init"
doc_usage = "[--demo] [--quiet] folder"
needs_config = False
doc_purpose = "create a Nikola site in the specified folder"
cmd_options = [
{
'name': 'quiet',
'long': 'quiet',
'short': 'q',
'default': False,
'type': bool,
'help': "Do not ask questions about config.",
},
{
'name': 'demo',
'long': 'demo',
'short': 'd',
'default': False,
'type': bool,
'help': "Create a site filled with example data.",
}
]
@classmethod
def copy_sample_site(cls, target):
"""Copy sample site data to target directory."""
src = resource_filename('nikola', os.path.join('data', 'samplesite'))
shutil.copytree(src, target)
@staticmethod
def create_configuration(target):
"""Create configuration file."""
template_path = resource_filename('nikola', 'conf.py.in')
conf_template = Template(filename=template_path)
conf_path = os.path.join(target, 'conf.py')
with io.open(conf_path, 'w+', encoding='utf8') as fd:
fd.write(conf_template.render(**prepare_config(SAMPLE_CONF)))
@staticmethod
def create_configuration_to_string():
"""Return configuration file as a string."""
template_path = resource_filename('nikola', 'conf.py.in')
conf_template = Template(filename=template_path)
return conf_template.render(**prepare_config(SAMPLE_CONF))
@classmethod
def create_empty_site(cls, target):
"""Create an empty site with directories only."""
for folder in ('files', 'galleries', 'images', 'listings', 'posts', 'pages'):
makedirs(os.path.join(target, folder))
@staticmethod
def ask_questions(target, demo=False):
"""Ask some questions about Nikola."""
def urlhandler(default, toconf):
answer = ask('Site URL', 'https://example.com/')
try:
answer = answer.decode('utf-8')
except (AttributeError, UnicodeDecodeError):
pass
if not answer.startswith(u'http'):
print(" ERROR: You must specify a protocol (http or https).")
urlhandler(default, toconf)
return
if not answer.endswith('/'):
print(" The URL does not end in '/' -- adding it.")
answer += '/'
dst_url = urlsplit(answer)
try:
dst_url.netloc.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
# The IDN contains characters beyond ASCII. We must convert it
# to Punycode. (Issue #1644)
nl = dst_url.netloc.encode('idna')
answer = urlunsplit((dst_url.scheme,
nl,
dst_url.path,
dst_url.query,
dst_url.fragment))
print(" Converting to Punycode:", answer)
SAMPLE_CONF['SITE_URL'] = answer
def prettyhandler(default, toconf):
SAMPLE_CONF['PRETTY_URLS'] = ask_yesno('Enable pretty URLs (/page/ instead of /page.html) that don\'t need web server configuration?', default=True)
def lhandler(default, toconf, show_header=True):
if show_header:
print("We will now ask you to provide the list of languages you want to use.")
print("Please list all the desired languages, comma-separated, using ISO 639-1 codes. The first language will be used as the default.")
print("Type '?' (a question mark, sans quotes) to list available languages.")
answer = ask('Language(s) to use', 'en')
while answer.strip() == '?':
print('\n# Available languages:')
try:
print(SAMPLE_CONF['_SUPPORTED_LANGUAGES'] + '\n')
except UnicodeEncodeError:
# avoid Unicode characters in supported language names
print(unidecode.unidecode(SAMPLE_CONF['_SUPPORTED_LANGUAGES']) + '\n')
answer = ask('Language(s) to use', 'en')
langs = [i.strip().lower().replace('-', '_') for i in answer.split(',')]
for partial, full in LEGAL_VALUES['_TRANSLATIONS_WITH_COUNTRY_SPECIFIERS'].items():
if partial in langs:
langs[langs.index(partial)] = full
print("NOTICE: Assuming '{0}' instead of '{1}'.".format(full, partial))
default = langs.pop(0)
SAMPLE_CONF['DEFAULT_LANG'] = default
# format_default_translations_config() is intelligent enough to
# return the current value if there are no additional languages.
SAMPLE_CONF['TRANSLATIONS'] = format_default_translations_config(langs)
# Get messages for navigation_links. In order to do this, we need
# to generate a throwaway TRANSLATIONS dict.
tr = get_default_translations_dict(default, langs)
# Assuming that base contains all the locales, and that base does
# not inherit from anywhere.
try:
messages = load_messages(['base'], tr, default, themes_dirs=['themes'])
SAMPLE_CONF['NAVIGATION_LINKS'] = format_navigation_links(langs, default, messages, SAMPLE_CONF['STRIP_INDEXES'])
except nikola.utils.LanguageNotFoundError as e:
print(" ERROR: the language '{0}' is not supported.".format(e.lang))
print(" Are you sure you spelled the name correctly? Names are case-sensitive and need to be reproduced as-is (complete with the country specifier, if any).")
print("\nType '?' (a question mark, sans quotes) to list available languages.")
lhandler(default, toconf, show_header=False)
def tzhandler(default, toconf):
print("\nPlease choose the correct time zone for your blog. Nikola uses the tz database.")
print("You can find your time zone here:")
print("https://en.wikipedia.org/wiki/List_of_tz_database_time_zones")
print("")
answered = False
while not answered:
try:
lz = get_localzone()
except Exception:
lz = None
answer = ask('Time zone', lz if lz else "UTC")
tz = dateutil.tz.gettz(answer)
if tz is None:
print(" WARNING: Time zone not found. Searching list of time zones for a match.")
all_zones = dateutil.zoneinfo.get_zonefile_instance().zones
matching_zones = [zone for zone in all_zones if answer.lower() in zone.lower()]
if len(matching_zones) == 1:
tz = dateutil.tz.gettz(matching_zones[0])
answer = matching_zones[0]
print(" Picking '{0}'.".format(answer))
elif len(matching_zones) > 1:
print(" The following time zones match your query:")
print(' ' + '\n '.join(matching_zones))
continue
if tz is not None:
time = datetime.datetime.now(tz).strftime('%H:%M:%S')
print(" Current time in {0}: {1}".format(answer, time))
answered = ask_yesno("Use this time zone?", True)
else:
print(" ERROR: No matches found. Please try again.")
SAMPLE_CONF['TIMEZONE'] = answer
def chandler(default, toconf):
print("You can configure comments now. Type '?' (a question mark, sans quotes) to list available comment systems. If you do not want any comments, just leave the field blank.")
answer = ask('Comment system', '')
while answer.strip() == '?':
print('\n# Available comment systems:')
print(SAMPLE_CONF['_SUPPORTED_COMMENT_SYSTEMS'])
print('')
answer = ask('Comment system', '')
while answer and answer not in LEGAL_VALUES['COMMENT_SYSTEM']:
if answer != '?':
print(' ERROR: Nikola does not know this comment system.')
print('\n# Available comment systems:')
print(SAMPLE_CONF['_SUPPORTED_COMMENT_SYSTEMS'])
print('')
answer = ask('Comment system', '')
SAMPLE_CONF['COMMENT_SYSTEM'] = answer
SAMPLE_CONF['COMMENT_SYSTEM_ID'] = ''
if answer:
print("You need to provide the site identifier for your comment system. Consult the Nikola manual for details on what the value should be. (you can leave it empty and come back later)")
answer = ask('Comment system site identifier', '')
SAMPLE_CONF['COMMENT_SYSTEM_ID'] = answer
STORAGE = {'target': target}
questions = [
('Questions about the site', None, None, None),
# query, default, toconf, destination
('Destination', None, False, '!target'),
('Site title', 'My Nikola Site', True, 'BLOG_TITLE'),
('Site author', 'Nikola Tesla', True, 'BLOG_AUTHOR'),
('Site author\'s e-mail', 'n.tesla@example.com', True, 'BLOG_EMAIL'),
('Site description', 'This is a demo site for Nikola.', True, 'BLOG_DESCRIPTION'),
(urlhandler, None, True, True),
(prettyhandler, None, True, True),
('Questions about languages and locales', None, None, None),
(lhandler, None, True, True),
(tzhandler, None, True, True),
('Questions about comments', None, None, None),
(chandler, None, True, True),
]
print("Creating Nikola Site")
print("====================\n")
print("This is Nikola v{0}. We will now ask you a few easy questions about your new site.".format(nikola.__version__))
print("If you do not want to answer and want to go with the defaults instead, simply restart with the `-q` parameter.")
for query, default, toconf, destination in questions:
if target and destination == '!target' and test_destination(target, demo):
# Skip the destination question if we know it already
pass
else:
if default is toconf is destination is None:
print('--- {0} ---'.format(query))
elif destination is True:
query(default, toconf)
else:
answer = ask(query, default)
try:
answer = answer.decode('utf-8')
except (AttributeError, UnicodeDecodeError):
pass
if toconf:
SAMPLE_CONF[destination] = answer
if destination == '!target':
while not answer or not test_destination(answer, demo):
if not answer:
print(' ERROR: you need to specify a target directory.\n')
answer = ask(query, default)
STORAGE['target'] = answer
print("\nThat's it, Nikola is now configured. Make sure to edit conf.py to your liking.")
print("If you are looking for themes and addons, check out https://themes.getnikola.com/ and https://plugins.getnikola.com/.")
print("Have fun!")
return STORAGE
def _execute(self, options={}, args=None):
"""Create a new site."""
try:
target = args[0]
except IndexError:
target = None
if not options.get('quiet'):
st = self.ask_questions(target=target, demo=options.get('demo'))
try:
if not target:
target = st['target']
except KeyError:
pass
if not target:
print("Usage: nikola init [--demo] [--quiet] folder")
print("""
Options:
-q, --quiet Do not ask questions about config.
-d, --demo Create a site filled with example data.""")
return 1
if not options.get('demo'):
self.create_empty_site(target)
LOGGER.info('Created empty site at {0}.'.format(target))
else:
if not test_destination(target, True):
return 2
self.copy_sample_site(target)
LOGGER.info("A new site with example data has been created at "
"{0}.".format(target))
LOGGER.info("See README.txt in that folder for more information.")
self.create_configuration(target)
| {
"content_hash": "fcff02e867f049d76d0649d9ea1a473d",
"timestamp": "",
"source": "github",
"line_count": 499,
"max_line_length": 224,
"avg_line_length": 41.13627254509018,
"alnum_prop": 0.5547815072830906,
"repo_name": "getnikola/nikola",
"id": "4607758b6ee085f351bb8e14cff398aa0aa5be18",
"size": "21669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nikola/plugins/command/init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34036"
},
{
"name": "HTML",
"bytes": "239"
},
{
"name": "JavaScript",
"bytes": "2076"
},
{
"name": "Jupyter Notebook",
"bytes": "568"
},
{
"name": "Python",
"bytes": "1299776"
},
{
"name": "Shell",
"bytes": "9704"
},
{
"name": "XSLT",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""
IMAGE -> MATRIX.
Use imgmap.py together with mapimg.py.
All images in the folder, mapped with two colors.
$ imgmap.py | mapimg.py 222,0,222,220 100,220,0,200
Args.
$ imgmap.py test.png | mapimg.py 222,0,222,220
Stdin.
$ echo 'test.png' | imgmap.py | mapimg.py 222,0,222,220
Stdout.
$ imgmap.py icon.png >> out.txt
"""
__version__ = 1.0
# imgmap.py
# MIT License
# Copyright (c) 2017 Alexander Veledzimovich veledz@gmail.com
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# use black, white or transparent background
# indexed colors - faster and better, resolution <600 px
# PNG: trasparent - OK, white - OK, black - OK, indexed - OK, RGB - BAD
# BMP: transparent - OK, white - OK, black - OK, indexed - OK, RGB - OK
# TIFF: transparent - OK, white - OK, black - OK, indexed - OK, RGB - OK
# JPEG: transparent - NO, white - BAD, black - BAD indexed - NO, RGB - BAD
from glob import glob
from os import path
from sys import argv, stderr, stdin, stdout
from PIL import Image
# TODO: add a tolerance when matching colors
def image_matrix(img):
"""
image_matrix(img)
take:
img: PATH to image file
return:
        map_image: matrix with 0 - background, 1,2,3...n - different colors.
"""
image = Image.open(img)
image = image.convert('RGBA')
back = image.getpixel((0, 0))
if back in [(0, 0, 0, 0), (255, 255, 255, 0), (0, 0, 0, 255),
(255, 255, 255, 255)]:
w, h = image.size
# create map image
map_image = [[0] * w for i in range(h)]
colors = dict()
# create map with colors
for y in range(h):
for x in range(w):
pix = image.getpixel((x, y))
if pix != back:
if not colors.get(pix):
colors[pix] = 1
else:
colors[pix] += 1
map_image[y][x] = pix
# sort colors for find main color
sort_colors = sorted(list(colors.items()),
key=lambda i: i[1], reverse=True)
# put numbers to code colors
for y in range(h):
for num, item in enumerate(sort_colors, 1):
for x in range(w):
if map_image[y][x] == item[0]:
map_image[y][x] = num
return map_image
if __name__ == '__main__':
inp = []
EXT = ('*.jpeg', '*.jpg', '*.png', '*.gif', '*.tiff', '*.tif', '*.bmp')
if argv[1:]:
inp = [i for i in argv[1:] if path.exists(i)]
elif stdin.isatty() is False:
# to avoid /n in the end of the line
inp = [i.strip() for i in stdin.readlines()
if path.exists(i.strip())]
elif any([glob(i) for i in EXT]):
all_images = [glob(i) for i in EXT] + [glob(i.upper()) for i in EXT]
inp = [image for list_ in all_images
for image in list_]
else:
stderr.write('Error: empty input\n')
if inp:
map_collection = []
for i in inp:
matrix = image_matrix(i)
if matrix:
map_collection.append(matrix)
else:
stderr.write(
'Error: use black/white/transparent background\n'
)
for i in map_collection:
# use print easiest way but it is also a string
print(i)
else:
stderr.write('Error: wrong path\n')
| {
"content_hash": "6c00c2c73bac6d8e9d171e18b30b80cf",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 77,
"avg_line_length": 31.884057971014492,
"alnum_prop": 0.5861363636363637,
"repo_name": "schwarzbox/Tools-Python3",
"id": "451421a3280db0d58957a5c572f5c144d383524f",
"size": "4448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imgmap.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32341"
}
],
"symlink_target": ""
} |
'''
Trains different classifiers on the Emobase 2010 dataset
'''
# Add path to our module(s).
import sys
import os
import pickle
import argparse
sys.path.append("../src/python")
import motherese
# Paths.
FEATS_DIR = "../feats"
MODEL_DIR = "../models"
import numpy as np
from sklearn import svm
from sklearn import cluster
from sklearn import ensemble
from sklearn import naive_bayes
from sklearn import tree
from sklearn import cross_validation
from sklearn import decomposition
from sklearn import metrics
def main(config_name, prefix,unbalancetrain):
print "Creating models with config: \"{}\"".format(config_name)
#create models dir
if not os.path.isdir(MODEL_DIR):
os.mkdir(MODEL_DIR)
#### Loading data
data = motherese.load_data(config_name, True, (not unbalancetrain), data_dir=FEATS_DIR)
entire_data = motherese.load_data(config_name, True, False, data_dir=FEATS_DIR)
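    # Expected layout (inferred from usage below): both dicts provide
    # 'X_train', 'y_train', 'X_test', 'y_test'; 'data' also carries the
    # fitted 'scaler' that is pickled for reuse at prediction time.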
### save scaler
pickle.dump(data["scaler"], open(os.path.join(MODEL_DIR, prefix + "scaler.p"), 'wb'))
    #### Support vector machines, tree-based models and naive Bayes
classifier_list = [
("libsvm", lambda: svm.SVC(kernel='linear', class_weight='auto', C=0.001)),
("logreg", lambda: svm.LinearSVC(C=0.01, penalty="l1", dual=False, class_weight='auto')),
("rbf_svm", lambda: svm.SVC(kernel='rbf')),
("rbf_nu_svm", lambda: svm.NuSVC(kernel='rbf')),
("random_forest", lambda: ensemble.RandomForestClassifier(criterion="entropy", n_estimators=50, min_samples_split=3, max_depth=6)),
("decision_tree", lambda: tree.DecisionTreeClassifier(criterion="entropy", min_samples_split=3, max_depth=6)),
("naive_bayes", lambda: naive_bayes.GaussianNB())
]
for name, clf_gen in classifier_list:
try:
clf = clf_gen()
clf.fit(data['X_train'], data['y_train'])
        except Exception:  # avoid swallowing KeyboardInterrupt with a bare except
print 'Skipping ' + name
continue
# CHANGE NAME
pickle.dump(clf, open(os.path.join(MODEL_DIR, prefix + name + ".p"), 'wb'))
predictions = clf.predict(data['X_train'])
predictions_test = clf.predict(entire_data['X_test'])
print name, metrics.roc_auc_score(data['y_train'], predictions), metrics.roc_auc_score(data['y_test'], predictions_test),\
metrics.accuracy_score(data['y_train'], predictions), metrics.accuracy_score(data['y_test'], predictions_test), \
metrics.precision_score(data['y_train'], predictions), metrics.precision_score(data['y_test'], predictions_test), \
metrics.recall_score(data['y_train'], predictions), metrics.recall_score(data['y_test'], predictions_test), \
metrics.f1_score(data['y_train'], predictions), metrics.f1_score(data['y_test'], predictions_test), \
sum(data['y_train']==0), sum(data['y_train']==1), \
sum(data['y_test']==0), sum(data['y_test']==1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Train motherese classifiers.")
parser.add_argument("config_name", help="Configuration to use (e.g. hw4, default: emobase2010-nomfcc)",
default="emobase2010-nomfcc", nargs="?")
parser.add_argument("prefix",help="Prefix for model name",default="", nargs="?")
parser.add_argument("--unbalancetrain", action="store_true",default=False)
args = parser.parse_args()
main(args.config_name, args.prefix, args.unbalancetrain)
| {
"content_hash": "8018cc02ec761fa45bc72646fabce18e",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 139,
"avg_line_length": 37.90217391304348,
"alnum_prop": 0.6435331230283912,
"repo_name": "sebschu/cds-detector",
"id": "69285ee91a05f2e5b8332fcfa95d4458629f6ab8",
"size": "3506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/train.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "42885562"
},
{
"name": "Python",
"bytes": "14691"
}
],
"symlink_target": ""
} |
""" Test module for 3-boidy phase space
"""
# =============================================================================
from ostap.core.pyrouts import Ostap, SE
from ostap.utils.gsl import gslCount
from ostap.logger.colorized import attention
import ostap.logger.table as T
from ostap.utils.utils import wait
from ostap.plotting.canvas import use_canvas
from ostap.math.models import f1_draw
from ostap.math.minimize import minimize_scalar
import ROOT, math, random, itertools
# ============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger ( 'ostap.test_math_phasespace3' )
else : logger = getLogger ( __name__ )
# ============================================================================
# =============================================================================
## Test 3-body phase space calculation via elliptic integrals
# @see Ostap::Math::PhaseSpace3
# @see Ostap::Math::PhaseSpace3s
# @see Ostap::Kinematics::phasespace3
# @see https://indico.cern.ch/event/368497/contributions/1786992/attachments/1134067/1621999/davydychev.PDF
# @see http://cds.cern.ch/record/583358/files/0209233.pdf
# @see https://www.researchgate.net/publication/2054534_Three-body_phase_space_symmetrical_treatments
#
# @see A.Davydychev and R.Delbourgo,
# "Explicitly symmetrical treatment of three body phase space",
# J.Phys. A37 (2004) 4871, arXiv:hep-th/0311075",
# doi = 10.1088/0305-4470/37/17/016
# @see https://arxiv.org/abs/hep-th/0311075
# @see https://iopscience.iop.org/article/10.1088/0305-4470/37/17/016
#
def test_phasespace3 ( ) :
"""Test 3-body phase space calculation via elliptic integrals
- see Ostap.Math.PhaseSpace3
- see Ostap.Math.PhaseSpace3s
- see Ostap.Kinematics.phasespace3
- see https://indico.cern.ch/event/368497/contributions/1786992/attachments/1134067/1621999/davydychev.PDF
- see http://cds.cern.ch/record/583358/files/0209233.pdf
- see https://www.researchgate.net/publication/2054534_Three-body_phase_space_symmetrical_treatments
- see A.Davydychev and R.Delbourgo, ``Explicitly symmetrical treatment of three body phase space'',
J.Phys. A37 (2004) 4871, arXiv:hep-th/0311075,
doi = 10.1088/0305-4470/37/17/016
- see https://arxiv.org/abs/hep-th/0311075
- see https://iopscience.iop.org/article/10.1088/0305-4470/37/17/016
"""
    logger = getLogger( 'test_phasespace3' )
logger.info ( 'Test 3-body phase space calculation via elliptic integrals' )
masses = ( 3 , 1 , 0.1 )
ps1 = Ostap.Math.PhaseSpace3 ( *masses )
ps2 = Ostap.Math.PhaseSpace3s ( *masses ) ## <--- HERE
ps3 = lambda x : Ostap.Kinematics.phasespace3a ( x , *masses ) ## non-symmetric form
with wait ( 3 ), use_canvas( 'test_phasespace3' ) :
ps1.draw ( xmin = ps1.threshold() , xmax = 50 , linecolor=2 , linewidth = 2 )
logger.info ( 'Red line - 3-body phase space via numerical integration' )
ps2.draw ( 'same' , xmin = ps1.threshold() , xmax = 50 , linecolor=4 , linewidth = 2 , linestyle = 9 )
logger.info ( 'Blue line - symmetric expression of 3-body phase space via elliptic integrals' )
f1_draw ( ps3 , 'same' , xmin = ps1.threshold () , xmax = 50 , linecolor=8 , linewidth = 2 , linestyle = 11 )
logger.info ( 'Green line - non-symmetric expression of 3-body phase space via elliptic integrals' )
# =============================================================================
def test_phasespace3i_permutations () :
masses = ( 3 , 1 , 0.1 )
funcs = []
for p in itertools.permutations ( masses ) :
        f = lambda x , p = p : Ostap.Kinematics.phasespace3i ( x , *p ) ## bind p per permutation
funcs.append ( f )
from ostap.math.models import f1_draw
xmin = sum ( masses )
with wait ( 3 ), use_canvas( 'test_phasespace3i_permutations' ) :
for i, f in enumerate ( funcs ) :
color = i + 1
if i == 0 :
f1_draw ( f , line_color = color , linewidth = 2 , xmin = xmin , xmax = 50 )
else:
f1_draw ( f , 'same' , line_color = color , linewidth = 2 , xmin = xmin , xmax = 50 )
# =============================================================================
def test_phasespace3a_permutations () :
masses = ( 3 , 1 , 0.1 )
funcs = []
for p in itertools.permutations ( masses ) :
        f = lambda x , p = p : Ostap.Kinematics.phasespace3a ( x , *p ) ## bind p per permutation
funcs.append ( f )
from ostap.math.models import f1_draw
xmin = sum ( masses )
    with wait ( 3 ), use_canvas( 'test_phasespace3a_permutations' ) :
for i, f in enumerate ( funcs ) :
color = i + 1
if i == 0 :
f1_draw ( f , line_color = color , linewidth = 2 , xmin = xmin , xmax = 50 )
else:
f1_draw ( f , 'same' , line_color = color , linewidth = 2 , xmin = xmin , xmax = 50 )
# =============================================================================
def test_phasespace3s_permutations () :
masses = ( 3 , 1 , 0.1 )
funcs = []
for p in itertools.permutations ( masses ) :
        f = lambda x , p = p : Ostap.Kinematics.phasespace3s ( x , *p ) ## bind p per permutation
funcs.append ( f )
xmin = sum ( masses )
with wait ( 3 ), use_canvas( 'test_phasespace3s_permutations' ) :
for i, f in enumerate ( funcs ) :
color = i + 1
if i == 0 :
f1_draw ( f , line_color = color , linewidth = 2 , xmin = xmin , xmax = 50 )
else:
f1_draw ( f , 'same' , line_color = color , linewidth = 2 , xmin = xmin , xmax = 50 )
# =============================================================================
def test_phasespace3_compare () :
masses = ( 3 , 1 , 0.1 )
fun1 = lambda x : Ostap.Kinematics.phasespace3i ( x , *masses ) ## numerical integration
fun2 = lambda x : Ostap.Kinematics.phasespace3s ( x , *masses ) ## symmetric form
fun3 = lambda x : Ostap.Kinematics.phasespace3a ( x , *masses ) ## non-symmetric form
fun4 = lambda x : Ostap.Kinematics.phasespace3nr ( x , *masses ) ## non-relativistic limit
xmin = sum ( masses )
with wait ( 3 ), use_canvas( 'test_phasespace3_compare' ) :
for i, f in enumerate ( ( fun1 , fun2 , fun3 , fun4 ) ) :
color = i + 2
if i == 0 :
f1_draw ( f , line_color = color , linewidth = 2 , xmin = xmin , xmax = 40 )
else:
f1_draw ( f , 'same' , line_color = color , linewidth = 2 , xmin = xmin , xmax = 40 )
# =============================================================================
if '__main__' == __name__ :
test_phasespace3 ()
test_phasespace3s_permutations ()
test_phasespace3i_permutations ()
test_phasespace3a_permutations ()
test_phasespace3_compare ()
# =============================================================================
## The END
# =============================================================================
| {
"content_hash": "cec241529ccd133f3731057d2a17b621",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 119,
"avg_line_length": 43.172413793103445,
"alnum_prop": 0.5117145899893504,
"repo_name": "OstapHEP/ostap",
"id": "5d7cb114728ca774a5407d9bb42a3f16d34369e2",
"size": "7924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ostap/math/tests/test_math_phasespace3.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41595313"
},
{
"name": "C++",
"bytes": "7480608"
},
{
"name": "CMake",
"bytes": "43634"
},
{
"name": "Dockerfile",
"bytes": "1028"
},
{
"name": "Python",
"bytes": "6658186"
},
{
"name": "Shell",
"bytes": "10365"
}
],
"symlink_target": ""
} |
from parameters import applyPreset, preset, presetTree, group, \
registerPreset, allPresets, BadPreset, defaults, PCall, pcall
from parameters.runtime import Delta, Direct
from pmodule import PModule, pmodule, addToRunQueue
from manager import initialize, manager, reset, clean
from configuration import configTree
import creation
| {
"content_hash": "bd27e766fa51aad999e1ead99c6ce4c1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 66,
"avg_line_length": 26.307692307692307,
"alnum_prop": 0.8070175438596491,
"repo_name": "hoytak/lazyrunner",
"id": "da3c73beca7d273761948335778cfcc76bb2271e",
"size": "342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lazyrunner/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "177437"
},
{
"name": "Shell",
"bytes": "190"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from . import ble_cli
from .transport import Transport
class Transport_BLE(Transport):
def __init__(self, service_uuid, nu_lookup):
self.nu_lookup = nu_lookup
self.service_uuid = service_uuid
self.name_uuid_lookup = None
# Expect service UUID like '0000ffff-0000-1000-8000-00805f9b34fb'
for name in nu_lookup.keys():
# Calculate characteristic UUID for each endpoint
nu_lookup[name] = service_uuid[:4] + '{:02x}'.format(
int(nu_lookup[name], 16) & int(service_uuid[4:8], 16)) + service_uuid[8:]
# Get BLE client module
self.cli = ble_cli.get_client()
async def connect(self, devname):
# Use client to connect to BLE device and bind to service
if not await self.cli.connect(devname=devname, iface='hci0',
chrc_names=self.nu_lookup.keys(),
fallback_srv_uuid=self.service_uuid):
raise RuntimeError('Failed to initialize transport')
# Irrespective of provided parameters, let the client
# generate a lookup table by reading advertisement data
# and characteristic user descriptors
self.name_uuid_lookup = self.cli.get_nu_lookup()
# If that doesn't work, use the lookup table provided as parameter
if self.name_uuid_lookup is None:
self.name_uuid_lookup = self.nu_lookup
# Check if expected characteristics are provided by the service
for name in self.name_uuid_lookup.keys():
if not self.cli.has_characteristic(self.name_uuid_lookup[name]):
raise RuntimeError(f"'{name}' endpoint not found")
async def disconnect(self):
await self.cli.disconnect()
async def send_data(self, ep_name, data):
# Write (and read) data to characteristic corresponding to the endpoint
if ep_name not in self.name_uuid_lookup.keys():
raise RuntimeError(f'Invalid endpoint: {ep_name}')
return await self.cli.send_data(self.name_uuid_lookup[ep_name], data)
| {
"content_hash": "ba064be8b0a360e7521821fef508f432",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 89,
"avg_line_length": 44.875,
"alnum_prop": 0.6262766945218199,
"repo_name": "espressif/esp-idf",
"id": "3baa9be96fedd6125f0564ecf358e72a13b449b2",
"size": "2267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/esp_prov/transport/transport_ble.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "388440"
},
{
"name": "Batchfile",
"bytes": "5451"
},
{
"name": "C",
"bytes": "69102322"
},
{
"name": "C++",
"bytes": "992772"
},
{
"name": "CMake",
"bytes": "539972"
},
{
"name": "Dockerfile",
"bytes": "3290"
},
{
"name": "Makefile",
"bytes": "23747"
},
{
"name": "Nim",
"bytes": "1005"
},
{
"name": "PowerShell",
"bytes": "4537"
},
{
"name": "Python",
"bytes": "2158180"
},
{
"name": "Roff",
"bytes": "101"
},
{
"name": "Shell",
"bytes": "126143"
}
],
"symlink_target": ""
} |
from platformio.platforms.base import BasePlatform
class TitivaPlatform(BasePlatform):
"""
An embedded platform for TI TIVA C ARM microcontrollers
(with Energia Framework)
"""
PACKAGES = {
"toolchain-gccarmnoneeabi": {
"alias": "toolchain",
"default": True
},
"ldscripts": {
"default": True
},
"tool-lm4flash": {
"alias": "uploader",
"default": True
},
"framework-energiativa": {
"alias": "framework",
"default": True
}
}
| {
"content_hash": "62cae4cb0ddd707629d44e7e7a025ac4",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 63,
"avg_line_length": 19.677419354838708,
"alnum_prop": 0.4918032786885246,
"repo_name": "aphelps/platformio",
"id": "2c798e13c545567e84d561306b554f4fc5bc3117",
"size": "685",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "platformio/platforms/titiva.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "6563"
},
{
"name": "C",
"bytes": "12238"
},
{
"name": "C++",
"bytes": "2146"
},
{
"name": "Makefile",
"bytes": "6778"
},
{
"name": "Python",
"bytes": "143971"
},
{
"name": "Shell",
"bytes": "6709"
}
],
"symlink_target": ""
} |
from .util import avg
from .util import jaccard
from .util import ngrams
def features(record):
return [set(ngrams(attr, k=2, step=1)) for attr in record]
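# Example (hypothetical, assuming ngrams yields overlapping character
# bigrams): features(("john", "doe")) -> [{'jo', 'oh', 'hn'}, {'do', 'oe'}]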
def hit(query_features, result_features):
return avg(list(map(jaccard, query_features, result_features)))
| {
"content_hash": "ef29a153ed221edfeb26ec09c407b8d0",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 67,
"avg_line_length": 24.727272727272727,
"alnum_prop": 0.7316176470588235,
"repo_name": "rschwager-mm/polymr",
"id": "fcb9614fc4b197dd567dbe6e1545db25ea0f5cf8",
"size": "272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polymr/score.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93047"
},
{
"name": "Shell",
"bytes": "1319"
}
],
"symlink_target": ""
} |
from mitmproxy import controller
from mitmproxy import http
from mitmproxy import tcp
from mitmproxy import websocket
Events = frozenset([
"clientconnect",
"clientdisconnect",
"serverconnect",
"serverdisconnect",
"tcp_start",
"tcp_message",
"tcp_error",
"tcp_end",
"http_connect",
"request",
"requestheaders",
"response",
"responseheaders",
"error",
"websocket_handshake",
"websocket_start",
"websocket_message",
"websocket_error",
"websocket_end",
"next_layer",
"configure",
"done",
"log",
"start",
"tick",
])
def event_sequence(f):
if isinstance(f, http.HTTPFlow):
if f.request:
yield "requestheaders", f
yield "request", f
if f.response:
yield "responseheaders", f
yield "response", f
if f.error:
yield "error", f
elif isinstance(f, websocket.WebSocketFlow):
messages = f.messages
f.messages = []
f.reply = controller.DummyReply()
yield "websocket_start", f
while messages:
f.messages.append(messages.pop(0))
yield "websocket_message", f
if f.error:
yield "websocket_error", f
yield "websocket_end", f
elif isinstance(f, tcp.TCPFlow):
messages = f.messages
f.messages = []
f.reply = controller.DummyReply()
yield "tcp_start", f
while messages:
f.messages.append(messages.pop(0))
yield "tcp_message", f
if f.error:
yield "tcp_error", f
yield "tcp_end", f
else:
raise NotImplementedError
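# Illustrative use of event_sequence (the addon object here is hypothetical;
# any object with hook methods matching the yielded event names would work):
#
#   for name, flow in event_sequence(recorded_flow):
#       handler = getattr(my_addon, name, None)
#       if handler is not None:
#           handler(flow)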
| {
"content_hash": "934c5a3f6935eeeff041f38b79234e3b",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 48,
"avg_line_length": 23.136986301369863,
"alnum_prop": 0.5630550621669627,
"repo_name": "dwfreed/mitmproxy",
"id": "53f236ca3bcaab72901f251a73f492da1c41734a",
"size": "1689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mitmproxy/events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "208058"
},
{
"name": "HTML",
"bytes": "4270"
},
{
"name": "JavaScript",
"bytes": "2149949"
},
{
"name": "PowerShell",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1378470"
},
{
"name": "Shell",
"bytes": "3660"
}
],
"symlink_target": ""
} |
import unittest
import logging
from tests.test_source import SourceTestCase
from dipper.sources.OMIA import OMIA
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
class OMIATestCase(SourceTestCase):
def setUp(self):
self.source = OMIA('rdf_graph', True)
self.source.settestonly(True)
self._setDirToSource()
return
def tearDown(self):
self.source = None
return
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "6750c22a149d1720520847f7679eab63",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 45,
"avg_line_length": 20.833333333333332,
"alnum_prop": 0.67,
"repo_name": "monarch-initiative/dipper",
"id": "c7fce9a3ab12107bdb42d10935ba4e853e49186b",
"size": "524",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_omia.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "28090"
},
{
"name": "HTML",
"bytes": "221793"
},
{
"name": "Makefile",
"bytes": "7675"
},
{
"name": "Python",
"bytes": "1432342"
},
{
"name": "Shell",
"bytes": "11180"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from django.http import HttpResponse, HttpResponseRedirect
from wagboot.models import WebsiteSettings
def robots_txt(request):
website_settings = WebsiteSettings.for_site(request.site)
return HttpResponse(content=website_settings.robots_txt or "Allow /\n", content_type="text/plain")
def redirect_to_login(request):
"""
    We don't have a fixed login view, so this view redirects to the currently
    selected login page in WebsiteSettings.login_page.
    If that page does not exist, it redirects to the root.
"""
login_url = WebsiteSettings.get_login_url(request.site)
return HttpResponseRedirect(login_url or '/')
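# Typical wiring in a project's urls.py (a sketch; the URL patterns and names
# below are assumptions, not part of this module):
#
#   from django.conf.urls import url
#   from wagboot import views
#
#   urlpatterns = [
#       url(r'^robots\.txt$', views.robots_txt, name='robots_txt'),
#       url(r'^login/$', views.redirect_to_login, name='login'),
#   ]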
| {
"content_hash": "02270376e80848092452e9939516e6ff",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 102,
"avg_line_length": 30.217391304347824,
"alnum_prop": 0.7467625899280576,
"repo_name": "wagboot/wagboot",
"id": "7f010cba7965301c09a5ae99ec9044e88c0ef974",
"size": "719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagboot/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "20384"
},
{
"name": "Python",
"bytes": "60909"
}
],
"symlink_target": ""
} |
import os
import sys
import numpy as np
from mayavi.mlab import surf, show, figure, axes, points3d
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
sys.path.append("src/python/modules")
from regression import *
def list_datafiles(path):
"""
Return a list of files with experimental data
@param path Directory to search for files
@return List with file names
"""
files = os.listdir(path)
return [os.path.join(path, f) for f in files if ".csv" in f]
def plot_3d_mayavi(X, Y, Z):
f = figure(bgcolor=(1, 1, 1))
surf(X.T, Y.T, Z, figure=f, warp_scale='auto')
axes(xlabel='N Samples', ylabel='Sample', zlabel='Gradient',
z_axis_visibility=False, nb_labels=10, line_width=1)
show()
def plot_3d_mplot(X, Y, Z, title=""):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X.T, Y.T, Z.T, rstride=1, cstride=1, linewidth=0.5, antialiased=True)
plt.xlabel("N Samples")
plt.ylabel("Sample")
plt.title(title)
    # ax.set_zlabel("Gradient")
plt.show()
def main(test=1, plot=True, save=False):
test_list = list_datafiles("data/")
data = np.loadtxt(test_list[test], delimiter=',', skiprows=1, usecols=(0, 1, 2))
slices = list(range(10, len(data[:, 0]), 1))
reg = Regression(data[:, 1], data[:, 2])
Z = reg.analyze(slices)
X, Y = np.meshgrid(slices, data[:, 0])
M = np.vstack([X.reshape((1, -1)), Y.reshape((1, -1)), Z.reshape((1, -1))])
if save:
filename = test_list[test].replace('data', 'out')
np.savetxt(filename, M.T, delimiter=',')
print("Saving to {0}".format(filename))
if plot:
plot_3d_mayavi(X, Y, Z)
# plot_3d_mplot(X, Y, Z, title=test_list[test])
def test_all():
total = len(list_datafiles("data/"))
for i in range(total):
main(test=i, plot=False, save=True)
if __name__ == "__main__":
try:
if sys.argv[1] == "all":
test_all()
else:
main(int(sys.argv[1]))
except Exception as e:
print(e)
exit(0) | {
"content_hash": "c3561db860616c119e79513a7ee1d688",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 89,
"avg_line_length": 26.833333333333332,
"alnum_prop": 0.5957955088389871,
"repo_name": "carlgonz/u-fit",
"id": "4d7dba82a9a6e198a2c738f2b30322ca0ca3d526",
"size": "2093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1470"
},
{
"name": "JavaScript",
"bytes": "244986"
},
{
"name": "Python",
"bytes": "158848"
},
{
"name": "Shell",
"bytes": "306"
}
],
"symlink_target": ""
} |
"""
A module for managing apps, specs, requirements, and for starting jobs.
"""
import biokbase.auth as auth
from .job import Job
from .jobmanager import JobManager
from .jobcomm import JobComm
from . import specmanager
import biokbase.narrative.clients as clients
from biokbase.narrative.widgetmanager import WidgetManager
from biokbase.narrative.app_util import (
system_variable,
strict_system_variable,
map_outputs_from_state,
validate_parameters,
resolve_ref_if_typed,
transform_param_value,
extract_ws_refs
)
from biokbase.narrative.exception_util import (
transform_job_exception
)
from biokbase.narrative.common import kblogging
import re
import datetime
import traceback
import random
"""
A module for managing apps, specs, requirements, and for starting jobs.
"""
__author__ = "Bill Riehl <wjriehl@lbl.gov>"
class AppManager(object):
"""
The main class for managing how KBase apps get run. This contains functions
for showing app descriptions, their usage (how to invoke various
parameters), and, ultimately, for running the app.
A typical flow might be like this.
am = AppManager()
am.available_apps()
# show the set of apps with a brief description of each.
am.app_usage(app_id)
# show how to use a app and set its parameters.
job = am.run_app(app_id, input1=value1, input2=value2, ...)
# run an app with given inputs.
"""
__instance = None
__MAX_TOKEN_NAME_LEN = 30
spec_manager = specmanager.SpecManager()
_log = kblogging.get_logger(__name__)
_comm = None
viewer_count = 1
def __new__(cls):
if AppManager.__instance is None:
AppManager.__instance = object.__new__(cls)
AppManager.__instance._comm = None
return AppManager.__instance
def reload(self):
"""
Reloads all app specs into memory from the App Catalog.
        Any calls to app_usage, app_description, or available_apps
        should be re-run after the update.
"""
self.spec_manager.reload()
def app_usage(self, app_id, tag='release'):
"""
This shows the list of inputs and outputs for a given app with a given
        tag. By default, this is done in a pretty HTML way, but the returned
        object can be wrapped in str() to show a bare formatted string.
If either the app_id is unknown, or isn't found with the given release
tag, or if the tag is unknown, a ValueError will be raised.
Parameters:
-----------
app_id : string
A KBase app id, generally of the format Module_name/app_name
(see available_apps for a list)
tag : Which version of the app to view - either release, beta, or dev
(default=release)
"""
return self.spec_manager.app_usage(app_id, tag)
def app_description(self, app_id, tag='release'):
"""
Returns the app description in a printable HTML format.
If either the app_id is unknown, or isn't found with the given release
tag, or if the tag is unknown, a ValueError will be raised.
Parameters:
-----------
app_id : string
A KBase app id, generally of the format Module_name/app_name
(see available_apps for a list)
tag : Which version of the app to view - either release, beta, or dev
(default=release)
"""
return self.spec_manager.app_description(app_id, tag)
def available_apps(self, tag="release"):
"""
Lists the set of available apps for a given tag in a simple table.
If the tag is not found, a ValueError will be raised.
Parameters:
-----------
tag : Which version of the list of apps to view - either release, beta,
or dev (default=release)
"""
return self.spec_manager.available_apps(tag)
def run_app_batch(self, app_id, params, tag="release", version=None,
cell_id=None, run_id=None, dry_run=False):
try:
if params is None:
params = list()
return self._run_app_batch_internal(app_id, params, tag, version,
cell_id, run_id, dry_run)
except Exception as e:
e_type = type(e).__name__
            e_message = str(e).replace('<', '&lt;').replace('>', '&gt;')
            e_trace = traceback.format_exc()
            e_trace = e_trace.replace('<', '&lt;').replace('>', '&gt;')
e_code = getattr(e, 'code', -1)
e_source = getattr(e, 'source', 'appmanager')
self._send_comm_message('run_status', {
'event': 'error',
'event_at': datetime.datetime.utcnow().isoformat() + 'Z',
'cell_id': cell_id,
'run_id': run_id,
'error_message': e_message,
'error_type': e_type,
'error_stacktrace': e_trace,
'error_code': e_code,
'error_source': e_source
})
print("Error while trying to start your app (run_app_batch)!\n" +
"-----------------------------------------------------\n" +
str(e) + "\n" +
"-----------------------------------------------------\n" +
e_trace)
return
def _run_app_batch_internal(self, app_id, params, tag, version, cell_id, run_id, dry_run):
batch_method = "kb_BatchApp.run_batch"
batch_app_id = "kb_BatchApp/run_batch"
batch_method_ver = "dev"
batch_method_tag = "dev"
ws_id = strict_system_variable('workspace_id')
spec = self._get_validated_app_spec(app_id, tag, True, version=version)
# Preflight check the params - all required ones are present, all
# values are the right type, all numerical values are in given ranges
spec_params = self.spec_manager.app_params(spec)
# A list of lists of UPAs, used for each subjob.
batch_ws_upas = list()
# The list of actual input values, post-mapping.
batch_run_inputs = list()
for param_set in params:
spec_params_map = dict((spec_params[i]['id'], spec_params[i])
for i in range(len(spec_params)))
batch_ws_upas.append(extract_ws_refs(app_id, tag, spec_params, param_set))
batch_run_inputs.append(self._map_inputs(
spec['behavior']['kb_service_input_mapping'],
param_set,
spec_params_map))
service_method = spec['behavior']['kb_service_method']
service_name = spec['behavior']['kb_service_name']
service_ver = spec['behavior'].get('kb_service_version', None)
# Let the given version override the spec's version.
if version is not None:
service_ver = version
# This is what calls the function in the back end - Module.method
# This isn't the same as the app spec id.
job_meta = {
'tag': batch_method_tag,
'batch_app': app_id,
'batch_tag': tag,
'batch_size': len(params),
}
if cell_id is not None:
job_meta['cell_id'] = cell_id
if run_id is not None:
job_meta['run_id'] = run_id
# Now put these all together in a way that can be sent to the batch processing app.
batch_params = [{
"module_name": service_name,
"method_name": service_method,
"service_ver": service_ver,
"wsid": ws_id,
"meta": job_meta,
"batch_params": [{
"params": batch_run_inputs[i],
"source_ws_objects": batch_ws_upas[i]
} for i in range(len(batch_run_inputs))],
}]
# We're now almost ready to run the job. Last, we need an agent token.
        token_name = 'KBApp_{}'.format(app_id)
        token_name = token_name[:self.__MAX_TOKEN_NAME_LEN]
        agent_token = auth.get_agent_token(auth.get_auth_token(), token_name=token_name)
job_meta['token_id'] = agent_token['id']
# This is the input set for NJSW.run_job. Now we need the workspace id
# and whatever fits in the metadata.
job_runner_inputs = {
'method': batch_method,
'service_ver': batch_method_ver,
'params': batch_params,
'app_id': batch_app_id,
'wsid': ws_id,
'meta': job_meta
}
# if len(ws_input_refs) > 0:
# job_runner_inputs['source_ws_objects'] = ws_input_refs
# if we're doing a dry run, just return the inputs that we made.
if dry_run:
return job_runner_inputs
# Log that we're trying to run a job...
log_info = {
'app_id': app_id,
'tag': batch_method_tag,
'version': service_ver,
'username': system_variable('user_id'),
'wsid': ws_id
}
kblogging.log_event(self._log, "run_batch_app", log_info)
try:
job_id = clients.get("execution_engine2", token=agent_token['token']).run_job(job_runner_inputs)
except Exception as e:
log_info.update({'err': str(e)})
kblogging.log_event(self._log, "run_batch_app_error", log_info)
raise transform_job_exception(e)
new_job = Job(job_id,
batch_app_id,
batch_params,
system_variable('user_id'),
tag=batch_method_tag,
app_version=batch_method_ver,
cell_id=cell_id,
run_id=run_id,
token_id=agent_token['id'],
meta=job_meta)
self._send_comm_message('run_status', {
'event': 'launched_job',
'event_at': datetime.datetime.utcnow().isoformat() + 'Z',
'cell_id': cell_id,
'run_id': run_id,
'job_id': job_id
})
self.register_new_job(new_job)
if cell_id is not None:
return
else:
return new_job
def run_app(self, app_id, params, tag="release", version=None,
cell_id=None, run_id=None, dry_run=False):
"""
Attempts to run the app, returns a Job with the running app info.
If this is given a cell_id, then returns None. If not, it returns the
generated Job object.
Parameters:
-----------
app_id - should be from the app spec, e.g. 'build_a_metabolic_model'
or 'MegaHit/run_megahit'.
        params - the dictionary of parameters to be used with the app.
                 They can be found by using the app_usage function. If any
                 non-optional parameters are missing, a ValueError will be raised.
tag - optional, one of [release|beta|dev] (default=release)
version - optional, a semantic version string. Only released modules
have versions, so if the tag is not 'release', and a version
is given, a ValueError will be raised.
Example:
--------
run_app('MegaHit/run_megahit',
{
'read_library_name' : 'My_PE_Library',
'output_contigset_name' : 'My_Contig_Assembly'
},
version='>=1.0.0'
)
"""
try:
if params is None:
params = dict()
return self._run_app_internal(app_id, params, tag, version,
cell_id, run_id, dry_run)
except Exception as e:
e_type = type(e).__name__
            e_message = str(e).replace('<', '&lt;').replace('>', '&gt;')
            e_trace = traceback.format_exc()
            e_trace = e_trace.replace('<', '&lt;').replace('>', '&gt;')
e_code = getattr(e, 'code', -1)
e_source = getattr(e, 'source', 'appmanager')
self._send_comm_message('run_status', {
'event': 'error',
'event_at': datetime.datetime.utcnow().isoformat() + 'Z',
'cell_id': cell_id,
'run_id': run_id,
'error_message': e_message,
'error_type': e_type,
'error_stacktrace': e_trace,
'error_code': e_code,
'error_source': e_source
})
print("Error while trying to start your app (run_app)!\n" +
"-----------------------------------------------\n" +
str(e) + "\n" +
"-----------------------------------------------\n" +
e_trace)
return
def _run_app_internal(self, app_id, params, tag, version,
cell_id, run_id, dry_run):
"""
        Attempts to run the app, returns a Job with the running app info.
Should *hopefully* also inject that app into the Narrative's metadata.
Probably need some kind of JavaScript-foo to get that to work.
Parameters:
-----------
app_id - should be from the app spec, e.g. 'build_a_metabolic_model'
or 'MegaHit/run_megahit'.
        params - a dictionary of parameters to be used with the app. They can
                 be found by using the app_usage function. If any non-optional
                 parameters are missing, a ValueError will be raised.
        tag - optional, one of [release|beta|dev] (default=release)
        version - optional, a semantic version string. Only released modules
                  have versions, so if the tag is not 'release', and a version
                  is given, a ValueError will be raised.
"""
ws_id = strict_system_variable('workspace_id')
spec = self._get_validated_app_spec(app_id, tag, True, version=version)
# Preflight check the params - all required ones are present, all
# values are the right type, all numerical values are in given ranges
spec_params = self.spec_manager.app_params(spec)
spec_params_map = dict((spec_params[i]['id'], spec_params[i])
for i in range(len(spec_params)))
ws_input_refs = extract_ws_refs(app_id, tag, spec_params, params)
input_vals = self._map_inputs(
spec['behavior']['kb_service_input_mapping'],
params,
spec_params_map)
service_method = spec['behavior']['kb_service_method']
service_name = spec['behavior']['kb_service_name']
service_ver = spec['behavior'].get('kb_service_version', None)
# Let the given version override the spec's version.
if version is not None:
service_ver = version
# This is what calls the function in the back end - Module.method
# This isn't the same as the app spec id.
function_name = service_name + '.' + service_method
job_meta = {'tag': tag}
if cell_id is not None:
job_meta['cell_id'] = cell_id
if run_id is not None:
job_meta['run_id'] = run_id
# This is the input set for NJSW.run_job. Now we need the workspace id
# and whatever fits in the metadata.
job_runner_inputs = {
'method': function_name,
'service_ver': service_ver,
'params': input_vals,
'app_id': app_id,
'wsid': ws_id,
'meta': job_meta
}
if len(ws_input_refs) > 0:
job_runner_inputs['source_ws_objects'] = ws_input_refs
if dry_run:
return job_runner_inputs
# We're now almost ready to run the job. Last, we need an agent token.
        token_name = 'KBApp_{}'.format(app_id)
        token_name = token_name[:self.__MAX_TOKEN_NAME_LEN]
        agent_token = auth.get_agent_token(auth.get_auth_token(), token_name=token_name)
job_runner_inputs['meta']['token_id'] = agent_token['id']
# Log that we're trying to run a job...
log_info = {
'app_id': app_id,
'tag': tag,
'version': service_ver,
'username': system_variable('user_id'),
'wsid': ws_id
}
kblogging.log_event(self._log, "run_app", log_info)
try:
job_id = clients.get("execution_engine2", token=agent_token['token']).run_job(job_runner_inputs)
except Exception as e:
log_info.update({'err': str(e)})
kblogging.log_event(self._log, "run_app_error", log_info)
raise transform_job_exception(e)
new_job = Job(job_id,
app_id,
input_vals,
system_variable('user_id'),
tag=tag,
app_version=service_ver,
cell_id=cell_id,
run_id=run_id,
token_id=agent_token['id'])
self._send_comm_message('run_status', {
'event': 'launched_job',
'event_at': datetime.datetime.utcnow().isoformat() + 'Z',
'cell_id': cell_id,
'run_id': run_id,
'job_id': job_id
})
self.register_new_job(new_job)
if cell_id is not None:
return
else:
return new_job
def run_local_app(self, app_id, params, tag="release", version=None, cell_id=None, run_id=None,
widget_state=None):
"""
Attempts to run a local app. These do not return a Job object, but just
the result of the app. In most cases, this will be a Javascript display
of the result, but could be anything.
If the app_spec looks like it makes a service call, then this raises a
        ValueError. Otherwise, it validates each parameter in params against
        the app spec, executes it, and returns the result.
Parameters:
-----------
app_id - should be from the app spec, e.g. 'view_expression_profile'
params - the dictionary of parameters for the app. Should be key-value
                 pairs where the keys are strings. If any non-optional
parameters are missing, an informative string will be printed.
tag - optional, one of [release|beta|dev] (default=release)
version - optional, a semantic version string. Only released modules
have versions, so if the tag is not 'release', and a version
is given, a ValueError will be raised.
Example:
run_local_app('NarrativeViewers/view_expression_profile',
{
"input_expression_matrix": "MyMatrix",
"input_gene_ids": "1234"
},
                      version='0.0.1')
"""
try:
if params is None:
params = dict()
return self._run_local_app_internal(app_id, params, widget_state, tag, version,
cell_id, run_id)
except Exception as e:
e_type = type(e).__name__
            e_message = str(e).replace('<', '&lt;').replace('>', '&gt;')
            e_trace = traceback.format_exc()
            e_trace = e_trace.replace('<', '&lt;').replace('>', '&gt;')
self._send_comm_message('run_status', {
'event': 'error',
'event_at': datetime.datetime.utcnow().isoformat() + 'Z',
'cell_id': cell_id,
'run_id': run_id,
'error_message': e_message,
'error_type': e_type,
'error_stacktrace': e_trace
})
# raise
print("Error while trying to start your app (run_local_app)!\n" +
"-------------------------------------\n" + str(e))
def _run_local_app_internal(self, app_id, params, widget_state, tag, version, cell_id, run_id):
self._send_comm_message('run_status', {
'event': 'validating_app',
'event_at': datetime.datetime.utcnow().isoformat() + 'Z',
'cell_id': cell_id,
'run_id': run_id
})
spec = self._get_validated_app_spec(app_id, tag, False, version=version)
# Here, we just deal with two behaviors:
# 1. None of the above - it's a viewer.
# 2. ***TODO*** python_class / python_function.
# Import and exec the python code.
# for now, just map the inputs to outputs.
# First, validate.
# Preflight check the params - all required ones are present, all
# values are the right type, all numerical values are in given ranges
spec_params = self.spec_manager.app_params(spec)
(params, ws_refs) = validate_parameters(app_id, tag, spec_params, params)
# Log that we're trying to run a job...
log_info = {
'app_id': app_id,
'tag': tag,
'username': system_variable('user_id'),
'ws': system_variable('workspace')
}
kblogging.log_event(self._log, "run_local_app", log_info)
self._send_comm_message('run_status', {
'event': 'success',
'event_at': datetime.datetime.utcnow().isoformat() + 'Z',
'cell_id': cell_id,
'run_id': run_id
})
(output_widget, widget_params) = map_outputs_from_state([],
params,
spec)
# All a local app does is route the inputs to outputs through the
# spec's mapping, and then feed that into the specified output widget.
wm = WidgetManager()
if widget_state is not None:
return wm.show_advanced_viewer_widget(
output_widget, widget_params, widget_state, cell_id=cell_id, tag=tag
)
else:
return wm.show_output_widget(
output_widget, widget_params, cell_id=cell_id, tag=tag
)
def run_local_app_advanced(self, app_id, params, widget_state, tag="release", version=None,
cell_id=None, run_id=None):
return self.run_local_app(app_id, params, widget_state=widget_state, tag=tag,
version=version, cell_id=cell_id, run_id=run_id)
def run_dynamic_service(self, app_id, params, tag="release", version=None,
cell_id=None, run_id=None):
"""
        Attempts to run a dynamic service method. This does not return a Job
        object, but just the result of the call. In most cases, this will be a
        Javascript display of the result, but could be anything.
        It validates the app spec for the given app_id and tag, invokes the
        mapped service method with the given params, and returns the result.
        Parameters:
        -----------
        app_id - should be from the app spec, e.g. 'view_expression_profile'
        params - the dictionary of parameters for the service call. Should be
                 key-value pairs where the keys are strings. Note that these
                 are passed through without validation (see the note in
                 _run_dynamic_service_internal).
        tag - optional, one of [release|beta|dev] (default=release)
        version - optional, a semantic version string. Only released modules have
                  versions, so if the tag is not 'release', and a version is given,
                  a ValueError will be raised.
        Example:
        run_dynamic_service('NarrativeViewers/view_expression_profile',
                            {'input_expression_matrix': 'MyMatrix',
                             'input_gene_ids': '1234'},
                            version='0.0.1')
"""
try:
if params is None:
params = dict()
            return self._run_dynamic_service_internal(
                app_id, params, tag, version, cell_id, run_id
            )
except Exception as e:
e_type = type(e).__name__
            e_message = str(e).replace('<', '&lt;').replace('>', '&gt;')
            e_trace = traceback.format_exc().replace('<', '&lt;').replace('>', '&gt;')
if cell_id:
self.send_cell_message('result', cell_id, run_id, {
'error': {
'message': e_message,
'type': e_type,
'stacktrace': e_trace
}
})
else:
print("Error while trying to start your app (run_local_app)!" +
"\n-------------------------------------\n" +
str(e))
def _run_dynamic_service_internal(self, app_id, params, tag, version, cell_id, run_id):
spec = self._get_validated_app_spec(app_id, tag, False, version=version)
# Log that we're trying to run a job...
log_info = {
'app_id': app_id,
'tag': tag,
'username': system_variable('user_id'),
'ws': system_variable('workspace')
}
kblogging.log_event(self._log, "run_dynamic_service", log_info)
        # Oddly, we do not validate the incoming parameters here.
        # If they are provided by the UI (we have a cell_id), they are constructed
        # according to the spec, so they are trusted.
        # Otherwise, if they are the product of direct code-cell entry, this is a
        # mode we do not "support", so we can let it fail hard.
        # In the future, when code-cell interaction is supported for users, we will
        # need to provide robust validation and error reporting, but that may end
        # up being (and should be) provided by the sdk execution infrastructure anyway.
input_vals = params
function_name = spec['behavior']['kb_service_name'] + '.' + spec['behavior']['kb_service_method']
        result = clients.get("service").sync_call(
            function_name,
            input_vals,
            service_version=tag
        )[0]
        # For a UI call (a cell_id is defined) we send a result message; otherwise
        # we return the raw result for display in a code cell. This is how we
        # "support" code cells for internal usage.
        if cell_id:
            self.send_cell_message('result', cell_id, run_id, {
                'result': result
            })
        else:
            return result
def send_cell_message(self, message_id, cell_id, run_id, message):
address = {
'cell_id': cell_id,
'run_id': run_id,
'event_at': datetime.datetime.utcnow().isoformat() + 'Z'
}
self._send_comm_message(message_id, {
'address': address,
'message': message
})
def _get_validated_app_spec(self, app_id, tag, is_long, version=None):
if version is not None and tag != "release":
if re.match(r'\d+\.\d+\.\d+', version) is not None:
raise ValueError(
"Semantic versions only apply to released app modules. " +
"You can use a Git commit hash instead to specify a " +
"version.")
self.spec_manager.check_app(app_id, tag, raise_exception=True)
# Get the spec & params
spec = self.spec_manager.get_spec(app_id, tag)
if 'behavior' not in spec:
raise ValueError("This app appears invalid - it has no defined behavior")
if 'script_module' in spec['behavior'] or 'script_name' in spec['behavior']:
# It's an old NJS script. These don't work anymore.
raise ValueError('This app relies on a service that is now obsolete. Please contact ' +
'the administrator.')
if is_long and 'kb_service_input_mapping' not in spec['behavior']:
raise ValueError("This app does not appear to be a long-running " +
"job! Please use 'run_local_app' to start this " +
"instead.")
return spec
def _map_group_inputs(self, value, spec_param, spec_params):
if isinstance(value, list):
return [self._map_group_inputs(v, spec_param, spec_params)
for v in value]
elif value is None:
return None
else:
mapped_value = dict()
id_map = spec_param.get('id_mapping', {})
for param_id in id_map:
# ensure that the param referenced in the group param list
# exists in the spec.
# NB: This should really never happen if the sdk registration
# process validates them.
if param_id not in spec_params:
msg = "Unknown parameter id in group mapping: " + param_id
raise ValueError(msg)
for param_id in value:
target_key = id_map.get(param_id, param_id)
                # Sets either the raw value or, if the parameter is an object
                # reference, the full object reference (see the method).
if value[param_id] is None:
target_val = None
else:
target_val = resolve_ref_if_typed(value[param_id], spec_params[param_id])
mapped_value[target_key] = target_val
return mapped_value
def _map_inputs(self, input_mapping, params, spec_params):
"""
Maps the dictionary of parameters and inputs based on rules provided in
the input_mapping. This iterates over the list of input_mappings, and
uses them as a filter to apply to each parameter.
Returns a list of inputs that can be passed directly to NJSW.run_job
input_mapping is a list of dicts, as defined by
NarrativeMethodStore.ServiceMethodInputMapping.
params is a dict of key-value-pairs, each key is the input_parameter
field of some parameter.
"""
inputs_dict = dict()
for p in input_mapping:
# 2 steps - figure out the proper value, then figure out the
# proper position. value first!
p_value = None
input_param_id = None
if 'input_parameter' in p:
input_param_id = p['input_parameter']
p_value = params.get(input_param_id, None)
if spec_params[input_param_id].get('type', '') == 'group':
p_value = self._map_group_inputs(p_value, spec_params[input_param_id],
spec_params)
# turn empty strings into None
if isinstance(p_value, str) and len(p_value) == 0:
p_value = None
elif 'narrative_system_variable' in p:
p_value = system_variable(p['narrative_system_variable'])
if 'constant_value' in p and p_value is None:
p_value = p['constant_value']
if 'generated_value' in p and p_value is None:
p_value = self._generate_input(p['generated_value'])
spec_param = None
if input_param_id:
spec_param = spec_params[input_param_id]
p_value = transform_param_value(p.get('target_type_transform'), p_value, spec_param)
# get position!
arg_position = p.get('target_argument_position', 0)
target_prop = p.get('target_property', None)
if target_prop is not None:
final_input = inputs_dict.get(arg_position, dict())
if '/' in target_prop:
                    # This is the case when slashes in target_prop separate
                    # elements in nested maps. Escaped slashes are ignored as
                    # separators (literal backslashes must be escaped as well).
bck_slash = "\u244A"
fwd_slash = "\u20EB"
temp_string = target_prop.replace("\\\\", bck_slash)
temp_string = temp_string.replace("\\/", fwd_slash)
temp_path = []
for part in temp_string.split("/"):
part = part.replace(bck_slash, "\\")
part = part.replace(fwd_slash, "/")
temp_path.append(part.encode('ascii', 'ignore').decode("ascii"))
temp_map = final_input
temp_key = None
# We're going along the path and creating intermediate
# dictionaries.
for temp_path_item in temp_path:
if temp_key:
if temp_key not in temp_map:
temp_map[temp_key] = {}
temp_map = temp_map[temp_key]
temp_key = temp_path_item
# temp_map points to deepest nested map now, temp_key is
# the last item in the path
temp_map[temp_key] = p_value
else:
final_input[target_prop] = p_value
inputs_dict[arg_position] = final_input
else:
inputs_dict[arg_position] = p_value
inputs_list = list()
keys = sorted(inputs_dict.keys())
for k in keys:
inputs_list.append(inputs_dict[k])
return inputs_list
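    # Illustrative only (the spec values below are hypothetical): with
    #   input_mapping = [
    #       {"input_parameter": "genome", "target_argument_position": 0,
    #        "target_property": "params/genome_ref"},
    #       {"narrative_system_variable": "workspace",
    #        "target_argument_position": 0, "target_property": "workspace"}]
    # and params = {"genome": "MyGenome"}, _map_inputs returns
    #   [{"params": {"genome_ref": "MyGenome"}, "workspace": <current workspace>}]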
def _generate_input(self, generator):
"""
Generates an input value using rules given by
NarrativeMethodStore.AutoGeneratedValue.
generator - dict
has 3 optional properties:
prefix - if present, is prepended to the generated string.
symbols - if present is the number of symbols to autogenerate (if
not present, default=8)
suffix - if present, is appended to the generated string.
So, if generator is None or an empty dict, returns an 8-symbol string.
"""
        generator = generator or {}  # tolerate None, as the docstring allows
        symbols = 8
        if 'symbols' in generator:
            try:
                symbols = int(generator['symbols'])
            except (TypeError, ValueError):
raise ValueError(
'The "symbols" input to the generated value must be an ' +
'integer > 0!'
)
if symbols < 1:
raise ValueError(
'Must have at least 1 symbol to randomly generate!'
)
ret = ''.join([chr(random.randrange(0, 26) + ord('A'))
for _ in range(symbols)])
if 'prefix' in generator:
ret = str(generator['prefix']) + ret
if 'suffix' in generator:
ret = ret + str(generator['suffix'])
return ret
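    # For example (illustrative): {"prefix": "obj_", "symbols": 6, "suffix": ".out"}
    # could yield something like "obj_KQZWRA.out", while None or {} yields a
    # random 8-letter string such as "XKCDQWER".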
def _send_comm_message(self, msg_type, content):
JobComm().send_comm_message(msg_type, content)
def register_new_job(self, job: Job) -> None:
JobManager().register_new_job(job)
self._send_comm_message("new_job", {"job_id": job.job_id})
JobComm().lookup_job_state(job.job_id)
JobComm().start_job_status_loop()
| {
"content_hash": "d90d522dbc17bb7ce434918cf3fd30b3",
"timestamp": "",
"source": "github",
"line_count": 850,
"max_line_length": 108,
"avg_line_length": 42.35058823529412,
"alnum_prop": 0.5313906328129341,
"repo_name": "pranjan77/narrative",
"id": "5640ec3e4f4361f32847f49838a538ea3ecfcbdb",
"size": "35998",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/biokbase/narrative/jobs/appmanager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "159688"
},
{
"name": "HTML",
"bytes": "113401"
},
{
"name": "JavaScript",
"bytes": "8064370"
},
{
"name": "Lua",
"bytes": "89680"
},
{
"name": "Makefile",
"bytes": "9726"
},
{
"name": "PHP",
"bytes": "1691"
},
{
"name": "Python",
"bytes": "3697412"
},
{
"name": "R",
"bytes": "39956"
},
{
"name": "Ruby",
"bytes": "3328"
},
{
"name": "Shell",
"bytes": "18976"
},
{
"name": "Smarty",
"bytes": "9051"
}
],
"symlink_target": ""
} |
import copy
import pytest
from configuration import available_ports, ALL_TEST_CIPHERS, ALL_TEST_CURVES, ALL_TEST_CERTS
from common import ProviderOptions, Protocols, data_bytes
from fixtures import managed_process
from providers import Provider, S2N, OpenSSL, GnuTLS
from utils import invalid_test_parameters, get_parameter_name, get_expected_s2n_version, get_expected_openssl_version, \
to_bytes, get_expected_gnutls_version
def test_nothing():
"""
    Sometimes the version negotiation test parameters, in combination with the s2n
    libcrypto, result in no test cases existing. In this case, pass a nothing test to
avoid marking the entire codebuild run as failed.
"""
assert True
def invalid_version_negotiation_test_parameters(*args, **kwargs):
# Since s2nd/s2nc will always be using TLS 1.3, make sure the libcrypto is compatible
if invalid_test_parameters(**{
"provider": S2N,
"protocol": Protocols.TLS13
}):
return True
return invalid_test_parameters(*args, **kwargs)
@pytest.mark.uncollect_if(func=invalid_version_negotiation_test_parameters)
@pytest.mark.parametrize("cipher", ALL_TEST_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("curve", ALL_TEST_CURVES, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name)
@pytest.mark.parametrize("protocol", [Protocols.TLS12, Protocols.TLS11, Protocols.TLS10], ids=get_parameter_name)
@pytest.mark.parametrize("provider", [S2N, OpenSSL, GnuTLS], ids=get_parameter_name)
@pytest.mark.parametrize("other_provider", [S2N], ids=get_parameter_name)
def test_s2nc_tls13_negotiates_tls12(managed_process, cipher, curve, certificate, protocol, provider, other_provider):
port = next(available_ports)
random_bytes = data_bytes(24)
client_options = ProviderOptions(
mode=Provider.ClientMode,
port=port,
cipher=cipher,
curve=curve,
data_to_send=random_bytes,
insecure=True,
protocol=Protocols.TLS13
)
server_options = copy.copy(client_options)
server_options.data_to_send = None
server_options.mode = Provider.ServerMode
server_options.key = certificate.key
server_options.cert = certificate.cert
server_options.protocol = protocol
kill_marker = None
if provider == GnuTLS:
kill_marker = random_bytes
server = managed_process(provider, server_options,
timeout=5, kill_marker=kill_marker)
client = managed_process(S2N, client_options, timeout=5)
client_version = get_expected_s2n_version(Protocols.TLS13, provider)
actual_version = get_expected_s2n_version(protocol, provider)
for results in client.get_results():
results.assert_success()
assert to_bytes("Client protocol version: {}".format(
client_version)) in results.stdout
assert to_bytes("Actual protocol version: {}".format(
actual_version)) in results.stdout
for results in server.get_results():
results.assert_success()
if provider is S2N:
# The server is only TLS12, so it reads the version from the CLIENT_HELLO, which is never above TLS12
# This check only cares about S2N. Trying to maintain expected output of other providers doesn't
# add benefit to whether the S2N client was able to negotiate a lower TLS version.
assert to_bytes("Client protocol version: {}".format(
actual_version)) in results.stdout
assert to_bytes("Actual protocol version: {}".format(
actual_version)) in results.stdout
assert any([
random_bytes[1:] in stream
for stream in results.output_streams()
])
@pytest.mark.uncollect_if(func=invalid_version_negotiation_test_parameters)
@pytest.mark.parametrize("cipher", ALL_TEST_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("curve", ALL_TEST_CURVES, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name)
@pytest.mark.parametrize("protocol", [Protocols.TLS12, Protocols.TLS11, Protocols.TLS10], ids=get_parameter_name)
@pytest.mark.parametrize("provider", [S2N, OpenSSL, GnuTLS], ids=get_parameter_name)
@pytest.mark.parametrize("other_provider", [S2N], ids=get_parameter_name)
def test_s2nd_tls13_negotiates_tls12(managed_process, cipher, curve, certificate, protocol, provider, other_provider):
port = next(available_ports)
random_bytes = data_bytes(24)
client_options = ProviderOptions(
mode=Provider.ClientMode,
port=port,
cipher=cipher,
curve=curve,
data_to_send=random_bytes,
insecure=True,
protocol=protocol
)
server_options = copy.copy(client_options)
server_options.data_to_send = None
server_options.mode = Provider.ServerMode
server_options.key = certificate.key
server_options.cert = certificate.cert
# When the protocol is set to TLS13, the s2n server provider will default to using
# all ciphers, not just the TLS13 ciphers. This is the desired behavior for this test.
server_options.protocol = Protocols.TLS13
server = managed_process(S2N, server_options, timeout=5)
client = managed_process(provider, client_options, timeout=5)
server_version = get_expected_s2n_version(Protocols.TLS13, provider)
actual_version = get_expected_s2n_version(protocol, provider)
for results in client.get_results():
results.assert_success()
if provider is S2N:
# The client will get the server version from the SERVER HELLO, which will be the negotiated version
assert to_bytes("Server protocol version: {}".format(
actual_version)) in results.stdout
assert to_bytes("Actual protocol version: {}".format(
actual_version)) in results.stdout
elif provider is OpenSSL:
# This check cares about other providers because we want to know that they did negotiate the version
# that our S2N server intended to negotiate.
openssl_version = get_expected_openssl_version(protocol)
assert to_bytes("Protocol : {}".format(
openssl_version)) in results.stdout
elif provider is GnuTLS:
gnutls_version = get_expected_gnutls_version(protocol)
assert to_bytes(f"Version: {gnutls_version}") in results.stdout
for results in server.get_results():
results.assert_success()
assert (
to_bytes("Server protocol version: {}".format(server_version))
in results.stdout
)
assert (
to_bytes("Actual protocol version: {}".format(actual_version))
in results.stdout
)
assert random_bytes[1:] in results.stdout
| {
"content_hash": "7d7b771c6744ed11a8fa3ff79a60ced5",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 120,
"avg_line_length": 43.264150943396224,
"alnum_prop": 0.6864369821194941,
"repo_name": "PKRoma/s2n",
"id": "769f9523cc3da19ff1b79d5b247a7ac36e1c6024",
"size": "6879",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/integrationv2/test_version_negotiation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "49171"
},
{
"name": "C",
"bytes": "7077615"
},
{
"name": "C++",
"bytes": "34247"
},
{
"name": "CMake",
"bytes": "34210"
},
{
"name": "CSS",
"bytes": "8130"
},
{
"name": "Coq",
"bytes": "29938"
},
{
"name": "Dockerfile",
"bytes": "3350"
},
{
"name": "Java",
"bytes": "3240"
},
{
"name": "Makefile",
"bytes": "307356"
},
{
"name": "Perl",
"bytes": "4252"
},
{
"name": "Python",
"bytes": "503940"
},
{
"name": "Ruby",
"bytes": "6523"
},
{
"name": "Rust",
"bytes": "120852"
},
{
"name": "Shell",
"bytes": "144346"
}
],
"symlink_target": ""
} |
import sys
from django.shortcuts import render
from circuits.models import Provider, Circuit
from dcim.models import Site, Rack, Device, ConsolePort, PowerPort, InterfaceConnection
from extras.models import UserAction
from ipam.models import Aggregate, Prefix, IPAddress, VLAN, VRF
from secrets.models import Secret
from tenancy.models import Tenant
def home(request):
stats = {
# Organization
'site_count': Site.objects.count(),
'tenant_count': Tenant.objects.count(),
# DCIM
'rack_count': Rack.objects.count(),
'device_count': Device.objects.count(),
'interface_connections_count': InterfaceConnection.objects.count(),
'console_connections_count': ConsolePort.objects.filter(cs_port__isnull=False).count(),
'power_connections_count': PowerPort.objects.filter(power_outlet__isnull=False).count(),
# IPAM
'vrf_count': VRF.objects.count(),
'aggregate_count': Aggregate.objects.count(),
'prefix_count': Prefix.objects.count(),
'ipaddress_count': IPAddress.objects.count(),
'vlan_count': VLAN.objects.count(),
# Circuits
'provider_count': Provider.objects.count(),
'circuit_count': Circuit.objects.count(),
# Secrets
'secret_count': Secret.objects.count(),
}
return render(request, 'home.html', {
'stats': stats,
'recent_activity': UserAction.objects.select_related('user')[:50]
})
def trigger_500(request):
"""Hot-wired method of triggering a server error to test reporting."""
raise Exception("Congratulations, you've triggered an exception! Go tell all your friends what an exceptional "
"person you are.")
def handle_500(request):
"""Custom server error handler"""
type_, error, traceback = sys.exc_info()
return render(request, '500.html', {
'exception': str(type_),
'error': error,
}, status=500)
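# To route server errors through handle_500, point Django's handler at it in
# the root URLconf (a sketch of the standard Django convention):
#
#   # netbox/urls.py
#   handler500 = 'netbox.views.handle_500'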
| {
"content_hash": "2a7877b84ad6ea4e5ed0000a7a12ab79",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 115,
"avg_line_length": 31.741935483870968,
"alnum_prop": 0.6529471544715447,
"repo_name": "rfdrake/netbox",
"id": "2da97a2cf23948a08ed38659419cd8e3402c0740",
"size": "1968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netbox/netbox/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "157535"
},
{
"name": "HTML",
"bytes": "328897"
},
{
"name": "JavaScript",
"bytes": "12423"
},
{
"name": "Nginx",
"bytes": "774"
},
{
"name": "Python",
"bytes": "593223"
},
{
"name": "Shell",
"bytes": "3080"
}
],
"symlink_target": ""
} |
'''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
'''
import os
import sys
import subprocess
# set path to common libraries
sys.path.append(subprocess.check_output("git rev-parse --show-toplevel", shell=True).rstrip("\n") + "/test/common")
import fit_common
# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class rackhd11_api_lookups(fit_common.unittest.TestCase):
def setUp(self):
# delete any instance of test lookup
api_data = fit_common.rackhdapi("/api/2.0/lookups")
for item in api_data['json']:
if item['macAddress'] == "00:0a:0a:0a:0a:0a":
fit_common.rackhdapi("/api/2.0/lookups/" + item['id'], action="delete")
def test_api_11_lookups_ID(self):
api_data = fit_common.rackhdapi("/api/1.1/lookups")
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
for item in api_data['json']:
self.assertEqual(fit_common.rackhdapi("/api/1.1/lookups/" + item['id'])
['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
# this test cross-references node MAC addresses to lookup tables
def test_api_11_lookups_cross_reference(self):
nodecatalog = fit_common.rackhdapi("/api/1.1/nodes")['json']
lookuptable = fit_common.rackhdapi("/api/1.1/lookups")['json']
errorlist = ""
for node in nodecatalog:
# get list of compute nodes with sku
if node['type'] == "compute" and 'sku' in node and 'identifiers' in node:
# find node entry mac addresses
for macaddr in node['identifiers']:
# find mac address in lookup table
for lookupid in lookuptable:
#verify node ID for mac address
if macaddr in lookupid['macAddress']:
if fit_common.VERBOSITY >= 2:
print "*** Checking Node ID: " + node['id'] + " MAC: " + macaddr
if 'node' not in lookupid:
errorlist = errorlist + "Missing node ID: " + node['id'] + " MAC: " + macaddr + "\n"
if node['id'] != lookupid['node']:
errorlist = errorlist + "Wrong node in lookup table ID: " + lookupid['id'] + "\n"
if errorlist != "":
print "**** Lookup Errors:"
print errorlist
self.assertEqual(errorlist, "", "Errors in lookup table detected.")
def test_api_11_lookups_post_get_delete(self):
node = fit_common.node_select()[0]
data_payload = {
"macAddress": "00:0a:0a:0a:0a:0a",
"ipAddress": "128.128.128.128",
"node": node
}
api_data = fit_common.rackhdapi("/api/1.1/lookups", action="post", payload=data_payload)
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
lookup_id = api_data['json']['id']
api_data = fit_common.rackhdapi("/api/1.1/lookups/" + lookup_id)
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
self.assertEqual(api_data['json']['macAddress'], "00:0a:0a:0a:0a:0a", "Bad lookup MAC Address")
self.assertEqual(api_data['json']['ipAddress'], "128.128.128.128", "Bad lookup IP Address")
self.assertEqual(api_data['json']['node'], node, "Bad lookup node ID")
api_data = fit_common.rackhdapi("/api/1.1/lookups/" + lookup_id, action="delete")
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
if __name__ == '__main__':
fit_common.unittest.main()
| {
"content_hash": "eed3b5eaae16f96569d308582b2c99a8",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 125,
"avg_line_length": 50.61538461538461,
"alnum_prop": 0.5802938196555217,
"repo_name": "tldavies/RackHD",
"id": "69aed20d6406db8b676a2e2781db4952fd7dc4f0",
"size": "3948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/tests/rackhd11/test_rackhd11_api_lookups.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "696"
},
{
"name": "Python",
"bytes": "706855"
},
{
"name": "Ruby",
"bytes": "6949"
},
{
"name": "Shell",
"bytes": "38109"
}
],
"symlink_target": ""
} |
try:
    from collections.abc import MutableMapping  # Python 3.3+
except ImportError:  # Python 2
    from collections import MutableMapping
import ctypes as ct
from functools import reduce
import multiprocessing
import os
import errno
import re
from .libbcc import lib, _RAW_CB_TYPE, _LOST_CB_TYPE
from .perf import Perf
from .utils import get_online_cpus
from .utils import get_possible_cpus
from subprocess import check_output
BPF_MAP_TYPE_HASH = 1
BPF_MAP_TYPE_ARRAY = 2
BPF_MAP_TYPE_PROG_ARRAY = 3
BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4
BPF_MAP_TYPE_PERCPU_HASH = 5
BPF_MAP_TYPE_PERCPU_ARRAY = 6
BPF_MAP_TYPE_STACK_TRACE = 7
BPF_MAP_TYPE_CGROUP_ARRAY = 8
BPF_MAP_TYPE_LRU_HASH = 9
BPF_MAP_TYPE_LRU_PERCPU_HASH = 10
BPF_MAP_TYPE_LPM_TRIE = 11
BPF_MAP_TYPE_ARRAY_OF_MAPS = 12
BPF_MAP_TYPE_HASH_OF_MAPS = 13
BPF_MAP_TYPE_DEVMAP = 14
BPF_MAP_TYPE_SOCKMAP = 15
BPF_MAP_TYPE_CPUMAP = 16
BPF_MAP_TYPE_XSKMAP = 17
BPF_MAP_TYPE_SOCKHASH = 18
map_type_name = {BPF_MAP_TYPE_HASH: "HASH",
BPF_MAP_TYPE_ARRAY: "ARRAY",
BPF_MAP_TYPE_PROG_ARRAY: "PROG_ARRAY",
BPF_MAP_TYPE_PERF_EVENT_ARRAY: "PERF_EVENT_ARRAY",
BPF_MAP_TYPE_PERCPU_HASH: "PERCPU_HASH",
BPF_MAP_TYPE_PERCPU_ARRAY: "PERCPU_ARRAY",
BPF_MAP_TYPE_STACK_TRACE: "STACK_TRACE",
BPF_MAP_TYPE_CGROUP_ARRAY: "CGROUP_ARRAY",
BPF_MAP_TYPE_LRU_HASH: "LRU_HASH",
BPF_MAP_TYPE_LRU_PERCPU_HASH: "LRU_PERCPU_HASH",
BPF_MAP_TYPE_LPM_TRIE: "LPM_TRIE",
BPF_MAP_TYPE_ARRAY_OF_MAPS: "ARRAY_OF_MAPS",
BPF_MAP_TYPE_HASH_OF_MAPS: "HASH_OF_MAPS",
BPF_MAP_TYPE_DEVMAP: "DEVMAP",
BPF_MAP_TYPE_SOCKMAP: "SOCKMAP",
BPF_MAP_TYPE_CPUMAP: "CPUMAP",
BPF_MAP_TYPE_XSKMAP: "XSKMAP",
BPF_MAP_TYPE_SOCKHASH: "SOCKHASH",}
stars_max = 40
log2_index_max = 65
linear_index_max = 1025
# helper functions, consider moving these to a utils module
def _stars(val, val_max, width):
i = 0
text = ""
while (1):
if (i > (width * val / val_max) - 1) or (i > width - 1):
break
text += "*"
i += 1
if val > val_max:
text = text[:-1] + "+"
return text
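# For example, _stars(20, 40, 40) yields 20 of 40 columns of "*", while a value
# above val_max fills the width and marks the overflow with a trailing "+".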
def _print_log2_hist(vals, val_type, strip_leading_zero):
global stars_max
idx_max = -1
val_max = 0
for i, v in enumerate(vals):
if v > 0: idx_max = i
if v > val_max: val_max = v
if idx_max <= 32:
header = " %-19s : count distribution"
body = "%10d -> %-10d : %-8d |%-*s|"
stars = stars_max
else:
header = " %-29s : count distribution"
body = "%20d -> %-20d : %-8d |%-*s|"
stars = int(stars_max / 2)
if idx_max > 0:
print(header % val_type)
for i in range(1, idx_max + 1):
low = (1 << i) >> 1
high = (1 << i) - 1
if (low == high):
low -= 1
val = vals[i]
if strip_leading_zero:
if val:
print(body % (low, high, val, stars,
_stars(val, val_max, stars)))
strip_leading_zero = False
else:
print(body % (low, high, val, stars,
_stars(val, val_max, stars)))
def _print_linear_hist(vals, val_type):
global stars_max
idx_max = -1
val_max = 0
for i, v in enumerate(vals):
if v > 0: idx_max = i
if v > val_max: val_max = v
header = " %-13s : count distribution"
body = " %-10d : %-8d |%-*s|"
stars = stars_max
if idx_max >= 0:
        print(header % val_type)
for i in range(0, idx_max + 1):
val = vals[i]
print(body % (i, val, stars,
_stars(val, val_max, stars)))
def get_table_type_name(ttype):
try:
return map_type_name[ttype]
except KeyError:
return "<unknown>"
def Table(bpf, map_id, map_fd, keytype, leaftype, name, **kwargs):
"""Table(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
Create a python object out of a reference to a bpf table handle"""
ttype = lib.bpf_table_type_id(bpf.module, map_id)
t = None
if ttype == BPF_MAP_TYPE_HASH:
t = HashTable(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_ARRAY:
t = Array(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_PROG_ARRAY:
t = ProgArray(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_PERF_EVENT_ARRAY:
t = PerfEventArray(bpf, map_id, map_fd, keytype, leaftype, name)
elif ttype == BPF_MAP_TYPE_PERCPU_HASH:
t = PerCpuHash(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
elif ttype == BPF_MAP_TYPE_PERCPU_ARRAY:
t = PerCpuArray(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
elif ttype == BPF_MAP_TYPE_LPM_TRIE:
t = LpmTrie(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_STACK_TRACE:
t = StackTrace(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_LRU_HASH:
t = LruHash(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_LRU_PERCPU_HASH:
t = LruPerCpuHash(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_CGROUP_ARRAY:
t = CgroupArray(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_DEVMAP:
t = DevMap(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_CPUMAP:
t = CpuMap(bpf, map_id, map_fd, keytype, leaftype)
    if t is None:
raise Exception("Unknown table type %d" % ttype)
return t
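# Callers normally reach Table() through the BPF object rather than directly.
# A sketch (assumes a program text 'prog' that declared, e.g., BPF_HASH(counts)):
#
#   from bcc import BPF
#   b = BPF(text=prog)
#   counts = b.get_table("counts")   # or simply b["counts"]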
class TableBase(MutableMapping):
def __init__(self, bpf, map_id, map_fd, keytype, leaftype, name=None):
self.bpf = bpf
self.map_id = map_id
self.map_fd = map_fd
self.Key = keytype
self.Leaf = leaftype
self.ttype = lib.bpf_table_type_id(self.bpf.module, self.map_id)
self.flags = lib.bpf_table_flags_id(self.bpf.module, self.map_id)
self._cbs = {}
self._name = name
def key_sprintf(self, key):
buf = ct.create_string_buffer(ct.sizeof(self.Key) * 8)
res = lib.bpf_table_key_snprintf(self.bpf.module, self.map_id, buf,
len(buf), ct.byref(key))
if res < 0:
raise Exception("Could not printf key")
return buf.value
def leaf_sprintf(self, leaf):
buf = ct.create_string_buffer(ct.sizeof(self.Leaf) * 8)
res = lib.bpf_table_leaf_snprintf(self.bpf.module, self.map_id, buf,
len(buf), ct.byref(leaf))
if res < 0:
raise Exception("Could not printf leaf")
return buf.value
def key_scanf(self, key_str):
key = self.Key()
res = lib.bpf_table_key_sscanf(self.bpf.module, self.map_id, key_str,
ct.byref(key))
if res < 0:
raise Exception("Could not scanf key")
return key
def leaf_scanf(self, leaf_str):
leaf = self.Leaf()
res = lib.bpf_table_leaf_sscanf(self.bpf.module, self.map_id, leaf_str,
ct.byref(leaf))
if res < 0:
raise Exception("Could not scanf leaf")
return leaf
def __getitem__(self, key):
leaf = self.Leaf()
res = lib.bpf_lookup_elem(self.map_fd, ct.byref(key), ct.byref(leaf))
if res < 0:
raise KeyError
return leaf
def __setitem__(self, key, leaf):
res = lib.bpf_update_elem(self.map_fd, ct.byref(key), ct.byref(leaf), 0)
if res < 0:
errstr = os.strerror(ct.get_errno())
raise Exception("Could not update table: %s" % errstr)
def __delitem__(self, key):
res = lib.bpf_delete_elem(self.map_fd, ct.byref(key))
if res < 0:
raise KeyError
# override the MutableMapping's implementation of these since they
# don't handle KeyError nicely
def itervalues(self):
for key in self:
# a map entry may be deleted in between discovering the key and
# fetching the value, suppress such errors
try:
yield self[key]
except KeyError:
pass
def iteritems(self):
for key in self:
try:
yield (key, self[key])
except KeyError:
pass
def items(self):
return [item for item in self.iteritems()]
def values(self):
return [value for value in self.itervalues()]
def clear(self):
# default clear uses popitem, which can race with the bpf prog
for k in self.keys():
self.__delitem__(k)
def zero(self):
# Even though this is not very efficient, we grab the entire list of
# keys before enumerating it. This helps avoid a potential race where
# the leaf assignment changes a hash table bucket that is being
# enumerated by the same loop, and may lead to a hang.
for k in list(self.keys()):
self[k] = self.Leaf()
def __iter__(self):
return TableBase.Iter(self)
def iter(self): return self.__iter__()
def keys(self): return self.__iter__()
class Iter(object):
def __init__(self, table):
self.table = table
self.key = None
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
self.key = self.table.next(self.key)
return self.key
def next(self, key):
next_key = self.Key()
if key is None:
res = lib.bpf_get_first_key(self.map_fd, ct.byref(next_key),
ct.sizeof(self.Key))
else:
res = lib.bpf_get_next_key(self.map_fd, ct.byref(key),
ct.byref(next_key))
if res < 0:
raise StopIteration()
return next_key
def print_log2_hist(self, val_type="value", section_header="Bucket ptr",
section_print_fn=None, bucket_fn=None, strip_leading_zero=None,
bucket_sort_fn=None):
"""print_log2_hist(val_type="value", section_header="Bucket ptr",
section_print_fn=None, bucket_fn=None,
strip_leading_zero=None, bucket_sort_fn=None):
Prints a table as a log2 histogram. The table must be stored as
log2. The val_type argument is optional, and is a column header.
If the histogram has a secondary key, multiple tables will print
and section_header can be used as a header description for each.
If section_print_fn is not None, it will be passed the bucket value
to format into a string as it sees fit. If bucket_fn is not None,
it will be used to produce a bucket value for the histogram keys.
If strip_leading_zero is not False, prints a histogram with the
leading zero buckets omitted.
If bucket_sort_fn is not None, it will be used to sort the buckets
before iterating them, and it is useful when there are multiple fields
in the secondary key.
The maximum index allowed is log2_index_max (65), which will
accommodate any 64-bit integer in the histogram.
"""
if isinstance(self.Key(), ct.Structure):
tmp = {}
f1 = self.Key._fields_[0][0]
f2 = self.Key._fields_[1][0]
# The above code assumes that self.Key._fields_[1][0] holds the
# slot. But a padding member may have been inserted here, which
# breaks the assumption and leads to chaos.
# TODO: this is a quick fix. Fixing/working around in the BCC
# internal library is the right thing to do.
if f2 == '__pad_1' and len(self.Key._fields_) == 3:
f2 = self.Key._fields_[2][0]
for k, v in self.items():
bucket = getattr(k, f1)
if bucket_fn:
bucket = bucket_fn(bucket)
vals = tmp[bucket] = tmp.get(bucket, [0] * log2_index_max)
slot = getattr(k, f2)
vals[slot] = v.value
buckets = list(tmp.keys())
if bucket_sort_fn:
buckets = bucket_sort_fn(buckets)
for bucket in buckets:
vals = tmp[bucket]
if section_print_fn:
print("\n%s = %s" % (section_header,
section_print_fn(bucket)))
else:
print("\n%s = %r" % (section_header, bucket))
_print_log2_hist(vals, val_type, strip_leading_zero)
else:
vals = [0] * log2_index_max
for k, v in self.items():
vals[k.value] = v.value
_print_log2_hist(vals, val_type, strip_leading_zero)
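# A minimal usage sketch (assumptions: the BPF C program declares
# BPF_HISTOGRAM(dist) and stores log2-bucketed values, e.g. via
# dist.increment(bpf_log2l(delta)); "b" is an attached BPF object):
#
#   b["dist"].print_log2_hist("usecs")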
def print_linear_hist(self, val_type="value", section_header="Bucket ptr",
section_print_fn=None, bucket_fn=None, bucket_sort_fn=None):
"""print_linear_hist(val_type="value", section_header="Bucket ptr",
section_print_fn=None, bucket_fn=None,
bucket_sort_fn=None)
Prints a table as a linear histogram. This is intended to span integer
ranges, eg, from 0 to 100. The val_type argument is optional, and is a
column header. If the histogram has a secondary key, multiple tables
will print and section_header can be used as a header description for
each. If section_print_fn is not None, it will be passed the bucket
value to format into a string as it sees fit. If bucket_fn is not None,
it will be used to produce a bucket value for the histogram keys.
If bucket_sort_fn is not None, it will be used to sort the buckets
before iterating them, and it is useful when there are multiple fields
in the secondary key.
The maximum index allowed is linear_index_max (1025), which is hoped
to be sufficient for integer ranges spanned.
"""
if isinstance(self.Key(), ct.Structure):
tmp = {}
f1 = self.Key._fields_[0][0]
f2 = self.Key._fields_[1][0]
for k, v in self.items():
bucket = getattr(k, f1)
if bucket_fn:
bucket = bucket_fn(bucket)
vals = tmp[bucket] = tmp.get(bucket, [0] * linear_index_max)
slot = getattr(k, f2)
vals[slot] = v.value
buckets = tmp.keys()
if bucket_sort_fn:
buckets = bucket_sort_fn(buckets)
for bucket in buckets:
vals = tmp[bucket]
if section_print_fn:
print("\n%s = %s" % (section_header,
section_print_fn(bucket)))
else:
print("\n%s = %r" % (section_header, bucket))
_print_linear_hist(vals, val_type)
else:
vals = [0] * linear_index_max
for k, v in self.items():
try:
vals[k.value] = v.value
except IndexError:
# Improve error text. If the limit proves a nuisance, this
# function can be rewritten to avoid having one.
raise IndexError(("Index in print_linear_hist() of %d " +
"exceeds max of %d.") % (k.value, linear_index_max))
_print_linear_hist(vals, val_type)
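# Usage sketch for the linear variant (assumption: the C side declares
# BPF_HISTOGRAM(dist, u32, 101) and increments slots in the range 0..100):
#
#   b["dist"].print_linear_hist("percent")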
class HashTable(TableBase):
def __init__(self, *args, **kwargs):
super(HashTable, self).__init__(*args, **kwargs)
def __len__(self):
i = 0
for k in self: i += 1
return i
class LruHash(HashTable):
def __init__(self, *args, **kwargs):
super(LruHash, self).__init__(*args, **kwargs)
class ArrayBase(TableBase):
def __init__(self, *args, **kwargs):
super(ArrayBase, self).__init__(*args, **kwargs)
self.max_entries = int(lib.bpf_table_max_entries_id(self.bpf.module,
self.map_id))
def _normalize_key(self, key):
if isinstance(key, int):
if key < 0:
key = len(self) + key
key = self.Key(key)
if not isinstance(key, ct._SimpleCData):
raise IndexError("Array index must be an integer type")
if key.value >= len(self):
raise IndexError("Array index out of range")
return key
def __len__(self):
return self.max_entries
def __getitem__(self, key):
key = self._normalize_key(key)
return super(ArrayBase, self).__getitem__(key)
def __setitem__(self, key, leaf):
key = self._normalize_key(key)
super(ArrayBase, self).__setitem__(key, leaf)
def __delitem__(self, key):
key = self._normalize_key(key)
super(ArrayBase, self).__delitem__(key)
def clearitem(self, key):
key = self._normalize_key(key)
leaf = self.Leaf()
res = lib.bpf_update_elem(self.map_fd, ct.byref(key), ct.byref(leaf), 0)
if res < 0:
raise Exception("Could not clear item")
def __iter__(self):
return ArrayBase.Iter(self, self.Key)
class Iter(object):
def __init__(self, table, keytype):
self.Key = keytype
self.table = table
self.i = -1
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
self.i += 1
if self.i == len(self.table):
raise StopIteration()
return self.Key(self.i)
class Array(ArrayBase):
def __init__(self, *args, **kwargs):
super(Array, self).__init__(*args, **kwargs)
def __delitem__(self, key):
# Delete in Array type does not have an effect, so zero out instead
self.clearitem(key)
class ProgArray(ArrayBase):
def __init__(self, *args, **kwargs):
super(ProgArray, self).__init__(*args, **kwargs)
def __setitem__(self, key, leaf):
if isinstance(leaf, int):
leaf = self.Leaf(leaf)
if isinstance(leaf, self.bpf.Function):
leaf = self.Leaf(leaf.fd)
super(ProgArray, self).__setitem__(key, leaf)
class FileDesc:
def __init__(self, fd):
if (fd is None) or (fd < 0):
raise Exception("Invalid file descriptor")
self.fd = fd
def clean_up(self):
if (self.fd is not None) and (self.fd >= 0):
os.close(self.fd)
self.fd = None
def __del__(self):
self.clean_up()
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, *args, **kwargs):
self.clean_up()
class CgroupArray(ArrayBase):
def __init__(self, *args, **kwargs):
super(CgroupArray, self).__init__(*args, **kwargs)
def __setitem__(self, key, leaf):
if isinstance(leaf, int):
super(CgroupArray, self).__setitem__(key, self.Leaf(leaf))
elif isinstance(leaf, str):
# TODO: Add os.O_CLOEXEC once we move to Python version >3.3
with FileDesc(os.open(leaf, os.O_RDONLY)) as f:
super(CgroupArray, self).__setitem__(key, self.Leaf(f.fd))
else:
raise Exception("Cgroup array key must be either FD or cgroup path")
class PerfEventArray(ArrayBase):
def __init__(self, *args, **kwargs):
super(PerfEventArray, self).__init__(*args, **kwargs)
self._open_key_fds = {}
self._event_class = None
def __del__(self):
keys = list(self._open_key_fds.keys())
for key in keys:
del self[key]
def __delitem__(self, key):
if key not in self._open_key_fds:
return
# Delete entry from the array
super(PerfEventArray, self).__delitem__(key)
key_id = (id(self), key)
if key_id in self.bpf.perf_buffers:
# The key is opened for perf ring buffer
lib.perf_reader_free(self.bpf.perf_buffers[key_id])
del self.bpf.perf_buffers[key_id]
del self._cbs[key]
else:
# The key is opened for perf event read
lib.bpf_close_perf_event_fd(self._open_key_fds[key])
del self._open_key_fds[key]
def _get_event_class(self):
ct_mapping = { 'char' : ct.c_char,
's8' : ct.c_char,
'unsigned char' : ct.c_ubyte,
'u8' : ct.c_ubyte,
'u8 *' : ct.c_char_p,
'char *' : ct.c_char_p,
'short' : ct.c_short,
's16' : ct.c_short,
'unsigned short' : ct.c_ushort,
'u16' : ct.c_ushort,
'int' : ct.c_int,
's32' : ct.c_int,
'enum' : ct.c_int,
'unsigned int' : ct.c_uint,
'u32' : ct.c_uint,
'long' : ct.c_long,
'unsigned long' : ct.c_ulong,
'long long' : ct.c_longlong,
's64' : ct.c_longlong,
'unsigned long long': ct.c_ulonglong,
'u64' : ct.c_ulonglong,
'__int128' : (ct.c_longlong * 2),
'unsigned __int128' : (ct.c_ulonglong * 2),
'void *' : ct.c_void_p }
# handle array types e.g. "int [16] foo"
array_type = re.compile(r"(.+) \[([0-9]+)\]$")
fields = []
num_fields = lib.bpf_perf_event_fields(self.bpf.module, self._name)
i = 0
while i < num_fields:
field = lib.bpf_perf_event_field(self.bpf.module, self._name, i)
m = re.match(r"(.*)#(.*)", field)
field_name = m.group(1)
field_type = m.group(2)
if re.match(r"enum .*", field_type):
field_type = "enum"
m = array_type.match(field_type)
try:
if m:
fields.append((field_name, ct_mapping[m.group(1)] * int(m.group(2))))
else:
fields.append((field_name, ct_mapping[field_type]))
except KeyError:
print("Type: '%s' not recognized. Please define the data with ctypes manually."
% field_type)
exit()
i += 1
return type('', (ct.Structure,), {'_fields_': fields})
def event(self, data):
"""event(data)
When ring buffers are opened to receive custom perf event,
the underlying event data struct which is defined in C in
the BPF program can be deduced via this function. This avoids
redundant definitions in Python.
"""
if self._event_class is None:
self._event_class = self._get_event_class()
return ct.cast(data, ct.POINTER(self._event_class)).contents
def open_perf_buffer(self, callback, page_cnt=8, lost_cb=None):
"""open_perf_buffers(callback)
Opens a set of per-cpu ring buffer to receive custom perf event
data from the bpf program. The callback will be invoked for each
event submitted from the kernel, up to millions per second. Use
page_cnt to change the size of the per-cpu ring buffer. The value
must be a power of two and defaults to 8.
"""
if page_cnt & (page_cnt - 1) != 0:
raise Exception("Perf buffer page_cnt must be a power of two")
for i in get_online_cpus():
self._open_perf_buffer(i, callback, page_cnt, lost_cb)
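# Sketch of the typical consumer loop (assumptions: the C program declares
# BPF_PERF_OUTPUT(events) and submits records via events.perf_submit(...);
# BPF.perf_buffer_poll() is used to drive the callbacks):
#
#   def handle_event(cpu, data, size):
#       event = b["events"].event(data)  # decode via the deduced ctypes class
#       print(event)
#
#   b["events"].open_perf_buffer(handle_event, page_cnt=64)
#   while True:
#       b.perf_buffer_poll()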
def _open_perf_buffer(self, cpu, callback, page_cnt, lost_cb):
def raw_cb_(_, data, size):
try:
callback(cpu, data, size)
except IOError as e:
if e.errno == errno.EPIPE:
exit()
else:
raise e
def lost_cb_(_, lost):
try:
lost_cb(lost)
except IOError as e:
if e.errno == errno.EPIPE:
exit()
else:
raise e
fn = _RAW_CB_TYPE(raw_cb_)
lost_fn = _LOST_CB_TYPE(lost_cb_) if lost_cb else ct.cast(None, _LOST_CB_TYPE)
reader = lib.bpf_open_perf_buffer(fn, lost_fn, None, -1, cpu, page_cnt)
if not reader:
raise Exception("Could not open perf buffer")
fd = lib.perf_reader_fd(reader)
self[self.Key(cpu)] = self.Leaf(fd)
self.bpf.perf_buffers[(id(self), cpu)] = reader
# keep a refcnt
self._cbs[cpu] = (fn, lost_fn)
# The actual fd is held by the perf reader, add to track opened keys
self._open_key_fds[cpu] = -1
def _open_perf_event(self, cpu, typ, config):
fd = lib.bpf_open_perf_event(typ, config, -1, cpu)
if fd < 0:
raise Exception("bpf_open_perf_event failed")
self[self.Key(cpu)] = self.Leaf(fd)
self._open_key_fds[cpu] = fd
def open_perf_event(self, typ, config):
"""open_perf_event(typ, config)
Configures the table such that calls from the bpf program to
table.perf_read(CUR_CPU_IDENTIFIER) will return the hardware
counter denoted by event ev on the local cpu.
"""
for i in get_online_cpus():
self._open_perf_event(i, typ, config)
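# Sketch (assumptions: bcc exposes PerfType/PerfHWConfig constants and the
# C program reads the counter via cnt.perf_read(CUR_CPU_IDENTIFIER)):
#
#   from bcc import PerfType, PerfHWConfig
#   b["cnt"].open_perf_event(PerfType.HARDWARE, PerfHWConfig.CPU_CYCLES)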
class PerCpuHash(HashTable):
def __init__(self, *args, **kwargs):
self.reducer = kwargs.pop("reducer", None)
super(PerCpuHash, self).__init__(*args, **kwargs)
self.sLeaf = self.Leaf
self.total_cpu = len(get_possible_cpus())
# This needs to be 8, as hard-coded in the Linux kernel.
self.alignment = ct.sizeof(self.sLeaf) % 8
if self.alignment == 0:
self.Leaf = self.sLeaf * self.total_cpu
else:
# Currently Float, Char, un-aligned structs are not supported
if self.sLeaf == ct.c_uint:
self.Leaf = ct.c_uint64 * self.total_cpu
elif self.sLeaf == ct.c_int:
self.Leaf = ct.c_int64 * self.total_cpu
else:
raise IndexError("Leaf must be aligned to 8 bytes")
def getvalue(self, key):
result = super(PerCpuHash, self).__getitem__(key)
if self.alignment == 0:
ret = result
else:
ret = (self.sLeaf * self.total_cpu)()
for i in range(0, self.total_cpu):
ret[i] = result[i]
return ret
def __getitem__(self, key):
if self.reducer:
return reduce(self.reducer, self.getvalue(key))
else:
return self.getvalue(key)
def __setitem__(self, key, leaf):
super(PerCpuHash, self).__setitem__(key, leaf)
def sum(self, key):
if isinstance(self.Leaf(), ct.Structure):
raise IndexError("Leaf must be an integer type for default sum functions")
return self.sLeaf(sum(self.getvalue(key)))
def max(self, key):
if isinstance(self.Leaf(), ct.Structure):
raise IndexError("Leaf must be an integer type for default max functions")
return self.sLeaf(max(self.getvalue(key)))
def average(self, key):
result = self.sum(key)
return result.value / self.total_cpu
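# Sketch of reading a per-cpu hash (assumptions: "counts" is declared with
# BPF_PERCPU_HASH in the C program and get_table() accepts a reducer kwarg):
#
#   counts = b.get_table("counts", reducer=lambda a, b: a + b)
#   total = counts[some_key]          # per-cpu values folded by the reducer
#   raw = counts.getvalue(some_key)   # or the raw per-cpu array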
class LruPerCpuHash(PerCpuHash):
def __init__(self, *args, **kwargs):
super(LruPerCpuHash, self).__init__(*args, **kwargs)
class PerCpuArray(ArrayBase):
def __init__(self, *args, **kwargs):
self.reducer = kwargs.pop("reducer", None)
super(PerCpuArray, self).__init__(*args, **kwargs)
self.sLeaf = self.Leaf
self.total_cpu = len(get_possible_cpus())
# This needs to be 8, as hard-coded in the Linux kernel.
self.alignment = ct.sizeof(self.sLeaf) % 8
if self.alignment == 0:
self.Leaf = self.sLeaf * self.total_cpu
else:
# Currently Float, Char, un-aligned structs are not supported
if self.sLeaf == ct.c_uint:
self.Leaf = ct.c_uint64 * self.total_cpu
elif self.sLeaf == ct.c_int:
self.Leaf = ct.c_int64 * self.total_cpu
else:
raise IndexError("Leaf must be aligned to 8 bytes")
def getvalue(self, key):
result = super(PerCpuArray, self).__getitem__(key)
if self.alignment == 0:
ret = result
else:
ret = (self.sLeaf * self.total_cpu)()
for i in range(0, self.total_cpu):
ret[i] = result[i]
return ret
def __getitem__(self, key):
if (self.reducer):
return reduce(self.reducer, self.getvalue(key))
else:
return self.getvalue(key)
def __setitem__(self, key, leaf):
super(PerCpuArray, self).__setitem__(key, leaf)
def __delitem__(self, key):
# Delete in this type does not have an effect, so zero out instead
self.clearitem(key)
def sum(self, key):
if isinstance(self.Leaf(), ct.Structure):
raise IndexError("Leaf must be an integer type for default sum functions")
return self.sLeaf(sum(self.getvalue(key)))
def max(self, key):
if isinstance(self.Leaf(), ct.Structure):
raise IndexError("Leaf must be an integer type for default max functions")
return self.sLeaf(max(self.getvalue(key)))
def average(self, key):
result = self.sum(key)
return result.value / self.total_cpu
class LpmTrie(TableBase):
def __init__(self, *args, **kwargs):
super(LpmTrie, self).__init__(*args, **kwargs)
def __len__(self):
raise NotImplementedError
class StackTrace(TableBase):
MAX_DEPTH = 127
BPF_F_STACK_BUILD_ID = (1<<5)
BPF_STACK_BUILD_ID_EMPTY = 0  # can't get stacktrace
BPF_STACK_BUILD_ID_VALID = 1  # valid build-id, ip
BPF_STACK_BUILD_ID_IP = 2  # fallback to ip
def __init__(self, *args, **kwargs):
super(StackTrace, self).__init__(*args, **kwargs)
class StackWalker(object):
def __init__(self, stack, flags, resolve=None):
self.stack = stack
self.n = -1
self.resolve = resolve
self.flags = flags
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
self.n += 1
if self.n == StackTrace.MAX_DEPTH:
raise StopIteration()
if self.flags & StackTrace.BPF_F_STACK_BUILD_ID:
addr = self.stack.trace[self.n]
if addr.status == StackTrace.BPF_STACK_BUILD_ID_IP or \
addr.status == StackTrace.BPF_STACK_BUILD_ID_EMPTY:
raise StopIteration()
else:
addr = self.stack.ip[self.n]
if addr == 0:
raise StopIteration()
return self.resolve(addr) if self.resolve else addr
def walk(self, stack_id, resolve=None):
return StackTrace.StackWalker(self[self.Key(stack_id)], self.flags, resolve)
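# Sketch (assumptions: the C program declares BPF_STACK_TRACE(stack_traces, 1024)
# and records ids via stack_traces.get_stackid(...); b.ksym resolves kernel
# addresses to symbol names):
#
#   for addr in b["stack_traces"].walk(stack_id, resolve=b.ksym):
#       print(addr)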
def __len__(self):
i = 0
for k in self: i += 1
return i
def clear(self):
pass
class DevMap(ArrayBase):
def __init__(self, *args, **kwargs):
super(DevMap, self).__init__(*args, **kwargs)
class CpuMap(ArrayBase):
def __init__(self, *args, **kwargs):
super(CpuMap, self).__init__(*args, **kwargs)
| {
"content_hash": "d9e111ed7419c6ce08be1830d0438fcc",
"timestamp": "",
"source": "github",
"line_count": 885,
"max_line_length": 95,
"avg_line_length": 36.31299435028249,
"alnum_prop": 0.5374490462706538,
"repo_name": "mcaleavya/bcc",
"id": "d33d46eb017dbd0d5b1f8b89d13e2fb39ca198d5",
"size": "32706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/bcc/table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "321768"
},
{
"name": "C++",
"bytes": "920975"
},
{
"name": "CMake",
"bytes": "38841"
},
{
"name": "HTML",
"bytes": "2979"
},
{
"name": "LLVM",
"bytes": "4379"
},
{
"name": "Limbo",
"bytes": "6069"
},
{
"name": "Lua",
"bytes": "298149"
},
{
"name": "Makefile",
"bytes": "1481"
},
{
"name": "P4",
"bytes": "9242"
},
{
"name": "Python",
"bytes": "1206933"
},
{
"name": "Shell",
"bytes": "17023"
},
{
"name": "Yacc",
"bytes": "19817"
}
],
"symlink_target": ""
} |
from .basic_dataset_profiler import BasicDatasetProfiler
from .basic_suite_builder_profiler import BasicSuiteBuilderProfiler
from .columns_exist import ColumnsExistProfiler
| {
"content_hash": "9c97099f1b37a11951e15087b2154f07",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 67,
"avg_line_length": 57.666666666666664,
"alnum_prop": 0.8786127167630058,
"repo_name": "great-expectations/great_expectations",
"id": "6f844ad0fe90a58ab65a4fdae7c6d881495f67d9",
"size": "173",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "great_expectations/profile/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23771"
},
{
"name": "Dockerfile",
"bytes": "2388"
},
{
"name": "HTML",
"bytes": "27311"
},
{
"name": "JavaScript",
"bytes": "45960"
},
{
"name": "Jinja",
"bytes": "66650"
},
{
"name": "Jupyter Notebook",
"bytes": "816323"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "Makefile",
"bytes": "657"
},
{
"name": "Python",
"bytes": "15728777"
},
{
"name": "Shell",
"bytes": "2930"
}
],
"symlink_target": ""
} |
import codecs
import os
import re
import sys
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Utilities',
]
def read(*parts):
# intentionally *not* adding an encoding option to open. See:
# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
return codecs.open(os.path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file,
re.M,
)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
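# For example, if terrarium/__init__.py contains a line such as
#   __version__ = '1.0.0'
# then find_version('terrarium', '__init__.py') returns '1.0.0'.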
def main():
python_version = sys.version_info[:2]
install_requires = [
'virtualenv>=12.0,<13.0',
]
if python_version < (2, 7) or (3, 0) <= python_version <= (3, 1):
install_requires += ['argparse']
setup(
name='terrarium',
version=find_version('terrarium', '__init__.py'),
author='Kyle Gibson',
author_email='kyle.gibson@frozenonline.com',
description='Package and ship relocatable python virtualenvs',
license='BSD',
url='http://github.com/policystat/terrarium',
packages=['terrarium'],
long_description=read('README.rst'),
install_requires=install_requires,
entry_points={
'console_scripts':
['terrarium = terrarium.terrarium:main']
},
classifiers=classifiers,
zip_safe=False,
)
if __name__ == '__main__':
main()
| {
"content_hash": "43fb56a7af3093e2a7fb9b315c27315d",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 72,
"avg_line_length": 28.183098591549296,
"alnum_prop": 0.5962018990504747,
"repo_name": "hangtwenty/terrarium",
"id": "4d5de5717ea9b40260ddfaaf036547349aaa709a",
"size": "2001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "50789"
}
],
"symlink_target": ""
} |
import os
from sketchbook import Sketch
_TEST_CURIO = bool(os.environ.get("TEST_CURIO", False))
if _TEST_CURIO:
from sketchbook import CurioSketchContext
from sketchbook.testutils import CurioTestHelper
helper = CurioTestHelper(__file__)
default_skt_ctx = CurioSketchContext()
else:
from sketchbook import AsyncioSketchContext
from sketchbook.testutils import AsyncioTestHelper
helper = AsyncioTestHelper(__file__)
default_skt_ctx = AsyncioSketchContext()
class OutputTestCase:
@helper.force_sync
async def test_html_escape(self) -> None:
skt = Sketch("Hello, <%html= a %>!", skt_ctx=default_skt_ctx)
assert (
await skt.draw(a="<h1>world</h1>")
== "Hello, <h1>world</h1>!"
)
@helper.force_sync
async def test_no_escape(self) -> None:
skt = Sketch("Hello, <%r= a %>!", skt_ctx=default_skt_ctx)
assert await skt.draw(a="<h1>world</h1>") == "Hello, <h1>world</h1>!"
@helper.force_sync
async def test_json_escape(self) -> None:
skt = Sketch('{"name": <%json= name %>}', skt_ctx=default_skt_ctx)
assert (
await skt.draw(name='{"name": "admin"}')
== '{"name": "{\\"name\\": \\"admin\\"}"}'
)
@helper.force_sync
async def test_url_escape(self) -> None:
skt = Sketch(
"https://www.example.com/?user=<%u= name %>",
skt_ctx=default_skt_ctx,
)
assert await skt.draw(name="a&redirect=https://www.example2.com/") == (
"https://www.example.com/?user=a%26redirect%3Dhttps%3A%2F%2F"
"www.example2.com%2F"
)
@helper.force_sync
async def test_url_without_plus_escape(self) -> None:
skt = Sketch(
"https://www.example.com/?user=<%url_without_plus= name %>",
skt_ctx=default_skt_ctx,
)
assert (
await skt.draw(name="John Smith")
== "https://www.example.com/?user=John%20Smith"
)
@helper.force_sync
async def test_custom_escape_fn(self) -> None:
def _number_fn(i: int) -> str:
return str(i + 1)
if _TEST_CURIO:
skt_ctx = CurioSketchContext(
custom_escape_fns={"number_plus_one": _number_fn}
)
else:
skt_ctx = AsyncioSketchContext(
custom_escape_fns={"number_plus_one": _number_fn}
)
skt = Sketch(
"The result is <% number_plus_one= a %>.", skt_ctx=skt_ctx
)
assert await skt.draw(a=12345) == "The result is 12346."
| {
"content_hash": "e6c78418fe1b7d6ffdcd2cc9119d260d",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 79,
"avg_line_length": 29.56179775280899,
"alnum_prop": 0.5594830862789814,
"repo_name": "futursolo/sketchbook",
"id": "b62aeeb20bed45fe2744ec9a7da49e9fea8f9eee",
"size": "3280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_output.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "529"
},
{
"name": "Python",
"bytes": "82985"
}
],
"symlink_target": ""
} |
from ptm.factories.base import TemplatedAppFactory
class AppFactory(TemplatedAppFactory):
pass
| {
"content_hash": "c85444e9aa917d00077e86e8430405c7",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 50,
"avg_line_length": 20.2,
"alnum_prop": 0.8217821782178217,
"repo_name": "GrivIN/ptm",
"id": "a354e58f985d2670c9c5bd647c7573c27ad5b281",
"size": "101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ptm/templates/python/factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20916"
}
],
"symlink_target": ""
} |
import logging
import unittest
import MySQLdb
import environment
import tablet
import utils
use_mysqlctld = True
tablet_master = tablet.Tablet(use_mysqlctld=use_mysqlctld)
tablet_replica1 = tablet.Tablet(use_mysqlctld=use_mysqlctld)
tablet_replica2 = tablet.Tablet(use_mysqlctld=use_mysqlctld)
setup_procs = []
def setUpModule():
try:
environment.topo_server().setup()
# start mysql instance external to the test
global setup_procs
setup_procs = [
tablet_master.init_mysql(),
tablet_replica1.init_mysql(),
tablet_replica2.init_mysql(),
]
if use_mysqlctld:
tablet_master.wait_for_mysqlctl_socket()
tablet_replica1.wait_for_mysqlctl_socket()
tablet_replica2.wait_for_mysqlctl_socket()
else:
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
if use_mysqlctld:
# Try to terminate mysqlctld gracefully, so it kills its mysqld.
for proc in setup_procs:
utils.kill_sub_process(proc, soft=True)
teardown_procs = setup_procs
else:
teardown_procs = [
tablet_master.teardown_mysql(),
tablet_replica1.teardown_mysql(),
tablet_replica2.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
tablet_master.remove_tree()
tablet_replica1.remove_tree()
tablet_replica2.remove_tree()
class TestBackup(unittest.TestCase):
def setUp(self):
for t in tablet_master, tablet_replica1:
t.create_db('vt_test_keyspace')
tablet_master.init_tablet('master', 'test_keyspace', '0', start=True,
supports_backups=True)
tablet_replica1.init_tablet('replica', 'test_keyspace', '0', start=True,
supports_backups=True)
utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
tablet_master.tablet_alias])
def tearDown(self):
for t in tablet_master, tablet_replica1:
t.kill_vttablet()
tablet.Tablet.check_vttablet_count()
environment.topo_server().wipe()
for t in [tablet_master, tablet_replica1, tablet_replica2]:
t.reset_replication()
t.set_semi_sync_enabled(master=False)
t.clean_dbs()
for backup in self._list_backups():
self._remove_backup(backup)
_create_vt_insert_test = '''create table vt_insert_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB'''
def _insert_data(self, t, index):
"""Add a single row with value 'index' to the given tablet."""
t.mquery(
'vt_test_keyspace',
"insert into vt_insert_test (msg) values ('test %s')" %
index, write=True)
def _check_data(self, t, count, msg):
"""Check that the specified tablet has the expected number of rows."""
timeout = 10
while True:
try:
result = t.mquery(
'vt_test_keyspace', 'select count(*) from vt_insert_test')
if result[0][0] == count:
break
except MySQLdb.DatabaseError:
# ignore exceptions, we'll just timeout (the tablet creation
# can take some time to replicate, and we get a 'table vt_insert_test
# does not exist' exception in some rare cases)
logging.exception('exception waiting for data to replicate')
timeout = utils.wait_step(msg, timeout)
def _restore(self, t):
"""Erase mysql/tablet dir, then start tablet with restore enabled."""
self._reset_tablet_dir(t)
t.start_vttablet(wait_for_state='SERVING',
init_tablet_type='replica',
init_keyspace='test_keyspace',
init_shard='0',
supports_backups=True)
def _reset_tablet_dir(self, t):
"""Stop mysql, delete everything including tablet dir, restart mysql."""
utils.wait_procs([t.teardown_mysql()])
t.remove_tree()
proc = t.init_mysql()
if use_mysqlctld:
t.wait_for_mysqlctl_socket()
else:
utils.wait_procs([proc])
def _list_backups(self):
"""Get a list of backup names for the test shard."""
backups, _ = utils.run_vtctl(tablet.get_backup_storage_flags() +
['ListBackups', 'test_keyspace/0'],
mode=utils.VTCTL_VTCTL, trap_output=True)
return backups.splitlines()
def _remove_backup(self, backup):
"""Remove a named backup from the test shard."""
utils.run_vtctl(
tablet.get_backup_storage_flags() +
['RemoveBackup', 'test_keyspace/0', backup],
auto_log=True, mode=utils.VTCTL_VTCTL)
def test_backup(self):
"""Test backup flow.
test_backup will:
- create a shard with master and replica1 only
- run InitShardMaster
- insert some data
- take a backup
- insert more data on the master
- bring up tablet_replica2 after the fact, let it restore the backup
- check all data is right (before+after backup data)
- list the backup, remove it
"""
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_data(tablet_master, 2)
# now bring up the other slave, letting it restore from backup.
self._restore(tablet_replica2)
# check the new slave has the data
self._check_data(tablet_replica2, 2, 'replica2 tablet getting data')
# check that the backup shows up in the listing
backups = self._list_backups()
logging.debug('list of backups: %s', backups)
self.assertEqual(len(backups), 1)
self.assertTrue(backups[0].endswith(tablet_replica1.tablet_alias))
# remove the backup and check that the list is empty
self._remove_backup(backups[0])
backups = self._list_backups()
logging.debug('list of backups after remove: %s', backups)
self.assertEqual(len(backups), 0)
tablet_replica2.kill_vttablet()
def test_master_slave_same_backup(self):
"""Test a master and slave from the same backup.
Check that a slave and master both restored from the same backup
can replicate successfully.
"""
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_data(tablet_master, 2)
# now bring up the other slave, letting it restore from backup.
self._restore(tablet_replica2)
# check the new slave has the data
self._check_data(tablet_replica2, 2, 'replica2 tablet getting data')
# Promote replica2 to master.
utils.run_vtctl(['PlannedReparentShard', 'test_keyspace/0',
tablet_replica2.tablet_alias])
# insert more data on replica2 (current master)
self._insert_data(tablet_replica2, 3)
# Force replica1 to restore from backup.
tablet_replica1.kill_vttablet()
self._restore(tablet_replica1)
# wait for replica1 to catch up.
self._check_data(tablet_replica1, 3,
'replica1 getting data from restored master')
tablet_replica2.kill_vttablet()
def _restore_old_master_test(self, restore_method):
"""Test that a former master replicates correctly after being restored.
- Take a backup.
- Reparent from old master to new master.
- Force old master to restore from a previous backup using restore_method.
Args:
restore_method: function accepting one parameter of type tablet.Tablet,
this function is called to force a restore on the provided tablet
"""
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_data(tablet_master, 2)
# reparent to replica1
utils.run_vtctl(['PlannedReparentShard', 'test_keyspace/0',
tablet_replica1.tablet_alias])
# insert more data on new master
self._insert_data(tablet_replica1, 3)
# force the old master to restore at the latest backup.
restore_method(tablet_master)
# wait for it to catch up.
self._check_data(tablet_master, 3, 'former master catches up after restore')
def test_restore_old_master(self):
def _restore_using_kill(t):
t.kill_vttablet()
self._restore(t)
self._restore_old_master_test(_restore_using_kill)
def test_in_place_restore(self):
def _restore_in_place(t):
utils.run_vtctl(['RestoreFromBackup', t.tablet_alias], auto_log=True)
self._restore_old_master_test(_restore_in_place)
if __name__ == '__main__':
utils.main()
| {
"content_hash": "be7045ba2ec65db73a6fa10f835d3055",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 80,
"avg_line_length": 32.12627986348123,
"alnum_prop": 0.6586635504090088,
"repo_name": "erzel/vitess",
"id": "7a231567b1173bc76e8ac01f84d073bc1339f112",
"size": "9436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/backup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "10253"
},
{
"name": "CSS",
"bytes": "213341"
},
{
"name": "Go",
"bytes": "5244097"
},
{
"name": "HTML",
"bytes": "53738"
},
{
"name": "Java",
"bytes": "721903"
},
{
"name": "JavaScript",
"bytes": "41385"
},
{
"name": "Liquid",
"bytes": "6896"
},
{
"name": "Makefile",
"bytes": "7962"
},
{
"name": "PHP",
"bytes": "1001625"
},
{
"name": "Protocol Buffer",
"bytes": "99498"
},
{
"name": "Python",
"bytes": "867427"
},
{
"name": "Ruby",
"bytes": "466"
},
{
"name": "Shell",
"bytes": "41201"
},
{
"name": "TypeScript",
"bytes": "134312"
},
{
"name": "Yacc",
"bytes": "21577"
}
],
"symlink_target": ""
} |
from antlr4 import InputStream, CommonTokenStream, ParseTreeWalker
from parse.MATLABLexer import MATLABLexer
from parse.MATLABParser import MATLABParser
from TranslateListener import TranslateListener
from error.ErrorListener import ParseErrorExceptionListener
from error.Errors import ParseError
def parse(in_str):
if in_str is None:
in_str = "function y = foo(x)\n"
chars = InputStream.InputStream(in_str)
lexer = MATLABLexer(chars)
tokens = CommonTokenStream(lexer)
parser = MATLABParser(tokens)
try:
# Remove existing console error listener
# NB: as of 20150708 pip install of antlr4 needs Recognizer.py to be patched
# to add the removeErrorListener methods
parser.removeErrorListeners()
except:
pass
# Throw if parse fails
parser.addErrorListener(ParseErrorExceptionListener.INSTANCE)
errorDispatch = parser.getErrorListenerDispatch()
tree = parser.fileDecl()
return tree
def translate(tree=None, string=None):
if tree is None:
tree = parse(string)
# Actually do the walking
evaluator = TranslateListener()
walker = ParseTreeWalker()
walker.walk(evaluator, tree)
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
with open(sys.argv[1], 'r') as f:
in_str = f.read()
else:
in_str = None
translate(string=in_str)
| {
"content_hash": "93d6fbbcc7567330c396f2448fc1177b",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 84,
"avg_line_length": 30.67391304347826,
"alnum_prop": 0.6853295535081503,
"repo_name": "mattmcd/ParseMATLAB",
"id": "982cdb8f46eb7ae96fd41f41d2014d9f135c2cd0",
"size": "1411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transmat/translate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "3744"
},
{
"name": "M",
"bytes": "40"
},
{
"name": "Makefile",
"bytes": "885"
},
{
"name": "Matlab",
"bytes": "852"
},
{
"name": "Python",
"bytes": "4013"
}
],
"symlink_target": ""
} |
from Tkinter import *
import math # imported for use of sqrt function
from random import randint # imported for use of randint function
# the 2D point class
class Point(object):
# initialize the state of the 2D point
def __init__(self, x=0.0, y=0.0):
self._x = float(x)
self._y = float(y)
# Accessor and Mutators
# Getter
@property
def x(self):
return self._x
# Setter
@x.setter
def x(self, value):
self._x = value
# Getter
@property
def y(self):
return self._y
# Setter
@y.setter
def y(self, value):
self._y = value
# magic function for string representation of 2D point
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
# calculates the distance between two 2D points using the mathematical formula for calculating distance
def dist(self, other):
return "%s" % (math.sqrt( (self.x-other.x)**2 + (self.y-other.y)**2 ) )
# calculates the midpoint between two 2D points using the mathematical formula for calculating midpoint
def midpt(self, other):
return "(%s,%s)" % ( (self.x+other.x)/2, (self.y+other.y)/2 )
# the coordinate system class: (0,0) is in the top-left corner
# inherits from the Canvas class of Tkinter
class CoordinateSystem(Canvas):
# constructor
def __init__(self, master):
# initialize state of coordinate system using constructor of Canvas
Canvas.__init__(self, master, bg="white")
# expand coordinate system widget horizontally/vertically and fill up to maximize space used
self.pack(fill=BOTH, expand=1)
def plotPoints(self, numberOfPoints):
# plot y axis
for i in range(HEIGHT):
# keep horizontal position constant
self.plot( Point(WIDTH/2, i) )
# plot x axis
for i in range(WIDTH):
# keep vertical position constant
self.plot( Point(i, HEIGHT/2) )
# plot points on coordinate system numberOfPoints times
for i in range(numberOfPoints):
self.plot(Point( randint(0, WIDTH - 1), randint(0, HEIGHT - 1) ))
# create an X on the screen
for i in range(WIDTH):
# top left to bottom right
self.plot( Point(i, i) )
# top right to bottom left
self.plot( Point(WIDTH - i - 1, i) )
# plot one point on the coordinate system
def plot(self, point):
# randomly generate a color from the COLORS list
color = COLORS[ randint(0, len(COLORS) - 1) ]
# create a point (defined by top left to bottom right coordinates) on the coordinate system
self.create_oval(point.x, point.y, point.x + POINT_RADIUS * 2, point.y + POINT_RADIUS * 2, outline=color)
##########################################################
# ***DO NOT MODIFY OR REMOVE ANYTHING BELOW THIS POINT!***
# the default size of the canvas is 400x400
WIDTH = 400
HEIGHT = 400
# the default point radius is 0 pixels (i.e., no center to the oval)
POINT_RADIUS = 0
# colors to choose from when plotting points
COLORS = [ "black", "red", "green", "blue", "cyan", "yellow", "magenta" ]
# the number of points to plot
NUM_POINTS = 2500
# create the window
window = Tk()
window.geometry("{}x{}".format(WIDTH, HEIGHT))
window.title("2D Points...Plotted")
# create the coordinate system as a Tkinter canvas inside the window
s = CoordinateSystem(window)
# plot some random points
s.plotPoints(NUM_POINTS)
# wait for the window to close
window.mainloop()
| {
"content_hash": "c3d72d61c5adb23ebe873b469d0b239a",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 107,
"avg_line_length": 30.504587155963304,
"alnum_prop": 0.6655639097744361,
"repo_name": "AbhishekShah212/School_Projects",
"id": "c4f0db885cdc039d292d290c62f3c11e92841722",
"size": "3711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CSC132/plotted2DPoints.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "2830"
},
{
"name": "C",
"bytes": "1877792"
},
{
"name": "C++",
"bytes": "43539"
},
{
"name": "Coq",
"bytes": "9595"
},
{
"name": "Makefile",
"bytes": "335539"
},
{
"name": "Objective-C",
"bytes": "6701"
},
{
"name": "Python",
"bytes": "1554527"
},
{
"name": "Shell",
"bytes": "1879"
},
{
"name": "Verilog",
"bytes": "55119"
}
],
"symlink_target": ""
} |
import sublime, sublime_plugin
class splittobufferCommand(sublime_plugin.TextCommand):
def run(self, edit):
sels = self.view.sel()
self.window = sublime.active_window()
self.make_buffer(sels)
def make_buffer(self, sels):
regions = []
orv = self.window.active_view()
syntax = self.view.settings().get('syntax')
for sel in sels:
buff = self.window.new_file()
clipboard = self.view.substr(sel)
bedit = buff.begin_edit()
buff.insert(bedit, 0, clipboard)
buff.end_edit(bedit)
buff.set_syntax_file(syntax)
regions.append(sel)
self.window.focus_view(orv)
regions.reverse()
for region in regions:
edit = self.view.begin_edit()
self.view.erase(edit, region)
self.view.end_edit(edit)
| {
"content_hash": "7ffc7e1894a725ebfe1b8d17ec0d6234",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 55,
"avg_line_length": 28,
"alnum_prop": 0.6964285714285714,
"repo_name": "berendbaas/sublime_splittobuffer",
"id": "c0b0df975ffd0b79a921dd4adfeca87714dfe245",
"size": "728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "splittobuffer.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from eliot import Field, ActionType
from eliot._validation import ValidationError
from ipaddr import IPv4Address
def _system(name):
return u"flocker:route:" + name
def validate_ipv4_address(value):
if not isinstance(value, IPv4Address):
raise ValidationError(
value,
u"Field %s requires type to be IPv4Address (not %s)" % (
u"target_ip", type(value)))
def serialize_ipv4_address(address):
return unicode(address)
TARGET_IP = Field(
key=u"target_ip",
serializer=serialize_ipv4_address,
extraValidator=validate_ipv4_address,
description=u"The IP address which is the target of a proxy.")
TARGET_PORT = Field.forTypes(
u"target_port", [int],
u"The port number which is the target of a proxy.")
ARGV = Field.forTypes(
u"argv", [list],
u"The argument list of a child process being executed.")
IPTABLES = ActionType(
_system(u"iptables"),
[ARGV],
[],
u"An iptables command which Flocker is executing against the system.")
CREATE_PROXY_TO = ActionType(
_system(u"create_proxy_to"),
[TARGET_IP, TARGET_PORT],
[],
U"Flocker is creating a new proxy.")
DELETE_PROXY = ActionType(
_system(u"delete_proxy"),
[TARGET_IP, TARGET_PORT],
[],
u"Flocker is deleting an existing proxy.")
| {
"content_hash": "c3fcbb4f3322231485cd01c6fa3f8a9c",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 74,
"avg_line_length": 22.879310344827587,
"alnum_prop": 0.6593820648078372,
"repo_name": "beni55/flocker",
"id": "bd4a1f25ecbd4749a46d3713ccab798978d000d9",
"size": "1389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flocker/route/_logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "540895"
},
{
"name": "Ruby",
"bytes": "797"
},
{
"name": "Shell",
"bytes": "3744"
}
],
"symlink_target": ""
} |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VmEndReplayingEvent(vim, *args, **kwargs):
'''This event indicates the end of a replay session on a virtual machine.'''
obj = vim.client.factory.create('ns0:VmEndReplayingEvent')
# do some validation checking...
if (len(args) + len(kwargs)) < 5:
raise IndexError('Expected at least 5 arguments, got: %d' % len(args))
required = [ 'template', 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
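# Hypothetical call sketch: the five required fields can be passed
# positionally (zipped in the order listed above), optional ones as kwargs:
#
#   evt = VmEndReplayingEvent(vim, template, chain_id, created_time, key,
#                             user_name, fullFormattedMessage='replay ended')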
| {
"content_hash": "d0aa0dff2439a35a239a24c86ad7be1d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 124,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.5998300764655905,
"repo_name": "xuru/pyvisdk",
"id": "473e8880c1910b1d39df2af9591781ce299a836d",
"size": "1178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/do/vm_end_replaying_event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
} |
import collect_array as ca
import collect_id as ci
import collect_loop as cl
import collect_device as cd
def print_dict_sorted(mydict):
keys = sorted(mydict)
entries = ""
for key in keys:
value = mydict[key]
entries += "'" + key + "': " + value.__repr__() + ","
return "{" + entries[:-1] + "}"
class GenReverseIdx(object):
def __init__(self):
self.ReverseIdx = dict()
self.ReverseIdx[0] = 1
self.ReverseIdx[1] = 0
def get_reverse_idx(ast):
gen_reverse_idx = GenReverseIdx()
return gen_reverse_idx.ReverseIdx
class GenHostArrayData(object):
def __init__(self):
super(GenHostArrayData, self).__init__()
self.HstId = dict()
self.TransposableHstId = list()
self.Mem = dict()
def collect(self, ast):
arrays_ids = ca.GlobalArrayIds()
arrays_ids.visit(ast)
for n in arrays_ids.ids:
self.HstId[n] = 'hst_ptr' + n
self.Mem[n] = 'hst_ptr' + n + '_mem_size'
transposable_array_ids = ca.get_transposable_array_ids(ast)
for n in transposable_array_ids:
self.HstId[n] = n
self.TransposableHstId.append(n)
def get_mem_names(ast):
host_array_data = GenHostArrayData()
host_array_data.collect(ast)
return host_array_data.Mem
def get_host_ids(ast):
host_array_data = GenHostArrayData()
host_array_data.collect(ast)
return host_array_data.HstId
def gen_transposable_host_ids(ast):
host_array_data = GenHostArrayData()
host_array_data.collect(ast)
return host_array_data.TransposableHstId
def get_kernel_args(ast):
gen_kernel_args = GenKernelArgs()
gen_kernel_args.collect(ast)
return gen_kernel_args.kernel_args
class GenArrayDimNames(object):
def __init__(self):
self.num_array_dims = dict()
self.ArrayIdToDimName = dict()
def collect(self, ast):
num_array_dim = ca.NumArrayDim(ast)
num_array_dim.visit(ast)
self.num_array_dims = num_array_dim.numSubscripts
for array_name, num_dims in num_array_dim.numSubscripts.items():
tmp = list()
for i in xrange(num_dims):
tmp.append('hst_ptr' + array_name + '_dim' + str(i + 1))
self.ArrayIdToDimName[array_name] = tmp
stencil_array_id_to_dim_name = ca.LocalMemArrayIdToDimName()
stencil_array_id_to_dim_name.visit(ast)
for key, value in stencil_array_id_to_dim_name.ArrayIdToDimName.iteritems():
self.ArrayIdToDimName[key] = value
def get_array_id_to_dim_name(ast):
gen_array_dim_names = GenArrayDimNames()
gen_array_dim_names.collect(ast)
return gen_array_dim_names.ArrayIdToDimName
class GenIdxToDim(object):
def __init__(self):
self.IdxToDim = dict()
def collect(self, ast, par_dim=2):
col_li = cl.LoopIndices(par_dim)
col_li.visit(ast)
grid_indices = col_li.grid_indices
for i, n in enumerate(reversed(grid_indices)):
self.IdxToDim[i] = n
class GenKernelArgs(object):
def __init__(self):
self.kernel_args = dict()
def collect(self, ast):
arrays_ids = ca.GlobalArrayIds()
arrays_ids.visit(ast)
array_ids = arrays_ids.ids
# print self.ArrayIds
nonarray_ids = ci.GlobalNonArrayIds()
nonarray_ids.visit(ast)
non_array_ids = nonarray_ids.ids
mytype_ids = ci.GlobalTypeIds()
mytype_ids.visit(ast)
types = mytype_ids.types
gen_removed_ids = GenRemovedIds()
gen_removed_ids.collect(ast)
removed_ids = gen_removed_ids.removed_ids
kernel_arg_defines = ci.get_kernel_arg_defines(ast)
runocl_args = ci.get_runocl_args(ast)
arg_ids = non_array_ids.union(array_ids) - removed_ids - kernel_arg_defines - runocl_args
gen_array_dimnames = GenArrayDimNames()
gen_array_dimnames.collect(ast)
num_array_dims = gen_array_dimnames.num_array_dims
arrayid_to_dimname = gen_array_dimnames.ArrayIdToDimName
for n in arg_ids:
tmplist = {n}
try:
if num_array_dims[n] == 2:
tmplist.add(arrayid_to_dimname[n][0])
except KeyError:
pass
for m in tmplist - kernel_arg_defines:
self.kernel_args[m] = types[m]
class GenRemovedIds(object):
def __init__(self):
self.removed_ids = set()
def collect(self, ast):
grid_indices = cl.get_grid_indices(ast)
col_loop_limit = cl.LoopLimit()
col_loop_limit.visit(ast)
upper_limit = col_loop_limit.upper_limit
upper_limits = set(upper_limit[i] for i in grid_indices)
my_kernel = cd.get_kernel(ast)
ids_still_in_kernel = ci.Ids()
ids_still_in_kernel.visit(my_kernel)
self.removed_ids = upper_limits - ids_still_in_kernel.ids
def get_removed_ids(ast):
gen_removed_ids = GenRemovedIds()
gen_removed_ids.collect(ast)
return gen_removed_ids.removed_ids
class GenLocalArrayIdx(object):
def __init__(self):
self.IndexToLocalVar = dict()
def collect(self, ast):
par_dim = cl.get_par_dim(ast)
col_li = cl.LoopIndices(par_dim)
col_li.visit(ast)
grid_indices = col_li.grid_indices
for var in grid_indices:
self.IndexToLocalVar[var] = 'l' + var
def get_local_array_idx(ast):
gen_local_array_idx = GenLocalArrayIdx()
gen_local_array_idx.collect(ast)
return gen_local_array_idx.IndexToLocalVar
class GenIdxToThreadId(object):
def __init__(self):
self.IndexToThreadId = dict()
def collect(self, ast):
par_dim = cl.get_par_dim(ast)
col_li = cl.LoopIndices(par_dim)
col_li.visit(ast)
grid_indices = col_li.grid_indices
for i, n in enumerate(reversed(grid_indices)):
self.IndexToThreadId[n] = 'get_global_id(' + str(i) + ')'
def gen_idx_to_dim(ast):
par_dim = cl.get_par_dim(ast)
gi_to_dim = GenIdxToDim()
gi_to_dim.collect(ast, par_dim)
return gi_to_dim.IdxToDim
| {
"content_hash": "dd70135b35b43e3712467b4f06e31f31",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 97,
"avg_line_length": 27.517857142857142,
"alnum_prop": 0.609344581440623,
"repo_name": "dikujepsen/OpenTran",
"id": "cc51c9cc3969bec72bdfc6742b67c8c88dea867a",
"size": "6164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/framework/processing/collect_gen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "109"
},
{
"name": "C",
"bytes": "50583"
},
{
"name": "C++",
"bytes": "2400250"
},
{
"name": "Makefile",
"bytes": "8272"
},
{
"name": "Matlab",
"bytes": "6479"
},
{
"name": "Python",
"bytes": "1125197"
},
{
"name": "Shell",
"bytes": "4560"
}
],
"symlink_target": ""
} |
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import pytest
from pytablewriter import FormatAttr, TableFormat
class Test_TableFormat_search_table_format:
@pytest.mark.parametrize(
["value", "expected"],
[
[
FormatAttr.TEXT,
[
TableFormat.ASCIIDOC,
TableFormat.BOLD_UNICODE,
TableFormat.BORDERLESS,
TableFormat.CSS,
TableFormat.CSV,
TableFormat.HTML,
TableFormat.JAVASCRIPT,
TableFormat.JSON,
TableFormat.JSON_LINES,
TableFormat.LATEX_MATRIX,
TableFormat.LATEX_TABLE,
TableFormat.LTSV,
TableFormat.MARKDOWN,
TableFormat.MEDIAWIKI,
TableFormat.NUMPY,
TableFormat.PANDAS,
TableFormat.PYTHON,
TableFormat.RST_CSV_TABLE,
TableFormat.RST_GRID_TABLE,
TableFormat.RST_SIMPLE_TABLE,
TableFormat.SPACE_ALIGNED,
TableFormat.TOML,
TableFormat.TSV,
TableFormat.UNICODE,
TableFormat.YAML,
],
],
[
FormatAttr.FILE,
[
TableFormat.ASCIIDOC,
TableFormat.CSS,
TableFormat.CSV,
TableFormat.EXCEL_XLS,
TableFormat.EXCEL_XLSX,
TableFormat.HTML,
TableFormat.JAVASCRIPT,
TableFormat.JSON,
TableFormat.JSON_LINES,
TableFormat.LATEX_MATRIX,
TableFormat.LATEX_TABLE,
TableFormat.LTSV,
TableFormat.MARKDOWN,
TableFormat.MEDIAWIKI,
TableFormat.NUMPY,
TableFormat.PANDAS,
TableFormat.PANDAS_PICKLE,
TableFormat.PYTHON,
TableFormat.RST_CSV_TABLE,
TableFormat.RST_GRID_TABLE,
TableFormat.RST_SIMPLE_TABLE,
TableFormat.SPACE_ALIGNED,
TableFormat.SQLITE,
TableFormat.TOML,
TableFormat.TSV,
TableFormat.YAML,
],
],
[
FormatAttr.BIN,
[
TableFormat.EXCEL_XLS,
TableFormat.EXCEL_XLSX,
TableFormat.SQLITE,
TableFormat.PANDAS_PICKLE,
],
],
[FormatAttr.API, [TableFormat.ELASTICSEARCH]],
[0, []],
],
)
def test_normal(self, value, expected):
assert set(TableFormat.find_all_attr(value)) == set(expected)
class Test_TableFormat_from_name:
@pytest.mark.parametrize(
["value", "expected"],
[
["csv", TableFormat.CSV],
["CSV", TableFormat.CSV],
["excel", TableFormat.EXCEL_XLSX],
],
)
def test_normal(self, value, expected):
assert TableFormat.from_name(value) == expected
class Test_TableFormat_from_file_extension:
@pytest.mark.parametrize(
["value", "expected"],
[
["csv", TableFormat.CSV],
[".CSV", TableFormat.CSV],
["xlsx", TableFormat.EXCEL_XLSX],
["md", TableFormat.MARKDOWN],
],
)
def test_normal(self, value, expected):
assert TableFormat.from_file_extension(value) == expected
| {
"content_hash": "6bdf724919fcb5804def2067c9705122",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 69,
"avg_line_length": 33.043103448275865,
"alnum_prop": 0.46230106965823115,
"repo_name": "thombashi/pytablewriter",
"id": "446ad40c79a90d0d2f1550e955849188a473b9df",
"size": "3833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_table_format.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2366"
},
{
"name": "Python",
"bytes": "649545"
}
],
"symlink_target": ""
} |
from urllib2 import Request,urlopen,URLError
import mysql.connector
import time
import datetime
import os
import pickle
import pandas as pd
import threading
from pandas.io import sql
from sqlalchemy import create_engine
def getPrice(tickers):
link = 'http://download.finance.yahoo.com/d/quotes.csv?'
arguments = 'f=aa2bb2b3b4cc1c3c4c6c8dd1d2ee1e7e8e9ghjkg1g3g4g5g6ii5j1j3j4j5j6k1k2k4k5ll1l2l3mm2m3m4m5m6m7m8nn4opp1p2p5p6qrr1r2r5r6r7ss1s7t1t7t8vv1v7ww1w4xy&'
req = link+arguments+tickers
request= Request(req)
columns = ['a','a2','b','b2','b3','b4','c','c1','c3','c4','c6','c8','d','d1','d2','e','e1','e7','e8','e9','g','h','j','k','g1','g3','g4','g5','g6','i','i5','j1','j3','j4','j5','j6','k1','k2','k4','k5','l','l1','l2','l3','m','m2','m3','m4','m5','m6','m7','m8','n','n4','o','p','p1','p2','p5','p6','q','r','r1','r2','r5','r6','r7','s','s1','s7','t1','t7','t8','v','v1','v7','w','w1','w4','x','y']
try:
frameEach = pd.DataFrame(index = None)
resp = urlopen(request)
data = resp.read()
frameEach = pd.read_csv(urlopen(request),header = None)
except URLError, e:
print "oops! can't connect"
return None
#frameEach = pd.concat([frameEach,tickerFrame],axis = 1)
frameEach.columns = columns
global tickerFrame
frameEach['tickerName']=tickerFrame
st = getCurrentTimestamp()
frameEach['timestamp'] = st
print frameEach
global frame
frame = frame.append(frameEach)
return
def getCurrentTimestamp():
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
return st
def getTimestampData(timestamp):
try:
data = pickle.load(open("cisco.p","rb"))
except (OSError,IOError) as E:
print " no file to read from "
return None
timestampData = {}
for tick,values in data.iteritems():
for timestamps,prices in values.iteritems():
if(timestamps == timestamp):
timestampData[tick] = prices
return timestampData
#read tickers from file and creates the argument list for api call
def concatTickers(filename):
with open(filename) as tickerNames:
tickerList = tickerNames.readlines()
if not tickerList:
print "empty file"
return None
tickers = 's='
global tickerFrame
fullList = []
for eachTicker in tickerList:
eachTicker = eachTicker.rstrip()
values = eachTicker.split(' ')
eachTicker = values[-1]
fullList.append(eachTicker)
tickers+=eachTicker
tickers+=','
tickers = tickers[:-1]
tickerFrame = pd.DataFrame()
#dataframe for tickers
tickerFrame['TickerName'] = fullList
return tickers
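# e.g. a tickers file containing the lines "AAPL" and "MSFT" produces the
# API argument string 's=AAPL,MSFT' (the trailing comma is stripped).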
def writeToDB(frame):
engine = create_engine('mysql+mysqlconnector://honeybee:honeybee@127.0.0.1/stocksdb', echo=False)
frame.to_sql(name='financialTable', con=engine, if_exists = 'append', index=False)
def job():
#threading.Timer(1,job).start()
#runs twice for 300 companies
while True:
global tickers
getPrice(tickers)
if len(frame) == 600:
print "len=600"
return
time.sleep(10)
def main():
global frame
frame = pd.DataFrame()
filename = 'tickers300.txt'
global tickers
tickers = concatTickers(filename)
job()
# frame.to_csv('out.csv',sep = ',')
assert frame['s'].all() == frame['tickerName'].all()
frame = frame.astype(object).where(pd.notnull(frame), None)
writeToDB(frame)
if __name__ == "__main__": main()
| {
"content_hash": "16f903532aa39f3ff9b3a594133ae75a",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 395,
"avg_line_length": 29.807339449541285,
"alnum_prop": 0.6897506925207756,
"repo_name": "viep/StockScraper",
"id": "c588ebd3f783818f355127dbe3e7110183575e77",
"size": "3250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker-stockScraper/stockScraper/stockScraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10428"
}
],
"symlink_target": ""
} |
"""
enhance and wrap window for using convenience
Tested environment:
Mac OS X 10.6.8
http://doc.qt.nokia.com/latest/qdesktopwidget.html
http://www.pyside.org/docs/pyside/PySide/QtGui/QWidget.html
"""
__all__ = [
"auto_set_geometry",
"AutoSaveGeo",
"CustomDlg",
"CustomWin",
"CustomSheetWin",
]
import json
import os
from PySide import QtGui, QtCore
class AutoSaveGeo(QtGui.QMainWindow):
""" auto save (window) widget geometry before it destroy, and restore its geometry at next time. """
def __init__(self, user_data_path, w = 300, h = 500, parent = None):
super(AutoSaveGeo, self).__init__(parent)
self.resize(w, h)
self.user_data_path = user_data_path
if self.user_data_path:
self._load_win_geo()
def closeEvent(self, evt):
if hasattr(self, "user_data_path") and self.user_data_path:
self._save_win_geo()
return super(AutoSaveGeo, self).closeEvent(evt)
    def _save_win_geo(self):
        config_path = os.path.join(self.user_data_path, "win_geometry.json")
        if not os.path.exists(self.user_data_path):
            os.makedirs(self.user_data_path)
        if os.path.exists(config_path):
            with open(config_path) as f:
                buf = f.read()
        else:
            buf = None
        datas = json.loads(buf) if buf else None
        if not datas:
            datas = {}
        # Geometry is stored per concrete class name.
        datas[self.__class__.__name__] = dict(
            x = self.x(),
            y = self.y(),
            w = self.width(),
            h = self.height())
        with open(config_path, "w") as f:
            f.write(json.dumps(datas))
    def _load_win_geo(self):
        config_path = os.path.join(self.user_data_path, "win_geometry.json")
        if not os.path.exists(self.user_data_path):
            os.makedirs(self.user_data_path)
        # Fall back to a centered window if no saved geometry is found.
        desktop = QtGui.QApplication.desktop()
        x = (desktop.width() - self.width()) / 2
        y = (desktop.height() - self.height()) / 2
        w = self.width()
        h = self.height()
        if os.path.exists(config_path):
            with open(config_path) as f:
                buf = f.read()
        else:
            buf = None
        datas = json.loads(buf) if buf else None
        if datas:
            geo = datas.get(self.__class__.__name__)
            if geo:
                x, y, w, h = geo['x'], geo['y'], geo['w'], geo['h']
        self.setGeometry(x, y, w, h)
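# Usage sketch (hypothetical MainWin; assumes a QApplication is running):
#
#   class MainWin(AutoSaveGeo):
#       def __init__(self):
#           super(MainWin, self).__init__(user_data_path = "/tmp/myapp")
#
# Closing MainWin writes its geometry to /tmp/myapp/win_geometry.json under
# the key "MainWin"; the next MainWin instance restores it automatically.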
class CustomDlg(QtGui.QDialog):
"""
Custom dialog template.
You should override there method:
- __init__
- get_inputs
- popup_and_get_inputs
"""
def __init__(self, parent, settings):
""" You should override this method """
super(CustomDlg, self).__init__(parent)
self.resize(400, 250)
self._settings = settings
# add custom sub-widgets here ...
def show_and_raise(self):
self.show()
self.raise_()
    def keyPressEvent(self, evt):
        close_win_cmd_w = (evt.key() == QtCore.Qt.Key_W and evt.modifiers() == QtCore.Qt.ControlModifier)
        close_win_esc = (evt.key() == QtCore.Qt.Key_Escape)
        if close_win_cmd_w or close_win_esc:
            self.close()
        return super(CustomDlg, self).keyPressEvent(evt)
def get_inputs(self):
""" You should override this method
update self._settings from custom sub-widgets ...
"""
return self._settings
@staticmethod
def popup_and_get_inputs(parent, settings):
""" You should override this method """
dlg = CustomDlg(parent, settings)
dlg.show()
dlg.exec_()
return dlg.get_inputs()
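# Override sketch (hypothetical SettingsDlg with a single QLineEdit; field
# names are illustrative, not part of the template):
#
#   class SettingsDlg(CustomDlg):
#       def __init__(self, parent, settings):
#           super(SettingsDlg, self).__init__(parent, settings)
#           self._name_edit = QtGui.QLineEdit(self)
#
#       def get_inputs(self):
#           self._settings['name'] = self._name_edit.text()
#           return self._settings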
class CustomWin(QtGui.QWidget):
"""
Custom window template.
You should override there method:
- __init__
- get_inputs
- popup_and_get_inputs
"""
def __init__(self, parent, settings):
""" You should override this method """
super(CustomWin, self).__init__(parent)
self.resize(400, 250)
self._settings = settings
# add custom sub-widgets here ...
def show_and_raise(self):
self.show()
self.raise_()
    def keyPressEvent(self, evt):
        close_win_cmd_w = (evt.key() == QtCore.Qt.Key_W and evt.modifiers() == QtCore.Qt.ControlModifier)
        close_win_esc = (evt.key() == QtCore.Qt.Key_Escape)
        if close_win_cmd_w or close_win_esc:
            self.close()
        return super(CustomWin, self).keyPressEvent(evt)
def get_inputs(self):
""" You should override this method
update self._settings from custom sub-widgets ...
"""
return self._settings
@staticmethod
def popup_and_get_inputs(parent, settings):
""" You should override this method """
dlg = CustomWin(parent, settings)
dlg.show()
return dlg.get_inputs()
class CustomSheetWin(QtGui.QWidget):
def __init__(self, parent = None):
super(CustomSheetWin, self).__init__(parent)
self.resize(400, 300)
self.setWindowFlags(QtCore.Qt.Sheet)
def closeEvent(self, evt):
self.emit(QtCore.SIGNAL("sheet_window_close( QWidget * )"), self)
return QtGui.QWidget.closeEvent(self, evt)
def emit_and_close(self, signal_name = "sheet_window_close_with_accept( QWidget * )"):
self.close()
self.emit(QtCore.SIGNAL(signal_name), self)
def keyPressEvent(self, evt):
close_win_cmd_w = (evt.key() == QtCore.Qt.Key_W and evt.modifiers() == QtCore.Qt.ControlModifier)
close_win_esc = (evt.key() == QtCore.Qt.Key_Escape)
if close_win_cmd_w or close_win_esc:
self.close()
return super(CustomSheetWin, self).keyPressEvent(evt)
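# Connection sketch (old-style signals; `owner` and `on_sheet_closed` are
# hypothetical names for the widget that created the sheet):
#
#   sheet = CustomSheetWin(owner)
#   owner.connect(sheet, QtCore.SIGNAL("sheet_window_close( QWidget * )"),
#                 owner.on_sheet_closed)
#   sheet.show()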
_auto_set_geometry_offset_is_zero_if_more_than = 5
_auto_set_geometry_offset_last_x = 0
_auto_set_geometry_offset_step = 20
def _get_offset_for_auto_set_geometry():
    """ Advance a shared cascade offset by one step per call, wrapping back to
    0 after `_auto_set_geometry_offset_is_zero_if_more_than` steps so that
    successively opened windows stay on screen. """
    global _auto_set_geometry_offset_is_zero_if_more_than
    global _auto_set_geometry_offset_last_x
    global _auto_set_geometry_offset_step
    if _auto_set_geometry_offset_last_x > 0:
        steps = _auto_set_geometry_offset_last_x / _auto_set_geometry_offset_step
        if steps >= _auto_set_geometry_offset_is_zero_if_more_than:
            _auto_set_geometry_offset_last_x = 0
        else:
            _auto_set_geometry_offset_last_x += _auto_set_geometry_offset_step
    else:
        _auto_set_geometry_offset_last_x += _auto_set_geometry_offset_step
    offset_x = offset_y = _auto_set_geometry_offset_last_x
    return offset_x, offset_y
def auto_set_geometry(primary, secondary):
""" auto set the geometry of secondary window base on primary window geometry """
desktop = QtGui.QApplication.desktop()
px = primary.x()
primary_in_left_screen = (desktop.width() / 2 - primary.width() / 2) >= px
if primary_in_left_screen:
secondary_x_start = px + primary.width() + (_auto_set_geometry_offset_step / 4)
else:
secondary_x_start = px - primary.width() - (_auto_set_geometry_offset_step / 4)
secondary_y_start = (desktop.height() / 2) - (secondary.height() / 2) - _auto_set_geometry_offset_step
offset_x, offset_y = _get_offset_for_auto_set_geometry()
secondary.move(secondary_x_start + offset_x, secondary_y_start + offset_y)
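# For a runnable demo of auto_set_geometry(), see
# test_use_auto_set_secondary_win_geometry() below.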
def test_use_custom_dlg():
class CustomDlgDemo(AutoSaveGeo):
def __init__(self, parent = None, user_data_path = None):
super(CustomDlgDemo, self).__init__(parent = parent, user_data_path = user_data_path)
settings = {}
new_settings = CustomDlg.popup_and_get_inputs(parent = self, settings = settings)
print "new_settings:", new_settings
def show_and_raise(self):
self.show()
self.raise_()
app_name = "foo"
#tmp_path = os.getenv("TMP") or "/tmp"
PWD = os.path.dirname(os.path.realpath(__file__))
tmp_path = PWD
app_data_path = os.path.join(tmp_path, app_name)
app = QtGui.QApplication(sys.argv)
demo = CustomDlgDemo(user_data_path = app_data_path)
demo.show_and_raise()
sys.exit(app.exec_())
def test_use_auto_set_secondary_win_geometry():
class SecondaryWindow(QtGui.QWidget):
def __init__(self, name = ""):
super(SecondaryWindow, self).__init__()
self.setWindowTitle('Window #%s' % name)
self.resize(200, 200)
def keyPressEvent(self, evt):
close_win_cmd_w = (evt.key() == QtCore.Qt.Key_W and evt.modifiers() == QtCore.Qt.ControlModifier)
close_win_esc = (evt.key() == QtCore.Qt.Key_Escape)
if close_win_cmd_w or close_win_esc:
self.close()
class AutoSetGeoDemo(QtGui.QWidget):
def __init__(self):
super(AutoSetGeoDemo, self).__init__()
x, y, w, h = 500, 200, 300, 400
self.setGeometry(x, y, w, h)
btn = QtGui.QPushButton("create", self)
btn.clicked.connect(self._btn_cb)
btn.move(20, 20)
# following is optional
self.win_list = []
def _btn_cb(self):
# following is optional
win_name = str(len(self.win_list))
secondary_win_obj = SecondaryWindow(name = win_name)
auto_set_geometry(primary = self, secondary = secondary_win_obj)
secondary_win_obj.show()
# following is optional
self.win_list.append(secondary_win_obj)
def show_and_raise(self):
self.show()
self.raise_()
app = QtGui.QApplication(sys.argv)
demo = AutoSetGeoDemo()
demo.show_and_raise()
sys.exit(app.exec_())
#if __name__ == "__main__":
# test_use_custom_dlg()
# test_use_auto_set_secondary_win_geometry() | {
"content_hash": "5c81663a2b4f6cb3304e6805efdd8ab0",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 109,
"avg_line_length": 28.626436781609197,
"alnum_prop": 0.5770929532222445,
"repo_name": "alexlib/Qt-Python-Binding-Examples",
"id": "0a4e2108ebf8bc8953c5009cc7311495c08cc339",
"size": "9984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qcommons/qwinutils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "251904"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tagit.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| {
"content_hash": "39b75069886b91d1f00a7958eb2a4613",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.19047619047619,
"alnum_prop": 0.6197183098591549,
"repo_name": "ned2/tator",
"id": "61f6006071e646bf4eceac4e1589a96c976b1cfb",
"size": "803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tator/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1416"
},
{
"name": "HTML",
"bytes": "327019"
},
{
"name": "JavaScript",
"bytes": "5439"
},
{
"name": "Jupyter Notebook",
"bytes": "90740"
},
{
"name": "Python",
"bytes": "39551"
},
{
"name": "TeX",
"bytes": "54227"
}
],
"symlink_target": ""
} |
import json
class JsonResponse(object):
def __init__(self, result_key):
self.result_key = result_key
def format_job_for_response(self, job, final_output_key):
job_info = {
'id': job.id,
'kind': job.kind,
}
        # If there were errors, record them in the response meta.
        if job.get('errors'):
            job_info['errors'] = job.get('errors')
meta = {
'job': job_info,
}
data = job.output.get(final_output_key, '')
return json.dumps({
'meta': meta,
'resp': data,
})
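# Usage sketch (hypothetical `job` exposing id/kind/output attributes and a
# dict-style get() for errors):
#   formatter = JsonResponse('html')
#   body = formatter.format_job_for_response(job, final_output_key='html')
#   # -> '{"meta": {"job": {"id": 1, "kind": "fetch"}}, "resp": "..."}'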
| {
"content_hash": "9ca55d17aaa888473462627c8c40ef7e",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 61,
"avg_line_length": 22.037037037037038,
"alnum_prop": 0.48739495798319327,
"repo_name": "voidfiles/dabba",
"id": "69e0cf3f219aa08f9f19f435dfecdeeb6149083a",
"size": "595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dabba/pipeline/results.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15082"
}
],
"symlink_target": ""
} |