| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
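The three text columns are a fill-in-the-middle split of each source file: concatenating `prefix + middle + suffix` reproduces the file, and the remaining columns are per-file metadata. A minimal sketch of reassembling one row, assuming rows are plain dicts keyed by the column names; the helper name and the abbreviated field values are illustrative:

```python
def reassemble(row: dict) -> str:
    """Join the three text columns back into the original source file."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Abbreviated from the first row of the table below.
example_row = {
    "repo_name": "ncclient/ncclient",
    "path": "examples/vendor/juniper/unknown-rpc.py",
    "language": "Python",
    "license": "apache-2.0",
    "size": 861,
    "score": 0.002323,
    "prefix": "#!/usr/bin/env python\nimport logging\nfrom ncclient",
    "middle": " import manager\n# ... middle of the file ...",
    "suffix": "\n# ... rest of the file ...",
}

print(reassemble(example_row))
```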
| ncclient/ncclient | examples/vendor/juniper/unknown-rpc.py | Python | apache-2.0 | 861 | 0.002323 |
#!/usr/bin/env python
import logging
import sys
from ncclient import manager
from ncclient.xml_ import *
def connect(host, port, user, password):
conn = manager.connect(host=host,
port=port,
username=user,
password=password,
timeout=60,
device_params={'name': 'junos'},
hostkey_verify=False)
result = conn.get_software_information('brief', test='me')
logging.info(result)
result = conn.get_chassis_inventory('extensive')
logging.info(result)
if __name__ == '__main__':
LOG_FORMAT = '%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=LOG_FORMAT)
connect('router', 830, 'netconf', 'juniper!')
| tannishk/airmozilla | airmozilla/manage/urls.py | Python | bsd-3-clause | 12,711 | 0 |
from django.conf.urls import patterns, url
from .views import (
picturegallery,
dashboard,
events,
approvals,
suggestions,
vidly_media,
comments,
loggedsearches,
users,
groups,
channels,
locations,
regions,
templates,
topics,
tags,
recruitmentmessages,
surveys,
staticpages,
url_transforms,
cronlogger,
permissions,
autocompeter,
uploads,
taskstester,
)
urlpatterns = patterns(
'',
url(r'^/?$', dashboard.dashboard, name='dashboard'),
url(r'^data/$', dashboard.dashboard_data, name='dashboard_data'),
url(r'^graphs/$', dashboard.dashboard_graphs, name='dashboard_graphs'),
url(r'^graphs/data/$', dashboard.dashboard_data_graphs,
name='dashboard_data_graphs'),
url(r'^users/(?P<id>\d+)/$', users.user_edit, name='user_edit'),
url(r'^users/$', users.users, name='users'),
url(r'^users/data/$', users.users_data, name='users_data'),
url(r'^groups/(?P<id>\d+)/$', groups.group_edit, name='group_edit'),
url(r'^groups/remove/(?P<id>\d+)/$', groups.group_remove,
name='group_remove'),
url(r'^groups/new/$', groups.group_new, name='group_new'),
url(r'^groups/$', groups.groups, name='groups'),
url(r'^events/request/$', events.event_request, name='event_request'),
url(r'^events/(?P<id>\d+)/$', events.event_edit, name='event_edit'),
url(r'^events/(?P<id>\d+)/privacy-vidly-mismatch/$',
events.event_privacy_vidly_mismatch,
name='event_privacy_vidly_mismatch'),
url(r'^events/(?P<id>\d+)/assignment/$',
events.event_assignment,
name='event_assignment'),
url(r'^events/(?P<id>\d+)/transcript/$',
events.event_transcript,
name='event_transcript'),
url(r'^events/(?P<id>\d+)/upload/$',
events.event_upload,
name='event_upload'),
url(r'^events/(?P<id>\d+)/vidly-submissions/$',
events.event_vidly_submissions,
name='event_vidly_submissions'),
url(r'^events/(?P<id>\d+)/vidly-submissions/submission'
r'/(?P<submission_id>\d+)/$',
events.event_vidly_submission,
name='event_vidly_submission'),
url(r'^events/(?P<id>\d+)/comments/$',
events.event_comments,
name='event_comments'),
url(r'^events/(?P<id>\d+)/comments/configuration/$',
events.event_discussion,
name='event_discussion'),
url(r'^events/(?P<id>\d+)/stop-live/$', events.event_stop_live,
name='stop_live_event'),
url(r'^events/(?P<id>\d+)/delete/$', events.event_delete,
name='event_delete'),
url(r'^events/(?P<id>\d+)/survey/$', events.event_survey,
name='event_survey'),
url(r'^events/(?P<id>\d+)/tweets/$', events.event_tweets,
name='event_tweets'),
url(r'^events/(?P<id>\d+)/tweets/new/$', events.new_event_tweet,
name='new_event_tweet'),
url(r'^events/all/tweets/$', events.all_event_tweets,
name='all_event_tweets'),
url(r'^events/archive/(?P<id>\d+)/$', events.event_archive,
name='event_archive'),
url(r'^events/archive/(?P<id>\d+)/auto/$',
events.event_archive_auto,
name='event_archive_auto'),
url(r'^events/(?P<id>\d+)/archive-time/$', events.event_archive_time,
name='event_archive_time'),
url(r'^events/fetch/duration/(?P<id>\d+)/$',
events.event_fetch_duration,
name='event_fetch_duration'),
url(r'^events/fetch/screencaptures/(?P<id>\d+)/$',
events.event_fetch_screencaptures,
name='event_fetch_screencaptures'),
url(r'^events/duplicate/(?P<duplicate_id>\d+)/$', events.event_request,
name='event_duplicate'),
url(r'^events/vidlyurltoshortcode/(?P<id>\d+)/',
events.vidly_url_to_shortcode,
name='vidly_url_to_shortcode'),
url(r'^events/hits/$', events.event_hit_stats, name='event_hit_stats'),
url(r'^events/assignments/$',
events.event_assignments,
name='event_assignments'),
url(r'^events/assignments.ics$',
events.event_assignments_ical,
name='event_assignments_ical'),
url(r'^events/$', events.events, name='events'),
url(r'^events/data/$', events.events_data, name='events_data'),
url(r'^events/redirect_thumbnail/(?P<id>\d+)/$',
events.redirect_event_thumbnail,
name='redirect_event_thumbnail'),
url(r'^surveys/$', surveys.surveys_, name='surveys'),
url(r'^surveys/new/$', surveys.survey_new, name='survey_new'),
url(r'^surveys/(?P<id>\d+)/$', surveys.survey_edit, name='survey_edit'),
url(r'^surveys/(?P<id>\d+)/delete/$', surveys.survey_delete,
name='survey_delete'),
url(r'^surveys/(?P<id>\d+)/questions/$', surveys.survey_questions,
name='survey_questions'),
url(r'^surveys/(?P<id>\d+)/question/(?P<question_id>\d+)/$',
surveys.survey_question_edit,
name='survey_question_edit'),
url(r'^surveys/(?P<id>\d+)/question/(?P<question_id>\d+)/delete/$',
surveys.survey_question_delete,
name='survey_question_delete'),
url(r'^surveys/(?P<id>\d+)/question/new/$',
surveys.survey_question_new,
name='survey_question_new'),
url(r'^comments/$', comments.all_comments, name='all_comments'),
url(r'^comments/(?P<id>\d+)/$',
comments.comment_edit,
name='comment_edit'),
url(r'^events-autocomplete/$', events.event_autocomplete,
name='event_autocomplete'),
url(r'^channels/new/$', channels.channel_new, name='channel_new'),
url(r'^channels/(?P<id>\d+)/$', channels.channel_edit,
name='channel_edit'),
url(r'^channels/remove/(?P<id>\d+)/$', channels.channel_remove,
name='channel_remove'),
url(r'^channels/$', channels.channels, name='channels'),
url(r'^templates/env-autofill/$', templates.template_env_autofill,
name='template_env_autofill'),
url(r'^templates/new/$', templates.template_new, name='template_new'),
url(r'^templates/(?P<id>\d+)/$', templates.template_edit,
name='template_edit'),
url(r'^templates/(?P<id>\d+)/migrate/$', templates.template_migrate,
name='template_migrate'),
url(r'^templates/remove/(?P<id>\d+)/$', templates.template_remove,
name='template_remove'),
url(r'^templates/$', templates.templates, name='templates'),
url(r'^tags/$', tags.tags, name='tags'),
url(r'^tags/data/$', tags.tags_data, name='tags_data'),
url(r'^tags/(?P<id>\d+)/$', tags.tag_edit, name='tag_edit'),
url(r'^tags/remove/(?P<id>\d+)/$', tags.tag_remove, name='tag_remove'),
url(r'^tags/merge/(?P<id>\d+)/$', tags.tag_merge, name='tag_merge'),
url(r'^locations/new/$', locations.location_new, name='location_new'),
url(r'^locations/(?P<id>\d+)/$', locations.location_edit,
name='location_edit'),
url(r'^locations/remove/(?P<id>\d+)/$', locations.location_remove,
name='location_remove'),
url(r'^locations/tz/$', locations.location_timezone,
name='location_timezone'),
url(r'^locations/$', locations.locations, name='locations'),
url(r'^regions/new/$', regions.region_new, name='region_new'),
url(r'^regions/(?P<id>\d+)/$', regions.region_edit,
name='region_edit'),
url(r'^regions/remove/(?P<id>\d+)/$', regions.region_remove,
name='region_remove'),
url(r'^regions/$', regions.regions, name='regions'),
url(r'^topics/new/$', topics.topic_new, name='topic_new'),
url(r'^topics/(?P<id>\d+)/$', topics.topic_edit,
name='topic_edit'),
url(r'^topics/remove/(?P<id>\d+)/$', topics.topic_remove,
name='topic_remove'),
url(r'^topics/$', topics.topics, name='topics'),
url(r'^approvals/$', approvals.approvals, name='approvals'),
url(r'^approvals/reconsider/$', approvals.approval_reconsider,
name='approval_reconsider'),
url(r'^approvals/(?P<id>\d+)/$', approvals.approval_review,
name='approval_review'),
url(r'^pages/$', staticpages.staticpages, name='staticpages'),
url(r'^pages/new/$', staticpages.staticpage_new, name='staticpage_new'),
url(r'^pages/(?P<id>\d+)/$', staticpages.staticpage_edit,
name='staticpage_edit'),
url(
| starrify/scrapy | scrapy/downloadermiddlewares/httpcache.py | Python | bsd-3-clause | 5,481 | 0.001642 |
from email.utils import formatdate
from typing import Optional, Type, TypeVar
from twisted.internet import defer
from twisted.internet.error import (
ConnectError,
ConnectionDone,
ConnectionLost,
ConnectionRefusedError,
DNSLookupError,
TCPTimedOutError,
TimeoutError,
)
from twisted.web.client import ResponseFailed
from scrapy import signals
from scrapy.crawler import Crawler
from scrapy.exceptions import IgnoreRequest, NotConfigured
from scrapy.http.request import Request
from scrapy.http.response import Response
from scrapy.settings import Settings
from scrapy.spiders import Spider
from scrapy.statscollectors import StatsCollector
from scrapy.utils.misc import load_object
HttpCacheMiddlewareTV = TypeVar("HttpCacheMiddlewareTV", bound="HttpCacheMiddleware")
class HttpCacheMiddleware:
DOWNLOAD_EXCEPTIONS = (defer.TimeoutError, TimeoutError, DNSLookupError,
ConnectionRefusedError, ConnectionDone, ConnectError,
ConnectionLost, TCPTimedOutError, ResponseFailed,
IOError)
def __init__(self, settings: Settings, stats: StatsCollector) -> None:
if not settings.getbool('HTTPCACHE_ENABLED'):
raise NotConfigured
self.policy = load_object(settings['HTTPCACHE_POLICY'])(settings)
self.storage = load_object(settings['HTTPCACHE_STORAGE'])(settings)
self.ignore_missing = settings.getbool('HTTPCACHE_IGNORE_MISSING')
self.stats = stats
@classmethod
def from_crawler(cls: Type[HttpCacheMiddlewareTV], crawler: Crawler) -> HttpCacheMiddlewareTV:
o = cls(crawler.settings, crawler.stats)
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
return o
def spider_opened(self, spider: Spider) -> None:
self.storage.open_spider(spider)
def spider_closed(self, spider: Spider) -> None:
self.storage.close_spider(spider)
def process_request(self, request: Request, spider: Spider) -> Optional[Response]:
if request.meta.get('dont_cache', False):
return None
# Skip uncacheable requests
if not self.policy.should_cache_request(request):
request.meta['_dont_cache'] = True # flag as uncacheable
return None
# Look for cached response and check if expired
cachedresponse = self.storage.retrieve_response(spider, request)
if cachedresponse is None:
self.stats.inc_value('httpcache/miss', spider=spider)
if self.ignore_missing:
self.stats.inc_value('httpcache/ignore', spider=spider)
raise IgnoreRequest("Ignored request not in cache: %s" % request)
return None # first time request
# Return cached response only if not expired
cachedresponse.flags.append('cached')
if self.policy.is_cached_response_fresh(cachedresponse, request):
self.stats.inc_value('httpcache/hit', spider=spider)
return cachedresponse
# Keep a reference to cached response to avoid a second cache lookup on
# process_response hook
request.meta['cached_response'] = cachedresponse
return None
def process_response(self, request: Request, response: Response, spider: Spider) -> Response:
if request.meta.get('dont_cache', False):
return response
# Skip cached responses and uncacheable requests
if 'cached' in response.flags or '_dont_cache' in request.meta:
request.meta.pop('_dont_cache', None)
return response
# RFC2616 requires origin server to set Date header,
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.18
if 'Date' not in response.headers:
response.headers['Date'] = formatdate(usegmt=True)
# Do not validate first-hand responses
cachedresponse = request.meta.pop('cached_response', None)
if cachedresponse is None:
self.stats.inc_value('httpcache/firsthand', spider=spider)
self._cache_response(spider, response, request, cachedresponse)
return response
if self.policy.is_cached_response_valid(cachedresponse, response, request):
self.stats.inc_value('httpcache/revalidate', spider=spider)
return cachedresponse
self.stats.inc_value('httpcache/invalidate', spider=spider)
self._cache_response(spider, response, request, cachedresponse)
return response
def process_exception(
self, request: Request, exception: Exception, spider: Spider
) -> Optional[Response]:
cachedresponse = request.meta.pop('cached_response', None)
if cachedresponse is not None and isinstance(exception, self.DOWNLOAD_EXCEPTIONS):
self.stats.inc_value('httpcache/errorrecovery', spider=spider)
return cachedresponse
return None
def _cache_response(
self, spider: Spider, response: Response, request: Request, cachedresponse: Optional[Response]
) -> None:
if self.policy.should_cache_response(response, request):
self.stats.inc_value('httpcache/store', spider=spider)
self.storage.store_response(spider, request, response)
else:
self.stats.inc_value('httpcache/uncacheable', spider=spider)
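The middleware above is driven entirely by the settings read in its `__init__` (`HTTPCACHE_ENABLED`, `HTTPCACHE_POLICY`, `HTTPCACHE_STORAGE`, `HTTPCACHE_IGNORE_MISSING`). A minimal sketch of a project `settings.py` fragment that would enable it, assuming the stock Scrapy policy and storage classes; the exact values are illustrative and not taken from this row:

```python
# settings.py -- illustrative values, assuming stock Scrapy cache classes
HTTPCACHE_ENABLED = True
HTTPCACHE_POLICY = "scrapy.extensions.httpcache.RFC2616Policy"
HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
HTTPCACHE_IGNORE_MISSING = False  # when True, uncached requests raise IgnoreRequest
```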
| CDHgit/cs3240-labdemo | helper.py | Python | mit | 137 | 0.043796 |
def greeting(msg):
print(msg)
def valediction(msg):
for char in reversed(msg):
print(char, end="")
print("\n")
| IMIO/django-fixmystreet | django_fixmystreet/webhooks/inbound.py | Python | agpl-3.0 | 6,983 | 0.003007 |
# -*- coding: utf-8 -*-
# pylint: disable=C0321,E1120,E1123,W0223
"""
Inbound webhook handlers.
"""
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django_fixmystreet.fixmystreet.models import FMSUser, Report, ReportAttachment, ReportComment, ReportEventLog
from django_fixmystreet.fixmystreet.utils import check_responsible_permission, check_contractor_permission
class NotLinkedWithThirdPartyError(Exception): pass
class ThirdPartyNotAuthorizedError(Exception): pass
class InvalidReportStatusError(Exception): pass
class BadRequestError(Exception): pass
class ReportAcceptInWebhookMixin(object):
ACTION_MESSAGE = u""
def run(self):
self._validate()
context = {
"action_msg": self.ACTION_MESSAGE,
"reference_id": self._data["reference_id"],
"comment": self._data["comment"],
}
self._add_comment(context)
def _validate(self):
super(ReportAcceptInWebhookMixin, self)._validate()
# Required fields.
if not self._data.get("reference_id"):
raise BadRequestError(u"'data.referenceId' is required.")
class ReportRejectInWebhookMixin(object):
ACTION_MESSAGE = u""
def run(self):
self._validate()
context = {
"action_msg": self.ACTION_MESSAGE,
"comment": self._data["comment"],
}
self._add_comment(context)
def _validate(self):
super(ReportRejectInWebhookMixin, self)._validate()
# Required fields.
if not self._data.get("comment"):
raise BadRequestError(u"'data.comment' is required.")
class ReportCloseInWebhookMixin(object):
ACTION_MESSAGE = u""
def run(self):
self._validate()
context = {
"action_msg": self.ACTION_MESSAGE,
"reference_id": self._data["reference_id"],
"comment": self._data["comment"],
}
self._add_comment(context)
def _validate(self):
super(ReportCloseInWebhookMixin, self)._validate()
# Required fields.
if not self._data.get("reference_id"):
raise BadRequestError(u"'data.referenceId' is required.")
class AbstractBaseInWebhook(object):
"""
Abstract inbound webhook handler. Every inbound webhook must derive from this class.
Class naming convention: ``<Resource><Hook><Action>InWebhook``.
"""
def __init__(self, meta, data, user=None):
self._meta = meta
self._data = data
self._user = user
def run(self):
raise NotImplementedError()
class AbstractReportInWebhook(AbstractBaseInWebhook):
"""Abstract inbound webhook handler for ``report.*.*``."""
def __init__(self, meta, data, user=None):
super(AbstractReportInWebhook, self).__init__(meta, data, user=user)
self._report = Report.objects.get(pk=meta["id"])
self._third_party = None
def _add_comment(self, context):
context["action_msg"] = context["action_msg"].format(third_party=self._third_party.name)
formatted_comment = render_to_string("webhooks/report_comment.txt", context)
fms_user = FMSUser.objects.get(pk=self._user.id)
comment = ReportComment(
report=self._report, text=formatted_comment, type=ReportAttachment.DOCUMENTATION, created_by=fms_user
)
comment.save()
def _user_has_permission(self):
raise NotImplementedError()
def _validate(self):
if self._third_party is None:
raise NotLinkedWithThirdPartyError(u"Report not linked with a third-party.")
if not self._report.is_in_progress():
raise InvalidReportStatusError(u"Report not in a valid state.")
if not self._user_has_permission():
raise ThirdPartyNotAuthorizedError(u"No authorization for this report.")
# Required fields.
if not self._meta.get("id"):
raise BadRequestError(u"'meta.id' is required.")
class AbstractReportAssignmentInWebhook(AbstractReportInWebhook):
"""Abstract inbound webhook handler for ``report.assignment.*``."""
def __init__(self, meta, data, user=None):
super(AbstractReportAssignmentInWebhook, self).__init__(meta, data, user=user)
self._third_party = self._report.contractor
def _user_has_permission(self):
return check_contractor_permission(self._user, self._report)
class ReportAssignmentAcceptInWebhook(ReportAcceptInWebhookMixin, AbstractReportAssignmentInWebhook):
"""Inbound webhook handler for ``report.assignment.accept``."""
ACTION_MESSAGE = _(u"Report assignment was accepted by {third_party}.")
class ReportAssignmentRejectInWebhook(ReportRejectInWebhookMixin, AbstractReportAssignmentInWebhook):
"""Inbound webhook handler for ``report.assignment.reject``."""
ACTION_MESSAGE = _(u"Report assignment was rejected by {third_party}.")
class ReportAssignmentCloseInWebhook(ReportCloseInWebhookMixin, AbstractReportAssignmentInWebhook):
"""Inbound webhook handler for ``report.assignment.close``."""
ACTION_MESSAGE = _(u"Report assignment was closed by {third_party}.")
class AbstractReportTransferInWebhook(AbstractReportInWebhook):
"""Abstract inbound webhook handler for ``report.transfer.*``."""
def __init__(self, meta, data, user=None):
super(AbstractReportTransferInWebhook, self).__init__(meta, data, user=user)
self._third_party = self._report.responsible_department
def _user_has_permission(self):
return check_responsible_permission(self._user, self._report)
class ReportTransferAcceptInWebhook(ReportAcceptInWebhookMixin, AbstractReportTransferInWebhook):
"""Inbound webhook handler for ``report.transfer.accept``."""
ACTION_MESSAGE = _(u"Report transfer was accepted by {third_party}.")
class ReportTransferRejectInWebhook(ReportRejectInWebhookMixin, AbstractReportTransferInWebhook):
"""Inbound webhook handler for ``report.transfer.reject``."""
ACTION_MESSAGE = _(u"Report transfer was rejected by {third_party}.")
def run(self):
super(ReportTransferRejectInWebhook, self).run()
self._report.responsible_department = ReportEventLog.objects.filter(
report=self._report,
organisation=self._report.responsible_entity,
event_type=ReportEventLog.MANAGER_ASSIGNED
).latest("event_at").related_old
self._report.responsible_entity = self._report.responsible_department.dependency
self._report.status = Report.MANAGER_ASSIGNED
self._report.save()
class ReportTransferCloseInWebhook(ReportCloseInWebhookMixin, AbstractReportTransferInWebhook):
"""Inbound webhook handler for ``report.transfer.close``."""
ACTION_MESSAGE = _(u"Report transfer was closed by {third_party}.")
def run(self):
super(ReportTransferCloseInWebhook, self).run()
self._report.close()
| aranjan7/contrail-controller-aranjan | src/config/utils/provision_encap.py | Python | apache-2.0 | 5,187 | 0.006748 |
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import argparse
import ConfigParser
from vnc_api.vnc_api import *
class EncapsulationProvision(object):
def __init__(self, args_str=None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
self._vnc_lib = VncApi(
self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name,
self._args.api_server_ip,
self._args.api_server_port, '/')
encap_obj=EncapsulationPrioritiesType(encapsulation=self._args.encap_priority.split(","))
try:
current_config=self._vnc_lib.global_vrouter_config_read(
fq_name=['default-global-system-config',
'default-global-vrouter-config'])
except Exception as e:
if self._args.oper == "add":
conf_obj=GlobalVrouterConfig(encapsulation_priorities=encap_obj,vxlan_network_identifier_mode=self._args.vxlan_vn_id_mode)
result=self._vnc_lib.global_vrouter_config_create(conf_obj)
print 'Created.UUID is %s'%(result)
return
current_linklocal=current_config.get_linklocal_services()
encapsulation_priorities=encap_obj
vxlan_network_identifier_mode=current_config.get_vxlan_network_identifier_mode()
if self._args.oper != "add":
encap_obj=EncapsulationPrioritiesType(encapsulation=[])
conf_obj=GlobalVrouterConfig(linklocal_services=current_linklocal,
encapsulation_priorities=encap_obj)
else :
conf_obj=GlobalVrouterConfig(linklocal_services=current_linklocal,
encapsulation_priorities=encapsulation_priorities,
vxlan_network_identifier_mode=self._args.vxlan_vn_id_mode)
result=self._vnc_lib.global_vrouter_config_update(conf_obj)
print 'Updated.%s'%(result)
# end __init__
def _parse_args(self, args_str):
'''
Eg. python provision_encap.py
--api_server_ip 127.0.0.1
--api_server_port 8082
--encap_priority "MPLSoUDP,MPLSoGRE,VXLAN"
--vxlan_vn_id_mode "automatic"
--oper <add | delete>
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'api_server_ip': '127.0.0.1',
'api_server_port': '8082',
'oper': 'add',
'encap_priority': 'MPLSoUDP,MPLSoGRE,VXLAN',
'vxlan_vn_id_mode' : 'automatic'
}
ksopts = {
'admin_user': 'user1',
'admin_password': 'password1',
'admin_tenant_name': 'admin'
}
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
if 'KEYSTONE' in config.sections():
ksopts.update(dict(config.items("KEYSTONE")))
# Override with CLI options
# Don't surpress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
defaults.update(ksopts)
parser.set_defaults(**defaults)
parser.add_argument(
"--api_server_ip", help="IP address of api server")
parser.add_argument("--api_server_port", help="Port of api server")
parser.add_argument(
"--encap_priority", help="List of Encapsulation priority")
parser.add_argument(
"--vxlan_vn_id_mode", help="Virtual Network id type to be used")
parser.add_argument(
"--oper", default='add',help="Provision operation to be done(add or delete)")
parser.add_argument(
"--admin_user", help="Name of keystone admin user")
parser.add_argument(
"--admin_password", help="Password of keystone admin user")
self._args = parser.parse_args(remaining_argv)
if not self._args.encap_priority:
parser.error('encap_priority is required')
# end _parse_args
# end class EncapsulationProvision
def main(args_str=None):
EncapsulationProvision(args_str)
# end main
if __name__ == "__main__":
main()
| harshilasu/GraphicMelon | y/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/ec2/test_connection.py | Python | gpl-3.0 | 8,892 | 0.000225 |
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2009, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the EC2Connection
"""
import unittest
import time
import telnetlib
import socket
from nose.plugins.attrib import attr
from boto.ec2.connection import EC2Connection
from boto.exception import EC2ResponseError
class EC2ConnectionTest(unittest.TestCase):
ec2 = True
@attr('notdefault')
def test_launch_permissions(self):
# this is my user_id, if you want to run these tests you should
# replace this with yours or they won't work
user_id = '963068290131'
print '--- running EC2Connection tests ---'
c = EC2Connection()
# get list of private AMI's
rs = c.get_all_images(owners=[user_id])
assert len(rs) > 0
# now pick the first one
image = rs[0]
# temporarily make this image runnable by everyone
status = image.set_launch_permissions(group_names=['all'])
assert status
d = image.get_launch_permissions()
assert 'groups' in d
assert len(d['groups']) > 0
# now remove that permission
status = image.remove_launch_permissions(group_names=['all'])
assert status
time.sleep(10)
d = image.get_launch_permissions()
assert 'groups' not in d
def test_1_basic(self):
# create 2 new security groups
c = EC2Connection()
group1_name = 'test-%d' % int(time.time())
group_desc = 'This is a security group created during unit testing'
group1 = c.create_security_group(group1_name, group_desc)
time.sleep(2)
group2_name = 'test-%d' % int(time.time())
group_desc = 'This is a security group created during unit testing'
group2 = c.create_security_group(group2_name, group_desc)
# now get a listing of all security groups and look for our new one
rs = c.get_all_security_groups()
found = False
for g in rs:
if g.name == group1_name:
found = True
assert found
# now pass arg to filter results to only our new group
rs = c.get_all_security_groups([group1_name])
assert len(rs) == 1
# try some group to group authorizations/revocations
# first try the old style
status = c.authorize_security_group(group1.name,
group2.name,
group2.owner_id)
assert status
status = c.revoke_security_group(group1.name,
group2.name,
group2.owner_id)
assert status
# now try specifying a specific port
status = c.authorize_security_group(group1.name,
group2.name,
group2.owner_id,
'tcp', 22, 22)
assert status
status = c.revoke_security_group(group1.name,
group2.name,
group2.owner_id,
'tcp', 22, 22)
assert status
# now delete the second security group
status = c.delete_security_group(group2_name)
# now make sure it's really gone
rs = c.get_all_security_groups()
found = False
for g in rs:
if g.name == group2_name:
found = True
assert not found
group = group1
# now try to launch apache image with our new security group
rs = c.get_all_images()
img_loc = 'ec2-public-images/fedora-core4-apache.manifest.xml'
for image in rs:
if image.location == img_loc:
break
reservation = image.run(security_groups=[group.name])
instance = reservation.instances[0]
while instance.state != 'running':
print '\tinstance is %s' % instance.state
time.sleep(30)
instance.update()
# instance in now running, try to telnet to port 80
t = telnetlib.Telnet()
try:
t.open(instance.dns_name, 80)
except socket.error:
pass
# now open up port 80 and try again, it should work
group.authorize('tcp', 80, 80, '0.0.0.0/0')
t.open(instance.dns_name, 80)
t.close()
# now revoke authorization and try again
group.revoke('tcp', 80, 80, '0.0.0.0/0')
try:
t.open(instance.dns_name, 80)
except socket.error:
pass
# now kill the instance and delete the security group
instance.terminate()
# check that state and previous_state have updated
assert instance.state == 'shutting-down'
assert instance.state_code == 32
assert instance.previous_state == 'running'
assert instance.previous_state_code == 16
# unfortunately, I can't delete the sg within this script
#sg.delete()
# create a new key pair
key_name = 'test-%d' % int(time.time())
status = c.create_key_pair(key_name)
assert status
# now get a listing of all key pairs and look for our new one
rs = c.get_all_key_pairs()
found = False
for k in rs:
if k.name == key_name:
found = True
assert found
# now pass arg to filter results to only our new key pair
rs = c.get_all_key_pairs([key_name])
assert len(rs) == 1
key_pair = rs[0]
# now delete the key pair
status = c.delete_key_pair(key_name)
# now make sure it's really gone
rs = c.get_all_key_pairs()
found = False
for k in rs:
if k.name == key_name:
found = True
assert not found
# short test around Paid AMI capability
demo_paid_ami_id = 'ami-bd9d78d4'
demo_paid_ami_product_code = 'A79EC0DB'
l = c.get_all_images([demo_paid_ami_id])
assert len(l) == 1
assert len(l[0].product_codes) == 1
assert l[0].product_codes[0] == demo_paid_ami_product_code
print '--- tests completed ---'
def test_dry_run(self):
c = EC2Connection()
dry_run_msg = 'Request would have succeeded, but DryRun flag is set.'
try:
rs = c.get_all_images(dry_run=True)
self.fail("Should have gotten an exception")
except EC2ResponseError, e:
self.assertTrue(dry_run_msg in str(e))
try:
rs = c.run_instances(
image_id='ami-a0cd60c9',
instance_type='m1.small',
dry_run=True
)
self.fail("Should have gotten an exception")
except EC2ResponseError, e:
self.assertTrue(dry_run_msg in str(e))
# Need an actual instance for the rest of this...
rs = c.run_instances(
image_id='ami-a0cd60c9'
| hortonworks/hortonworks-sandbox | apps/oozie/src/oozie/management/commands/oozie_setup.py | Python | apache-2.0 | 3,188 | 0.006589 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Portions Copyright © 2013 Hortonworks, Inc.
import logging
import os
from django.contrib.auth.models import User
from django.core import management
from django.core.management.base import NoArgsCommand
from django.utils.translation import ugettext as _
from hadoop import cluster
from hadoop.fs.hadoopfs import Hdfs
from liboozie.conf import REMOTE_DEPLOYMENT_DIR
from oozie.conf import LOCAL_SAMPLE_DATA_DIR, LOCAL_SAMPLE_DIR, REMOTE_SAMPLE_DIR
LOG = logging.getLogger(__name__)
class Command(NoArgsCommand):
def handle_noargs(self, **options):
fs = cluster.get_hdfs()
remote_dir = create_directories(fs)
# Copy examples binaries
for name in os.listdir(LOCAL_SAMPLE_DIR.get()):
local_dir = fs.join(LOCAL_SAMPLE_DIR.get(), name)
remote_data_dir = fs.join(remote_dir, name)
LOG.info(_('Copying examples %(local_dir)s to %(remote_data_dir)s\n') % {
'local_dir': local_dir, 'remote_data_dir': remote_data_dir})
fs.do_as_user(fs.DEFAULT_USER, fs.copyFromLocal, local_dir, remote_data_dir)
# Copy sample data
local_dir = LOCAL_SAMPLE_DATA_DIR.get()
remote_data_dir = fs.join(remote_dir, 'data')
LOG.info(_('Copying data %(local_dir)s to %(remote_data_dir)s\n') % {
'local_dir': local_dir, 'remote_data_dir': remote_data_dir})
fs.do_as_user(fs.DEFAULT_USER, fs.copyFromLocal, local_dir, remote_data_dir)
# Load jobs
sample, created = User.objects.get_or_create(username='sample')
management.call_command('loaddata', 'initial_oozie_examples.json', verbosity=2)
from oozie.models import Job
Job.objects.filter(owner__id=1100713).update(owner=sample) # 11OOZIE
def create_directories(fs):
# If needed, create the remote home, deployment and data directories
directories = (REMOTE_DEPLOYMENT_DIR.get(), REMOTE_SAMPLE_DIR.get())
for directory in directories:
if not fs.do_as_user("hdfs", fs.exists, directory):
remote_home_dir = Hdfs.join('/user', "hdfs")
if directory.startswith(remote_home_dir):
# Home is 755
fs.do_as_user("hdfs", fs.create
|
_home_dir, remote_home_dir)
# Shared by all the users
fs.do_as_user("hdfs", fs.mkdir, directory, 511)
fs.do_as_user("hdfs", fs.chmod, directory, 511) # To remove after https://issues.apache.org/jira/browse/HDFS-3491
return REMOTE_SAMPLE_DIR.get()
| t3dev/odoo | addons/hw_drivers/__manifest__.py | Python | gpl-3.0 | 592 | 0.001689 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Hardware Proxy',
'category': 'IOT',
'sequence': 6,
'summary': 'Connect the Web Client to Hardware Peripherals',
'website': 'https://www.odoo.com/page/iot',
'description': """
Hardware Proxy
==============
This module allows you to remotely use peripherals connected to this server.
This module only contains the enabling framework. The actual device drivers
are found in other modules that must be installed separately.
""",
'installable': False,
}
| bokeh/bokeh | sphinx/source/docs/first_steps/examples/first_steps_1_simple_line.py | Python | bsd-3-clause | 358 | 0 |
from bokeh.plotting import figure, show
# prepare some data
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
# create a new plot with a title and axis labels
p = figure(title="Simple line example", x_axis_label="x", y_axis_label="y")
# add a line renderer with legend and line thickness
p.line(x, y, legend_label="Temp.", line_width=2)
# show the results
show(p)
| google-research/google-research | non_semantic_speech_benchmark/data_prep/beam_dofns_test.py | Python | apache-2.0 | 16,889 | 0.004855 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for beam_dofns."""
from absl.testing import absltest
from absl.testing import parameterized
import apache_beam as beam
import numpy as np
import tensorflow as tf
from non_semantic_speech_benchmark.data_prep import beam_dofns
from non_semantic_speech_benchmark.data_prep import data_prep_utils
from non_semantic_speech_benchmark.export_model import tf_frontend
BASE_SHAPE_ = (15, 5)
class FakeMod(object):
def __call__(self, *args, **kwargs):
del args, kwargs
return {'output_key':
np.zeros([BASE_SHAPE_[0], 1, BASE_SHAPE_[1]], np.float32)}
def build_tflite_interpreter_dummy(tflite_model_path):
del tflite_model_path
return None
def _s2e(audio_samples, sample_rate, module_location, output_key, name):
"""Mock waveform-to-embedding computation."""
del audio_samples, sample_rate, module_location, output_key, name
return np.zeros(BASE_SHAPE_, dtype=np.float32)
def make_tfexample(l):
ex = tf.train.Example()
ex.features.feature['audio'].float_list.value.extend(list(range(l)))
ex.features.feature['label'].bytes_list.value.append(b'dummy_lbl')
ex.features.feature['speaker_id'].bytes_list.value.append(b'dummy_spkr')
ex.features.feature['sample_rate'].int64_list.value.append(32000)
return ex
class MockModule(object):
def __init__(self, output_keys):
self.signatures = {'waveform': self._fn}
self.output_keys = output_keys
def _fn(self, waveform, paddings):
del paddings
bs = waveform.shape[0] if waveform.ndim > 1 else 1
assert isinstance(bs, int)
return {k: tf.ones([bs, 5, 10]) for k in self.output_keys}
class BeamDofnsTest(parameterized.TestCase):
@parameterized.parameters(
{'average_over_time': True, 'sample_rate_key': 's', 'sample_rate': None},
{'average_over_time': False, 'sample_rate_key': 's', 'sample_rate': None},
{'average_over_time': False, 'sample_rate_key': None, 'sample_rate': 5},
)
def test_compute_embedding_dofn(self, average_over_time, sample_rate_key,
sample_rate):
# Establish required key names.
audio_key = 'audio_key'
# Construct the tf.train.Example test data.
ex = tf.train.Example()
ex.features.feature[audio_key].float_list.value.extend(
np.zeros(2000, np.float32))
if sample_rate_key:
ex.features.feature[sample_rate_key].int64_list.value.append(8000)
old_k = 'oldkey'
do_fn = beam_dofns.ComputeEmbeddingMapFn(
name='module_name',
module='@loc',
output_key='output_key',
audio_key=audio_key,
sample_rate_key=sample_rate_key,
sample_rate=sample_rate,
average_over_time=average_over_time,
setup_fn=lambda _: FakeMod())
do_fn.setup()
new_k, new_v = next(do_fn.process((old_k, ex)))
self.assertEqual(new_k, old_k)
expected_shape = (1, BASE_SHAPE_[1]) if average_over_time else BASE_SHAPE_
self.assertEqual(new_v.shape, expected_shape)
@parameterized.parameters(
{'average_over_time': True, 'sample_rate_key': 's', 'sample_rate': None},
{'average_over_time': False, 'sample_rate_key': 's', 'sample_rate': None},
)
def test_compute_embedding_dofn_custom_call(self, average_over_time,
sample_rate_key, sample_rate):
# Establish required key names.
audio_key = 'audio_key'
custom_call_shape = (5, 25)
# Custom call function for embedding generation.
def test_call_fn(audio_samples, sample_rate, module_location, output_key,
name):
"""Mock waveform-to-embedding computation."""
del audio_samples, sample_rate, module_location, output_key, name
return np.zeros(custom_call_shape, dtype=np.float32)
# Construct the tf.train.Example test data.
ex = tf.train.Example()
ex.features.feature[audio_key].float_list.value.extend(
np.zeros(2000, np.float32))
if sample_rate_key:
ex.features.feature[sample_rate_key].int64_list.value.append(8000)
old_k = 'oldkey'
do_fn = beam_dofns.ComputeEmbeddingMapFn(
name='module_name',
module='@loc',
output_key='unnecessary',
audio_key=audio_key,
sample_rate_key=sample_rate_key,
sample_rate=sample_rate,
average_over_time=average_over_time,
module_call_fn=test_call_fn,
setup_fn=lambda _: None)
do_fn.setup()
new_k, new_v = next(do_fn.process((old_k, ex)))
self.assertEqual(new_k, old_k)
expected_shape = (
1, custom_call_shape[1]) if average_over_time else custom_call_shape
self.assertEqual(new_v.shape, expected_shape)
@parameterized.parameters(
{'average_over_time': True, 'sample_rate_key': 's', 'sample_rate': None},
{'average_over_time': False, 'sample_rate_key': 's', 'sample_rate': None},
{'average_over_time': False, 'sample_rate_key': None, 'sample_rate': 5},
) # pylint:disable=g-unreachable-test-method
def disable_test_compute_embedding_map_fn_tflite(
self, average_over_time, sample_rate_key, sample_rate):
# Establish required key names.
audio_key = 'audio_key'
# Construct the tf.train.Example test data.
ex = tf.train.Example()
ex.features.feature[audio_key].float_list.value.extend(
np.zeros(2000, np.float32))
if sample_rate_key:
ex.features.feature[sample_rate_key].int64_list.value.append(8000)
old_k = 'oldkey'
def _feature_fn(x, s):
return tf.expand_dims(
tf_frontend.compute_frontend_features(x, s, frame_hop=17),
axis=-1).numpy().astype(np.float32)
do_fn = beam_dofns.ComputeEmbeddingMapFn(
name='module_name',
module='file.tflite',
output_key=0,
audio_key=audio_key,
sample_rate_key=sample_rate_key,
sample_rate=sample_rate,
average_over_time=average_over_time,
feature_fn=_feature_fn,
module_call_fn=_s2e,
setup_fn=build_tflite_interpreter_dummy)
do_fn.setup()
new_k, new_v = next(do_fn.process((old_k, ex)))
self.assertEqual(new_k, old_k)
expected_shape = (1, BASE_SHAPE_[1]) if average_over_time else BASE_SHAPE_
self.assertEqual(new_v.shape, expected_shape)
@parameterized.parameters([
{'chunk_len': 0, 'average_over_time': True, 'emb_on_chnks': True},
{'chunk_len': 8000, 'average_over_time': True, 'emb_on_chnks': True},
{'chunk_len': 0, 'average_over_time': True, 'emb_on_chnks': False},
{'chunk_len': 8000, 'average_over_time': True, 'emb_on_chnks': False},
])
def test_chunk_audio(self, chunk_len, average_over_time, emb_on_chnks):
dofn = beam_dofns.ChunkAudioAndComputeEmbeddings(
name='all',
module='dummy_name',
output_key=['okey1', 'okey2'],
embedding_names=['em1', 'em2'],
audio_key='audio',
label_key='label',
speaker_id_key='speaker_id',
sample_rate_key=None,
sample_rate=16000,
average_over_time=average_over_time,
chunk_len=chunk_len,
compute_embeddings_on_chunked_audio=emb_on_chnks,
setup_fn=lambda _: MockModule(['okey1', 'okey2']))
dofn.setup()
for l in [8000, 16000, 32000]:
k = f'key_{l}'
ex = make_tfexample(l)
for i, (kn, aud, lbl, spkr, embs_d) in enumerate(dofn.process((k, ex))):
self.assertEqual(f'{k}_{i}', kn)
if chunk_len:
expected_chunk_len = chunk_len if l > chunk_len else l
else:
expected_chunk_len = l
self.assertLen(aud, expected_chunk_len)
self.a
| ChantyTaguan/zds-site | zds/mp/validators.py | Python | gpl-3.0 | 3,768 | 0.002132 |
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils.translation import gettext_lazy as _
from zds.api.validators import Validator
from zds.member.models import Profile
class ParticipantsUserValidator(Validator):
can_be_empty = False
def validate_participants(self, value):
msg = None
if value or self.can_be_empty:
for participant in value:
if participant.username == self.get_current_user().username:
msg = _("Vous ne pouvez pas vous écrire à vous-même !")
try:
current = get_object_or_404(Profile, user__username=participant)
if not Profile.objects.contactable_members().filter(pk=current.pk).exists():
msg = _("Vous avez tenté d'ajouter un utilisateur injoignable.")
except Http404:
msg = _(f"Un des participants saisi est introuvable ({participant}).")
else:
msg = _("Vous devez spécifier des participants.")
if msg is not None:
self.throw_error("participants", msg)
return value
def get_current_user(self):
raise NotImplementedError("`get_current_user()` must be implemented.")
class ParticipantsStringValidator(Validator):
"""
Validates participants field of a MP.
"""
def validate_participants(self, value, username):
"""
Checks about participants.
:param value: participants value
:return: participants value
"""
msg = None
if value:
participants = value.strip()
if participants != "":
if len(participants) == 1 and participants[0].strip() == ",":
msg = _("Vous devez spécfier des participants valides.")
for participant in participants.split(","):
participant = participant.strip()
if not participant:
continue
if participant.strip().lower() == username.lower():
msg = _("Vous ne pouvez pas vous écrire à vous-même !")
try:
current = get_object_or_404(Profile, user__username=participant)
if not Profile.objects.contactable_members().filter(pk=current.pk).exists():
msg = _("Vous avez tenté d'ajouter un utilisateur injoignable.")
except Http404:
msg = _(f"Un des participants saisi est introuvable ({participant}).")
else:
msg = _("Le champ participants ne peut être vide.")
if msg is not None:
self.throw_error("participants", msg)
return value
class TitleValidator(Validator):
"""
Validates title field of a MP.
"""
def validate_title(self, value):
"""
Checks about title.
:param value: title value
:return: title value
"""
msg = None
if value:
if not value.strip():
msg = _("Le champ titre ne peut être vide.")
if msg is not None:
self.throw_error("title", msg)
return value
class TextValidator(Validator):
"""
Validates text field of a MP.
"""
def validate_text(self, value):
"""
Checks about text.
:param value: text value
:return: text value
"""
msg = None
if value:
if not value.strip():
msg = _("Le champ text ne peut être vide.")
if msg is not None:
self.throw_error("text", msg)
return value
| pbabik/OGCServer | tests/testWsgi.py | Python | bsd-3-clause | 1,893 | 0.005811 |
import nose
def start_response_111(status, headers):
for header in headers:
if header[0] == 'Content-Type':
assert header[1] == 'application/vnd.ogc.wms_xml'
assert status == '200 OK'
def start_response_130(status, headers):
for header in headers:
if header[0] == 'Content-Type':
assert header[1] == 'text/xml'
assert status == '200 OK'
def start_response_check_404(status, headers):
print('status code: %s' % status)
assert status == '404 NOT FOUND'
def get_wsgiapp():
import os
from ogcserver.wsgi import WSGIApp
base_path, tail = os.path.split(__file__)
wsgi_app = WSGIApp(os.path.join(base_path, 'ogcserver.conf'))
return wsgi_app
def get_environment():
environ = {}
environ['HTTP_HOST'] = "localhost"
environ['SCRIPT_NAME'] = __name__
environ['PATH_INFO'] = '/'
return environ
def test_get_capabilities():
wsgi_app = get_wsgiapp()
environ = get_environment()
environ['QUERY_STRING'] = "EXCEPTION=application/vnd.ogc.se_xml&VERSION=1.1.1&SERVICE=WMS&REQUEST=GetCapabilities&"
response = wsgi_app.__call__(environ, start_response_111)
content = ''.join(response)
environ['QUERY_STRING'] = "EXCEPTION=application/vnd.ogc.se_xml&VERSION=1.3.0&SERVICE=WMS&REQUEST=GetCapabilities&"
response = wsgi_app.__call__(environ, start_response_130)
''.join(response)
def test_bad_query():
wsgi_app = get_wsgiapp()
environ = get_environment()
environ['QUERY_STRING'] = "EXCEPTION=application/vnd.ogc.se_xml&VERSION=1.1.1&SERVICE=WMS&REQUEST=GetMap&"
response = wsgi_app.__call__(environ, start_response_check_404)
environ['QUERY_STRING'] = "EXCEPTION=application/vnd.ogc.se_xml&VERSION=1.3.0&SERVICE=WMS&REQUEST=GetMap&"
response = wsgi_app.__call__(environ, start_response_check_404)
| hzlf/openbroadcast.ch | app/routing.py | Python | gpl-3.0 | 606 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
from channels.staticfiles import StaticFilesWrapper, StaticFilesHandler
from chat.consumers import ChatJSONConsumer
from rating.consumers import RatingJSONConsumer
application = StaticFilesWrapper(ProtocolTypeRouter({
'websocket': AuthMiddlewareStack(
URLRouter([
url('^ws/chat/$', ChatJSONConsumer),
url('^ws/rating/$', RatingJSONConsumer),
])
),
}))
| psiq/gdsfactory | pp/mask/merge_metadata.py | Python | mit | 914 | 0.003282 |
import pp
from pp.mask.merge_json import merge_json
from pp.mask.merge_markdown import merge_markdown
from pp.mask.merge_test_metadata import merge_test_metadata
from pp.mask.write_labels import write_labels
def merge_metadata(gdspath, labels_prefix="opt", label_layer=pp.LAYER.LABEL, **kwargs):
mdpath = gdspath.with_suffix(".md")
jsonpath = gdspath.with_suffix(".json")
build_directory = gdspath.parent.parent
doe_directory = build_directory / "doe"
write_labels(gdspath=gdspath, prefix=labels_prefix, label_layer=label_layer)
merge_json(doe_directory=doe_directory, jsonpath=jsonpath, **kwargs)
merge_markdown(reports_directory=doe_directory, mdpath=mdpath)
merge_test_metadata(gdspath, labels_prefix=labels_prefix)
if __name__ == "__main__":
gdspath = pp.CONFIG["samples_path"] / "mask" / "build" / "mask" / "mask.gds"
print(gdspath)
merge_metadata(gdspath)
| dleicht/PSB | PyZenity.py | Python | mit | 15,175 | 0.005074 |
################################################################################
# Name: PyZenity.py
# Author: Brian Ramos
# Created: 10/17/2005
# Revision Information:
# $Date: $
# $Revision: $
# $Author: bramos $
#
# Licence: MIT Licence
#
# Copyright (c) 2010 Brian Ramos
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
################################################################################
from datetime import date
from subprocess import Popen, PIPE
from itertools import chain
from os import path
__all__ = ['GetDate', 'GetFilename', 'GetDirectory', 'GetSavename', 'GetText',
'InfoMessage', 'Question', 'Warning', 'ErrorMessage',
'Notification', 'TextInfo', 'Progress','List' ]
__doc__ = """PyZenity is an easy to use interface to Zenity for Python.
Zenity is normally called from scripts by invoking it with a multitude of
command line parameters that it uses to construct its interfaces. This
module hides the details of invoking the command and presents simple API
functions like:
cancel = Question('Should I cancel the operation?')
Each function takes optional kwargs parameters. This is to allow the use of
general Zenity parameters such as:
title - Set the dialog title
window_icon - Set the window icon
ok_label - Set the text for the Ok label
cancel_label - Set the text for the Cancel label
height - Set the height
width - Set the width
timeout - Set the dialog timeout in seconds"""
zen_exec = 'zenity'
def run_zenity(type, *args):
return Popen([zen_exec, type] + list(args), stdin=PIPE, stdout=PIPE)
# This is a dictionary of optional parameters that would create
# syntax errors in python if they were passed in as kwargs.
kw_subst = {
'window_icon': 'window-icon',
'ok_label': 'ok-label',
'cancel_label': 'cancel-label'
}
def kwargs_helper(kwargs):
"""This function preprocesses the kwargs dictionary to sanitize it."""
args = []
for param, value in kwargs.items():
param = kw_subst.get(param, param)
args.append((param, value))
return args
def GetDate(text=None, selected=None, **kwargs):
"""Prompt the user for a date.
This will raise a Zenity Calendar Dialog for the user to pick a date.
It will return a datetime.date object with the date or None if the
user hit cancel.
text - Text to be displayed in the calendar dialog.
selected - A datetime.date object that will be the pre-selected date.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--date-format=%d/%m/%Y']
if text:
args.append('--text=%s' % text)
if selected:
args.append('--day=%d' % selected.day)
args.append('--month=%d' % selected.month)
args.append('--year=%d' % selected.year)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--calendar', *args)
if p.wait() == 0:
retval = p.stdout.read().strip()
day, month, year = [int(x) for x in retval.split('/')]
return date(year, month, day)
def GetFilename(multiple=False, sep='|', **kwargs):
"""Prompt the user for a filename.
This will raise a Zenity File Selection Dialog. It will return a list with
the selected files or None if the user hit cancel.
multiple - True to allow the user to select multiple files.
sep - Token to use as the path separator when parsing Zenity's return
string.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = []
if multiple:
args.append('--multiple')
if sep != '|':
args.append('--separator=%s' % sep)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--file-selection', *args)
if p.wait() == 0:
return p.stdout.read()[:-1].split('|')
def GetDirectory(multiple=False, selected=None, sep=None, **kwargs):
"""Prompt the user for a directory.
This will raise a Zenity Directory Selection Dialog. It will return a
list with the selected directories or None if the user hit cancel.
multiple - True to allow the user to select multiple directories.
selected - Path to the directory to be selected on startup.
sep - Token to use as the path separator when parsing Zenity's return
string.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--directory']
if multiple:
args.append('--multiple')
if selected:
if not path.lexists(selected):
raise ValueError("File %s does not exist!" % selected)
args.append('--filename=%s' % selected)
if sep:
args.append('--separator=%s' % sep)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--file-selection', *args)
if p.wait() == 0:
return p.stdout.read().strip().split('|')
def GetSavename(default=None, **kwargs):
"""Prompt the user for a filename to save as.
This will raise a Zenity Save As Dialog. It will return the name to save
a file as or None if the user hit cancel.
default - The default name that should appear in the save as dialog.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--save']
if default:
args.append('--filename=%s' % default)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--file-selection', *args)
if p.wait() == 0:
return p.stdout.read().strip().split('|')
def Notification(text=None, window_icon=None, **kwargs):
"""Put an icon in the notification area.
This will put an icon in the notification area and return when the user
clicks on it.
text - The tooltip that will show when the user hovers over it.
window_icon - The stock icon ("question", "info", "warning", "error") or
path to the icon to show.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = []
if text:
args.append('--text=%s' % text)
if window_icon:
args.append('--window-icon=%s' % window_icon)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--notification', *args)
p.wait()
def List(column_names, title=None, boolstyle=None, editable=False,
select_col=None, sep='|', data=[], **kwargs):
"""Present a list of items to select.
This will raise a Zenity List Dialog populated with the colomns and rows
specified and return either the cell or row that was selected or None if
the user hit cancel.
column_names - A tuple or list containing the names of the columns.
title - The title of the dialog box.
boolstyle - Whether the first columns should be a bool option (
| janusnic/shoop | shoop/front/apps/customer_information/urls.py | Python | agpl-3.0 | 432 | 0 |
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns(
'',
url(r'^customer/$', views.CustomerEditView.as_view(),
name='customer_edit'),
)
| jakevdp/bokeh | sphinx/source/tutorial/solutions/olympics.py | Python | bsd-3-clause | 3,262 | 0.003679 |
import json
import numpy as np
from bokeh.plotting import *
from bokeh.sampledata.olympics2014 import data
from bokeh.objects import ColumnDataSource
data = { d['abbr']: d['medals'] for d in data['data'] if d['medals']['total'] > 0}
# pull out just the data we care about
countries = sorted(
data.keys(),
key=lambda x: data[x]['total'], reverse=True
)
gold = np.array([data[abbr]['gold'] for abbr in countries], dtype=np.float)
silver = np.array([data[abbr]['silver'] for abbr in countries], dtype=np.float)
bronze = np.array([data[abbr]['bronze'] for abbr in countries], dtype=np.float)
# EXERCISE: output static HTML file
output_file('olympics.html')
# EXERCISE: turn on plot hold
hold()
# use the `rect` renderer to display stacked bars of the medal results. Note
# that we set y_range explicitly on the first renderer
rect(x=countries, y=bronze/2, width=0.8, height=bronze, x_range=countries, color="#CD7F32", alpha=0.6,
background_fill='#59636C', title="Olympic Medals by Country (stacked)", tools="",
y_range=[0, max(gold+silver+bronze)], plot_width=800)
rect(x=countries, y=bronze+silver/2, width=0.8, height=silver, x_range=countries, color="silver", alpha=0.6)
# EXERCISE: add a `rect` renderer to stack the gold medal results
rect(x=countries, y=bronze+silver+gold/2, width=0.8, height=gold, x_range=countries, color="gold", alpha=0.6)
# EXERCISE: use grid(), axis(), etc. to style the plot. Some suggestions:
# - remove the grid lines
# - change the major label standoff, and major_tick_out values
# - make the tick labels smaller
# - set the x-axis orientation to vertical, or angled
xgrid().grid_line_color = None
axis().major_label_text_font_size = "8pt"
axis().major_label_standoff = 0
xaxis().major_label_orientation = np.pi/3
xaxis().major_label_standoff = 6
xaxis().major_tick_out = 0
# EXERCISE: create a new figure
figure()
# Categorical percentage coordinates can be used for positioning/grouping
countries_bronze = [c+":0.3" for c in countries]
countries_silver = [c+":0.5" for c in countries]
countries_gold = [c+":0.7" for c in countries]
# EXERCISE: re create the medal plot, but this time:
# - do not stack the bars on the y coordinate
# - use countries_gold, etc. to positions the bars on the x coordinate
rect(x=countries_bronze, y=bronze/2, width=0.2, height=bronze, x_range=countries, color="#CD7F32", alpha=0.6,
background_fill='#59636C', title="Olympic Medals by Country (grouped)", tools="",
y_range=[0, max([gold.max(), silver.max(), bronze.max()])], plot_width=1000, plot_height=300)
rect(x=countries_silver, y=silver/2, width=0.2, height=silver, x_range=countries, color="silver", alpha=0.6)
rect(x=countries_gold, y=gold/2, width=0.2, height=gold, x_range=countries, color="gold", alpha=0.6)
# EXERCISE: use grid(), axis(), etc. to style the plot. Some suggestions:
# - remove the axis and grid lines
# - remove the major ticks
# - make the tick labels smaller
# - set the x-axis orientation to vertical, or angled
xgrid().grid_line_color = None
axis().major_label_text_font_size = "8pt"
axis().major_label_standoff = 0
xaxis().major_label_orientation = np.pi/3
xaxis().major_label_standoff = 6
xaxis().major_tick_out = 0
show() # show the plot
|
jessamynsmith/boards-backend
|
blimp_boards/cards/urls.py
|
Python
|
agpl-3.0
| 471
| 0
|
from django.conf.urls import patterns, url
from rest_framework.routers import DefaultRouter
from ..boards.views import BoardHTMLView
from . import views
router = DefaultRouter()
router.register(r'cards', views.CardViewSet)
api_urlpatterns = router.urls
urlpatterns = patterns(
# Prefix
'',
url(r'^$',
BoardHTMLView.as_view(), name='card_detail'),
url(r'^download/$',
        views.CardDownloadHTMLView.as_view(), name='card_download'),
)
|
Microsoft/pxt
|
tests/pyconverter-test/cases/empty_array_declaration.py
|
Python
|
mit
| 40
| 0.025
|
def f1():
|
while True:
|
x = []
|
tidepool-org/dfaker
|
tests/test_common_fields.py
|
Python
|
bsd-2-clause
| 1,297
| 0.000771
|
from chai import Chai
import unittest
import dfaker.common_fields as common_fields
import dfaker.tools as tools
class Test_Common_Fields(Chai):
def test_common_fields(self):
""" Test that common fields populate properly"""
name = "bolus"
datatype = {}
timestamp = tools.convert_ISO_to_epoch('2015-03-03 00:00:00',
'%Y-%m-%d %H:%M:%S')
zonename = "US/Pacific"
expected = {
'time': '2015-03-03T00:00:00.000Z', # UTC time
'deviceTime': '2015-03-02T16:00:00', # local time
'timezoneOffset': -480,
'deviceId': 'DemoData-123456789',
'uploadId': 'upid_abcdefghijklmnop',
'conversionOffset': 0,
|
}
result_dict = common_fields.add_common_fields(name, datatype,
timestamp, zonename)
for key in expected.keys():
self.assertEqual(result_dict[key], expected[key])
def suite():
""" Gather all the tests from this module in a test suite """
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(Test_Common_Fields))
return test_suite
mySuit = suite()
runner = unittest.TextTestRunner()
runner.run(mySuit)
|
luisalves05/shortener-url
|
src/apps/miudo/views.py
|
Python
|
mit
| 1,890
| 0.013757
|
import uuid
from random import randint
from django.shortcuts import render
from django.http import HttpResponseRedirect
from .models import Url
def index(request):
if request.session.has_key("has_url"):
url = request.session.get("has_url")
del request.session['has_url']
return render(request, "miudo/index.html", locals())
return render(request, "miudo/index.html", {})
def make_url(request):
    if request.method == "POST":
url = None # initial url
url_site = request.POST['url']
url_id = generate_key()
try:
url = Url.objects.get(url_id = url_id)
while url:
url_id = generate_key()
url = Url.objects.get(url_id = url_id)
create_url(request, url_id, url_site)
request.session["has_url"] = url_id
except Url.DoesNotExist:
create_url(request, url_id, url_site)
request.session["has_url"] = url_id
return HttpResponseRedirect("/")
def create_url(custom_request, url_id, url_site):
if custom_request.user.is_authenticated():
url = Url.objects.create(url_id = url_id, url_site = url_site,
url_author = custom_request.user)
else:
url = Url.objects.create(url_id = url_id, url_site = url_site)
url.save()
def generate_key():
to_choose = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
url_id = ""
while len(url_id) != 6:
i = randint(0, len(to_choose) - 1)
url_id += to_choose[i]
return url_id
def redirect_url(request, url_id=None):
try:
url = Url.objects.get(url_id = url_id)
url.url_clicked = url.url_clicked + 1
url.save()
except Url.DoesNotExist:
return render(request, "base/page_not_found.html", {})
return HttpResponseRedirect(url.url_site)
|
iksaif/euscan
|
euscanwww/euscan_captcha/urls.py
|
Python
|
gpl-2.0
| 236
| 0
|
from django.conf.urls import patterns, url
from views import RecaptchaRegistrationView
urlpatterns = patterns(
'', url(
r'^register/$',
RecaptchaRegistrationView.as_view(),
        name='registration_register'),
)
|
TOC-Shard/moul-scripts
|
Python/tldnShroomieGate.py
|
Python
|
gpl-3.0
| 4,105
| 0.005359
|
# -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email legal@cyan.com
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
"""
Module: tldnShroomieGate
Age: Teledahn
Date: February 2007
Author: Karl Johnson
"""
from Plasma import *
from PlasmaTypes import *
# define the attributes that will be entered in max
clkLever = ptAttribActivator(1,"clk: Activator for Shroomie Gate")
respLeverPull = ptAttribResponder(2, "resp: Lever Pull", netForce=1)
respGateDown = ptAttribResponder(3, "resp: Gate Down", netForce=1)
respGateUp = ptAttribResponder(4, "resp: Gate Up", netForce=1)
class tldnShroomieGate(ptResponder):
def __init__(self):
# run parent class init
ptResponder.__init__(self)
self.id = 5042
version = 1
self.version = version
print "__init__tldnShroomieGate v.", version
def OnNotify(self,state,id,events):
if id == clkLever.id and state:
print "tldnShroomieGate:\t---Someone Pulled the Lever"
respLeverPull.run(self.key,avatar=PtFindAvatar(events))
elif id == respLeverPull.id:
ageSDL = PtGetAgeSDL()
PtDebugPrint("tldnShroomieGate:\t---Shroomie Gate Up SDL: %d" % (ageSDL["tldnShroomieGateUp"][0]))
if ageSDL["tldnShroomieGatePowerOn"][0] and self.sceneobject.isLocallyOwned():
if ageSDL["tldnShroomieGateUp"][0]:
respGateDown.run(self.key)
print "tldnShroomieGate:\t---Shroomie Gate Going Down"
else:
respGateUp.run(self.key)
print "tldnShroomieGate:\t---Shroomie Gate Going Up"
ageSDL["tldnShroomieGateUp"] = (not ageSDL["tldnShroomieGateUp"][0],)
def OnServerInitComplete(self):
try:
ageSDL = PtGetAgeSDL()
except:
print "tldnShroomieGate:\tERROR---Cannot find the Teledahn Age SDL"
ageSDL.sendToClients("tldnShroomieGateUp")
ageSDL.setFlags("tldnShroomieGateUp", 1, 1)
ageSDL.setNotify(self.key, "tldnShroomieGateUp", 0.0)
        if ageSDL["tldnShroomieGateUp"][0]:
print "tldnShroomieGate:\tInit---Shroomie Gate Up"
respGateUp.run(self.key,fastforward=1)
else:
print "tldnShroomieGate:\tInit---Shroomie Gate Down"
            respGateDown.run(self.key,fastforward=1)
|
paera/django-static-precompiler
|
static_precompiler/tests/test_management.py
|
Python
|
mit
| 1,188
| 0.001684
|
from django.core.management import call_command
from static_precompiler.management.commands.compilestatic import get_scanned_dirs
from static_precompiler.settings import STATIC_ROOT, ROOT, OUTPUT_DIR
import pytest
import os
def test_get_scanned_dirs():
assert get_scanned_dirs() == sorted([
        os.path.join(os.path.dirname(__file__), "staticfiles_dir"),
        os.path.join(os.path.dirname(__file__), "staticfiles_dir_with_prefix"),
STATIC_ROOT
])
@pytest.mark.django_db
def test_compilestatic_command():
call_command("compilestatic")
output_path = os.path.join(ROOT, OUTPUT_DIR)
compiled_files = []
for root, dirs, files in os.walk(output_path):
for filename in files:
compiled_files.append(os.path.join(root[len(output_path):].lstrip("/"), filename))
compiled_files.sort()
assert compiled_files == [
"another_test.js",
"scripts/test.js",
"styles/imported.css",
"styles/stylus/A.css",
"styles/stylus/B/C.css",
"styles/stylus/D.css",
"styles/stylus/E/F.css",
"styles/stylus/E/index.css",
"styles/test.css",
"test-compass.css",
]
|
jeroenseegers/git-history
|
git_history.py
|
Python
|
mit
| 1,296
| 0
|
import os
import subprocess
import sys
__version__ = '0.0.1'
__author__ = 'Jeroen Seegers'
__license__ = 'MIT'
HOME_DIR = os.path.expanduser('~')
HISTORY_FILE = HOME_DIR + '/.git-history.log'
def ensure_history_file():
"""Ensure the history file exists"""
if not os.path.isfile(HISTORY_FILE) and os.access(HOME_DIR, os.W_OK):
open(HISTORY_FILE, 'a').close()
return True
elif os.path.isfile(HISTORY_FILE) and os.access(HOME_DIR, os.W_OK):
return True
elif os.path.isfile(HISTORY_FILE) and not os.access(HOME_DIR, os.W_OK):
return False
else:
return False
def track_history():
    arguments = sys.argv[1:]
arguments.insert(0, 'git')
if arguments == ['git', 'history']:
# Show the history so far
with open(HISTORY_FILE, 'r') as f:
print f.read()
f.close()
elif len(arguments) > 1:
# Store command in history
if ensure_history_file():
with open(HISTORY_FILE, 'a') as f:
                f.write('{0}\n'.format(' '.join(sys.argv[1:])))
f.close()
# Execute given command
subprocess.call(arguments)
else:
# Show default help text
subprocess.call('git')
if __name__ == '__main__':
track_history()
|
fnp/wolnelektury
|
src/club/migrations/0011_fix_notification_body.py
|
Python
|
agpl-3.0
| 585
| 0
|
# Generated by Django 2.2.5 on 2019-09-30 13:02
from django.db import migrations
def fix_notification_body(apps, schema_editor):
PayUNotification = apps.get_model('club', 'PayUNotification')
    for n in PayUNotification.objects.filter(body__startswith='b'):
n.body = n.body[2:-1]
n.save()
class Migration(migrations.Migration):
dependencies = [
('club', '0010_auto_20190529_0946'),
]
operations = [
migrations.RunPython(
fix_notification_body,
migrations.RunPython.noop,
elidable=True),
]
|
bittner/django-media-tree
|
media_tree/contrib/cms_plugins/media_tree_slideshow/__init__.py
|
Python
|
bsd-3-clause
| 903
| 0.007752
|
"""
Plugin: Slideshow
*****************
This plugin allows you to put a slideshow on a page, automatically
displaying the selected image files with customizable transitions and
intervals.
Installation
============
To use this plugin, put ``media_tree.contrib.cms_plugins.media_tree_slideshow``
in your installed apps, and run ``manage.py syncdb``.
Template
========
Override the template ``cms/plugins/media_tree_slideshow.html`` if you want to
customize the output. Please take a look at the default template for more
information.
By default, images are rendered to the output using the template
``media_tree/filenode/includes/figure.html``, which includes captions.
.. Note::
The default template requires you to include `jQuery <http://jquery.com/>`_
in your pages, since it uses the `jQuery Cycle Plugin
<http://jquery.malsup.com/cycle/>`_ (bundled) for image transitions.
"""
|
brosner/nip
|
setup.py
|
Python
|
mit
| 461
| 0.043384
|
from distutils.core import setup
setup(
    name = "nip",
version = "0.1a1",
py_modules = [
"nip",
],
scripts = [
"bin/nip",
],
author = "Brian Rosner",
author_email = "brosner@gmail.com",
description = "nip is environment isolation and installation for Node.js",
long_description = open("README.rst").read(),
license = "MIT",
classifiers = [
"Development Status :: 2 - Pre-
|
Alpha",
],
)
|
rwl/PyCIM
|
CIM15/IEC61970/Informative/InfGMLSupport/GmlFill.py
|
Python
|
mit
| 6,721
| 0.002381
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class GmlFill(IdentifiedObject):
"""Specifies how the area of the geometry will be filled.Specifies how the area of the geometry will be filled.
"""
def __init__(self, opacity=0.0, GmlColour=None, GmlMarks=None, GmlTextSymbols=None, GmlSvgParameters=None, GmlPolygonSymbols=None, *args, **kw_args):
"""Initialises a new 'GmlFill' instance.
@param opacity: Specifies the level of translucency to use when rendering the Fill. The value is encoded as a floating-point value between 0.0 and 1.0 with 0.0 representing completely transparent and 1.0 representing completely opaque, with a linear scale of translucency for intermediate values. The default value is 1.0
@param GmlColour:
@param GmlMarks:
@param GmlTextSymbols:
@param GmlSvgParameters:
@param GmlPolygonSymbols:
"""
#: Specifies the level of translucency to use when rendering the Fill. The value is encoded as a floating-point value between 0.0 and 1.0 with 0.0 representing completely transparent and 1.0 representing completely opaque, with a linear scale of translucency for intermediate values. The default value is 1.0
self.opacity = opacity
self._GmlColour = None
self.GmlColour = GmlColour
self._GmlMarks = []
self.GmlMarks = [] if GmlMarks is None else GmlMarks
self._GmlTextSymbols = []
self.GmlTextSymbols = [] if GmlTextSymbols is None else GmlTextSymbols
self._GmlSvgParameters = []
self.GmlSvgParameters = [] if GmlSvgParameters is None else GmlSvgParameters
self._GmlPolygonSymbols = []
self.GmlPolygonSymbols = [] if GmlPolygonSymbols is None else GmlPolygonSymbols
super(GmlFill, self).__init__(*args, **kw_args)
_attrs = ["opacity"]
_attr_types = {"opacity": float}
_defaults = {"opacity": 0.0}
_enums = {}
_refs = ["GmlColour", "GmlMarks", "GmlTextSymbols", "GmlSvgParameters", "GmlPolygonSymbols"]
_many_refs = ["GmlMarks", "GmlTextSymbols", "GmlSvgParameters", "GmlPolygonSymbols"]
def getGmlColour(self):
return self._GmlColour
def setGmlColour(self, value):
if self._GmlColour is not None:
filtered = [x for x in self.GmlColour.GmlFills if x != self]
self._GmlColour._GmlFills = filtered
self._GmlColour = value
if self._GmlColour is not None:
if self not in self._GmlColour._GmlFills:
self._GmlColour._GmlFills.append(self)
GmlColour = property(getGmlColour, setGmlColour)
def getGmlMarks(self):
        return self._GmlMarks
def setGmlMarks(self, value):
for p in self._GmlMarks:
            filtered = [q for q in p.GmlFIlls if q != self]
self._GmlMarks._GmlFIlls = filtered
for r in value:
if self not in r._GmlFIlls:
r._GmlFIlls.append(self)
self._GmlMarks = value
GmlMarks = property(getGmlMarks, setGmlMarks)
def addGmlMarks(self, *GmlMarks):
for obj in GmlMarks:
if self not in obj._GmlFIlls:
obj._GmlFIlls.append(self)
self._GmlMarks.append(obj)
def removeGmlMarks(self, *GmlMarks):
for obj in GmlMarks:
if self in obj._GmlFIlls:
obj._GmlFIlls.remove(self)
self._GmlMarks.remove(obj)
def getGmlTextSymbols(self):
return self._GmlTextSymbols
def setGmlTextSymbols(self, value):
for x in self._GmlTextSymbols:
x.GmlFill = None
for y in value:
y._GmlFill = self
self._GmlTextSymbols = value
GmlTextSymbols = property(getGmlTextSymbols, setGmlTextSymbols)
def addGmlTextSymbols(self, *GmlTextSymbols):
for obj in GmlTextSymbols:
obj.GmlFill = self
def removeGmlTextSymbols(self, *GmlTextSymbols):
for obj in GmlTextSymbols:
obj.GmlFill = None
def getGmlSvgParameters(self):
return self._GmlSvgParameters
def setGmlSvgParameters(self, value):
for p in self._GmlSvgParameters:
filtered = [q for q in p.GmlFills if q != self]
self._GmlSvgParameters._GmlFills = filtered
for r in value:
if self not in r._GmlFills:
r._GmlFills.append(self)
self._GmlSvgParameters = value
GmlSvgParameters = property(getGmlSvgParameters, setGmlSvgParameters)
def addGmlSvgParameters(self, *GmlSvgParameters):
for obj in GmlSvgParameters:
if self not in obj._GmlFills:
obj._GmlFills.append(self)
self._GmlSvgParameters.append(obj)
def removeGmlSvgParameters(self, *GmlSvgParameters):
for obj in GmlSvgParameters:
if self in obj._GmlFills:
obj._GmlFills.remove(self)
self._GmlSvgParameters.remove(obj)
def getGmlPolygonSymbols(self):
return self._GmlPolygonSymbols
def setGmlPolygonSymbols(self, value):
for x in self._GmlPolygonSymbols:
x.GmlFill = None
for y in value:
y._GmlFill = self
self._GmlPolygonSymbols = value
GmlPolygonSymbols = property(getGmlPolygonSymbols, setGmlPolygonSymbols)
def addGmlPolygonSymbols(self, *GmlPolygonSymbols):
for obj in GmlPolygonSymbols:
obj.GmlFill = self
def removeGmlPolygonSymbols(self, *GmlPolygonSymbols):
for obj in GmlPolygonSymbols:
obj.GmlFill = None
|
wbolster/whip
|
whip/db.py
|
Python
|
bsd-3-clause
| 12,080
| 0
|
"""
Whip database storage module.
All IP ranges with associated information are stored in a LevelDB
database.
Some remarks about the construction of database records:
* The most recent version of a record is stored in full, and older dicts
are stored in a history structure of (reverse) diffs. This saves a lot
of storage space and positively affects performance, since the
benefits of storing/retrieving less data outweigh the cost of
reconstructing historical records for historical lookups.
* Before creating diffs, the data will be deduplicated by
'squashing' unchanged records and only storing new versions when they
were first seen. Since each dict will have a different timestamp, the
datetime will be ignored while deduplicating.
The key/value layout is as follows:
* The end IP is used as the key. This allows for fast lookups since it
requires only a single seek and a single record.
* The begin IP and the actual information is stored in the value,
packed using Msgpack like this:
* IP begin address
* JSON encoded data for the latest version
* Latest datetime
* Msgpack encoded diffs for older versions (yes, Msgpack in Msgpack,
since this nested structure is not always needed and lets us decode
it explicitly)
Note the odd mix of JSON and Msgpack encoding. Encoding/decoding speeds
are comparable (when using ujson), but Msgpack uses less space and hence
makes LevelDB faster, so that is the preferred format. The one exception
is the 'latest version' data, which is encoded using JSON, since that
saves a complete decode/encode (from Msgpack to JSON) roundtrip when
executing queries asking for the most recent version.
"""
import functools
import logging
import operator
import msgpack
from msgpack import loads as msgpack_loads
import plyvel
from .json import dumps as json_dumps, loads as json_loads
from .util import (
dict_diff_incremental,
dict_patch_incremental,
ip_packed_to_int,
ip_int_to_packed,
ip_int_to_str,
ip_str_to_packed,
merge_ranges,
PeriodicCallback,
unique_justseen,
)
DATETIME_GETTER = operator.itemgetter('datetime')
logger = logging.getLogger(__name__)
msgpack_dumps = msgpack.Packer().pack # faster than calling .packb()
msgpack_dumps_utf8 = msgpack.Packer(encoding='UTF-8').pack # idem
msgpack_loads_utf8 = functools.partial(
msgpack_loads,
use_list=False,
encoding='UTF-8')
def debug_format_dict(d): # pragma: no cover
"""Formatting function for debugging purposes"""
return ', '.join('%s=%s' % (k[:1], v or '') for k, v in sorted(d.items()))
def make_squash_key(d):
"""Dict squashing key function"""
# Compare all data except for the 'datetime' key
d = d.copy()
d.pop('datetime')
return d
def build_key_value(begin_ip_int, end_ip_int, latest_json, latest_datetime,
history_msgpack):
"""Build the actual key and value byte strings"""
key = ip_int_to_packed(end_ip_int)
value = msgpack_dumps((
ip_int_to_packed(begin_ip_int),
latest_json,
latest_datetime.encode('ascii'),
history_msgpack,
))
return key, value
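# Illustrative sketch (not part of the original module): unpacking a value
# produced by build_key_value() back into the components documented in the
# module docstring. The helper name is hypothetical and only for debugging.
def _unpack_value_for_debugging(value):
    begin_ip_packed, latest_json, latest_datetime, history_msgpack = \
        msgpack_loads(value, use_list=False)
    return {
        'begin_ip_packed': begin_ip_packed,
        'latest_json': latest_json,
        'latest_datetime': latest_datetime.decode('ascii'),
        'history_msgpack': history_msgpack,
    }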
def build_history(dicts):
"""Build a history structure"""
dicts.sort(key=DATETIME_GETTER)
unique_dicts = list(unique_justseen(dicts, key=make_squash_key))
unique_dicts.reverse()
latest, diffs_generator = dict_diff_incremental(unique_dicts)
diffs = list(diffs_generator)
return latest, diffs
def build_record(begin_ip_int, end_ip_int, dicts, existing=None):
"""Create database records for an iterable of merged dicts."""
assert dicts or existing, "no data at all to pack?"
if not dicts:
# No new dicts; avoid expensive re-serialisation. Note that
# blindly reusing the existing key/value pair from the database
# (by not updating it at all) is not correct: the begin and end
# of the range may have changed.
return build_key_value(
begin_ip_int,
end_ip_int,
existing.latest_json,
existing.latest_datetime,
existing.history_msgpack)
if not existing:
# Only new dicts, no existing data
latest, diffs = build_history(dicts)
return build_key_value(
begin_ip_int,
end_ip_int,
json_dumps(latest, ensure_ascii=False).encode('UTF-8'),
latest['datetime'],
msgpack_dumps_utf8(diffs))
# At this point we know there is both new data, and an existing
# record. These need to be merged..
if min(map(DATETIME_GETTER, dicts)) > existing.latest_datetime:
# All new data is newer than the existing record. Take
# a shortcut by simply prepending the new data to the history
# chain. This approach prevents quite a lot of overhead from
# build_history().
dicts.append(json_loads(existing.latest_json))
latest, diffs = build_history(dicts)
diffs.extend(msgpack_loads_utf8(existing.history_msgpack))
else:
# Perform a full merge
dicts.extend(existing.iter_versions())
latest, diffs = build_history(dicts)
return build_key_value(
begin_ip_int,
end_ip_int,
json_dumps(latest, ensure_ascii=False).encode('UTF-8'),
latest['datetime'],
msgpack_dumps_utf8(diffs))
class ExistingRecord(object):
"""Helper class for working with records retrieved from the database."""
def __init__(self, key, value):
# Performance note: except for the initial value unpacking, all
# expensive deserialization operations are deferred until
# requested.
unpacked = msgpack_loads(value, use_list=False)
# IP addresses
self.begin_ip_packed = unpacked[0]
self.end_ip_packed = key
# Actual data, without any expensive decoding applied
self.latest_json = unpacked[1]
self.latest_datetime = unpacked[2].decode('ascii')
self.history_msgpack = unpacked[3]
def iter_versions(self, inplace=False):
"""Lazily reconstruct all versions in this record."""
# Latest version
latest = json_loads(self.latest_json)
yield latest
# Reconstruct history by applying patches incrementally
yield from dict_patch_incremental(
latest,
|
            msgpack_loads_utf8(self.history_msgpack),
inplace=inplace)
class Database(object):
"""
Database access class for loading and looking up data.
"""
def __init__(self, database_dir, create_if_missing=False):
logger.debug("Opening database %s", database_dir)
self.db = plyvel.DB(
database_dir,
create_if_missing=create_if_missing,
write_buffer_size=16 * 1024 * 1024,
max_open_files=512,
lru_cache_size=128 * 1024 * 1024)
self.iter = None
def iter_records(self):
"""
Iterate a database and yield records that can be merged with new data.
This generator is suitable for consumption by merge_ranges().
"""
for key, value in self.db.iterator(fill_cache=False):
record = ExistingRecord(key, value)
yield (
ip_packed_to_int(record.begin_ip_packed),
ip_packed_to_int(record.end_ip_packed),
record,
)
def load(self, *iterables):
"""Load data from importer iterables"""
if not iterables:
logger.warning("No new input files; nothing to load")
return
# Combine new data with current database contents, and merge all
# iterables to produce unique, non-overlapping ranges.
iterables = list(iterables)
iterables.append(self.iter_records())
merged = merge_ranges(*iterables)
# Progress/status tracking
n_processed = n_updated = 0
begin_ip_int = 0
reporter = PeriodicCallback(lambda: logger.info(
"%d ranges processed (%d updated, %d new); current position %s",
n_processed, n_updated, n_processed - n_updated,
ip_int_to_str
|
jamslevy/gsoc
|
app/soc/models/ranker_root.py
|
Python
|
apache-2.0
| 1,077
| 0.0065
|
#!/usr/bin/python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the RankerRoot model
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from google.appengine.ext import db
import soc.models.linkable
class RankerRoot(soc.models.linkable.Linkable):
"""Links the Root of a RankList tree to an owner and also
gives it an unique ID.
"""
#: A required reference property to the root of the RankList tree
root = db.ReferenceProperty(required=True,
collection_name='roots')
|
mattsch/Sickbeard
|
cherrypy/test/test_auth_basic.py
|
Python
|
gpl-3.0
| 2,949
| 0.004747
|
# This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
from cherrypy.test import test
test.prefer_parent_path()
try:
from hashlib import md5
except ImportError:
# Python 2.4 and earlier
from md5 import new as md5
import cherrypy
from cherrypy.lib import auth_basic
def setup_server():
class Root:
def index(self):
return "This is public."
index.exposed = True
class BasicProtected:
def index(self):
return "Hello %s, you've been authorized." % cherrypy.request.login
index.exposed = True
class BasicProtected2:
def index(self):
return "Hello %s, you've been authorized." % cherrypy.request.login
index.exposed = True
userpassdict = {'xuser' : 'xpassword'}
userhashdict = {'xuser' : md5('xpassword').hexdigest()}
def checkpasshash(realm, user, password):
p = userhashdict.get(user)
return p and p == md5(password).hexdigest() or False
conf = {'/basic': {'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'wonderland',
'tools.auth_basic.checkpassword': auth_basic.checkpassword_dict(userpassdict)},
'/basic2': {'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'wonderland',
'tools.auth_basic.checkpassword': checkpasshash},
}
root = Root()
root.basic = BasicProtected()
root.basic2 = BasicProtected2()
cherrypy.tree.mount(root, config=conf)
from cherrypy.test import helper
class BasicAuthTest(helper.CPWebCase):
def testPublic(self):
self.getPage("/")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html;charset=utf-8')
|
self.assertBody('This is public.')
def testBasic(self):
self.getPage("/basic/")
self.assertStatus(401)
self.assertHeader('WWW-Authenticate', 'Basic realm="wonderland"')
self.getPage('/basic/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')])
        self.assertStatus(401)
self.getPage('/basic/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')])
self.assertStatus('200 OK')
self.assertBody("Hello xuser, you've been authorized.")
def testBasic2(self):
self.getPage("/basic2/")
self.assertStatus(401)
self.assertHeader('WWW-Authenticate', 'Basic realm="wonderland"')
self.getPage('/basic2/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')])
self.assertStatus(401)
self.getPage('/basic2/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')])
self.assertStatus('200 OK')
self.assertBody("Hello xuser, you've been authorized.")
if __name__ == "__main__":
helper.testmain()
|
cjaymes/pyscap
|
src/scap/model/oval_5/defs/windows/EntityStatePeSubsystemType.py
|
Python
|
gpl-3.0
| 1,004
| 0.000996
|
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP.  If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5 import PE_SUBSYSTEM_ENUMERATION
from scap.model.oval_5.defs.EntityStateType import EntityStateType
logger = logging.getLogger(__name__)
class EntityStatePeSubsystemType(EntityStateType):
MODEL_MAP = {
}
def get_value_enum(self):
return PE_SUBSYSTEM_ENUMERATION
|
nick6918/ThingCloud
|
ThingCloud/ThingCloud/urls.py
|
Python
|
mit
| 2,763
| 0.001448
|
"""ThingCloud URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from MainSystem.views import index
from AccountSystem.views import loginByPhone, register, sendCode, address, addressList, deleteAddress, changePassword, changeNickname, updateAvatar
from CloudList.views import addNewItem, getItemList, modifyNotes
from OrderSystem.views import generateOrder, modifyOrder, confirmOrder, getOrderList, cancel, complain, update, orderCallback, getOrder, delete, vipCallback
from AssistSystem.views import feedback, checkDiscount, activityList, versionInfo, communityList,getFeeList, joinUs
from VIPSystem.views import vip, vipOrder, vipConfirm
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r"^index$", index),
url(r"^account/sendcode$", sendCode),
url(r"^account/register$", register),
url(r"^account/login$", loginByPhone),
url(r"^account/avatar$", updateAvatar),
url(r"^account/address$", address),
url(r"^account/addressdelete$", deleteAddress),
url(r"^account/addresslist$", addressList),
url(r"^account/password$", changePassword),
url(r"^account/nickname$", changeNickname),
url(r"^cloudlist/additem$", addNewItem),
url(r"^cloudlist/getlist$", getItemList),
url(r"^cloudlist/modifynotes$", modifyNotes),
url(r"^order/generate$", generateOrder),
url(r"^order/address$", modifyOrder),
url(r"^order/confirm$", confirmOrder),
url(r"^order/order$", getOrder),
url(r"^order/orderlist$", getOrderList),
url(r"^order/cancel$", cancel),
url(r"^order/complain$", complain),
url(r"^order/delete$", delete),
url(r"^order/update$", update),
url(r"^order/callback$", orderCallback),
url(r"^assist/feedback$", feedback),
url(r"^assist/discount$", checkDiscount),
url(r"^assist/activitylist", activityList),
url(r"^assist/version", versionInfo),
url(r"^assist/communitylist", communityList),
url(r"^assist/feelist", getFeeList),
url(r"^assist/joinus", joinUs),
url(r"^vip/vip$", vip),
url(r"^vip/order$", vipOrder),
url(r"^vip/confirm$", vipConfirm),
url(r"^vip/callback$", vipCallback),
]
|
chrisforrette/django-social-content
|
social_content/conf.py
|
Python
|
mit
| 814
| 0
|
from django.conf import settings
DEFAULTS = {
'SOCIAL_CONTENT_TYPES': (
'Facebook',
'Twitter',
|
'Instagram',
),
'SOCIAL_CONTENT_MAX_POSTS': None,
# Facebook
'FACEBOOK_APP_ID': None,
'FACEBOOK_APP_SECRET': None,
# Twitter
'TWITTER_CONSUMER_KEY': None,
'TWITTER_CONSUMER_SECRET': None,
'TWITTER_ACCESS_TOKEN_KEY': None,
'TWITTER_ACCESS_TOKEN_SECRET': None,
# Instagram
'INSTAGRAM_CLIENT_ID': None,
    'INSTAGRAM_CLIENT_SECRET': None,
'INSTAGRAM_ACCESS_TOKEN': None,
# YouTube
'YOUTUBE_APP_API_KEY': None,
# Tumblr
'TUMBLR_API_CONSUMER_KEY': None
}
for setting in DEFAULTS.keys():
try:
getattr(settings, setting)
except AttributeError:
setattr(settings, setting, DEFAULTS[setting])
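# Illustrative sketch (not part of the original file): once this module has
# been imported, the defaults above are readable through django.conf.settings
# unless the project defines its own values. The helper name is hypothetical.
def _social_setting(name):
    """Return the effective value for one of the settings listed above."""
    return getattr(settings, name, DEFAULTS.get(name))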
|
vlegoff/tsunami
|
src/secondaires/magie/editeurs/spedit/__init__.py
|
Python
|
bsd-3-clause
| 9,326
| 0.002161
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant l'éditeur 'spedit'.
Si des redéfinitions de contexte-éditeur standard doivent être faites, elles
seront placées dans ce package.
Note importante : ce package contient la définition d'un éditeur, mais
celui-ci peut très bien être étendu par d'autres modules. Auquel cas,
les extensions n'apparaîtront pas ici.
"""
from primaires.interpreteur.editeur.choix import Choix
from primaires.interpreteur.editeur.description import Description
from primaires.interpreteur.editeur.entier import Entier
from primaires.interpreteur.editeur.flag import Flag
from primaires.interpreteur.editeur.presentation import Presentation
from primaires.interpreteur.editeur.selection import Selection
from primaires.interpreteur.editeur.uniligne import Uniligne
from primaires.scripting.editeurs.edt_script import EdtScript
from secondaires.magie.constantes import ELEMENTS
from .edt_difficulte import EdtDifficulte
from .supprimer import NSupprimer
class EdtSpedit(Presentation):
"""Classe définissant l'éditeur de sort 'spedit'.
"""
nom = "spedit"
def __init__(self, personnage, sort):
"""Constructeur de l'éditeur"""
if personnage:
instance_connexion = personnage.instance_connexion
else:
instance_connexion = None
Presentation.__init__(self, instance_connexion, sort)
if personnage and sort:
self.construire(sort)
def __getnewargs__(self):
return (None, None)
def construire(self, sort):
"""Construction de l'éditeur"""
# Nom
nom = self.ajouter_choix("nom", "n", Uniligne, sort, "nom")
nom.parent = self
nom.prompt = "Nom du sort (sans article) : "
nom.apercu = "{objet.nom}"
nom.aide_courte = \
"Entrez le |ent|nom|ff| du sort ou |cmd|/|ff| pour revenir " \
"à la fenêtre parente.\n\nNom actuel : |bc|{objet.nom}|ff|"
# Description
description = self.ajouter_choix("description", "d", Description, \
sort)
description.parent = self
description.apercu = "{objet.description.paragraphes_indentes}"
description.aide_courte = \
"| |tit|" + "Description du sort {}".format(sort.cle).ljust(76) + \
"|ff||\n" + self.opts.separateur
# Points de tribut
tribut = self.ajouter_choix("points de tribut", "tr", Entier, sort,
"points_trib
|
ut", 1)
tribut.parent = self
tribut.prompt = "Points de tribut nécessaire pour apprendre le sort : "
tribut.apercu = "{objet.points_tribut}"
tribut.aide_courte = \
"Entrez le |ent|nombre de points de tribut|ff| nécessaires "\
"pour apprendre le sort\nou |cmd|/|ff| pour revenir à la " \
"fenêtre parente.\n\nPoints de tribut actuels : " \
"|bc|{objet.points_tribut}|ff|"
# Éléments
|
elements = self.ajouter_choix("eléments", "e", Selection, sort,
"elements", ELEMENTS)
elements.parent = self
elements.apercu = "{objet.str_elements}"
elements.aide_courte = \
"Entrez un |ent|élément|ff| pour l'ajouter " \
"ou le retirer\nou |cmd|/|ff| " \
"pour revenir à la fenêtre parente.\n\n" \
"Éléments existants : |cmd|" + "|ff|, |cmd|".join(
ELEMENTS) + "\n" \
"Éléments actuels : |bc|{objet.str_elements}|ff|"
# Type de sort
types = ["destruction", "alteration", "invocation", "illusion"]
type = self.ajouter_choix("type de sort", "s", Choix, sort,
"type", types)
type.parent = self
type.prompt = "Type de sort : "
type.apercu = "{objet.type}"
type.aide_courte = \
"Entrez le |ent|type|ff| du sort ou |cmd|/|ff| " \
"pour revenir à la fenêtre parente.\nTypes disponibles : |cmd|" \
"{}|ff|.\n\nType actuel : |bc|{{objet.type}}|ff|".format(
"|ff|, |cmd|".join(types))
# Cible
types = ["aucune", "personnage", "objet", "salle"]
cible = self.ajouter_choix("type de cible", "c", Choix, sort,
"type_cible", types)
cible.parent = self
cible.prompt = "Type de cible : "
cible.apercu = "{objet.type_cible}"
cible.aide_courte = \
"Entrez le |ent|type de cible|ff| du sort ou |cmd|/|ff| " \
"pour revenir à la fenêtre parente.\nTypes disponibles : |cmd|" \
"{}|ff|.\n\nType actuel : |bc|{{objet.type_cible}}|ff|".format(
"|ff|, |cmd|".join(types))
# Stats
stats = self.ajouter_choix("stats", "st", Selection, sort,
"stats", ("agilite", "intelligence", "sensibilite"))
stats.parent = self
stats.apercu = "{objet.str_stats}"
stats.aide_courte = \
"Entrez une |ent|stat|ff| pour l'ajouter " \
"ou la retirer\nou |cmd|/|ff| " \
"pour revenir à la fenêtre parente.\n\n" \
"stats actuelles : |bc|{objet.str_stats}|ff|"
# Difficulté
difficulte = self.ajouter_choix("difficulté", "i", Entier, sort,
"difficulte", 0, 100)
difficulte.parent = self
difficulte.prompt = "Difficulté d'apprentissage : "
difficulte.apercu = "{objet.difficulte}"
difficulte.aide_courte = \
"Paramétrez la |ent|difficulté|ff| d'apprentissage du sort " \
"entre |cmd|0|ff| et |cmd|100|ff| ou entrez\n|cmd|/|ff| pour " \
"revenir à la fenêtre parente. |cmd|100|ff| signifie que le sort " \
"ne peut pas\nêtre appris par la pratique.\n\n" \
"Difficulté actuelle : |bc|{objet.difficulte}|ff|"
# Coût
cout = self.ajouter_choix("coût", "o", Entier, sort, "cout")
cout.parent = self
cout.prompt = "Coùt en mana : "
cout.apercu = "{objet.cout}"
cout.aide_courte = \
"Entrez la |ent|quantité|ff| d'énergie magique nécessaire pour " \
"lancer ce sort ou |cmd|/|ff| pour\nrevenir à la fenêtre " \
"parente.\n\n" \
"Coût : |bc|{objet.cout}|ff|"
# Durée
duree = self.ajouter_choix("durée de concentration", "u", Entier, sort,
"duree", 1)
duree.parent = self
duree.prompt = "Durée de concentration : "
duree.apercu = "{objet.duree}"
duree.aide_courte = \
"Entrez la |ent|durée|ff| de concentration du sort, en " \
"secondes, ou |cmd|/|ff| pour revenir à\nla fenêtre parente
|
AdrianGaudebert/socorro-crashstats
|
crashstats/crashstats/tests/test_forms.py
|
Python
|
mpl-2.0
| 12,838
| 0
|
import datetime
from nose.tools import eq_, ok_
from django.conf import settings
from django.test import TestCase
from crashstats.crashstats import forms
class TestForms(TestCase):
def setUp(self):
# Mocking models needed for form validation
self.current_products = {
'Firefox': [],
'Thunderbird': [],
'SeaMonkey': []
}
self.current_versions = [
{
'product': 'Firefox',
'version': '20.0',
"release": "Beta"
},
{
'product': 'Firefox',
'version': '21.0a1',
"release": "Nightly"
},
{
'product': 'Thunderbird',
'version': '20.0',
"
|
release": "Beta",
},
{
'product': 'SeaMonkey',
'version': '9.5',
"release": "Beta"
}
]
self.current_platforms = [
{
'code': 'windows',
'name': 'Windows'
|
},
{
'code': 'mac',
'name': 'Mac OS X'
},
{
'code': 'linux',
'name': 'Linux'
}
]
def test_report_list(self):
def get_new_form(data):
return forms.ReportListForm(
self.current_products,
self.current_versions,
self.current_platforms,
data
)
form = get_new_form({'range_value': '-1'})
ok_(not form.is_valid()) # missing signature and invalid range
form = get_new_form({
'signature': 'sig',
'range_value': '-1'
})
ok_(not form.is_valid()) # invalid range_value
form = get_new_form({
'signature': 'sig',
'product': ['SomeUnkownProduct']
})
ok_(not form.is_valid()) # invalid product
form = get_new_form({
'signature': 'sig',
'version': 'invalidVersion'
})
ok_(not form.is_valid()) # invalid version
form = get_new_form({
'signature': 'sig',
'version': ['Another:Invalid']
})
ok_(not form.is_valid()) # invalid version
form = get_new_form({
'signature': 'sig',
'platform': ['winux']
})
ok_(not form.is_valid()) # invalid platform
form = get_new_form({
'signature': 'sig',
'plugin_query_type': 'invalid'
})
ok_(not form.is_valid()) # invalid query type
# Test all valid data
form = get_new_form({
'signature': 'sig',
'product': ['Firefox', 'SeaMonkey', 'Thunderbird'],
'version': ['Firefox:20.0'],
'platform': ['linux', 'mac'],
'date': '01/02/2012 12:23:34',
'range_unit': 'weeks',
'range_value': 12,
'reason': 'some reason',
'build_id': '20200101344556',
'process_type': 'any',
'hang_type': 'any',
'plugin_field': 'name',
'plugin_query_type': 'is_exactly',
'plugin_query': 'plugin name'
})
ok_(form.is_valid())
# Test expected types
ok_(isinstance(form.cleaned_data['date'], datetime.datetime))
ok_(isinstance(form.cleaned_data['range_value'], int))
ok_(isinstance(form.cleaned_data['product'], list))
ok_(isinstance(form.cleaned_data['version'], list))
ok_(isinstance(form.cleaned_data['platform'], list))
# Test default values
form = get_new_form({'signature': 'sig',
'range_unit': 'weeks',
'hang_type': 'any',
'process_type': 'any',
'plugin_field': 'filename'})
ok_(form.is_valid())
eq_(form.cleaned_data['product'], [])
eq_(form.cleaned_data['version'], [])
eq_(form.cleaned_data['platform'], [])
eq_(form.cleaned_data['range_unit'], 'weeks')
eq_(form.cleaned_data['process_type'], 'any')
eq_(form.cleaned_data['hang_type'], 'any')
eq_(form.cleaned_data['plugin_field'], 'filename')
def test_report_list_date(self):
def get_new_form(data):
return forms.ReportListForm(
self.current_products,
self.current_versions,
self.current_platforms,
data
)
# known formats
datetime_ = datetime.datetime(2012, 1, 2, 13, 45, 55)
date = datetime.datetime(2012, 1, 2, 0, 0)
data = {'signature': 'sig'}
fmt = '%Y-%m-%d'
form = get_new_form(dict(data, date=datetime_.strftime(fmt)))
ok_(form.is_valid(), form.errors)
eq_(form.cleaned_data['date'], date)
fmt = '%m/%d/%Y' # US format
form = get_new_form(dict(data, date=datetime_.strftime(fmt)))
ok_(form.is_valid(), form.errors)
eq_(form.cleaned_data['date'], date)
fmt = '%m/%d/%Y %H:%M:%S' # US format
form = get_new_form(dict(data, date=datetime_.strftime(fmt)))
ok_(form.is_valid(), form.errors)
eq_(form.cleaned_data['date'], datetime_)
def test_signature_summary(self):
def get_new_form(data):
return forms.SignatureSummaryForm(
self.current_products,
self.current_versions,
data,
)
form = get_new_form({'range_value': '-1'})
ok_(not form.is_valid()) # missing signature and invalid range
form = get_new_form({
'signature': 'sig',
'range_value': '-1',
'versions': 'Firefox:19.0',
})
ok_(not form.is_valid()) # invalid range_value
long_signature = 'x' * (settings.SIGNATURE_MAX_LENGTH + 1)
form = get_new_form({
'signature': long_signature,
'range_unit': 'days',
'range_value': 12,
'versions': 'Firefox:19.0',
})
ok_(not form.is_valid()) # signature too long
# Test all valid data
form = get_new_form({
'signature': 'sig',
'range_unit': 'days',
'range_value': 12,
'versions': 'Firefox:19.0',
})
ok_(form.is_valid())
# Test expected types
ok_(isinstance(form.cleaned_data['range_value'], int))
# Test default values
form = get_new_form({'signature': 'sig'})
ok_(form.is_valid())
def test_crashtrends_json(self):
now = datetime.datetime.utcnow()
week_ago = now - datetime.timedelta(days=7)
def get_new_form(data):
return forms.CrashTrendsForm(
self.current_versions,
data
)
form = get_new_form({
'product': '',
'version': '19.0',
'start_date': now,
'end_date': week_ago
})
# All fields are required
# Testing empty product
ok_(not form.is_valid())
form = get_new_form({
'product': 'Firefox',
'version': '',
'start_date': now,
'end_date': week_ago
})
# All fields are required
# Testing empty version
ok_(not form.is_valid())
form = get_new_form({
'product': 'Firefox',
'version': '21.0',
'start_date': '',
'end_date': '2012-11-02'
})
# All fields are required
# Testing empty start_date
ok_(not form.is_valid())
form = get_new_form({
'product': 'Firefox',
'version': '19.0',
'start_date': now,
'end_date': week_ago
})
# Testing invalid product version
ok_(not form.is_valid())
form = get_new_form({
'product': 'Gorilla',
'version': '19.0',
'start_date':
|
ondoheer/GOT-Platform
|
app/houses/views.py
|
Python
|
gpl-2.0
| 723
| 0
|
from flask import Blueprint, request, render_template, jsonify
from housesGenerator import House
from holdings import Holdings
houses = Blueprint('houses', __name__, url_prefix='/houses')
@houses.route('/')
def index():
return render_template('houses.html')
@houses.route('/houseGenerator', methods=['GET', 'POST'])
def houseGenerator():
realm = request.args.get('realm')
size = request.args.get('size')
    foundation = request.args.get('foundation')
name = request.args.get('name')
house = House.startingResources(realm, size, foundation, name)
from holdings import holdingsData
generatedHouse = Holdings(holdingsData).generateAllHoldings(house, realm)
return jsonify(generatedHouse)
|
magosil86/ruffus
|
ruffus/drmaa_wrapper.py
|
Python
|
mit
| 18,668
| 0.013285
|
#!/usr/bin/env python
from __future__ import print_function
################################################################################
#
#
# drmaa_wrapper.py
#
# Copyright (C) 2013 Leo Goodstadt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Portions of code from adapted from:
#
# http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
# Courtesy of J.F. Sebastian
# Use is licensed under the "Creative Commons Attribution Share Alike license"
# See http://stackexchange.com/legal
#
#################################################################################
"""
********************************************
:mod:`ruffus.cmdline` -- Overview
********************************************
.. moduleauthor:: Leo Goodstadt <ruffus@llew.org.uk>
#
# Using drmaa
#
from ruffus import *
import drmaa_wrapper
"""
import sys, os
import stat
#
# tempfile for drmaa scripts
#
import tempfile
import datetime
import subprocess
import time
import sys
import subprocess
import threading
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
ON_POSIX = 'posix' in sys.builtin_module_names
if sys.hexversion >= 0x03000000:
# everything is unicode in python3
path_str_type = str
else:
path_str_type = basestring
#_________________________________________________________________________________________
# error_drmaa_job
#_________________________________________________________________________________________
class error_drmaa_job(Exception):
"""
All exceptions throw in this module
"""
def __init__(self, *errmsg):
Exception.__init__(self, *errmsg)
#_________________________________________________________________________________________
# read_stdout_stderr_from_files
#_________________________________________________________________________________________
def read_stdout_stderr_from_files( stdout_path, stderr_path, logger = None, cmd_str = "", tries=5):
"""
Reads the contents of two specified paths and returns the strings
Thanks to paranoia approach contributed by Andreas Heger:
Retry just in case file system hasn't committed.
Logs error if files are missing: No big deal?
Cleans up files afterwards
Returns tuple of stdout and stderr.
"""
#
# delay up to 10 seconds until files are ready
#
for xxx in range(tries):
if os.path.exists( stdout_path ) and os.path.exists( stderr_path ):
break
time.sleep(2)
try:
stdout = open( stdout_path, "r" ).readlines()
except IOError:
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
msg = str(exceptionValue)
if logger:
logger.warning( "could not open stdout: %s for \n%s" % (msg, cmd_str))
stdout = []
try:
stderr = open( stderr_path, "r" ).readlines()
except IOError:
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
msg = str(exceptionValue)
if logger:
logger.warning( "could not open stderr: %s for \n%s" % (msg, cmd_str))
stderr = []
#
# cleanup ignoring errors
#
try:
os.unlink( stdout_path )
os.unlink( stderr_path )
except OSError:
pass
return stdout, stderr
#_________________________________________________________________________________________
# setup_drmaa_job
#_________________________________________________________________________________________
def setup_drmaa_job( drmaa_session, job_name, job_environment, working_directory, job_other_options):
job_template = drmaa_session.createJobTemplate()
if not working_directory:
job_template.workingDirectory = os.getcwd()
else:
job_template.workingDirectory = working_directory
if job_environment:
# dictionary e.g. { 'BASH_ENV' : '~/.bashrc' }
job_template.jobEnvironment = job_environment
job_template.args = []
if job_name:
job_template.jobName = job_name
else:
# nameless jobs sometimes breaks drmaa implementations...
job_template.jobName = "ruffus_job_" + "_".join(map(str, datetime.datetime.now().timetuple()[0:6]))
#
# optional job parameters
#
job_template.nativeSpecification = job_other_options
# separate stdout and stderr
job_template.joinFiles=False
return job_template
#_________________________________________________________________________________________
# write_job_script_to_temp_file
#_________________________________________________________________________________________
def write_job_script_to_temp_file( cmd_str, job_script_directory, job_name, job_other_options, job_environment, working_directory):
'''
returns (job_script_path, stdout_path, stderr_path)
'''
import sys
|
time_stmp_str = "_".join(map(str, datetime.datetime.now().timetuple()[0:6]))
# create script directory if necessary
    # Ignore errors rather than test for existence to avoid race conditions
try:
os.makedirs(job_script_directory)
except:
pass
tmpfile = tempfile.NamedTemporaryFile(mode='w', prefix='drmaa_script_' + time_stmp_str + "__", dir = job_script_directory, delete = False)
#
# hopefully #!/bin/sh is universally portable among unix-like operating systems
#
tmpfile.write( "#!/bin/sh\n" )
#
# log parameters as suggested by Bernie Pope
#
for title, parameter in ( ("job_name", job_name, ),
("job_other_options", job_other_options,),
("job_environment", job_environment, ),
("working_directory", working_directory), ):
if parameter:
tmpfile.write( "#%s=%s\n" % (title, parameter))
tmpfile.write( cmd_str + "\n" )
tmpfile.close()
job_script_path = os.path.abspath( tmpfile.name )
stdout_path = job_script_path + ".stdout"
stderr_path = job_script_path + ".stderr"
os.chmod( job_script_path, stat.S_IRWXG | stat.S_IRWXU )
return (job_script_path, stdout_path, stderr_path)
#_________________________________________________________________________________________
# run_job_using_drmaa
#_________________________________________________________________________________________
def run_job_using_drmaa (cmd_str, job_name = None, job_other_options = "", job_script_directory = None, job_environment = None, working_directory = None, retain_job_scripts = False, logger = None, drmaa_session = None, verbose = 0):
"""
Runs specified command remotely using drmaa,
either with the specified session, or the module shared drmaa session
"""
import drmaa
#
# used specified session else module session
#
if drmaa_session is None:
raise error_drmaa_job( "Please specify a drmaa_session in run_job()")
#
# make job template
#
|
pivotal-partner-solution-architecture/pcf-gcp-python
|
pcfgcp/pcfgcp.py
|
Python
|
apache-2.0
| 5,039
| 0.004366
|
#
# Copyright (c) 2017-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
from operator import itemgetter
import base64
import tempfile
from google.oauth2.service_account import Credentials
"""Base class for accessing Google Cloud Platform services from Python apps
deployed to PCF. This class i
|
mplements the authentication part.
Here are the various service names, as defined in
https://github.com/GoogleCloudPlatform/gcp-service-broker/blob/master/brokerapi/brokers/models/service_broker.go
const StorageName = "google-storage"
const BigqueryName = "google-bigquery"
const BigtableName = "google-bigtable"
const CloudsqlName = "google-cloudsql"
const PubsubName = "google-pubsub"
const MlN
|
ame = "google-ml-apis"
"""
class PcfGcp:
def __init__(self):
self.VCAP_SERVICES = None
self.clients = {
'storage': None
, 'google-bigquery': None
, 'google-bigtable': None
, 'google-cloudsql': None
, 'google-pubsub': None
, 'language': None
, 'vision': None
}
self.projectId = None
self.bucketName = None # Storage
def className(self):
return self.__class__.__name__
def getClient(self, name):
return self.clients.get(name)
def setClient(self, name, val):
self.clients[name] = val
def get_service_instance_dict(self, serviceName): # 'google-storage', etc.
vcapStr = os.environ.get('VCAP_SERVICES')
if vcapStr is None:
raise Exception('VCAP_SERVICES not found in environment variables (this is required)')
vcap = json.loads(vcapStr)
svcs = None
try:
svcs = vcap[serviceName][0]
except:
raise Exception('No instance of ' + serviceName + ' available')
return svcs
def get_google_cloud_credentials(self, serviceName):
"""Returns oauth2 credentials of type
google.oauth2.service_account.Credentials
"""
service_info = self.get_service_instance_dict(serviceName)
pkey_data = base64.decodestring(service_info['credentials']['PrivateKeyData'])
pkey_dict = json.loads(pkey_data)
self.credentials = Credentials.from_service_account_info(pkey_dict)
# Get additional fields
self.projectId = service_info['credentials']['ProjectId']
print 'ProjectID: %s' % self.projectId
if 'bucket_name' in service_info['credentials']:
self.bucketName = service_info['credentials']['bucket_name']
# Set the environment variable for GCP (this was the only way to get Storage to work).
credFile = tempfile.gettempdir() + '/' + 'GCP_credentials.json'
with open(credFile, 'w') as out:
out.write(pkey_data)
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credFile
print 'Wrote credentials to %s' % credFile
print 'Set env GOOGLE_APPLICATION_CREDENTIALS to %s' % os.getenv('GOOGLE_APPLICATION_CREDENTIALS')
return self.credentials
"""Ref. https://cloud.google.com/natural-language/docs/sentiment-tutorial
score ranges from -1.0 to 1.0
magnitude ranges from 0.0 to Infinite (depends on length of document)
"""
def getLanguage(self):
if self.clients['language'] is None:
from google.cloud import language
self.clients['language'] = language.Client(self.get_google_cloud_credentials('google-ml-apis'))
# print 'projectId: %s' % self.projectId
return self.clients['language']
"""Ref. https://cloud.google.com/vision/docs/reference/libraries#client-libraries-install-python"""
def getVision(self):
if self.clients['vision'] is None:
from google.cloud import vision
self.clients['vision'] = vision.Client(project=self.projectId, credentials=self.get_google_cloud_credentials('google-ml-apis'))
return self.clients['vision']
def getStorage(self):
if self.clients['storage'] is None:
from google.cloud import storage
self.get_google_cloud_credentials('google-storage')
self.clients['storage'] = storage.Client(self.projectId)
return self.clients['storage']
def getBucketName(self):
return self.bucketName
def getBigQuery(self):
pass
def getBigtable(self):
pass
def getCloudSql(self):
pass
def getPubSub(self):
pass
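
# Illustrative usage sketch: a minimal caller for the class above, assuming the
# app is bound to the "google-storage" and "google-ml-apis" service instances
# named in the docstring. Wrapped in a function so the module imports cleanly.
def _example_usage():
  pg = PcfGcp()
  storage_client = pg.getStorage()          # resolves credentials from VCAP_SERVICES
  bucket = storage_client.get_bucket(pg.getBucketName())
  language_client = pg.getLanguage()        # Natural Language API client
  return bucket, language_client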
|
912/M-new
|
virtualenvironment/experimental/lib/python2.7/site-packages/django/contrib/gis/db/backends/postgis/base.py
|
Python
|
gpl-2.0
| 1,002
| 0.001996
|
from django.db.backends.creation import NO_DB_ALIAS
from django.db.backends.postgresql_psycopg2.base import DatabaseWrapper as Psycopg2DatabaseWrapper
from django.contrib.gis.db.backends.postgis.creation import PostGISCreation
from django.contrib.gis.db.backends.postgis.introspection import PostGISIntrospection
from django.contrib.gis.db.backends.postgis.operations import PostGISOperations
from django.contrib.gis.db.backends.postgis.schema import PostGISSchemaEditor
class DatabaseWrapper(Psycopg2DatabaseWrapper):
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
if kwargs.get('alias', '') != NO_DB_ALIAS:
self.creation = PostGISCreation(self)
self.ops = PostGISOperations(self)
self.introspection = PostGISIntrospection(self)
def s
|
chema_editor(self, *args, **kwargs):
"Returns a new instance of this backend's SchemaEditor"
return PostGISSchemaEditor(self, *args, **kwar
|
gs)
|
cpfair/pyScss
|
scss/calculator.py
|
Python
|
mit
| 6,935
| 0.000433
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
import logging
from warnings import warn
import six
from scss.ast import Literal
from scss.cssdefs import _expr_glob_re, _interpolate_re
from scss.errors import SassError, SassEvaluationError, SassParseError
from scss.grammar.expression import SassExpression, SassExpressionScanner
from scss.rule import Namespace
from scss.types import String
from scss.types import Value
from scss.util import dequote
log = logging.getLogger(__name__)
class Calculator(object):
"""Expression evaluator."""
ast_cache = {}
def __init__(
self, namespace=None,
ignore_parse_errors=False
|
,
undefined_variables_fatal=True,
):
if namespace is None:
self.namespace = Namespace()
else:
self.namespace = namespace
self.ignore_parse_err
|
ors = ignore_parse_errors
self.undefined_variables_fatal = undefined_variables_fatal
def _pound_substitute(self, result):
expr = result.group(1)
value = self.evaluate_expression(expr)
if value is None:
return self.apply_vars(expr)
elif value.is_null:
return ""
else:
return dequote(value.render())
def do_glob_math(self, cont):
"""Performs #{}-interpolation. The result is always treated as a fixed
syntactic unit and will not be re-evaluated.
"""
# TODO that's a lie! this should be in the parser for most cases.
if not isinstance(cont, six.string_types):
warn(FutureWarning(
"do_glob_math was passed a non-string {0!r} "
"-- this will no longer be supported in pyScss 2.0"
.format(cont)
))
cont = six.text_type(cont)
if '#{' not in cont:
return cont
cont = _expr_glob_re.sub(self._pound_substitute, cont)
return cont
def apply_vars(self, cont):
# TODO this is very complicated. it should go away once everything
# valid is actually parseable.
if isinstance(cont, six.string_types) and '$' in cont:
try:
# Optimization: the full cont is a variable in the context,
cont = self.namespace.variable(cont)
except KeyError:
# Interpolate variables:
def _av(m):
v = None
n = m.group(2)
try:
v = self.namespace.variable(n)
except KeyError:
if self.undefined_variables_fatal:
raise SyntaxError("Undefined variable: '%s'." % n)
else:
log.error("Undefined variable '%s'", n, extra={'stack': True})
return n
else:
if v:
if not isinstance(v, Value):
raise TypeError(
"Somehow got a variable {0!r} "
"with a non-Sass value: {1!r}"
.format(n, v)
)
v = v.render()
# TODO this used to test for _dequote
if m.group(1):
v = dequote(v)
else:
v = m.group(0)
return v
cont = _interpolate_re.sub(_av, cont)
else:
# Variable succeeded, so we need to render it
cont = cont.render()
# TODO this is surprising and shouldn't be here
cont = self.do_glob_math(cont)
return cont
def calculate(self, expression, divide=False):
result = self.evaluate_expression(expression, divide=divide)
if result is None:
return String.unquoted(self.apply_vars(expression))
return result
# TODO only used by magic-import...?
def interpolate(self, var):
value = self.namespace.variable(var)
if var != value and isinstance(value, six.string_types):
_vi = self.evaluate_expression(value)
if _vi is not None:
value = _vi
return value
def evaluate_expression(self, expr, divide=False):
try:
ast = self.parse_expression(expr)
except SassError as e:
if self.ignore_parse_errors:
return None
raise
try:
return ast.evaluate(self, divide=divide)
except Exception as e:
six.reraise(SassEvaluationError, SassEvaluationError(e, expression=expr), sys.exc_info()[2])
def parse_expression(self, expr, target='goal'):
if isinstance(expr, six.text_type):
# OK
pass
elif isinstance(expr, six.binary_type):
# Dubious
warn(FutureWarning(
"parse_expression was passed binary data {0!r} "
"-- this will no longer be supported in pyScss 2.0"
.format(expr)
))
# Don't guess an encoding; you reap what you sow
expr = six.text_type(expr)
else:
raise TypeError("Expected string, got %r" % (expr,))
key = (target, expr)
if key in self.ast_cache:
return self.ast_cache[key]
try:
parser = SassExpression(SassExpressionScanner(expr))
ast = getattr(parser, target)()
except SyntaxError as e:
raise SassParseError(e, expression=expr, expression_pos=parser._char_pos)
self.ast_cache[key] = ast
return ast
def parse_interpolations(self, string):
"""Parse a string for interpolations, but don't treat anything else as
Sass syntax. Returns an AST node.
"""
# Shortcut: if there are no #s in the string in the first place, it
# must not have any interpolations, right?
if '#' not in string:
return Literal(String.unquoted(string))
return self.parse_expression(string, 'goal_interpolated_literal')
def parse_vars_and_interpolations(self, string):
"""Parse a string for variables and interpolations, but don't treat
anything else as Sass syntax. Returns an AST node.
"""
# Shortcut: if there are no #s or $s in the string in the first place,
# it must not have anything of interest.
if '#' not in string and '$' not in string:
return Literal(String.unquoted(string))
return self.parse_expression(
string, 'goal_interpolated_literal_with_vars')
__all__ = ('Calculator',)
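
# Illustrative usage sketch: driving the Calculator above against a Namespace.
# Number(10, 'px') and set_variable() are assumptions drawn from the public
# scss.types / scss.namespace APIs; nothing here runs at import time.
def _example_usage():
    from scss.types import Number

    ns = Namespace()
    ns.set_variable('$width', Number(10, 'px'))
    calc = Calculator(ns)
    doubled = calc.calculate('$width * 2')             # a Number with unit 'px'
    rendered = calc.do_glob_math('margin: #{$width}')  # '#{}' interpolation
    return doubled, rendered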
|
Riizade/Magic-the-Gathering-Analysis
|
card.py
|
Python
|
mit
| 71
| 0.014085
|
class Card:
count = 0
url = ""
na
|
me = ""
s
|
ideboard = -1
|
harshilasu/LinkurApp
|
y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/resultset.py
|
Python
|
gpl-3.0
| 6,557
| 0.00122
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.s3.user import User
class ResultSet(list):
"""
The ResultSet is used to pass results back from the Amazon services
    to the client. It is a light wrapper around Python's :py:class:`list` class,
with some additional methods for parsing XML results from AWS.
Because I don't really want any dependencies on external libraries,
I'm using the standard SAX parser that comes with Python. The good news is
that it's quite fast and efficient but it makes some things rather
difficult.
You can pass in, as the marker_elem parameter, a list of tuples.
Each tuple contains a string as the first element which represents
the XML element that the resultset needs to be on the lookout for
and a Python class as the second element of the tuple. Each time the
specified element is found in the XML, a new instance of the class
will be created and popped onto the stack.
:ivar str next_token: A hash used to assist in paging through very long
result sets. In most cases, passing this value to certain methods
will give you another 'page' of results.
"""
def __init__(self, marker_elem=None):
list.__init__(self)
if isinstance(marker_elem, list):
self.markers = marker_elem
else:
self.markers = []
self.marker = None
self.key_marker = None
self.next_marker = None # avail when delimiter used
self.next_key_marker = None
self.next_upload_id_marker = None
self.next_version_id_marker = None
self.next_generation_marker= None
self.version_id_marker = None
self.is_truncated = False
self.next_token = None
self.status = True
def startElement(self, name, attrs, connection):
for t in self.markers:
if name == t[0]:
obj = t[1](connection)
self.append(obj)
return obj
if name == 'Owner':
# Makes owner available for get_service and
# perhaps other lists where not handled by
# another element.
self.owner = User()
return self.owner
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'IsTruncated':
self.is_truncated = self.to_boolean(value)
elif name == 'Marker':
self.marker = valu
|
e
elif name == 'KeyMarker':
self.key_marker = value
elif name == 'NextMarker':
self.next_marker = value
elif name == 'NextKeyMarker':
self.next_key_marker = value
elif name == 'VersionIdMarker':
self.version_id_marker = value
elif name == 'NextVersionIdMarker':
self.next_version_id_marker = value
|
elif name == 'NextGenerationMarker':
self.next_generation_marker = value
elif name == 'UploadIdMarker':
self.upload_id_marker = value
elif name == 'NextUploadIdMarker':
self.next_upload_id_marker = value
elif name == 'Bucket':
self.bucket = value
elif name == 'MaxUploads':
self.max_uploads = int(value)
elif name == 'MaxItems':
self.max_items = int(value)
elif name == 'Prefix':
self.prefix = value
elif name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'ItemName':
self.append(value)
elif name == 'NextToken':
self.next_token = value
elif name == 'nextToken':
self.next_token = value
# Code exists which expects nextToken to be available, so we
            # set it here to remain backwards-compatible.
self.nextToken = value
elif name == 'BoxUsage':
try:
connection.box_usage += float(value)
except:
pass
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
else:
setattr(self, name, value)
class BooleanResult(object):
def __init__(self, marker_elem=None):
self.status = True
self.request_id = None
self.box_usage = None
def __repr__(self):
if self.status:
return 'True'
else:
return 'False'
def __nonzero__(self):
return self.status
def startElement(self, name, attrs, connection):
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
elif name == 'RequestId':
self.request_id = value
elif name == 'requestId':
self.request_id = value
elif name == 'BoxUsage':
self.request_id = value
else:
setattr(self, name, value)
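
# Illustrative usage sketch: the marker_elem list pairs XML element names with the
# classes to instantiate when the SAX handler meets them. The ('Bucket', Bucket)
# pairing mirrors how boto's get_all_buckets uses ResultSet and is an illustration,
# not a guaranteed public contract.
def _example_usage(connection):
    from boto.s3.bucket import Bucket

    rs = ResultSet([('Bucket', Bucket)])
    # The SAX handler calls startElement/endElement while parsing the response:
    # each <Bucket> element appends a Bucket instance to rs, and paging state such
    # as rs.is_truncated / rs.next_token is filled in by endElement().
    obj = rs.startElement('Bucket', {}, connection)    # returns the new Bucket
    rs.endElement('IsTruncated', 'false', connection)
    return rs, obj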
|
zhimin711/nova
|
nova/api/openstack/compute/schemas/migrate_server.py
|
Python
|
apache-2.0
| 1,742
| 0
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.api.validation import parameter_types
host = copy.deepcopy(parameter_types.hostname)
host['type'] = ['string', 'null']
migrate_live = {
'type': 'object',
'properties': {
'os-migrateLive': {
'type': 'object',
'properties': {
'block_migration': parameter_type
|
s.boolean,
'disk_over_commit': parameter_types.boolean,
'host': host
},
'required': ['block_migration', 'disk_over_commit', 'host'],
'additionalProperties': False,
},
},
'required': ['os-migrateLive'],
'additionalProperties': False,
}
block_migration = copy.deepcopy(parameter_types.boolean)
block_migration['enum'].append('auto')
migrate_live_v2_25 = copy.deepcopy(migrate_live)
del migrate_live_v2_25['pro
|
perties']['os-migrateLive']['properties'][
'disk_over_commit']
migrate_live_v2_25['properties']['os-migrateLive']['properties'][
'block_migration'] = block_migration
migrate_live_v2_25['properties']['os-migrateLive']['required'] = (
['block_migration', 'host'])
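
# Illustrative request body: satisfies the base migrate_live schema above. Under
# migrate_live_v2_25 the disk_over_commit key is dropped and block_migration may
# also be the string 'auto'; only block_migration and host stay required.
_example_migrate_live_body = {
    'os-migrateLive': {
        'host': 'target-compute-node',   # the 'null' type is also allowed here
        'block_migration': False,
        'disk_over_commit': False,
    },
}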
|
NeuromorphicProcessorProject/snn_toolbox
|
snntoolbox/simulation/backends/inisim/temporal_mean_rate_tensorflow.py
|
Python
|
mit
| 28,900
| 0
|
# -*- coding: utf-8 -*-
"""INI temporal mean rate simulator with Tensorflow backend.
This module defines the layer objects used to create a spiking neural network
for our built-in INI simulator
:py:mod:`~snntoolbox.simulation.target_simulators.INI_temporal_mean_rate_target_sim`.
The coding scheme underlying this conversion is that the analog activation
value is represented by the average over number of spikes that occur during the
simulation duration.
@author: rbodo
"""
import os
import json
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, AveragePooling2D, \
MaxPooling2D, Conv2D, DepthwiseConv2D, ZeroPadding2D, Reshape, Layer, \
Concatenate, Conv1D, Conv2DTranspose, UpSampling2D
from tensorflow.python.keras.utils import conv_utils
from snntoolbox.parsing.utils import get_inbound_layers
# Experimental
clamp_var = False
v_clip = False
class SpikeLayer(Layer):
"""Base class for layer with spiking neurons."""
def __init__(self, **kwargs):
self.config = kwargs.pop(str('config'), None)
if self.config is None:
from snntoolbox.bin.utils import load_config
# Todo: Enable loading config here. Needed when trying to load a
# converted SNN from disk. For now we specify a dummy path.
try:
self.config = load_config('wdir/log/gui/test/.config')
except FileNotFoundError:
raise NotImplementedError
self.layer_type = self.class_name
self.dt = self.config.getfloat('simulation', 'dt')
self.duration = self.config.getint('simulation', 'duration')
self.tau_refrac = self.config.getfloat('cell', 'tau_refrac')
self._v_thresh = self.config.getfloat('cell', 'v_thresh')
self.v_thresh = None
self.time = None
self.mem = self.spiketrain = self.impulse = self.spikecounts = None
self.mem_input = None # Used in MaxPooling layers
self.refrac_until = self.max_spikerate = None
if clamp_var:
self.spikerate = self.var = None
from snntoolbox.utils.utils import get_abs_path
path, filename = \
get_abs_path(self.config.get('paths', 'filename_clamp_indices'),
self.config)
if filename != '':
filepath = os.path.join(path, filename)
assert os.path.isfile(filepath), \
"File with clamp indices not found at {}.".format(filepath)
self.filename_clamp_indices = filepath
self.clamp_idx = None
self.payloads = None
self.payloads_sum = None
self.online_normalization = self.config.getboolean(
'normalization', 'online_normalization')
allowed_kwargs = {'input_shape',
'batch_input_shape',
'batch_size',
'dtype',
'name',
'trainable',
'weights',
'input_dtype', # legacy
}
for kwarg in kwargs.copy():
if kwarg not in allowed_kwargs:
kwargs.pop(kwarg)
Layer.__init__(self, **kwargs)
self.stateful = True
self._floatx = tf.keras.backend.floatx()
def reset(self, sample_idx):
"""Reset layer variables."""
self.reset_spikevars(tf.constant(sample_idx))
@property
def class_name(self):
"""Get class name."""
return self.__class__.__name__
def update_neurons(self):
"""Update neurons according to activation function."""
new_mem = self.get_new_mem()
if hasattr(self, 'activation_str'):
if self.activation
|
_str == 'softmax':
output_spikes = self.softmax_activation(new_mem)
elif self.activation_str == 'binary_sigmoid':
output_spikes = self.binary_sigmoid_activation(new_mem)
elif self.activation_str == 'binary_tanh':
output_spikes = self.binary_tanh_activation(new_mem)
elif '_Q' in self.activat
|
ion_str:
m, f = map(int, self.activation_str[
self.activation_str.index('_Q') + 2:].split('.'))
output_spikes = self.quantized_activation(new_mem, m, f)
else:
output_spikes = self.linear_activation(new_mem)
else:
output_spikes = self.linear_activation(new_mem)
# Store spiking
self.set_reset_mem(new_mem, output_spikes)
# Store refractory
if self.tau_refrac > 0:
new_refractory = tf.where(tf.not_equal(output_spikes, 0),
self.time + self.tau_refrac,
self.refrac_until)
self.refrac_until.assign(new_refractory)
if self.payloads:
residuals = tf.where(tf.not_equal(output_spikes, 0),
new_mem - self._v_thresh, new_mem)
self.update_payload(residuals, output_spikes)
if self.online_normalization:
self.spikecounts.assign_add(tf.cast(tf.not_equal(output_spikes, 0),
self._floatx))
self.max_spikerate.assign(tf.reduce_max(self.spikecounts)
* self.dt / self.time)
if self.spiketrain is not None:
self.spiketrain.assign(tf.cast(tf.not_equal(output_spikes, 0),
self._floatx) * self.time)
return tf.cast(output_spikes, self._floatx)
def update_payload(self, residuals, spikes):
"""Update payloads.
Uses the residual of the membrane potential after spike.
"""
idxs = tf.not_equal(spikes, 0)
payloads = tf.where(idxs, residuals[idxs] - self.payloads_sum[idxs],
self.payloads)
payloads_sum = tf.where(idxs, self.payloads_sum + self.payloads,
self.payloads_sum)
self.payloads.assign(payloads)
self.payloads_sum.assign(payloads_sum)
def linear_activation(self, mem):
"""Linear activation."""
return tf.cast(tf.greater_equal(mem, self.v_thresh), self._floatx) * \
self.v_thresh
def binary_sigmoid_activation(self, mem):
"""Binary sigmoid activation."""
return tf.cast(tf.greater(mem, 0), self._floatx) * self.v_thresh
def binary_tanh_activation(self, mem):
"""Binary tanh activation."""
output_spikes = tf.cast(tf.greater(mem, 0), self._floatx) \
* self.v_thresh
output_spikes += tf.cast(tf.less(mem, 0), self._floatx) \
* -self.v_thresh
return output_spikes
def softmax_activation(self, mem):
"""Softmax activation."""
# spiking_samples = k.less_equal(k.random_uniform([self.config.getint(
# 'simulation', 'batch_size'), ]), 300 * self.dt / 1000.)
# spiking_neurons = k.repeat(spiking_samples, 10)
# activ = k.softmax(mem)
# max_activ = k.max(activ, axis=1, keepdims=True)
# output_spikes = k.equal(activ, max_activ).astype(self._floatx)
# output_spikes = tf.where(k.equal(spiking_neurons, 0),
# k.zeros_like(output_spikes), output_spikes)
# new_and_reset_mem = tf.where(spiking_neurons, k.zeros_like(mem),
# mem)
# self.add_update([(self.mem, new_and_reset_mem)])
# return output_spikes
output_spikes = tf.less_equal(tf.random.uniform(tf.shape(mem)),
tf.nn.softmax(mem))
return tf.cast(output_spikes, self._floatx) * self.v_thresh
def quantized_activation(self, mem, m, f):
"""Activation with precision reduced to fixed point format Qm.f."""
# Todo: Needs to be implemented somehow...
return tf.cast(tf.greater_equal(mem, self.v_thresh), self._floatx) * \
self.v_thresh
def get_new_mem(self):
"""Add input to membrane p
|
botify-labs/python-simple-workflow
|
swf/models/history/base.py
|
Python
|
mit
| 7,957
| 0.000628
|
# -*- coding:utf-8 -*-
# Copyright (c) 2013, Theo Crevon
# Copyright (c) 2013, Greg Leclercq
#
# See the file LICENSE for copying permission.
from itertools import groupby
from swf.models.event import EventFactory, CompiledEventFactory
from swf.models.event.workflow import WorkflowExecutionEvent
from swf.utils import cached_property
class History(object):
"""Execution events history container
History object is an Event subclass objects container
which can be built directly against an amazon json response
    using its from_event_list method.
It is iterable and exposes a list-like __getitem__ for easier
manipulation.
:param events: Events list to build History upon
:type events: list
Typical amazon response looks like:
.. code-block:: json
{
"events": [
{
'ev
|
entId': 1,
'eventType': 'WorkflowExecutionStarted',
'workflowExecutionStartedEventAttributes': {
'taskList': {
'name': 'test'
},
|
'parentInitiatedEventId': 0,
'taskStartToCloseTimeout': '300',
'childPolicy': 'TERMINATE',
'executionStartToCloseTimeout': '6000',
'workflowType': {
'version': '0.1',
'name': 'test-1'
},
},
'eventTimestamp': 1365177769.585,
},
{
'eventId': 2,
'eventType': 'DecisionTaskScheduled',
'decisionTaskScheduledEventAttributes': {
'startToCloseTimeout': '300',
'taskList': {
'name': 'test'
}
},
'eventTimestamp': 1365177769.585
}
]
}
"""
def __init__(self, *args, **kwargs):
self.events = kwargs.pop('events', [])
self.raw = kwargs.pop('raw', None)
self.it_pos = 0
def __len__(self):
return len(self.events)
def __getitem__(self, val):
if isinstance(val, int):
return self.events[val]
elif isinstance(val, slice):
return History(events=self.events[val])
raise TypeError("Unknown slice format: %s" % type(val))
def __repr__(self):
events_repr = '\n\t'.join(
map(lambda e: e.__repr__(), self.events)
)
repr_str = '<History\n\t%s\n>' % events_repr
return repr_str
def __iter__(self):
return self
def next(self):
try:
next_event = self.events[self.it_pos]
self.it_pos += 1
except IndexError:
self.it_pos = 0
raise StopIteration
return next_event
@property
def last(self):
"""Returns the last stored event
:rtype: swf.models.event.Event
"""
return self.events[-1]
def latest(self, n):
"""Returns the n latest events stored in the History
:param n: latest events count to return
:type n: int
:rtype: list
"""
end_pos = len(self.events)
start_pos = len(self.events) - n
return self.events[start_pos:end_pos]
@property
def first(self):
"""Returns the first stored event
:rtype: swf.models.event.Event
"""
return self.events[0]
@property
def finished(self):
"""Checks if the History matches with a finished Workflow
Execution history state.
"""
completion_states = (
'completed',
'failed',
'canceled',
'terminated'
)
if (isinstance(self.last, WorkflowExecutionEvent) and
self.last.state in completion_states):
return True
return False
def filter(self, **kwargs):
"""Filters the history based on kwargs events attributes
        Basically, this allows filtering the history events by their
        types and states. Can be used for example to retrieve every
'DecisionTask' in the history, to check the presence of a specific
event and so on...
example:
.. code-block:: python
>>> history_obj.filter(type='ActivityTask', state='completed') # doctest: +SKIP
<History
<Event 23 ActivityTask : completed>
<Event 42 ActivityTask : completed>
<Event 61 ActivityTask : completed>
>
>>> history_obj.filter(type='DecisionTask') # doctest: +SKIP
<History
<Event 2 DecisionTask : scheduled>
<Event 3 DecisionTask : started>
<Event 7 DecisionTask : scheduled>
<Event 8 DecisionTask : started>
<Event 20 DecisionTask : scheduled>
<Event 21 DecisionTask : started>
>
:rtype: swf.models.history.History
"""
return filter(
lambda e: all(getattr(e, k) == v for k, v in kwargs.iteritems()),
self.events
)
@property
def reversed(self):
for i in xrange(len(self.events) - 1, -1, -1):
yield self.events[i]
@property
def distinct(self):
"""Extracts distinct history events based on their types
:rtype: list of swf.models.event.Event
"""
distinct_events = []
for key, group in groupby(self.events, lambda e: e.type):
g = list(group)
# Merge every WorkflowExecution events into same group
if (len(g) == 1 and
len(distinct_events) >= 1 and
g[0].type == "WorkflowExecution"):
                # WorkflowExecution group will always be in first position
distinct_events[0].extend(g)
else:
distinct_events.append(list(g))
return distinct_events
def compile(self):
"""Compiles history events into a stateful History
based on events types and states transitions.
Every events stored in the resulting history are stateful
CompiledEvent subclasses instances then.
:rtype: swf.models.history.History made of swf.models.event.CompiledEvent
"""
distinct_events = self.distinct
compiled_history = []
for events_list in distinct_events:
if len(events_list) > 0:
compiled_event = CompiledEventFactory(events_list[0])
for event in events_list[1:]:
compiled_event.transit(event)
compiled_history.append(compiled_event)
return History(events=compiled_history)
@cached_property
def compiled(self):
"""Compiled history version
:rtype: swf.models.history.History made of swf.models.event.CompiledEvent
"""
return self.compile()
@classmethod
def from_event_list(cls, data):
"""Instantiates a new ``swf.models.history.History`` instance
from amazon service response.
Every member of the History are ``swf.models.event.Event``
subclasses instances, exposing their type, state, and so on to
facilitate decisions according to the history.
:param data: event history description (typically, an amazon response)
:type data: dict
:returns: History model instance built upon data description
        :rtype: swf.models.history.History
"""
events_history = []
for index, d in enumerate(data):
event = EventFactory(d)
events_history.append(event)
return cls(events=events_history, raw=data)
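
# Illustrative usage sketch: building a History from the raw 'events' list of a
# workflow-history response and inspecting it. 'response' is a placeholder for
# whatever dict the caller already obtained from the service.
def _example_usage(response):
    history = History.from_event_list(response['events'])
    latest = history.last               # most recent swf.models.event.Event
    done = history.finished             # True once a completion event arrived
    completed = history.filter(type='ActivityTask', state='completed')
    return latest, done, completed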
|
kyleabeauchamp/EnsemblePaper
|
code/figures/plot_MCMC_traces.py
|
Python
|
gpl-3.0
| 1,098
| 0.004554
|
from fitensemble import belt
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import ALA3
import experiment_loader
grid = itertools.product(ALA3.ff_list, ALA3.prior_list)
bayesian_bootstrap_run = 0
for k, (ff, prior) in enumerate(grid):
print(ff, prior)
regulari
|
zation_strength = ALA3.regularization_strength_dict[prior][ff]
predictions, measurements, uncertainties = experiment_loader.load(ff)
pymc_filename = ALA3.data_directory + "/models/model_%s_%s_reg-%.1f-BB%d.h5" % (ff, prior, regularization_strength, bayesian_bootstrap_run)
belt_model = belt.BELT.load(pymc_filename)
a = belt_model.mcmc.trace("alpha")[:]
plt.figure()
plt.title("%s - %s" % (ALA3.ff_map[ff], prior))
y = a[:,0]
x = np.arange(
|
len(y)) * ALA3.thin
plt.plot(x, y)
plt.xlabel("MCMC steps")
#plt.ylabel(r"$\alpha$:" + str(predictions.columns[0]))
plt.ylabel(predictions.columns[0])
plt.savefig(ALA3.outdir+"/%s-%s-MCMC_Trace.png" % (prior, ff), bbox_inches='tight')
|
brianseltzer1738/Platformer
|
platformer.py
|
Python
|
mit
| 4,532
| 0.007061
|
"""
platformer.py
Author: Brian S
Credit: Finn H
Assignment:
|
Write and submit a program that implements the sandbox platformer game:
https://github.com/HHS-IntroProgramming/Platformer
"""
from ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset, ImageAsset, Frame
SCREEN_WIDTH = 1000
SCREEN_HEIGHT = 800
blue = Color(0x2EFE
|
C8, 1.0)
black = Color(0x000000, 1.0)
pink = Color(0xFF00FF, 1.0)
red = Color(0xFF5733, 1.0)
white = Color(0xFFFFFF, 1.0)
red = Color(0xff0000, 1.0)
green = Color(0x00ff00, 1.0)
blue = Color(0x0000ff, 1.0)
black = Color(0x000000, 1.0)
white = Color(0xffffff, 1.0)
grey = Color(0xC0C0C0, 1.0)
thinline = LineStyle(2, black)
blkline = LineStyle(1, black)
noline = LineStyle(0, white)
coolline = LineStyle(1, black)
blueline = LineStyle(2, blue)
redline = LineStyle(1, red)
greenline = LineStyle(1, pink)
gridline = LineStyle(1, grey)
grid=RectangleAsset(30,30,gridline,white)
black = Color(0, 1)
bg_asset = RectangleAsset(SCREEN_WIDTH, SCREEN_HEIGHT, noline, red)
bg = Sprite(bg_asset, (0,0))
class Guy(Sprite):
guy = RectangleAsset(20, 40, coolline, green)
def __init__(self, x, y):
super().__init__(Guy.guy, (x, y))
self.x = x
self.y = y
class Brick(Sprite):
brick = RectangleAsset(30, 30, thinline, pink)
def __init__(self, x, y):
super().__init__(Brick.brick, (x, y))
self.x = x
self.y = y
def step(self):
self.grav += 0.25
self.y += self.grav
collide = self.collidingWithSprites(Brick)
if collide:
self.y -= self.grav
self.grav = 0
class Spring(Sprite):
spring = RectangleAsset(30, 5, thinline, blue)
def __init__(self, x, y):
super().__init__(Spring.spring, (x, y))
self.x = x
self.y = y
grav=0
springgrav = 0
class Platformer(App):
def __init__(self, SCREEN_WIDTH, SCREEN_HEIGHT):
super().__init__()
self.mousex = 0
self.mousey = 0
self.guy = 0
self.guysprite = None
self.brick = None
self.spring = None
self.listenKeyEvent('keydown', 'p', self.createGuy)
self.listenKeyEvent('keydown', 'w', self.createBrick)
self.listenMouseEvent('mousemove', self.motion)
self.listenKeyEvent('keydown', 'right arrow', self.R)
self.listenKeyEvent('keydown', 'left arrow', self.L)
self.listenKeyEvent('keydown', 'up arrow', self.U)
self.listenKeyEvent('keydown', 'down arrow', self.D)
self.listenKeyEvent('keydown', 's', self.createSpring)
def motion(self, event):
self.mousex = event.x
self.mousey = event.y
def createBrick(self, event):
x = self.mousex - self.mousex%30
y = self.mousey - self.mousey%30
Brick(x-10, y-10)
def createSpring(self, event):
global springgrav
x = self.mousex
y = self.mousey
Spring(x, y)
def createGuy (self, event):
global grav
if self.guysprite:
self.guysprite.destroy()
grav = 0
self.guysprite = Guy(self.mousex - 30, self.mousey - 30)
def U(self, event):
global grav
if grav == 0:
grav = -10
collisions = self.guysprite.collidingWithSprites(Brick)
if collisions:
self.guysprite.y += 50
def D(self, event):
self.guysprite.y += 5
collisions = self.guysprite.collidingWithSprites(Brick)
if collisions:
self.guysprite.y -= 5
def R(self, event):
self.guysprite.x += 10
collisions = self.guysprite.collidingWithSprites(Brick)
if collisions:
self.guysprite.x -= 10
def L(self, event):
self.guysprite.x -= 10
collisions = self.guysprite.collidingWithSprites(Brick)
if collisions:
self.guysprite.x += 10
def step(self):
global grav
global springgrav
if self.guysprite:
grav += 0.5
self.guysprite.y += grav
collisions = self.guysprite.collidingWithSprites(Brick)
if collisions:
self.guysprite.y -= grav
grav = 0
sprang = self.guysprite.collidingWithSprites(Spring)
if sprang:
grav -= 10
self.guysprite.y += grav
myapp = Platformer(SCREEN_WIDTH, SCREEN_HEIGHT)
myapp.run()
|
fflewddur/python-phash
|
docs/conf.py
|
Python
|
cc0-1.0
| 8,370
| 0.007407
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this file.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import phash
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Python Boilerplate'
copyright = u'2014, Chris Adams'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = phash.__version__
# The full version, including alpha/beta/rc tags.
release = phash.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'phashdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'phash.tex', u'Python Boilerplate Documentation',
u'Chris Adams', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show
|
URL addresses after external links.
#latex_show_urls = False
# Documents
|
to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'phash', u'Python Boilerplate Documentation',
[u'Chris Adams'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'phash', u'Python Boilerplate Documentation',
u'Chris Adams', 'phash', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# H
|
guyromm/greencouriers
|
greencouriers/tests/functional/test_courier.py
|
Python
|
gpl-3.0
| 209
| 0.004785
|
from greencouriers.tests import *
class TestCourierController(TestController):
def test_index(self):
response = self.app.get(url(controller='courier', a
|
ction='index'))
# Test
|
response...
|
caseydunham/tweet-dump
|
tweet-dump.py
|
Python
|
mit
| 5,740
| 0.005401
|
"""
Copyright (c) 2012 Casey Dunham <casey.dunham@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__author__ = 'Casey Dunham <casey.dunham@gmail.com>'
__version__ = '0.1'
import argparse
import urllib
import sys
from urllib2 import (Request, urlopen, HTTPError, URLError)
try:
# Python >= 2.6
import json
except ImportError:
try:
# Python < 2.6
import simplejson as json
except ImportError:
try:
# Google App Engine
from django.utils import simplejson as json
except ImportError:
raise ImportError, "Unable to load a json library"
class TweetDumpError(Exception):
@property
def message(self):
return self.args[0]
class RateLimitError(TweetDumpError):
pass
API_URL = "https://api.twitter.com/1/statuses/user_timeline.json?%s"
# we are not authenticating so this will return the rate limit based on our IP
# see (https://dev.twitter.com/docs/api/1/get/account/rate_limit_status)
RATE_LIMIT_API_URL = "https://api.twitter.com/1/account/rate_limit_status.json"
parser = argparse.ArgumentParser(description="dump all tweets from user")
parser.add_argument("handle", type=str, help="twitter screen name")
def get_tweets(screen_name, count, maxid=None):
params = {
"screen_name": screen_name,
"count": count,
"exclude_replies": "true",
"include_rts": "true"
}
# if we include the max_id from the last tweet we retrieved, we will retrieve the same tweet again
# so decrement it by one to not retrieve duplicate tweets
if maxid:
params["max_id"] = int(maxid) - 1
encoded_params = urllib.urlencode(params)
query = API_
|
URL % encoded_params
resp = fetch_url(query)
ratelimit_limit = resp.headers["X-RateLimit-Limit"]
ratelimit_remaining = resp.headers["X-RateLimit-Remaining"]
ratelimit_reset = re
|
sp.headers["X-RateLimit-Reset"]
tweets = json.loads(resp.read())
return ratelimit_remaining, tweets
def get_initial_rate_info():
resp = fetch_url(RATE_LIMIT_API_URL)
rate_info = json.loads(resp.read())
return rate_info["remaining_hits"], rate_info["reset_time_in_seconds"], rate_info["reset_time"]
def fetch_url(url):
try:
return urlopen(Request(url))
except HTTPError, e:
if e.code == 400: # twitter api limit reached
raise RateLimitError(e.code)
if e.code == 502: # Bad Gateway, sometimes get this when making requests. just try again
raise TweetDumpError(e.code)
print >> sys.stderr, "[!] HTTP Error %s: %s" % (e.code, e.msg)
except URLError, e:
print >> sys.stderr, "[!] URL Error: %s URL: %s" % (e.reason, url)
exit(1)
def print_banner():
print "tweet-dump %s (c) 2012 %s" % (__version__, __author__)
print """ .-.
(. .)__,')
/ V )
\ ( \/ .
`._`.__\\ o ,
<< `' .o..
"""
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog="tweet-dump")
parser.add_argument('username', help="Twitter Screen Name")
parser.add_argument('file', help="File to write tweeets to")
parser.add_argument('--count', help="Number of tweets to retrieve per request", default=200)
parser.add_argument('--maxid', help="ID of Tweet to start dumping after", default=None)
args = parser.parse_args()
screen_name = args.username
count = args.count
maxid = args.maxid
out_file_name = args.file
out_file = None
try:
out_file = open(out_file_name, 'w')
except IOError, e:
print >> sys.stderr, "[!] error creating file %s" % out_file_name
exit(1)
print_banner()
print "[*] writing tweets to %s \n[*] dumping tweets for user %s" % (out_file_name, screen_name)
#print "[*] dumping tweets for user %s" % screen_name,
max_requests = 5
requests_made = 0
tweet_count = 0
while True:
# get initial rate information
(remaining, rst_time_s, rst_time) = get_initial_rate_info()
while remaining > 0:
try:
(remaining, tweets) = get_tweets(screen_name, count, maxid)
except RateLimitError:
pass
except TweetDumpError, e:
pass
else:
requests_made += 1
if len(tweets) > 0:
for tweet in tweets:
maxid = tweet["id"]
out_file.write(u"%s %s: %s\n" % (tweet["created_at"], maxid, repr(tweet["text"])))
tweet_count += 1
else:
print "[*] reached end of tweets"
break
break
print "[*] %d tweets dumped!" % tweet_count
|
enthought/etsproxy
|
enthought/envisage/resource/resource_manager.py
|
Python
|
bsd-3-clause
| 103
| 0
|
# proxy
|
module
from __future__ import absolute_import
from envisage.resource.resource_manager
|
import *
|
mhrivnak/pulp
|
client_consumer/pulp/client/consumer/config.py
|
Python
|
gpl-2.0
| 4,554
| 0.001976
|
# Copyright (c) 2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
import os
import socket
from pulp.common.config import Config, REQUIRED, ANY, NUMBER, BOOL, OPTIONAL
DEFAULT = {
'server': {
'host': socket.gethostname(),
'port': '443',
'api_prefix': '/pulp/api',
'rsa_pub': '/etc/pki/pulp/consumer/server/rsa_pub.key',
'verify_ssl': 'true',
'ca_path': '/etc/pki/tls/certs/ca-bundle.crt',
},
'authentication': {
'rsa_key': '/etc/pki/pulp/consumer/rsa.key',
'rsa_pub': '/etc/pki/pulp/consumer/rsa_pub.key'
},
'client': {
'role': 'consumer'
},
'filesystem': {
'extensions_dir': '/usr/lib/pulp/consumer/extensions',
'repo_file': '/etc/yum.repos.d/pulp.repo',
'mirror_list_dir': '/etc/yum.repos.d',
'gpg_keys_dir': '/etc/pki/pulp-gpg-keys',
'cert_dir': '/etc/pki/pulp/client/repo',
'id_cert_dir': '/etc/pki/pulp/consumer/',
'id_cert_filename': 'consumer-cert.pem',
},
'reboot': {
'permit': 'false',
'delay': '3',
},
'output': {
'poll_frequency_in_seconds': '1',
'enable_color': 'true',
'wrap_to_terminal': 'false',
'wrap_width': '80',
},
'messagi
|
ng': {
'scheme': 'amqp',
'host': None,
'port': '5672',
'transport': 'qpid',
|
'cacert': None,
'clientcert': None,
},
'profile': {
'minutes': '240',
}
}
SCHEMA = (
('server', REQUIRED,
(
('host', REQUIRED, ANY),
('port', REQUIRED, NUMBER),
('api_prefix', REQUIRED, ANY),
('verify_ssl', REQUIRED, BOOL),
('ca_path', REQUIRED, ANY),
('rsa_pub', REQUIRED, ANY),
)
),
('authentication', REQUIRED,
(
('rsa_key', REQUIRED, ANY),
('rsa_pub', REQUIRED, ANY),
)
),
('client', REQUIRED,
(
('role', REQUIRED, r'consumer'),
)
),
('filesystem', REQUIRED,
(
('extensions_dir', REQUIRED, ANY),
('repo_file', REQUIRED, ANY),
('mirror_list_dir', REQUIRED, ANY),
('gpg_keys_dir', REQUIRED, ANY),
('cert_dir', REQUIRED, ANY),
('id_cert_dir', REQUIRED, ANY),
('id_cert_filename', REQUIRED, ANY),
)
),
('reboot', REQUIRED,
(
('permit', REQUIRED, BOOL),
('delay', REQUIRED, NUMBER),
)
),
('output', REQUIRED,
(
('poll_frequency_in_seconds', REQUIRED, NUMBER),
('enable_color', REQUIRED, BOOL),
('wrap_to_terminal', REQUIRED, BOOL),
('wrap_width', REQUIRED, NUMBER)
)
),
('messaging', REQUIRED,
(
('scheme', REQUIRED, r'(tcp|ssl|amqp|amqps)'),
('host', OPTIONAL, ANY),
('port', REQUIRED, NUMBER),
('transport', REQUIRED, ANY),
('cacert', OPTIONAL, ANY),
('clientcert', OPTIONAL, ANY)
)
),
('profile', REQUIRED,
(
('minutes', REQUIRED, NUMBER),
)
),
)
def read_config(paths=None, validate=True):
"""
Read and validate the consumer configuration.
:param validate: Validate the configuration.
:param validate: bool
:param paths: A list of paths to configuration files to read.
Reads the standard locations when not specified.
:param paths: list
:return: A configuration object.
:rtype: Config
"""
if not paths:
paths = ['/etc/pulp/consumer/consumer.conf']
conf_d_dir = '/etc/pulp/consumer/conf.d'
paths += [os.path.join(conf_d_dir, i) for i in sorted(os.listdir(conf_d_dir))]
overrides = os.path.expanduser('~/.pulp/consumer.conf')
if os.path.exists(overrides):
paths.append(overrides)
config = Config(DEFAULT)
config.update(Config(*paths))
if validate:
config.validate(SCHEMA)
return config
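
# Illustrative usage sketch: read_config() layers DEFAULT, the standard config
# files and the per-user overrides, then validates against SCHEMA. Values come
# back as strings through the Config mapping interface, so numeric options are
# cast by the caller; the explicit path below is just an example.
def _example_usage():
    cfg = read_config()                         # standard locations, validated
    host = cfg['server']['host']
    minutes = int(cfg['profile']['minutes'])
    custom = read_config(paths=['/tmp/consumer.conf'], validate=False)
    return host, minutes, custom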
|
8l/beri
|
cheritest/trunk/tests/cp2/test_cp2_x_cincoffset_sealed.py
|
Python
|
apache-2.0
| 2,058
| 0.003887
|
#-
# Copyright (c) 2014 Michael Roe
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership
|
. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file
|
except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_cp2_x_cincoffset_sealed(BaseBERITestCase):
@attr('capabilities')
def test_cp2_x_cincoffset_sealed_1(self):
        '''Test that CIncOffset on a sealed capability does not change the offset'''
self.assertRegisterEqual(self.MIPS.a0, 0, "CIncOffset changed the offset of a sealed capability")
@attr('capabilities')
def test_cp2_x_cincoffset_sealed_2(self):
'''Test that CIncOffset on a sealed capability raised an exception'''
self.assertRegisterEqual(self.MIPS.a2, 1, "CIncOffset on a sealed capability did not raise an exception")
@attr('capabilities')
def test_cp2_x_cincoffset_sealed_3(self):
'''Test that CIncOffset on a sealed capability sets CapCause'''
self.assertRegisterEqual(self.MIPS.a3, 0x0301, "CIncOffset on a sealed capability did not set CapCause correctly")
|
jezdez/django-constance
|
tests/settings.py
|
Python
|
bsd-3-clause
| 3,003
| 0
|
from datetime import datetime, date, time, timedelta
from decimal import Decimal
SECRET_KEY = 'cheese'
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
DATABASE_ENGINE = 'sqlite3'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
'secondary': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.staticfiles',
'django.cont
|
rib.auth',
'django.contrib.contenttypes',
'django
|
.contrib.sessions',
'django.contrib.messages',
'constance',
'constance.backends.database',
)
ROOT_URLCONF = 'tests.urls'
CONSTANCE_REDIS_CONNECTION_CLASS = 'tests.redis_mockup.Connection'
CONSTANCE_ADDITIONAL_FIELDS = {
'yes_no_null_select': [
'django.forms.fields.ChoiceField',
{
'widget': 'django.forms.Select',
'choices': ((None, "-----"), ("yes", "Yes"), ("no", "No"))
}
],
# note this intentionally uses a tuple so that we can test immutable
'email': ('django.forms.fields.EmailField',),
}
USE_TZ = True
CONSTANCE_CONFIG = {
'INT_VALUE': (1, 'some int'),
'BOOL_VALUE': (True, 'true or false'),
'STRING_VALUE': ('Hello world', 'greetings'),
'DECIMAL_VALUE': (Decimal('0.1'), 'the first release version'),
'DATETIME_VALUE': (datetime(2010, 8, 23, 11, 29, 24),
'time of the first commit'),
'FLOAT_VALUE': (3.1415926536, 'PI'),
'DATE_VALUE': (date(2010, 12, 24), 'Merry Chrismas'),
'TIME_VALUE': (time(23, 59, 59), 'And happy New Year'),
'TIMEDELTA_VALUE': (timedelta(days=1, hours=2, minutes=3), 'Interval'),
'CHOICE_VALUE': ('yes', 'select yes or no', 'yes_no_null_select'),
'LINEBREAK_VALUE': ('Spam spam', 'eggs\neggs'),
'EMAIL_VALUE': ('test@example.com', 'An email', 'email'),
}
DEBUG = True
STATIC_ROOT = './static/'
STATIC_URL = '/static/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'constance.context_processors.config',
],
},
},
]
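
# Illustrative usage sketch: with CONSTANCE_CONFIG defined as above, application
# code reads live values through the constance proxy, and templates receive the
# same data via the 'constance.context_processors.config' entry in TEMPLATES.
# Wrapped in a function so importing these settings stays side-effect free.
def _example_config_access():
    from constance import config
    return config.INT_VALUE, config.CHOICE_VALUE   # 1 and 'yes' until changed in admin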
|
unomena/django-1.8-registration
|
registration/backends/default/urls.py
|
Python
|
bsd-3-clause
| 2,086
| 0.000959
|
"""
URLconf for registration and activation, using django-registration's
default backend.
If the default behavior of these views is acceptable to you, simply
use a line like this in your root URLconf to set up the default URLs
for registration::
(r'^accounts/', include('registration.backends.default.urls')),
This will also automatically set up the views in
``django.contrib.auth`` at sensible default locations.
If you'd like to customize the behavior (e.g., by passing extra
arguments to the various views) or split up the URLs, feel free to set
up your own URL patterns for these views instead.
"""
from django.conf.urls import *
from django.views import generic as generic_views
from registration.views import activate
from registration.views import register
urlpatterns = [
url(
r'^activate/complete/$',
generic_views.TemplateView.as_view(
template_name='registration/activation_complete.html'
),
name='registration_activation_complete'
),
# Activation keys get matched by \w+ instead of the more specific
# [a-fA-F0-9]{40} because a bad activation key should still get to the view;
# that way it can return a sensible "invalid key" message instead of a
    # confusing 404.
url(
r'^activate/(?P<activation_key>\w+)/$',
activate,
{'backend': 'registration.backends.default.DefaultBackend'},
name='registration_activate'
),
url(
r'^register/$',
register,
{'backend': 'registration.backends.default.DefaultBackend'},
name='registration_register'
),
url(
r'^register/complete/$',
generic_views.TemplateView.as_view(
template_name='registration/registration_complete.html'
),
name='registration_complete'
),
url(
r'^register/closed/$',
generic_views.TemplateView.as_view(
template_name='registration/registration_closed.html'
),
name='registration_disallowed'
),
url(r'', include('registration.auth_urls')),
]
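# A hedged sketch of the "customize the behavior" path mentioned in the module
# docstring: overriding the form template by passing an extra keyword argument
# to the function-based register view. The template name below is hypothetical,
# and this assumes the classic django-registration register() signature, which
# accepts template_name.
from django.conf.urls import url
from registration.views import register

custom_urlpatterns = [
    url(
        r'^register/$',
        register,
        {
            'backend': 'registration.backends.default.DefaultBackend',
            'template_name': 'registration/custom_registration_form.html',
        },
        name='registration_register'
    ),
]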
|
scottkirkwood/mm2s5
|
build_all.py
|
Python
|
apache-2.0
| 556
| 0.01259
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Scott Kirkwood. All Rights Reserved.
"""
Build everything for mm2s5.
You'll need:
sudo apt-get install alien help2man fakeroot lintian
Also python-bdist
"""
from pybdist import pybdist
import optparse
import setup
def main():
parser = optparse.OptionParser()
pybdist.add_standard_options(parser)
(options, unused_args) = parser.parse_args()
if not pybdist.handle_standard_options(options, setup):
print 'Doing nothing. --help for commands.'
if __name__ == '__main__':
main()
|
mahabuber/erpnext
|
erpnext/accounts/doctype/cost_center/test_cost_center.py
|
Python
|
agpl-3.0
| 236
| 0.004237
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
test_records = frappe.get_test_records('Cost Center')
|
tomgross/pp-site
|
src/itc.pptheme/itc/pptheme/browser/view.py
|
Python
|
apache-2.0
| 4,725
| 0.001058
|
# -*- coding: utf-8 -*-
__author__ = 'itconsense@gmail.com'
from collections import OrderedDict
from math import pi
from Products.Five import BrowserView
from plone import api
import base64
import logging
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import six
LOG = logging.getLogger('evaluate')
class UpgradeIt(BrowserView):
def __call__(self):
portal_setup = api.portal.get_tool(name='portal_setup')
portal_setup.runImportStepFromProfile(
'profile-plonetheme.sunburst:default', 'cssregistry', run_dependencies=False)
portal_skins = api.portal.get_tool(name='portal_skins')
custom = portal_skins['custom']
for oid in ['main_template', 'base_properties', 'ploneCustom.css']:
if oid in custom:
api.content.delete(obj=custom[oid])
return "DONE"
class Result(object):
def __init__(self):
self.good = ''
self.details = {}
class EvaluateTestView(BrowserView):
no_text = 'Kein Textbaustein'
factors = {
'Meistens': 5,
'Manchmal': 3,
'Selten': 1,
'Nie': 0
}
pie_factors = {
'Meistens': 3,
'Manchmal': 2,
'Selten': 1,
'Nie': 0
}
chart_img = ''
def get_detail_elements(self):
zope_script = self.context.restrictedTraverse('text_detail_elements')
return zope_script()
def get_summary_elements(self):
zope_script = self.context.restrictedTraverse('text_summary_elements')
return zope_script()
def text_blocks(self):
result = OrderedDict()
form = self.request.form
summary = 0
df = OrderedDict()
elements = self.get_detail_elements()
for i, group in enumerate(elements.keys()):
if group not in form:
continue
group_title = self.context[group].Title()
result[group_title] = Result()
good_values = []
for key, val in form[group].items():
summary += self.factors[val]
element = elements[group].get(key, self.no_text)
title = element.get('Titel', group_title)
if val == 'Meistens':
good_values.append(title)
continue
text = element.get(val)
if not text:
continue
if val in element:
result[group_title].details[title] = text
else:
result[group_title].details[title] = element.get('default')
u_group_title = unicode(group_title, 'utf-8')
if u_group_title not in df:
df[u_group_title] = 0
df[u_group_title] += self.pie_factors[val]
if good_values:
                result[group_title].good = ', '.join(good_values)
if not result[group_title].details:
LOG.warn('Details of group {0} are empty!'.format(group))
summary_elements = self.get_summary_elements()
if summary < 75:
result['summary'] = summary_elements['bad']
        elif 75 <= summary < 130:
result['summary'] = summary_elements['med']
else:
result['summary'] = summary_elements['good']
self.chart_img = 'data:image/jpeg;base64, ' + self.get_radar_chart(df)
self.legend = df.keys()
return result
def get_radar_chart(self, df):
LOG.info('{0}'.format(df))
# number of variable
categories = list(df)
N = len(categories)
# We are going to plot the first line of the data frame.
# But we need to repeat the first value to close the circular graph:
values = df.values()
values.append(values[0])
# What will be the angle of each axis in the plot? (we divide the plot / number of variable)
angles = [n / float(N) * 2 * pi for n in range(N)]
angles += angles[:1]
# Initialise the spider plot
fig = plt.figure()
ax = plt.subplot(111, polar=True)
        # Draw one axis per variable and add labels
plt.xticks(angles[:-1], range(1, N+1), color='grey', size=8, rotation='vertical')
# Draw ylabels
ax.set_rlabel_position(0)
plt.yticks([])
plt.ylim(0, min(21, max(values)) + 1)
# Plot data
ax.plot(angles, values, linewidth=1, linestyle='solid')
# Fill area
ax.fill(angles, values, 'b', alpha=0.1)
fig.savefig('test.png')
img = six.BytesIO()
fig.savefig(img, format='png')
img.seek(0)
return base64.b64encode(img.read())
|
mishravikas/geonode-cas
|
geonode/base/admin.py
|
Python
|
gpl-3.0
| 4,283
| 0.008872
|
from django.contrib import admin
from django.conf import settings
from geonode.base.models import (TopicCategory, SpatialRepresentationType,
Region, RestrictionCodeType, ContactRole, ResourceBase, Link, License, Thumbnail)
class LicenseAdmin(admin.ModelAdmin):
model = License
list_display = ('id', 'name')
    list_display_links = ('name',)
class ResourceBaseAdmin(admin.ModelAdmin):
list_display = ('id','title', 'date', 'category')
list_display_links = ('id',)
class TopicCategoryAdmin(admin.ModelAdmin):
model = TopicCategory
list_display_links = ('identifier',)
list_display = ('identifier', 'description', 'gn_description', 'is_choice')
if settings.MODIFY_TOPICCATEGORY==False:
exclude = ('identifier', 'description',)
def has_add_permission(self, request):
# the records are from the standard TC 211 list, so no way to add
if settings.MODIFY_TOPICCATEGORY:
return True
else:
return False
def has_delete_permission(self, request, obj=None):
# the records are from the standard TC 211 list, so no way to remove
if settings.MODIFY_TOPICCATEGORY:
return True
else:
return False
class RegionAdmin(admin.ModelAdmin):
model = Region
list_display_links = ('name',)
list_display = ('code', 'name')
search_fields = ('code', 'name',)
class SpatialRepresentationTypeAdmin(admin.ModelAdmin):
model = SpatialRepresentationType
list_display_links = ('identifier',)
list_display = ('identifier', 'description', 'gn_description', 'is_choice')
def has_add_permission(self, request):
# the records are from the standard TC 211 list, so no way to add
return False
def has_delete_permission(self, request, obj=None):
# the records are from the standard TC 211 list, so no way to remove
return False
class RestrictionCodeTypeAdmin(admin.ModelAdmin):
model = RestrictionCodeType
list_display_links = ('identifier',)
list_display = ('identifier', 'description', 'gn_description', 'is_choice')
def has_add_permission(self, request):
# the records are from the standard TC 211 list, so no way to add
return False
def has_delete_permission(self, request, obj=None):
# the records are from the standard TC 211 list, so no way to remove
return False
class ContactRoleAdmin(admin.ModelAdmin):
model = ContactRole
list_display_links = ('id',)
list_display = ('id','contact', 'resource', 'role')
list_editable = ('contact', 'resource', 'role')
class LinkAdmin(admin.ModelAdmin):
model = Link
list_display_links = ('id',)
list_display = ('id', 'resource', 'extension', 'link_type', 'name', 'mime')
list_filter = ('resource', 'extension', 'link_type', 'mime')
search_fields = ('name', 'resource__title',)
class ThumbnailAdmin(admin.ModelAdmin):
model = Thumbnail
list_display = ('get_title', 'get_geonode_type', 'thumb_file', 'get_thumb_url',)
search_fields = ('resourcebase__title',)
def get_title(self, obj):
rb = obj.resourcebase_set.all()[0] # should be always just one!
return rb.title
get_title.short_description = 'Title'
def get_thumb_url(self, obj):
rb = obj.resourcebase_set.all()[0] # should be always just one!
return u'<img src="%s" alt="%s" height="80px" />' % (rb.get_thumbnail_url(),
obj.id)
get_thumb_url.allow_tags = True
get_thumb_url.short_description = 'URL'
def get_geonode_type(self, obj):
rb = obj.resourcebase_set.all()[0] # should be always just one!
return rb.class_name
get_geonode_type.short_description = 'Type'
admin.site.register(TopicCategory, TopicCategoryAdmin)
admin.site.register(Region, RegionAdmin)
admin.site.register(SpatialRepresentationType, SpatialRepresentationTypeAdmin)
admin.site.register(RestrictionCodeType, RestrictionCodeTypeAdmin)
admin.site.register(ContactRole, ContactRoleAdmin)
admin.site.register(ResourceBase, ResourceBaseAdmin)
admin.site.register(Link, LinkAdmin)
admin.site.register(Thumbnail, ThumbnailAdmin)
admin.site.register(License, LicenseAdmin)
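# The three vocabulary admins above repeat the same pattern of disabling add and
# delete for fixed TC 211 code lists. A hedged refactoring sketch (the mixin is
# illustrative and not part of the original module):
class ReadOnlyVocabularyAdminMixin(object):
    """Disable add/delete for admins that expose a fixed standard vocabulary."""
    def has_add_permission(self, request):
        return False

    def has_delete_permission(self, request, obj=None):
        return False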
|
Nikea/VisTrails
|
contrib/cdat/scripts/init_inc.py
|
Python
|
bsd-3-clause
| 1,823
| 0.012068
|
############################################################################
##
## Copyright (C) 2006-2008 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at contact@vistrails.org.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
###########################################################################
|
#
""" Do not edit this file!
File automatically generated by scripts/gen_init.py
Change History:
version : description
0.2 : Integrated quickplot module that displays the CDAT plot
widget inside the spreadsheet
0.1 : First automatically generated package based on xml descriptions
"""
from PyQt4 import QtCore, QtGui
import sip
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import (Module, NotCacheable,
ModuleError, new_module)
from core.bundles import py_import
import os, sys
#cdat specific packages
vcs = py_import('vcs',{})
cdms2 = py_import('cdms2', {})
cdutil = py_import('cdutil', {})
#local python modules
from cdat_window import QCDATWindow
from cdat_cell import QCDATWidget
from quickplot import quickplot
|
newmediamedicine/indivo_server_1_0
|
indivo/tests/integration/test_modules/sharing.py
|
Python
|
gpl-3.0
| 7,841
| 0.01658
|
import data
from utils import assert_403, assert_404, assert_200, parse_xml, xpath
PRD = 'prd'
def test_sharing(IndivoClient):
DS = 'ds'
def get_datastore(obj):
if hasattr(obj, DS):
return getattr(obj, DS).values()
return False
def set_datastore(obj, **kwargs):
if hasattr(obj, DS):
ds = getattr(obj, DS)
for kwarg, value in kwargs.items():
if hasattr(ds, kwarg):
setattr(ds, kwarg, value)
return obj
raise ValueError
def alice_setup(record_id, bob_account_id):
allergy_type = {'type' : 'http://indivo.org/vocab/xml/documents#Allergy'}
alice_chrome_client = IndivoClient('chrome', 'chrome')
alice_chrome_client.create_session(data.account)
alice_chrome_client.read_record(record_id=record_id)
alice_chrome_client.get_account_permissions(account_id=data.account['account_id'])
alice_chrome_client.get_account_records(account_id = data.account['account_id'])
# Alice posts a document
# (We save the first doc instead of zero
# due to the contact doc already in alice's account)
alice_chrome_client.post_document(data=data.doc01)
document_id = alice_chrome_client.read_documents().response[PRD]['Document'][1]
# Save the document_id in the client's datastore
alice_chrome_client.ds.document_id = document_id
# Save the first carenet_id in the client's datastore
carenet_id = alice_chrome_client.get_record_carenets().response[PRD]['Carenet'][0]
# post four documents to Alice's record, 2 allergies and 2 immunizations
document_1_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.allergy00)), "/Document/@id")[0]
document_2_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.allergy01)), "/Document/@id")[0]
document_3_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.immunization)), "/Document/@id")[0]
document_4_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.immunization2)), "/Document/@id")[0]
# and one more to test nevershare
document_5_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.allergy02)), "/Document/@id")[0]
# auto-share allergies
alice_chrome_client.post_autoshare(data=allergy_type, carenet_id=carenet_id)
assert_200(alice_chrome_client.get_autoshare_bytype_all(record_id=record_id))
# unshare that one allergy, which should negate the autoshare
alice_chrome_client.delete_carenet_document(record_id = record_id, document_id = document_2_id, carenet_id=carenet_id)
# nevershare the third allergy
alice_chrome_client.document_nevershare_set(record_id = record_id, document_id = document_5_id)
# immunizations are individually shared (well only one of them)
alice_chrome_client.post_carenet_document(document_id = document_3_id, carenet_id=carenet_id)
# Alice shares her contact document(s) with the carenet
contact_doc = parse_xml(alice_chrome_client.read_documents(record_id = record_id, parameters={'type':'Contact'}))
for doc_id in xpath(contact_doc, '/Documents/Document/@id'):
alice_chrome_client.post_carenet_document(record_id = record_id, document_id = doc_id, carenet_id = carenet_id)
# Alice adds bob_account_id to carenet[0]
alice_chrome_client.post_carenet_account(carenet_id = carenet_id, data='account_id=' + bob_account_id + '&write=false')
# Review all accounts within carenet[0]
assert xpath(parse_xml(alice_chrome_client.get_carenet_accounts(carenet_id = carenet_id)), '/CarenetAccounts')
alice_chrome_client.get_carenet_apps(carenet_id = carenet_id)
alice_chrome_client.read_allergies(record_id = record_id)
# Finally, return the carenet_id, document_id
# in order to check Bob's access
# and a second document that is disallowed
return carenet_id, [document_1_id, document_3_id], [document_2_id, document_4_id, document_5_id]
def bob_setup(bob_account_id, record_id, carenet_id, allowed_docs, disallowed_docs):
bob_chrome_client = IndivoClient('chrome', 'chrome')
bob_chrome_client.create_session(data.account02)
# SZ: Bob should NOT be able to read the docs directly in the record
for doc_id in allowed_docs+disallowed_docs:
assert_403(bob_chrome_client.read_document(record_id=record_id, document_id=doc_id))
assert_403(bob_chrome_client.get_record_carenets(record_id=record_id))
# Bob should be able to read the allowed docs
for doc_id in allowed_docs:
assert_200(bob_chrome_client.get_carenet_document(carenet_id = carenet_id, document_id = doc_id))
# Bob should not be able to read the disallowed docs
for doc_id in disallowed_docs:
assert_404(bob_chrome_client.get_carenet_document(carenet_id = carenet_id, document_id = doc_id))
# Bob should be able to list docs in the carenet
carenet_documents_list = bob_chrome_client.get_carenet_documents(carenet_id = carenet_id).response[PRD]['Document']
# with a parameter
carenet_documents_list = bob_chrome_client.get_carenet_documents(carenet_id = carenet_id, parameters={'type': 'http://indivo.org/vocab/xml/documents#Allergy'}).response[PRD]['Document']
# Read carenet allergies
assert_200(bob_chrome_client.read_carenet_allergies(carenet_id = carenet_id))
assert_200(bob_chrome_client.read_carenet_problems(carenet_id = carenet_id))
# Read the contact document, this should work
contact_doc = parse_xml(bob_chrome_client.read_carenet_special_document(carenet_id = carenet_id, special_document='contact'))
contact_name = xpath(contact_doc, '/ns:Contact/ns:name/ns:fullName/text()', namespaces={'ns':'http://indivo.org/vocab/xml/documents#'})
assert(contact_name)
bob_chrome_client.get_account_permissions(account_id=bob_account_id)
bob_chrome_client.get_carenet_account_permissions(carenet_id= carenet_id,
record_id=record_id,
account_id=bob_account_id)
    # Not yet implemented
#bob_chrome_client.get_carenet_app_permissions(account_id=bob_account_id)
return True
  def admin_setup(bob_account_id):
admin_client = IndivoClient(data.machine_app_email, data.machine_app_secret)
admin_client.set_app_id(data.app_email)
# Create a record for Alice and set her at the owner
record_id = admin_client.create_record(data=data.contact).response[PRD]['Record'][0]
admin_client.set_record_owner(data=data.account['account_id'])
# Create a basic set of carenets
carenet_names = ['Family2', 'Friends2', 'School/Office']
for cname in carenet_names:
admin_client.create_carenet(data='name=' + cname)
# Check to make sure the admin can list the carenets and the accounts within each one
carenets = xpath(parse_xml(admin_client.get_record_carenets(record_id = record_id)),'/Carenets/Carenet/@id')
for carenet_id in carenets:
assert len(xpath(parse_xml(admin_client.get_carenet_accounts(carenet_id = carenet_id)), '/CarenetAccounts')) > 0
return record_id
bob_account_id = 'benadida@informedcohort.org'
# Admin spawning carenets under Alice's newly created record
record_id = admin_setup(bob_account_id)
# Given Bob's account id and a record that has been set up for her
# Alice gives Bob the document_id that she'd like to share with him
# Even though Alice gives Bob a document_id, Bob has the ability
# to read all documents within the carenet that Alice added him to
# 2010-09-13 now Alice also shares her contact URL and we check
# that Bob can read it at the special URL
carenet_id, allowed_documents, disallowed_documents = alice_setup(record_id, bob_account_id)
return bob_setup(bob_account_id, record_id, carenet_id, allowed_documents, disallowed_documents)
|
paris-ci/CloudBot
|
plugins/youtube.py
|
Python
|
gpl-3.0
| 6,992
| 0.003581
|
import re
import time
import isodate
import requests
from cloudbot import hook
from cloudbot.util import timeformat
from cloudbot.util.formatting import pluralize
from cloudbot.util.colors import parse
youtube_re = re.compile(r'(?:youtube.*?(?:v=|/v/)|youtu\.be/|yooouuutuuube.*?id=)([-_a-zA-Z0-9]+)', re.I)
base_url = 'https://www.googleapis.com/youtube/v3/'
api_url = base_url + 'videos?part=contentDetails%2C+snippet%2C+statistics&id={}&key={}'
search_api_url = base_url + 'search?part=id&maxResults=1'
playlist_api_url = base_url + 'playlists?part=snippet%2CcontentDetails%2Cstatus'
video_url = "http://youtu.be/%s"
err_no_api = "The YouTube API is off in the Google Developers Console."
time_last_request = time.time()
def get_video_description(video_id):
global time_last_request
time_elapsed = time.time() - time_last_request
if time_elapsed > 10:
time_last_request = time.time()
else:
#return "This looks like a YouTube video. However, the YT api have been called too much, I'm sorry I won't be able to fetch details for you."
return None
json = requests.get(api_url.format(video_id, dev_key)).json()
if json.get('error'):
if json['error']['code'] == 403:
return err_no_api
else:
return
data = json['items']
snippet = data[0]['snippet']
statistics = data[0]['statistics']
content_details = data[0]['contentDetails']
out = '\x02{}\x02'.format(snippet['title'])
if not content_details.get('duration'):
return out
length = isodate.parse_duration(content_details['duration'])
out += ' - length \x02{}\x02'.format(timeformat.format_time(int(length.total_seconds()), simple=True))
total_votes = float(statistics['likeCount']) + float(statistics['dislikeCount'])
if total_votes != 0:
# format
likes = pluralize(int(statistics['likeCount']), "like")
dislikes = pluralize(int(statistics['dislikeCount']), "dislike")
percent = 100 * float(statistics['likeCount']) / total_votes
likes = parse("$(dark_green)" + likes + "$(clear)")
dislikes = parse("$(dark_red)" + dislikes + "$(clear)")
out += ' - {}, {} (\x02{:.1f}\x02%)'.format(likes,
dislikes, percent)
if 'viewCount' in statistics:
views = int(statistics['viewCount'])
out += ' - \x02{:,}\x02 view{}'.format(views, "s"[views == 1:])
uploader = snippet['channelTitle']
upload_time = time.strptime(snippet['publishedAt'], "%Y-%m-%dT%H:%M:%S.000Z")
out += ' - \x02{}\x02 on \x02{}\x02'.format(uploader,
time.strftime("%Y.%m.%d", upload_time))
if 'contentRating' in content_details:
out += ' - \x034NSFW\x02'
# return re.sub(
# r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))',
# '[URL]', out)
return out.replace("youtu", "you tu") #nup. No spam please
@hook.on_start()
def load_key(bot):
global dev_key
dev_key = bot.config.get("api_keys", {}).get("google_dev_key", None)
@hook.regex(youtube_re)
def youtube_url(match, event):
if event.chan == "#harmonyhosting": # if the channel is #harmonyhosting
return None # return None, canceling the action
return get_video_description(match.group(1))
@hook.command("youtube", "you", "yt", "y")
def youtube(text):
"""youtube <query> -- Returns the first YouTube search result for <query>."""
if not dev_key:
return "This command requires a Google Developers Console API key."
json = requests.get(search_api_url, params={"q": text, "key": dev_key, "type": "video"}).json()
if json.get('error'):
if json['error']['code'] == 403:
return err_no_api
else:
return 'Error performing search.'
if json['pageInfo']['totalResults'] == 0:
return 'No results found.'
video_id = json['items'][0]['id']['videoId']
return get_video_description(video_id) + " - " + video_url % video_id
@hook.command("youtime", "ytime")
def youtime(text):
"""youtime <query> -- Gets the total run time of the first YouTube search result for <query>."""
if not dev_key:
return "This command requires a Google Developers Console API key."
json = requests.get(search_api_url, params={"q": text, "key": dev_key, "type": "video"}).json()
if json.get('error'):
if json['error']['code'] == 403:
return err_no_api
else:
return 'Error performing search.'
if json['pageInfo']['totalResults'] == 0:
return 'No results found.'
video_id = json['items'][0]['id']['videoId']
json = requests.get(api_url.format(video_id, dev_key)).json()
if json.get('error'):
return
data = json['items']
snippet = data[0]['snippet']
content_details = data[0]['contentDetails']
statistics = data[0]['statistics']
if not content_details.get('duration'):
return
length = isodate.parse_duration(content_details['duration'])
l_sec = int(length.total_seconds())
views = int(statistics['viewCount'])
total = int(l_sec * views)
length_text = timeformat.format_time(l_sec, simple=True)
total_text = timeformat.format_time(total, accuracy=8)
return 'The video \x02{}\x02 has a length of {} and has been viewed {:,} times for ' \
'a total run time of {}!'.format(snippet['title'], length_text, views,
total_text)
ytpl_re = re.compile(r'(.*:)//(www.youtube.com/playlist|youtube.com/playlist)(:[0-9]+)?(.*)', re.I)
@hook.regex(ytpl_re)
def ytplaylist_url(match, event):
global time_last_request
time_elapsed = time.time() - time_last_request
if time_elapsed > 10:
time_last_request = time.time()
else:
#return "This looks like a YouTube Playlist. However, the YT api have been called too much, I'm sorry I won't be able to fetch details for you."
return None
if event.chan == "#harmonyhosting": # if the channel is #harmonyhosting
return None # return None, canceling the action
location = match.group(4).split("=")[-1]
json = requests.get(playlist_api_url, params={"id": location, "key": dev_key}).json()
if json.get('error'):
if json['error']['code'] == 403:
return err_no_api
else:
return 'Error looking up playlist.'
data = json['items']
snippet = data[0]['snippet']
content_details = data[0]['contentDetails']
title = snippet['title']
author = snippet['channelTitle']
num_videos = int(content_details['itemCount'])
count_videos = ' - \x02{:,}\x02 video{}'.format(num_videos, "s"[num_videos == 1:])
return "\x02{}\x02 {} - \x02{}\x02".format(title, count_videos, author)
|
eusebioaguilera/scalablemachinelearning
|
Lab04/ML_lab4_ctr_student.py
|
Python
|
gpl-3.0
| 54,867
| 0.004903
|
# coding: utf-8
# 
# # **Click-Through Rate Prediction Lab**
# #### This lab covers the steps for creating a click-through rate (CTR) prediction pipeline. You will work with the [Criteo Labs](http://labs.criteo.com/) dataset that was used for a recent [Kaggle competition](https://www.kaggle.com/c/criteo-display-ad-challenge).
# #### ** This lab will cover: **
# + ####*Part 1:* Featurize categorical data using one-hot-encoding (OHE)
# + ####*Part 2:* Construct an OHE dictionary
# + ####*Part 3:* Parse CTR data and generate OHE features
# + #### *Visualization 1:* Feature frequency
# + ####*Part 4:* CTR prediction and logloss evaluation
# + #### *Visualization 2:* ROC curve
# + ####*Part 5:* Reduce feature dimension via feature hashing
# + #### *Visualization 3:* Hyperparameter heat map
#
# #### Note that, for reference, you can look up the details of the relevant Spark methods in [Spark's Python API](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD) and the relevant NumPy methods in the [NumPy Reference](http://docs.scipy.org/doc/numpy/reference/index.html)
# In[1]:
labVersion = 'cs190_week4_v_1_3'
# ### ** Part 1: Featurize categorical data using one-hot-encoding **
# #### ** (1a) One-hot-encoding **
# #### We would like to develop code to convert categorical features to numerical ones, and to build intuition, we will work with a sample unlabeled dataset with three data points, with each data point representing an animal. The first feature indicates the type of animal (bear, cat, mouse); the second feature describes the animal's color (black, tabby); and the third (optional) feature describes what the animal eats (mouse, salmon).
# #### In a one-hot-encoding (OHE) scheme, we want to represent each tuple of `(featureID, category)` via its own binary feature. We can do this in Python by creating a dictionary that maps each tuple to a distinct integer, where the integer corresponds to a binary feature. To start, manually enter the entries in the OHE dictionary associated with the sample dataset by mapping the tuples to consecutive integers starting from zero, ordering the tuples first by featureID and next by category.
# #### Later in this lab, we'll use OHE dictionaries to transform data points into compact lists of features that can be used in machine learning algorithms.
# In[2]:
# Data for manual OHE
# Note: the first data point does not include any value for the optional third feature
sampleOne = [(0, 'mouse'), (1, 'black')]
sampleTwo = [(0, 'cat'), (1, 'tabby'), (2, 'mouse')]
sampleThree = [(0, 'bear'), (1, 'black'), (2, 'salmon')]
sampleDataRDD = sc.parallelize([sampleOne, sampleTwo, sampleThree])
# In[3]:
# TODO: Replace <FILL IN> with appropriate code
sampleOHEDictManual = {}
sampleOHEDictManual[(0,'bear')] = 0
sampleOHEDictManual[(0,'cat')] = 1
sampleOHEDictManual[(0,'mouse')] = 2
sampleOHEDictManual[(1,'black')] = 3
sampleOHEDictManual[(1,'tabby')] = 4
sampleOHEDictManual[(2,'mouse')] = 5
sampleOHEDictManual[(2,'salmon')] = 6
# In[4]:
# TEST One-hot-encoding (1a)
from test_helper import Test
Test.assertEqualsHashed(sampleOHEDictManual[(0,'bear')],
'b6589fc6ab0dc82cf12099d1c2d40ab994e8410c',
"incorrect value for sampleOHEDictManual[(0,'bear')]")
Test.assertEqualsHashed(sampleOHEDictManual[(0,'cat')],
'356a192b7913b04c54574d18c28d46e6395428ab',
"incorrect value for sampleOHEDictManual[(0,'cat')]")
Test.assertEqualsHashed(sampleOHEDictManual[(0,'mouse')],
'da4b9237bacccdf19c0760cab7aec4a8359010b0',
"incorrect value for sampleOHEDictManual[(0,'mouse')]")
Test.assertEqualsHashed(sampleOHEDictManual[(1,'black')],
'77de68daecd823babbb58edb1c8e14d7106e83bb',
"incorrect value for sampleOHEDictManual[(1,'black')]")
Test.assertEqualsHashed(sampleOHEDictManual[(1,'tabby')],
'1b6453892473a467d07372d45eb05abc2031647a',
"incorrect value for sampleOHEDictManual[(1,'tabby')]")
Test.assertEqualsHashed(sampleOHEDictManual[(2,'mouse')],
'ac3478d69a3c81fa62e60f5c3696165a4e5e6ac4',
"incorrect value for sampleOHEDictManual[(2,'mouse')]")
Test.assertEqualsHashed(sampleOHEDictManual[(2,'salmon')],
'c1dfd96eea8cc2b62785275bca38ac261256e278',
"incorrect value for sampleOHEDictManual[(2,'salmon')]")
Test.assertEquals(len(sampleOHEDictManual.keys()), 7,
'incorrect number of keys in sampleOHEDictManual')
# #### ** (1b) Sparse vectors **
# #### Data points can typically be represented with a small number of non-zero OHE features relative to the total number of features that occur in the dataset. By leveraging this sparsity and using sparse vector representations of OHE data, we can reduce storage and computational burdens. Below are a few sample vectors represented as dense numpy arrays. Use [SparseVector](https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.linalg.SparseVector) to represent them in a sparse fashion, and verify that both the sparse and dense representations yield the same results when computing [dot products](http://en.wikipedia.org/wiki/Dot_product) (we will later use MLlib to train classifiers via gradient descent, and MLlib will need to compute dot products between SparseVectors and dense parameter vectors).
# #### Use `SparseVector(size, *args)` to create a new sparse vector where size is the length of the vector and args is either a dictionary, a list of (index, value) pairs, or two separate arrays of indices and values (sorted by index). You'll need to create a sparse vector representation of each dense vector `aDense` and `bDense`.
# In[5]:
import numpy as np
from pyspark.mllib.linalg import SparseVector
# In[6]:
# TODO: Replace <FILL IN> with appropriate code
aDense = np.array([0., 3., 0., 4.])
aSparse = SparseVector(4, [(1, 3.), (3, 4.)])
bDense = np.array([0., 0., 0., 1.])
bSparse = SparseVector(4, [(3, 1.)])
w = np.array([0.4, 3.1, -1.4, -.5])
print aDense.dot(w)
print aSparse.dot(w)
print bDense.dot(w)
print bSparse.dot(w)
# In[7]:
# TEST Sparse Vectors (1b)
Test.assertTrue(isinstance(aSparse, SparseVector), 'aSparse needs to be an instance of SparseVector')
Test.assertTrue(isinstance(bSparse, SparseVector), 'bSparse needs to be an instance of SparseVector')
Test.assertTrue(aDense.dot(w) == aSparse.dot(w),
'dot product of aDense and w should equal dot product of aSparse and w')
Test.assertTrue(bDense.dot(w) == bSparse.dot(w),
'dot product of bDense and w should equal dot product of bSparse and w')
# #### **(1c) OHE features as sparse vectors **
# #### Now let's see how we can represent the OHE features for points in our sample dataset. Using the mapping defined by the OHE dictionary from Part (1a), manually define OHE features for the three sample data points using SparseVector format. Any feature that occurs in a point should have the value 1.0. For example, the `DenseVector` for a point with features 2 and 4 would be `[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0]`.
# In[8]:
# Reminder of the sample features
# sampleOne = [(0, 'mouse'), (1, 'black')]
# sampleTwo = [(0, 'cat'), (1, 'tabby'), (2, 'mouse')]
# sampleThree = [(0, 'bear'), (1, 'black'), (2, 'salmon')]
# In[9]:
# TODO: Replace <FILL IN> with appropriate code
sampleOneOHEFeatManual = SparseVector(7, [(2, 1.), (3, 1.)])
sampleTwoOHEFeatManual = SparseVector(7, [(1, 1.), (4, 1.), (5, 1.)])
sampleThreeOHEFeatManual = SparseVector(7, [(0, 1.), (3, 1.), (6, 1.)])
# In[10]:
# TEST OHE Features as sparse vectors (1c)
Test.assertTrue(isinstance(sampleOneOHEFeatManual, SparseVector),
'sampleOneOHEFeatManual needs to be a SparseVector')
Test.assertTrue(isinstance(sampleTwoOHEFeatManual, SparseVector),
'sampleTwoOHEFeatManual needs to be a SparseVector')
Test.assertTrue(isinstance(sampleThreeOHEFeatManual, SparseVector),
                'sampleThreeOHEFeatManual needs to be a SparseVector')
|
qbeenslee/Nepenthes-Server
|
config/param.py
|
Python
|
gpl-3.0
| 728
| 0.002747
|
# coding:utf-8
'''
Author : qbeenslee
Created : 15/4/3
'''
UID = 'uid'
TOKEN = 'token'
CLIENT = 'client'
DESCRIPTION = 'description'
EMAIL = 'email'
WHAT = 'what'
IMEI = 'imei'
USERNAME = 'nickname'
PWD = 'pwd'
IMAGE_FILES = 'imagefiles'
MSG = 'msg'
VERSION = 'version'
URL = 'url'
NICKNAME = 'nickname'
MOTTO = 'motto'
AVATAR = 'avatar'
WALLPAPER = 'wallpaper'
VERIFY_STATUS = 'verify_status'
LATITUDE = 'latitude'
LONGITUDE = 'longitude'
HIDE_LOCATION = 'hide_location'
PLACE_NAME = 'place_name'
RADIUS = 'radius'
LIMIT = 'limit'
OFFSET = 'offset'
PAGE = 'page'
PAGE_SIZE = 'page_size'
TOTAL_COUNT = 'total_count'
SORT_TYPE = 'sort_type'
CONTENT = 'content'
SID = 'sid'
HEIGHT = "height"
WIDTH = "width"
OPERATE='operate'
|
berkmancenter/mediacloud
|
apps/extract-article-from-page/bin/extract_article_from_page_http_server.py
|
Python
|
agpl-3.0
| 5,416
| 0.000554
|
#!/usr/bin/env python3
"""
Single-threaded HTTP server that extracts an article's HTML from full page HTML.
Accepts POST requests to "/extract" endpoint with body JSON:
{
"html": "<html><title>Title</title><body><p>Paragraph.</p></html>"
}
On success, returns HTTP 200 and extracted HTML:
{
"extracted_html": "Title\n\n<body id=\"readabilityBody\"><p>Paragraph.</p></body>",
"extractor_version": "readability-lxml-0.6.1"
}
On errors, returns HTTP 4xx / 5xx and error message:
{
"error": "You're using it wrong."
}
"""
import argparse
from http import HTTPStatus
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse
from mediawords.util.parse_json import encode_json, decode_json
from mediawords.util.log import create_logger
from extract_article_from_page import extract_article_from_page, extractor_name
log = create_logger(__name__)
_MAX_HTML_LENGTH = 4 * 1024 * 1024
"""Extractor will refuse to extract HTML pages bigger than this."""
_MAX_REQUEST_LENGTH = _MAX_HTML_LENGTH + (10 * 1024)
"""HTTP server will refuse to serve requests larger than this."""
class ServerHandler(BaseHTTPRequestHandler):
# Allow HTTP/1.1 connections and so don't wait up on "Expect:" headers
protocol_version = "HTTP/1.1"
_API_ENDPOINT_PATH = "/extract"
def __json_response(self, status: int, response: dict) -> bytes:
json_response = encode_json(response)
encoded_json_response = json_response.encode("UTF-8", errors="replace")
self.send_response(status)
self.send_header("Content-Type", "application/json; charset=UTF-8")
self.send_header("Content-Length", len(encoded_json_response))
self.end_headers()
return encoded_json_response
def __error_response(self, status: int, message: str) -> bytes:
log.error(message)
return self.__json_response(status=status, response={"error": message})
def __success_response(self, status: int, response: dict) -> bytes:
response = self.__json_response(status=status, response=response)
log.info(f"Returning response ({len(response)} bytes)")
return response
def __post(self) -> bytes:
uri = urlparse(self.path)
if uri.path != self._API_ENDPOINT_PATH:
return self.__error_response(
status=HTTPStatus.NOT_FOUND.value,
message=f"Only {self._API_ENDPOINT_PATH} is implemented.",
)
content_length = int(self.headers.get('Content-Length', 0))
log.info(f"Received extraction request ({content_length} bytes)...")
if not content_length:
return self.__error_response(
status=HTTPStatus.LENGTH_REQUIRED.value,
message="Content-Length header is not set.",
)
if content_length > _MAX_REQUEST_LENGTH:
return self.__error_response(
status=HTTPStatus.REQUEST_ENTITY_TOO_LARGE.value,
message=f"Request is larger than {_MAX_REQUEST_LENGTH} bytes."
)
encoded_body = self.rfile.read(content_length)
try:
json_body = encoded_body.decode('utf-8', errors='replace')
except Exception as ex:
return self.__error_response(
status=HTTPStatus.BAD_REQUEST.value,
message=f"Unable to decode request body: {ex}",
)
try:
body = decode_json(json_body)
except Exception as ex:
return self.__error_response(
status=HTTPStatus.BAD_REQUEST.value,
message=f"Unable to decode request JSON: {ex}",
)
if "html" not in body:
return self.__error_response(
status=HTTPStatus.BAD_REQUEST.value,
message="Request JSON doesn't have 'html' key.",
)
html = body["html"]
try:
extracted_html = extract_article_from_page(html)
except Exception as ex:
return self.__error_response(
status=HTTPStatus.BAD_REQUEST.value,
message=f"Unable to extract article HTML from page HTML: {ex}"
)
response = {
'extracted_html': extracted_html,
'extractor_version': extractor_name(),
}
return self.__success_response(
status=HTTPStatus.OK.value,
response=response,
)
# noinspection PyPep8Naming
def do_POST(self) -> None:
self.wfile.write(self.__post())
# noinspection PyPep8Naming
def do_GET(self):
return self.__error_response(
status=HTTPStatus.METHOD_NOT_ALLOWED.value,
message="Try POST instead!",
)
def start_http_server(port: int) -> None:
"""Start HTTP server."""
log.info(f"Listening on port {port}...")
server = HTTPServer(('', port), ServerHandler)
try:
server.serve_forever()
except KeyboardInterrupt:
pass
log.info("Shutting down...")
server.server_close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Start page HTML -> article HTML extraction HTTP server.")
parser.add_argument("-p", "--port", type=int, default=80, help="Port to listen to")
args = parser.parse_args()
start_http_server(port=args.port)
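# A minimal client-side sketch of the request/response contract documented in
# the module docstring, assuming the server runs locally on port 8080 (the
# host and port are assumptions, not part of this module):
import requests

resp = requests.post(
    "http://localhost:8080/extract",
    json={"html": "<html><title>Title</title><body><p>Paragraph.</p></body></html>"},
)
resp.raise_for_status()
payload = resp.json()
print(payload["extracted_html"])
print(payload["extractor_version"])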
|
jarpy/lambkin
|
lambkin/version.py
|
Python
|
apache-2.0
| 18
| 0
|
VERSION = '0.3.4'
|
charukiewicz/beer-manager
|
venv/lib/python3.4/site-packages/passlib/ext/django/models.py
|
Python
|
mit
| 12,558
| 0.002787
|
"""passlib.ext.django.models -- monkeypatch django hashing framework"""
#=============================================================================
# imports
#=============================================================================
# core
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
from django import VERSION
from django.conf import settings
# pkg
from passlib.context import CryptContext
from passlib.exc import ExpectedTypeError
from passlib.ext.django.utils import _PatchManager, hasher_to_passlib_name, \
get_passlib_hasher, get_preset_config
from passlib.utils.compat import callable, unicode, bytes
# local
__all__ = ["password_context"]
#=============================================================================
# global attrs
#=============================================================================
# the context object which this patches contrib.auth to use for password hashing.
# configuration controlled by ``settings.PASSLIB_CONFIG``.
password_context = CryptContext()
# function mapping User objects -> passlib user category.
# may be overridden via ``settings.PASSLIB_GET_CATEGORY``.
def _get_category(user):
"""default get_category() implementation"""
if user.is_superuser:
return "superuser"
elif user.is_staff:
return "staff"
else:
return None
# object used to track state of patches applied to django.
_manager = _PatchManager(log=logging.getLogger(__name__ + "._manager"))
# patch status
_patched = False
#=============================================================================
# applying & removing the patches
#=============================================================================
def _apply_patch():
"""monkeypatch django's password handling to use ``passlib_context``,
assumes the caller will configure the object.
"""
#
# setup constants
#
log.debug("preparing to monkeypatch 'django.contrib.auth' ...")
global _patched
assert not _patched, "monkeypatching already applied"
HASHERS_PATH = "django.contrib.auth.hashers"
MODELS_PATH = "django.contrib.auth.models"
USER_PATH = MODELS_PATH + ":User"
FORMS_PATH = "django.contrib.auth.forms"
#
    # import UNUSABLE_PASSWORD and is_password_usable() helpers
# (providing stubs for older django versions)
#
if VERSION < (1,4):
has_hashers = False
if VERSION < (1,0):
UNUSABLE_PASSWORD = "!"
else:
from django.contrib.auth.models import UNUSABLE_PASSWORD
def is_password_usable(encoded):
return encoded is not None and encoded != UNUSABLE_PASSWORD
def is_valid_secret(secret):
return secret is not None
elif VERSION < (1,6):
has_hashers = True
from django.contrib.auth.hashers import UNUSABLE_PASSWORD, \
is_password_usable
# NOTE: 1.4 - 1.5 - empty passwords no longer valid.
def is_valid_secret(secret):
return bool(secret)
else:
has_hashers = True
from django.contrib.auth.hashers import is_password_usable
# 1.6 - empty passwords valid again
def is_valid_secret(secret):
return secret is not None
if VERSION < (1,6):
def make_unusable_password():
return UNUSABLE_PASSWORD
else:
from django.contrib.auth.hashers import make_password as _make_password
def make_unusable_password():
return _make_password(None)
# django 1.4.6+ uses a separate hasher for "sha1$$digest" hashes
has_unsalted_sha1 = (VERSION >= (1,4,6))
#
# backport ``User.set_unusable_password()`` for Django 0.9
# (simplifies rest of the code)
#
if not hasattr(_manager.getorig(USER_PATH), "set_unusable_password"):
assert VERSION < (1,0)
@_manager.monkeypatch(USER_PATH)
def set_unusable_password(user):
user.password = make_unusable_password()
@_manager.monkeypatch(USER_PATH)
def has_usable_password(user):
return is_password_usable(user.password)
#
# patch ``User.set_password() & ``User.check_password()`` to use
# context & get_category (would just leave these as wrappers for hashers
# module under django 1.4, but then we couldn't pass User object into
# get_category very easily)
#
@_manager.monkeypatch(USER_PATH)
def set_password(user, password):
"passlib replacement for User.set_password()"
if is_valid_secret(password):
# NOTE: pulls _get_category from module globals
cat = _get_category(user)
user.password = password_context.encrypt(password, category=cat)
else:
user.set_unusable_password()
@_manager.monkeypatch(USER_PATH)
def check_password(user, password):
"passlib replacement for User.check_password()"
hash = user.password
if not is_valid_secret(password) or not is_password_usable(hash):
return False
if not hash and VERSION < (1,4):
return False
# NOTE: pulls _get_category from module globals
cat = _get_category(user)
ok, new_hash = password_context.verify_and_update(password, hash,
category=cat)
if ok and new_hash is not None:
# migrate to new hash if needed.
user.password = new_hash
user.save()
return ok
#
    # override check_password() with our own implementation
    #
    @_manager.monkeypatch(HASHERS_PATH, enable=has_hashers)
@_manager.monkeypatch(MODELS_PATH)
def check_password(password, encoded, setter=None, preferred="default"):
"passlib replacement for check_password()"
# XXX: this currently ignores "preferred" keyword, since it's purpose
# was for hash migration, and that's handled by the context.
if not is_valid_secret(password) or not is_password_usable(encoded):
return False
ok = password_context.verify(password, encoded)
if ok and setter and password_context.needs_update(encoded):
setter(password)
return ok
#
# patch the other functions defined in the ``hashers`` module, as well
# as any other known locations where they're imported within ``contrib.auth``
#
if has_hashers:
@_manager.monkeypatch(HASHERS_PATH)
@_manager.monkeypatch(MODELS_PATH)
def make_password(password, salt=None, hasher="default"):
"passlib replacement for make_password()"
if not is_valid_secret(password):
return make_unusable_password()
if hasher == "default":
scheme = None
else:
scheme = hasher_to_passlib_name(hasher)
kwds = dict(scheme=scheme)
handler = password_context.handler(scheme)
            # NOTE: django may specify an empty string for the salt,
# even if scheme doesn't accept a salt. we omit keyword
# in that case.
if salt is not None and (salt or 'salt' in handler.setting_kwds):
kwds['salt'] = salt
return password_context.encrypt(password, **kwds)
@_manager.monkeypatch(HASHERS_PATH)
@_manager.monkeypatch(FORMS_PATH)
def get_hasher(algorithm="default"):
"passlib replacement for get_hasher()"
if algorithm == "default":
scheme = None
else:
scheme = hasher_to_passlib_name(algorithm)
# NOTE: resolving scheme -> handler instead of
# passing scheme into get_passlib_hasher(),
# in case context contains custom handler
# shadowing name of a builtin handler.
handler = password_context.handler(scheme)
return get_passlib_hasher(handler, algorithm=algorithm)
# identify_hasher() was added in django 1.5,
# patching it anyways for 1.4,
|
jrg365/gpytorch
|
gpytorch/variational/batch_decoupled_variational_strategy.py
|
Python
|
mit
| 11,612
| 0.004134
|
#!/usr/bin/env python3
import torch
from torch.distributions.kl import kl_divergence
from ..distributions import Delta, MultivariateNormal
from ..lazy import MatmulLazyTensor, SumLazyTensor
from ..utils.errors import CachingError
from ..utils.memoize import pop_from_cache_ignore_args
from .delta_variational_distribution import DeltaVariationalDistribution
from .variational_strategy import VariationalStrategy
class BatchDecoupledVariationalStrategy(VariationalStrategy):
r"""
A VariationalStrategy that uses a different set of inducing points for the
variational mean and variational covar. It follows the "decoupled" model
proposed by `Jankowiak et al. (2020)`_ (which is roughly based on the strategies
proposed by `Cheng et al. (2017)`_.
Let :math:`\mathbf Z_\mu` and :math:`\mathbf Z_\sigma` be the mean/variance
inducing points. The variational distribution for an input :math:`\mathbf
x` is given by:
.. math::
\begin{align*}
\mathbb E[ f(\mathbf x) ] &= \mathbf k_{\mathbf Z_\mu \mathbf x}^\top
\mathbf K_{\mathbf Z_\mu \mathbf Z_\mu}^{-1} \mathbf m
\\
\text{Var}[ f(\mathbf x) ] &= k_{\mathbf x \mathbf x} - \mathbf k_{\mathbf Z_\sigma \mathbf x}^\top
\mathbf K_{\mathbf Z_\sigma \mathbf Z_\sigma}^{-1}
\left( \mathbf K_{\mathbf Z_\sigma} - \mathbf S \right)
\mathbf K_{\mathbf Z_\sigma \mathbf Z_\sigma}^{-1}
\mathbf k_{\mathbf Z_\sigma \mathbf x}
\end{align*}
where :math:`\mathbf m` and :math:`\mathbf S` are the variational parameters.
Unlike the original proposed implementation, :math:`\mathbf Z_\mu` and :math:`\mathbf Z_\sigma`
have **the same number of inducing points**, which allows us to perform batched operations.
Additionally, you can use a different set of kernel hyperparameters for the mean and the variance function.
We recommend using this feature only with the :obj:`~gpytorch.mlls.PredictiveLogLikelihood` objective function
as proposed in "Parametric Gaussian Process Regressors" (`Jankowiak et al. (2020)`_).
Use the :attr:`mean_var_batch_dim` to indicate which batch dimension corresponds to the different mean/var
kernels.
.. note::
We recommend using the "right-most" batch dimension (i.e. :attr:`mean_var_batch_dim=-1`) for the dimension
that corresponds to the different mean/variance kernel parameters.
Assuming you want `b1` many independent GPs, the :obj:`~gpytorch.variational._VariationalDistribution`
objects should have a batch shape of `b1`, and the mean/covar modules
of the GP should have a batch shape of `b1 x 2`.
(The 2 corresponds to the mean/variance hyperparameters.)
.. seealso::
:obj:`~gpytorch.variational.OrthogonallyDecoupledVariationalStrategy` (a variant proposed by
`Salimbeni et al. (2018)`_ that uses orthogonal projections.)
:param ~gpytorch.models.ApproximateGP model: Model this strategy is applied to.
Typically passed in when the VariationalStrategy is created in the
__init__ method of the user defined model.
:param torch.Tensor inducing_points: Tensor containing a set of inducing
points to use for variational inference.
:param ~gpytorch.variational.VariationalDistribution variational_distribution: A
VariationalDistribution object that represents the form of the variational distribution :math:`q(\mathbf u)`
:param learn_inducing_locations: (Default True): Whether or not
the inducing point locations :math:`\mathbf Z` should be learned (i.e. are they
parameters of the model).
:type learn_inducing_locations: `bool`, optional
:type mean_var_batch_dim: `int`, optional
:param mean_var_batch_dim: (Default `None`):
Set this parameter (ideally to `-1`) to indicate which dimension corresponds to different
kernel hyperparameters for the mean/variance functions.
.. _Cheng et al. (2017):
https://arxiv.org/abs/1711.10127
.. _Salimbeni et al. (2018):
https://arxiv.org/abs/1809.08820
.. _Jankowiak et al. (2020):
https://arxiv.org/abs/1910.07123
Example (**different** hypers for mean/variance):
>>> class MeanFieldDecoupledModel(gpytorch.models.ApproximateGP):
>>> '''
>>> A batch of 3 independent MeanFieldDecoupled PPGPR models.
>>> '''
>>> def __init__(self, inducing_points):
>>> # The variational parameters have a batch_shape of [3]
>>> variational_distribution = gpytorch.variational.MeanFieldVariationalDistribution(
>>> inducing_points.size(-1), batch_shape=torch.Size([3]),
>>> )
>>> variational_strategy = gpytorch.variational.BatchDecoupledVariationalStrategy(
>>> self, inducing_points, variational_distribution, learn_inducing_locations=True,
>>> mean_var_batch_dim=-1
>>> )
>>>
>>> # The mean/covar modules have a batch_shape of [3, 2]
>>> # where the last batch dim corresponds to the mean & variance hyperparameters
>>> super().__init__(variational_strategy)
>>> self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([3, 2]))
>>> self.covar_module = gpytorch.kernels.ScaleKernel(
>>> gpytorch.kernels.RBFKernel(batch_shape=torch.Size([3, 2])),
>>> batch_shape=torch.Size([3, 2]),
>>> )
Example (**shared** hypers for mean/variance):
>>> class MeanFieldDecoupledModel(gpytorch.models.ApproximateGP):
>>> '''
>>> A batch of 3 independent MeanFieldDecoupled PPGPR models.
>>> '''
>>> def __init__(self, inducing_points):
>>> # The variational parameters have a batch_shape of [3]
>>> variational_distribution = gpytorch.variational.MeanFieldVariationalDistribution(
>>> inducing_points.size(-1), batch_shape=torch.Size([3]),
>>> )
>>> variational_strategy = gpytorch.variational.BatchDecoupledVariationalStrategy(
>>> self, inducing_points, variational_distribution, learn_inducing_locations=True,
>>> )
>>>
>>> # The mean/covar modules have a batch_shape of [3]
>>> super().__init__(variational_strategy)
>>> self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([3]))
>>> self.covar_module = gpytorch.kernels.ScaleKernel(
>>> gpytorch.kernels.RBFKernel(batch_shape=torch.Size([3])),
>>> batch_shape=torch.Size([3]),
>>> )
"""
def __init__(
self, model, inducing_points, variational_distribution, learn_inducing_locations=True, mean_var_batch_dim=None
):
if isinstance(variational_distribution, DeltaVariationalDistribution):
raise NotImplementedError(
"BatchDecoupledVariationalStrategy does not work with DeltaVariationalDistribution"
)
if mean_var_batch_dim is not None and mean_var_batch_dim >= 0:
raise ValueError(f"mean_var_batch_dim should be negative indexed, got {mean_var_batch_dim}")
self.mean_var_batch_dim = mean_var_batch_dim
# Maybe unsqueeze inducing points
if inducing_points.dim() == 1:
inducing_points = inducing_points.unsqueeze(-1)
# We're going to create two set of inducing points
# One set for computing the mean, one set for computing the variance
        if self.mean_var_batch_dim is not None:
inducing_points = torch.stack([inducing_points, inducing_points], dim=(self.mean_var_batch_dim - 2))
else:
inducing_points = torch.stack([inducing_points, inducing_points], dim=-3)
super().__init__(model, inducing_points, variational_distribution, learn_inducing_locations)
def _expand_inputs(self
|
knadir/Flask-Images
|
tests/test_template_use.py
|
Python
|
bsd-3-clause
| 780
| 0
|
from . import *
class TestTemplateUse(TestCase):
def test_resized_img_src(self):
        @self.app.route('/resized_img_src')
def use():
return render_template_string('''
<img src="{{ resized_img_src('cc.png') }}" />
'''.strip())
res = self.client.get('/resized_img_src')
self.assert200(res)
        self.assertIn('src="/imgsizer/cc.png?', res.data)
def test_url_for(self):
@self.app.route('/url_for')
def use():
return render_template_string('''
<img src="{{ url_for('images', filename='cc.png') }}" />
'''.strip())
res = self.client.get('/url_for')
self.assert200(res)
self.assertIn('src="/imgsizer/cc.png?', res.data)
|
efce/voltPy
|
manager/operations/methods/MedianFilter.py
|
Python
|
gpl-3.0
| 1,485
| 0.00202
|
import numpy as np
from scipy.signal import medfilt
import manager.operations.method as method
from manager.operations.methodsteps.confirmation import Confirmation
from manager.exceptions import VoltPyNotAllowed
class MedianFilter(method.ProcessingMethod):
can_be_applied = True
    _steps = [
{
'class': Confirmation,
'title': 'Apply median filter',
'desc': 'Press Forward to apply Median Filter.',
},
]
description = """
    The median filter is a smoothing algorithm similar to Savitzky-Golay; however, instead of fitting a polynomial,
    it replaces the middle point of the window with the median of the points in the window. The median filter is
    most useful for removing spikes from the signal (single-point, large-amplitude errors).
"""
@classmethod
def __str__(cls):
return "Median Filter"
def apply(self, user, dataset):
if self.model.completed is not True:
raise VoltPyNotAllowed('Incomplete procedure.')
self.__perform(dataset)
def __perform(self, dataset):
for cd in dataset.curves_data.all():
yvec = cd.yVector
newyvec = medfilt(yvec)
dataset.updateCurve(self.model, cd, newyvec)
dataset.save()
def finalize(self, user):
self.__perform(self.model.dataset)
self.model.step = None
self.model.completed = True
self.model.save()
return True
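# A standalone sketch of the spike-removal behaviour described in the class
# docstring, calling scipy.signal.medfilt directly on a synthetic curve
# (illustrative only; outside of the VoltPy dataset machinery used above):
import numpy as np
from scipy.signal import medfilt

yvec = np.sin(np.linspace(0, 2 * np.pi, 50))
yvec[10] += 5.0               # single-point, large-amplitude spike
newyvec = medfilt(yvec)       # default window of 3, as used in __perform() above
print(yvec[10], newyvec[10])  # the spike is replaced by the local median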
|
mc706/django-angular-scaffold
|
angular_scaffold/_docs.py
|
Python
|
mit
| 4,321
| 0.00648
|
docs = """django-angular-scaffold
=======================
[](https://travis-ci.org/mc706/django-angular-scaffold)
[](http://badge.fury.io/py/django-angular-scaffold)
[](https://landscape.io/github/mc706/django-angular-scaffold/master)
[](https://coveralls.io/r/mc706/django-angular-scaffold)
A set of django management commands to scaffold a django + angular project.
##Installation
Install using pip
```
pip install django-angular-scaffold
```
include in your INSTALLED_APPS
```
#settings.py
...
INSTALLED_APPS = (
...
'angular_scaffold',
...
)
```
##Commands
The following are commands that are made available through this package.
###scaffold
```
./manage.py scaffold
```
Builds a assets folder structure in the following structure:
```
/assets
+ - app
| + - config
| + - controllers
| + - directives
| + - services
| + - views
| + - app.js
+ - lib
+ - fonts
+ - scripts
+ - styles
+ - site
| + - _global.scss
| + - _mixins.scss
| + - _variables.scss
+ - vendor
+ styles.scss
```
It will prompt for an application name, which is used to start the angular app off.
It also automatically setups the `styles.scss` to import the pre stubbed out globals, mixins, and variables files.
The rest of the folders are stubbed out with a `.gitkeep` file to allow the directory structure to be added to git.
###startview
```
./manage.py startview <viewname>
```
Creates a new view, creates a new stylesheet, and adds it to the style imports.
Can accept a path. The following are valid viewname arguments
```
./manage startview homepage
./manage startview home-page
./manage startview ticket/new
./manage startview settings/options/test
```
This will create a view file in the appropriate folder, create a mirrored scss file in the site directory, and
import the style into the main styles.scss file.
###generatedocs
```
./manage.py generatedocs
```
Adds a `/docs` folder and copies some basic documentation into it
###createdebugger
```
./manage.py createdebugger <password>
```
Creates a config file for angular that overrides console.log and replaces it with
$log.debug. Then disables $log.debug unless a query string with an encoded password
is included.
This makes it very easy to debug your application without having to expose the underlying
logging to the users. It also allows you to keep your logging statements in your app when going to
production, as they are turned off and hidden by default.
###startservice
```
./manage.py startservice <service_name>
```
Creates a starter service. Will ask for the endpoint, and the pluralization of the service name,
and will create list, get, post, put, and delete methods for that service.
###startcontroller
```
./manage.py startcontroller <controller_name>
```
Creates a new empty controller in controllers directory.
###createcsrf
```
./manage.py createcsrf
```
Adds the csrf token to your angular ajax headers in a config file. Also injects the `ngCookies` dependency into your app.
###startroutes
```
./manage.py startroutes
```
Adds a routes config file and injects ngRoute as a dependency.
Creates a default route to `/` using `HomeController` and `views/home.html`.
###addroute
```
./manage.py addroute
```
Adds a route to the routes. Will prompt for url, controller, views, and a number of variables to resolve.
* `when:` - put in the url in angular notation for this route example: `/tickets/:ticket/edit/`
* `controller:` - the short name of the controller example: `ticket = TicketController`
* `view: ` - path relative to the views folder to the html template example: `tickets/ticket.html`
* `resolve` - name of variable to resolve into controller
###createlimiter
```
./manage.py createlimiter
```
Adds a runtime config that gives access to a `$rootScope.checkLimiter()` function that you can use in services
to limit the number of calls made. """
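# --- Editor's sketch (not part of the original file): the README above documents
# only the CLI form of these commands.  If they take their arguments positionally,
# they could presumably also be scripted with Django's call_command; this is an
# assumption about this package, not documented behaviour.
if __name__ == "__main__":
    from django.core.management import call_command
    call_command("startview", "home-page")        # same as ./manage.py startview home-page
    call_command("startcontroller", "home")       # same as ./manage.py startcontroller home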
|
LuckehPickle/Comet
|
docs/categories.py
|
Python
|
apache-2.0
| 2,173
| 0.02347
|
"""
Copyright (c) 2016 - Sean Bailey - All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Django Imports
from django.core.urlresolvers import reverse
# Docs Imports
from docs.models import Article
class Category:
"""
Article Category
"""
def __init__(self, title, description, url, colour, db_code):
self.title = title
self.description = description
self.url = url
self.colour = colour
self.db_code = db_code
def get_absolute_url(self):
return reverse("category", args=[self.url])
def recent_articles(self):
"""
Collects the most recent articles of a category.
"""
        return Article.objects.filter(
            category=self.db_code,
        ).order_by("-last_edited")[:10]
NEWS = Category(
title = "News",
description = "Site news, changelogs and updates.",
url = "news",
colour = "#F44336",
db_code = "NE",
)
SUPPORT = Category(
title = "Support",
description = "Helping you understand Comet.",
url = "support",
colour = "#F45A36",
db_code = "SU",
)
DEVELOPER = Category(
title = "Developer",
description = "Developer logs, explanations and all things code.",
url = "developer",
colour = "#F47B36",
db_code = "DE",
)
COMMUNITY = Category(
title = "Community",
description = "For the community, by the community.",
url = "community",
colour = "#F49336",
    db_code = "CO",
)
OTHER = Category(
title = "Other",
description = "Miscellaneous articles.",
url = "other",
colour = "#F4A336",
db_code = "OT",
)
CATEGORIES = [NEWS, SUPPORT, DEVELOPER, COMMUNITY, OTHER]
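# --- Editor's sketch (not part of the original file): a small helper showing how
# the db_code fields above map back to a Category; `category_for_code` is
# hypothetical and not part of Comet itself.
def category_for_code(db_code):
    """Return the Category whose db_code matches, falling back to OTHER."""
    for category in CATEGORIES:
        if category.db_code == db_code:
            return category
    return OTHER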
|
jromang/retina-old
|
distinclude/spyderlib/widgets/sourcecode/terminal.py
|
Python
|
gpl-3.0
| 4,314
| 0.007651
|
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""Terminal emulation tools"""
import os
class ANSIEscapeCodeHandler(object):
"""ANSI Escape sequences handler"""
if os.name == 'nt':
# Windows terminal colors:
ANSI_COLORS = ( # Normal, Bright/Light
('#000000', '#808080'), # 0: black
('#800000', '#ff0000'), # 1: red
('#008000', '#00ff00'), # 2: green
('#808000', '#ffff00'), # 3: yellow
('#000080', '#0000ff'), # 4: blue
('#800080', '#ff00ff'), # 5: magenta
('#008080', '#00ffff'), # 6: cyan
('#c0c0c0', '#ffffff'), # 7: white
)
elif os.name == 'mac':
# Terminal.app colors:
ANSI_COLORS = ( # Normal, Bright/Light
('#000000', '#818383'), # 0: black
('#C23621', '#FC391F'), # 1: red
('#25BC24', '#25BC24'), # 2: green
('#ADAD27', '#EAEC23'), # 3: yellow
('#492EE1', '#5833FF'), # 4: blue
('#D338D3', '#F935F8'), # 5: magenta
('#33BBC8', '#14F0F0'), # 6: cyan
('#CBCCCD', '#E9EBEB'), # 7: white
)
else:
# xterm colors:
ANSI_COLORS = ( # Normal, Bright/Light
('#000000', '#7F7F7F'), # 0: black
('#CD0000', '#ff0000'), # 1: red
            ('#00CD00', '#00ff00'), # 2: green
('#CDCD00', '#ffff00'), # 3: yellow
('#0000EE', '#5C5CFF'), # 4: blue
('#CD00CD', '#ff00ff'), # 5: magenta
('#00CDCD', '#00ffff'), # 6: cyan
            ('#E5E5E5', '#ffffff'), # 7: white
)
def __init__(self):
self.intensity = 0
self.italic = None
self.bold = None
self.underline = None
self.foreground_color = None
self.background_color = None
self.default_foreground_color = 30
self.default_background_color = 47
def set_code(self, code):
assert isinstance(code, int)
if code == 0:
# Reset all settings
self.reset()
elif code == 1:
# Text color intensity
self.intensity = 1
# The following line is commented because most terminals won't
# change the font weight, against ANSI standard recommendation:
# self.bold = True
elif code == 3:
# Italic on
self.italic = True
elif code == 4:
# Underline simple
self.underline = True
elif code == 22:
# Normal text color intensity
self.intensity = 0
self.bold = False
elif code == 23:
# No italic
self.italic = False
elif code == 24:
# No underline
self.underline = False
elif code >= 30 and code <= 37:
# Text color
self.foreground_color = code
elif code == 39:
# Default text color
self.foreground_color = self.default_foreground_color
elif code >= 40 and code <= 47:
# Background color
self.background_color = code
elif code == 49:
# Default background color
self.background_color = self.default_background_color
self.set_style()
def set_style(self):
"""
Set font style with the following attributes:
'foreground_color', 'background_color', 'italic',
'bold' and 'underline'
"""
raise NotImplementedError
def reset(self):
self.current_format = None
self.intensity = 0
self.italic = False
self.bold = False
self.underline = False
self.foreground_color = None
self.background_color = None
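# --- Editor's illustration (not part of the original file): set_style() above is
# abstract, so a concrete handler only needs to render the current attributes.
# _PrintingHandler is a made-up minimal subclass for demonstration.
class _PrintingHandler(ANSIEscapeCodeHandler):
    def set_style(self):
        print(self.foreground_color, self.background_color,
              self.italic, self.bold, self.underline)

if __name__ == '__main__':
    handler = _PrintingHandler()
    for code in (3, 4, 31, 42, 0):   # italic, underline, red text, green background, reset
        handler.set_code(code)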
|
beeftornado/sentry
|
src/sentry/grouping/strategies/base.py
|
Python
|
bsd-3-clause
| 10,151
| 0.001084
|
from __future__ import absolute_import
import six
import inspect
from sentry import projectoptions
from sentry.grouping.component import GroupingComponent
from sentry.grouping.enhancer import Enhancements
STRATEGIES = {}
RISK_LEVEL_LOW = 0
RISK_LEVEL_MEDIUM = 1
RISK_LEVEL_HIGH = 2
def strategy(id=None, ids=None, variants=None, interfaces=None, name=None, score=None):
"""Registers a strategy"""
if interfaces is None or variants is None:
raise TypeError("interfaces and variants are required")
if name is None:
if len(interfaces) != 1:
raise RuntimeError("%r requires a name" % id)
name = interfaces[0]
if id is not None:
if ids is not None:
raise TypeError("id and ids given")
ids = [id]
def decorator(f):
for id in ids:
STRATEGIES[id] = rv = Strategy(
id=id, name=name, interfaces=interfaces, variants=variants, score=score, func=f
)
return rv
return decorator
def lookup_strategy(strategy_id):
"""Looks up a strategy by id."""
try:
return STRATEGIES[strategy_id]
except KeyError:
raise LookupError("Unknown strategy %r" % strategy_id)
class Strategy(object):
"""Baseclass for all strategies."""
def __init__(self, id, name, interfaces, variants, score, func):
self.id = id
self.strategy_class = id.split(":", 1)[0]
self.name = name
self.interfaces = interfaces
self.mandatory_variants = []
self.optional_variants = []
        self.variants = []
for variant in variants:
if variant[:1] == "!":
self.mandatory_variants.append(variant[1:])
else:
self.optional_variants.append(variant)
self.variants.append(variant)
self.score = score
self.func = func
self.variant_processor_func = None
def __repr__(self):
return "<%s id=%r variants=%r>" % (self.__class__.__name__, self.id, self.variants)
def _invoke(self, func, *args, **kwargs):
# We forcefully override strategy here. This lets a strategy
# function always access its metadata and directly forward it to
# subcomponents without having to filter out strategy.
kwargs["strategy"] = self
return func(*args, **kwargs)
def __call__(self, *args, **kwargs):
return self._invoke(self.func, *args, **kwargs)
def variant_processor(self, func):
"""Registers a variant reducer function that can be used to postprocess
all variants created from this strategy.
"""
self.variant_processor_func = func
return func
def get_grouping_component(self, event, variant, config):
"""Given a specific variant this calculates the grouping component.
"""
args = []
for iface_path in self.interfaces:
iface = event.interfaces.get(iface_path)
if iface is None:
return None
args.append(iface)
return self(event=event, variant=variant, config=config, *args)
def get_grouping_component_variants(self, event, config):
"""This returns a dictionary of all components by variant that this
strategy can produce.
"""
rv = {}
# trivial case: we do not have mandatory variants and can handle
# them all the same.
if not self.mandatory_variants:
for variant in self.variants:
component = self.get_grouping_component(event, variant, config)
if component is not None:
rv[variant] = component
else:
mandatory_component_hashes = {}
prevent_contribution = None
for variant in self.mandatory_variants:
component = self.get_grouping_component(event, variant, config)
if component is None:
continue
if component.contributes:
mandatory_component_hashes[component.get_hash()] = variant
rv[variant] = component
prevent_contribution = not mandatory_component_hashes
for variant in self.optional_variants:
# We also only want to create another variant if it
# produces different results than the mandatory components
component = self.get_grouping_component(event, variant, config)
if component is None:
continue
# In case this variant contributes we need to check two things
# here: if we did not have a system match we need to prevent
# it from contributing. Additionally if it matches the system
# component we also do not want the variant to contribute but
# with a different message.
if component.contributes:
if prevent_contribution:
component.update(
contributes=False,
hint="ignored because %s variant is not used"
% (
list(mandatory_component_hashes.values())[0]
if len(mandatory_component_hashes) == 1
else "other mandatory"
),
)
else:
hash = component.get_hash()
duplicate_of = mandatory_component_hashes.get(hash)
if duplicate_of is not None:
component.update(
contributes=False,
hint="ignored because hash matches %s variant" % duplicate_of,
)
rv[variant] = component
if self.variant_processor_func is not None:
rv = self._invoke(self.variant_processor_func, rv, event=event, config=config)
return rv
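# --- Editor's sketch (not part of the original file): how the @strategy decorator
# above is meant to be used.  The id, interface name and returned component are
# invented for illustration; real strategies live elsewhere in sentry.
@strategy(id="editor-example-message:v1", interfaces=["message"], variants=["default"], score=0)
def _editor_example_message_strategy(message_interface, event, variant, config, **meta):
    # one positional argument per entry in `interfaces`, plus the keyword context
    # (event, variant, config, strategy) forwarded by Strategy.__call__/_invoke
    return GroupingComponent(id="message", hint="illustrative component only")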
class StrategyConfiguration(object):
id = None
base = None
config_class = None
strategies = {}
delegates = {}
changelog = None
hidden = False
risk = RISK_LEVEL_LOW
def __init__(self, enhancements=None, **extra):
if enhancements is None:
enhancements = Enhancements([])
else:
enhancements = Enhancements.loads(enhancements)
self.enhancements = enhancements
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.id)
def iter_strategies(self):
"""Iterates over all strategies by highest score to lowest."""
return iter(sorted(self.strategies.values(), key=lambda x: -x.score))
def get_grouping_component(self, interface, *args, **kwargs):
"""Invokes a delegate grouping strategy. If no such delegate is
configured a fallback grouping component is returned.
"""
path = interface.path
strategy = self.delegates.get(path)
if strategy is not None:
kwargs["config"] = self
return strategy(interface, *args, **kwargs)
return GroupingComponent(id=path, hint="grouping algorithm does not consider this value")
@classmethod
def as_dict(self):
return {
"id": self.id,
"base": self.base.id if self.base else None,
"strategies": sorted(self.strategies),
"changelog": self.changelog,
"delegates": sorted(x.id for x in self.delegates.values()),
"hidden": self.hidden,
"risk": self.risk,
"latest": projectoptions.lookup_well_known_key("sentry:grouping_config").get_default(
epoch=projectoptions.LATEST_EPOCH
)
== self.id,
}
def create_strategy_configuration(
id, strategies=None, delegates=None, changelog=None, hidden=False, base=None, risk=None
):
"""Declares a new strategy configuration.
Values can be inherited from a base configuration. For strategies if there is
a strategy of the same class it's
|
nosix/PyCraft
|
src/pycraft/network/__init__.py
|
Python
|
lgpl-3.0
| 433
| 0.002309
|
# -*- coding: utf8 -*-
from .interface import Reliability, Session, Handler
from .logger import LogName
from .server import Server
from .protocol import Protocol, packet_classes
from .packet import ApplicationPacket
from .portscanner import PortScanner
__all__ = [
    'Reliability',
'Session',
'Handler',
'LogName',
'Server',
'Protocol',
'ApplicationPacket',
'PortScanner',
'packet_classes',
]
|
vrenkens/nabu
|
nabu/neuralnetworks/trainers/standard_trainer.py
|
Python
|
mit
| 844
| 0.001185
|
'''@file standard_trainer.py
contains the StandardTrainer'''
from nabu.neuralnetworks.trainers import trainer
class StandardTrainer(trainer.Trainer):
'''a trainer with no added functionality'''
def aditional_loss(self):
'''
add an aditional loss
returns:
the aditional loss or None
'''
return None
def chief_only_hooks(self, outputs):
'''add hooks only for the chief worker
Args:
outputs: the outputs generated by the create graph method
Returns:
a list of hooks
'''
return []
def hooks(self, outputs):
'''add hooks for the session
Args:
outputs: the outputs generated by the create graph method
Returns:
a list of hooks
'''
return []
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/reportlab/lib/normalDate.py
|
Python
|
gpl-3.0
| 20,867
| 0.005367
|
#!/usr/bin/env python
# normalDate.py - version 1.0 - 20000717
#hacked by Robin Becker 10/Apr/2001
#major changes include
# using Types instead of type(0) etc
# BusinessDate class
# __radd__, __rsub__ methods
# formatMS stuff
# derived from an original version created
# by Jeff Bauer of Rubicon Research and used
# with his kind permission
__version__=''' $Id$ '''
__doc__="Jeff Bauer's lightweight date class, extended by us. Predates Python's datetime module."
_bigBangScalar = -4345732 # based on (-9999, 1, 1) BC/BCE minimum
_bigCrunchScalar = 2958463 # based on (9999,12,31) AD/CE maximum
_daysInMonthNormal = [31,28,31,30,31,30,31,31,30,31,30,31]
_daysInMonthLeapYear = [31,29,31,30,31,30,31,31,30,31,30,31]
_dayOfWeekName = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']
_monthName = ['January', 'February', 'March', 'April', 'May', 'June',
'July','August','September','October','November','December']
import string, re, time, datetime
if hasattr(time,'struct_time'):
_DateSeqTypes = (list,tuple,time.struct_time)
else:
_DateSeqTypes = (list,tuple)
_fmtPat = re.compile('\\{(m{1,5}|yyyy|yy|d{1,4})\\}',re.MULTILINE|re.IGNORECASE)
_iso_re = re.compile(r'(\d\d\d\d|\d\d)-(\d\d)-(\d\d)')
def getStdMonthNames():
return list(map(string.lower,_monthName))
def getStdShortMonthNames():
return [x[:3] for x in getStdMonthNames()]
def getStdDayNames():
return list(map(string.lower,_dayOfWeekName))
def getStdShortDayNames():
return [x[:3] for x in getStdDayNames()]
def isLeapYear(year):
"""determine if specified year is leap year, returns Python boolean"""
if year < 1600:
if year % 4:
return 0
else:
return 1
elif year % 4 != 0:
return 0
elif year % 100 != 0:
return 1
elif year % 400 != 0:
return 0
else:
return 1
class NormalDateException(Exception):
"""Exception class for NormalDate"""
pass
class NormalDate:
"""
NormalDate is a specialized class to handle dates without
all the excess baggage (time zones, daylight savings, leap
seconds, etc.) of other date structures. The minimalist
strategy greatly simplifies its implementation and use.
Internally, NormalDate is stored as an integer with values
in a discontinuous range of -99990101 to 99991231. The
integer value is used principally for storage and to simplify
the user interface. Internal calculations are performed by
a scalar based on Jan 1, 1900.
Valid NormalDate ranges include (-9999,1,1) B.C.E. through
(9999,12,31) C.E./A.D.
1.0
No changes, except the version number. After 3 years of use by
various parties I think we can consider it stable.
0.8
Added Prof. Stephen Walton's suggestion for a range method
- module author resisted the temptation to use lambda <0.5 wink>
0.7
Added Dan Winkler's suggestions for __add__, __sub__ methods
0.6
Modifications suggested by Kevin Digweed to fix:
- dayOfWeek, dayOfWeekAbbrev, clone methods
- Permit NormalDate to be a better behaved superclass
0.5
Minor tweaking
0.4
- Added methods __cmp__, __hash__
- Added Epoch variable, scoped to the module
- Added setDay, setMonth, setYear methods
0.3
Minor touch-ups
0.2
- Fixed bug for certain B.C.E leap years
- Added Jim Fulton's suggestions for short alias class name =ND
and __getstate__, __setstate__ methods
Special thanks: Roedy Green
"""
def __init__(self, normalDate=None):
"""
        Accept 1 of 4 values to initialize a NormalDate:
            1. None - creates a NormalDate for the current day
            2. integer in yyyymmdd format
3. string in yyyymmdd format
4. tuple in (yyyy, mm, dd) - localtime/gmtime can also be used
"""
if normalDate is None:
self.setNormalDate(time.localtime(time.time()))
else:
self.setNormalDate(normalDate)
def add(self, days):
"""add days to date; use negative integers to subtract"""
if not isinstance(days,int):
raise NormalDateException( \
'add method parameter must be integer type')
self.normalize(self.scalar() + days)
def __add__(self, days):
"""add integer to normalDate and return a new, calculated value"""
if not isinstance(days,int):
raise NormalDateException( \
'__add__ parameter must be integer type')
cloned = self.clone()
cloned.add(days)
return cloned
def __radd__(self,days):
'''for completeness'''
return self.__add__(days)
def clone(self):
"""return a cloned instance of this normalDate"""
return self.__class__(self.normalDate)
def __cmp__(self, target):
if target is None:
return 1
elif not hasattr(target, 'normalDate'):
return 1
else:
return cmp(self.normalDate, target.normalDate)
def day(self):
"""return the day as integer 1-31"""
return int(repr(self.normalDate)[-2:])
def dayOfWeek(self):
"""return integer representing day of week, Mon=0, Tue=1, etc."""
return dayOfWeek(*self.toTuple())
def dayOfWeekAbbrev(self):
"""return day of week abbreviation for current date: Mon, Tue, etc."""
return _dayOfWeekName[self.dayOfWeek()][:3]
def dayOfWeekName(self):
"""return day of week name for current date: Monday, Tuesday, etc."""
return _dayOfWeekName[self.dayOfWeek()]
def dayOfYear(self):
"""day of year"""
if self.isLeapYear():
daysByMonth = _daysInMonthLeapYear
else:
daysByMonth = _daysInMonthNormal
priorMonthDays = 0
for m in range(self.month() - 1):
priorMonthDays = priorMonthDays + daysByMonth[m]
return self.day() + priorMonthDays
def daysBetweenDates(self, normalDate):
"""
return value may be negative, since calculation is
self.scalar() - arg
"""
if isinstance(normalDate,NormalDate):
return self.scalar() - normalDate.scalar()
else:
return self.scalar() - NormalDate(normalDate).scalar()
def equals(self, target):
if isinstance(target,NormalDate):
if target is None:
return self.normalDate is None
else:
return self.normalDate == target.normalDate
else:
return 0
def endOfMonth(self):
"""returns (cloned) last day of month"""
return self.__class__(self.__repr__()[-8:-2]+str(self.lastDayOfMonth()))
def firstDayOfMonth(self):
"""returns (cloned) first day of month"""
return self.__class__(self.__repr__()[-8:-2]+"01")
def formatUS(self):
"""return date as string in common US format: MM/DD/YY"""
d = self.__repr__()
return "%s/%s/%s" % (d[-4:-2], d[-2:], d[-6:-4])
def formatUSCentury(self):
"""return date as string in 4-digit year US format: MM/DD/YYYY"""
d = self.__repr__()
return "%s/%s/%s" % (d[-4:-2], d[-2:], d[-8:-4])
def _fmtM(self):
return str(self.month())
def _fmtMM(self):
return '%02d' % self.month()
def _fmtMMM(self):
return self.monthAbbrev()
def _fmtMMMM(self):
return self.monthName()
def _fmtMMMMM(self):
return self.monthName()[0]
def _fmtD(self):
return str(self.day())
def _fmtDD(self):
return '%02d' % self.day()
def _fmtDDD(self):
return self.dayOfWeekAbbrev()
def _fmtDDDD(self):
return self.dayOfWeekName()
def _fmtYY(self):
return '%02d' % (self.year()%100)
def _fmtYYYY(self):
return str(self.year())
def formatMS(self,fmt):
'''format like MS date using the notation
{YY} --> 2 digit year
{YYYY} --> 4 digit
|
beaker-project/beaker
|
Common/bkr/common/__init__.py
|
Python
|
gpl-2.0
| 225
| 0
|
# Since bkr is a namespace package (and thus cannot have version specific
# code in bkr.__init__), the version details are retrieved from here in
# order to correctly handle module shadowing on sys.path
__version__ = '28.2'
|
|
schesis/fix
|
tests/decorators/test_with_fixture.py
|
Python
|
gpl-3.0
| 3,536
| 0
|
"""Tests for `fix.with_fixture`."""
from __future__ import with_statement
import os
import shutil
import tempfile
from types import FunctionType
from fix import with_fixture
def test_exists():
"""`fix.with_fixture` function exists"""
assert isinstance(with_fixture, FunctionType)
def test_setup_only():
"""`setup_only` fixture works as expected"""
def setup_only(context):
"""A fixture with no `teardown()`."""
def setup():
"""Add something to the context."""
assert context == {}
context.squee = "kapow"
return setup
@with_fixture(setup_only)
def case(context):
"""Check that the context has been set up."""
assert context == {"squee": "kapow"}
case() # pylint: disable=E1120
def test_setup_teardown():
"""`setup_teardown` fixture works as expected"""
def setup_teardown(context):
"""A fixture with both `setup()` and `teardown()`."""
def setup():
"""Add something to the context."""
assert context == {}
context.squee = "kapow"
def teardown():
"""Check that `context.squee` has changed."""
assert context == {"squee": "boing"}
return setup, teardown
@with_fixture(setup_teardown)
def case(context):
"""Alter the context."""
assert context == {"squee": "kapow"}
context.squee = "boing"
case() # pylint: disable=E1120
def test_multiple_invocation():
"""`multiple` fixture creates a fresh context each invocation"""
def multiple(context):
"""A fixture to be invoked multiple times."""
def setup():
"""Add something to the context."""
assert context == {}
context.squee = "kapow"
def teardown():
"""Check that `context.squee` has changed."""
assert context == {"squee": "kapow", "boing": "thunk"}
return setup, teardown
@with_fixture(multiple)
def case(context):
"""Add to the context."""
assert context == {"squee": "kapow"}
context.boing = "thunk"
for _ in range(3):
case() # pylint: disable=E1120
def test_external():
"""`external` fixture interacts as expected with the 'real world'."""
def external(context, files=3):
"""A fixture to manipulate temporary files and directories."""
def setup():
"""Create some temporary files."""
            context.temp_dir = tempfile.mkdtemp()
context.filenames = ["file_%03d" % i for i in range(files)]
for filename in context.filenames:
with open(os.path.join(context.temp_dir, filename), "w") as f:
f.write("This is the file %r.\n" % filename)
def teardown():
"""Delete the temporary files created in `setup()`."""
shutil.rmtree(context.temp_dir)
return setup, teardown
@with_fixture(external, files=5)
def check_files(context):
"""Return the number of present and absent files."""
present = 0
absent = 0
for filename in context.filenames:
if os.path.exists(os.path.join(context.temp_dir, filename)):
present += 1
else:
absent += 1
return context.temp_dir, present, absent
temp_dir, present, absent = check_files() # pylint: disable=E1120
assert not os.path.exists(temp_dir)
assert present == 5
assert absent == 0
|
silveregg/moto
|
moto/route53/urls.py
|
Python
|
apache-2.0
| 565
| 0.00177
|
from __future__ import unicode_literals
from . import responses
url_bases = [
"https://route5
|
3.amazonaws.com/201.-..-../",
]
url_paths = {
'{0}hostedzone$': responses.list_or_create_hostzone_response,
    '{0}hostedzone/[^/]+$': responses.get_or_delete_hostzone_response,
'{0}hostedzone/[^/]+/rrset/?$': responses.rrset_response,
'{0}healthcheck': responses.health_check_response,
'{0}tags/(healthcheck|hostedzone)/*': responses.list_or_change_tags_for_resource_request,
'{0}trafficpolicyinstances/*': responses.not_implemented_response
}
|
ccxt/ccxt
|
python/ccxt/bitget.py
|
Python
|
mit
| 117,797
| 0.003466
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bitget(Exchange):
def describe(self):
return self.deep_extend(super(bitget, self).describe(), {
'id': 'bitget',
'name': 'Bitget',
'countries': ['SG'],
'version': 'v1',
'rateLimit': 50, # up to 3000 requests per 5 minutes ≈ 600 requests per minute ≈ 10 requests per second ≈ 100 ms
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': None, # has but unimplemented
'future': None, # has but unimplemented
'option': False,
'cancelOrder': True,
'cancelOrders': True,
'createOrder': True,
'fetchAccounts': False,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDeposits': False,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrderTrades': True,
'fetchPosition': True,
'fetchPositions': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': True,
'fetchTradingFees': True,
'fetchWithdrawals': False,
'setLeverage': True,
'setMarginMode': True,
},
'timeframes': {
'spot': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '1h',
'4h': '4h',
'12h': '12h',
'1d': '1day',
'1w': '7day', # not documented on the website
},
'swap': {
'1m': '60',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'4h': '14400',
'12h': '43200',
'1d': '86400',
'1w': '604800',
},
},
'hostname': 'bitget.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/88317935-a8a21c80-cd22-11ea-8e2b-4b9fac5975eb.jpg',
'api': {
'spot': 'https://api.{hostname}',
'mix': 'https://api.{hostname}',
},
'www': 'https://www.bitget.com',
'doc': [
'https://bitgetlimited.github.io/apidoc/en/swap',
'https://bitgetlimited.github.io/apidoc/en/spot',
],
'fees': 'https://www.bitget.cc/zh-CN/rate?tab=1',
'test': {
'rest': 'https://testnet.bitget.com',
},
'referral': 'https://www.bitget.com/expressly?languageType=0&channelCode=ccxt&vipCode=tg9j',
},
'api': {
'public': {
'spot': {
'get': {
'public/time': 1,
'public/currencies': 1,
'public/products': 1,
'public/product': 1,
'market/ticker': 1,
'market/tickers': 1,
'market/fills': 1,
'market/candles': 1,
'market/depth': 1,
},
},
'mix': {
'get': {
'market/contracts': 1,
'market/depth': 1,
'market/ticker': 1,
'market/tickers': 1,
'market/fills': 1,
'market/candles': 1,
'market/index': 1,
'market/funding-time': 1,
'market/history-fundRate': 1,
'market/current-fundRate': 1,
'market/open-interest': 1,
'market/mark-price': 1,
'market/symbol-leverage': 1,
},
},
},
'private': {
'spot': {
'get': {
'account/assets': 2,
'account/transferRecords': 1,
},
'post': {
'account/bills': 2,
'trade/orders': 2,
'trade/batch-orders': 4,
'trade/cancel-order': 2,
'trade/cancel-batch-orders': 4,
'trade/orderInfo': 1,
'trade/open-orders': 1,
'trade/history': 1,
'trade/fills': 1,
},
},
'mix': {
'get': {
'account/account': 2,
'account/accounts': 2,
'account/open-count': 1,
'order/current': 2,
'order/history': 2,
'order/detail': 2,
'order/fills': 2,
'position/singlePosition': 2,
'position/allPosition': 2,
'trace/currentTrack': 2,
'trace/followerOrder': 2,
'trace/historyTrack': 2,
'trace/summary': 2,
'trace/profitSettleTokenIdGroup': 2,
'trace/profitDateGroupList': 2,
'trade/profitDateList': 2,
'trace/waitProfitDateList': 2,
'trace/traderSymbols': 2,
},
'post': {
'account/setLeverage': 8,
'account/setMargin': 8,
'account/setMarginMode': 8,
'order/placeOrder': 2,
|
JulienDrecq/django-odoo-auth
|
odoo_auth/models.py
|
Python
|
bsd-3-clause
| 246
| 0
|
from django.db import models
from django.contrib.auth.models import User
class OdooUser(models.Model):
user = models.OneToOneField(User)
odoo_id = models.BigIntegerField(primary_key=True)
username = models.CharField(max_length=256)
|
chubin/cheat.sh
|
lib/buttons.py
|
Python
|
mit
| 1,461
| 0.004791
|
TWITTER_BUTTON = """
<a href="https://twitter.com/igor_chubin" class="twitter-follow-button" data-show-count="false" data-button="grey">Follow @igor_chubin</a>
<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
"""
GITHUB_BUTTON = """
<!-- Place this tag where you want the button to render. -->
<a aria-label="Star chubin/wttr.in on GitHub" data-count-aria-label="# stargazers on GitHub" data-count-api="/repos/chubin/cheat.sh#stargazers_count" data-count-href="/chubin/cheat.sh/stargazers" data-icon="octicon-star" href="https://github.com/chubin/cheat.sh" class="github-
|
button">cheat.sh</a>
"""
GITHUB_BUTTON_2 = """
<!-- Place this tag where you want the button to render. -->
<a aria-label="Star chubin/cheat.sheets on GitHub" data-count-aria-label="# stargazers on GitHub" data-count-api="/repos/chubin/cheat.sheets#stargazers_count" data-count-href="/chubin/c
|
heat.sheets/stargazers" data-icon="octicon-star" href="https://github.com/chubin/cheat.sheets" class="github-button">cheat.sheets</a>
"""
GITHUB_BUTTON_FOOTER = """
<!-- Place this tag right after the last button or just before your close body tag. -->
<script async defer id="github-bjs" src="https://buttons.github.io/buttons.js"></script>
"""
|
gnovak/overheard
|
overheard/test.py
|
Python
|
mit
| 12,458
| 0.004816
|
#########
# Notes #
#########
#
# Run all tests from command line
# python test.py
# python -m test
#
# Run subset of tests from command line
# python -m unittest ArchivTest
# python -m unittest ArchivTest.test_old_arxiv_id
#
# Run all tests non-interactively from REPL
# import test; test.test()
#
# Run specific test interactively from REPL
# test.ArchivTest('test_old_arxiv_id').debug()
#
from __future__ import with_statement
import unittest, re, tempfile, os
if not hasattr(unittest, 'skipIf'):
try:
import unittest2 as unittest
except ImportError:
raise NotImplementedError, \
"""Tests require either the Python 2.7 or later version of the unittest module or
the unittest2 module."""
import arxiv_id, scrape, util, update, fetch, overheard
network_tests = True
test_aids = ['astro-ph/0701019', # gzipped tex file
'astro-ph/0701528', # gzipped tar file
'astro-ph/0701864', # PDF file
'1211.1574', # gzipped tex file
'1211.4164', # gzipped tar file
'1211.2577'] # PDF file
test_delay = 2
test_file = "overheard.py"
class OverheardTest(unittest.TestCase):
def setUp(self):
self.fetch_verbose_setting = fetch.verbose
self.scrape_verbose_setting = scrape.verbose
# This is the _only_ test where I download some
# non-pre-screened papers, ie, they could be big. It's useful
# to show the download so I don't get worried that the test
# has hung.
fetch.verbose = True
scrape.verbose = False
def tearDown(self):
fetch.verbose = self.fetch_verbose_setting
scrape.verbose = self.scrape_verbose_setting
@unittest.skipIf(not network_tests, "Skipping network tests.")
def test_process_todays_papers(self):
overheard.process_todays_papers(delay=test_delay,
prefix=tempfile.gettempdir(), nmax=2)
class FetchTest(unittest.TestCase):
def setUp(self):
self.verbose_setting = fetch.verbose
fetch.verbose = False
def tearDown(self):
fetch.verbose = self.verbose_setting
def test_extension(self):
fetch.extension('filename.txt')
def test_ensure_dirs_exist(self):
the_dir = os.path.join(tempfile.mkdtemp(), 'aa', 'bb', 'file.txt')
fetch.ensure_dirs_exist(the_dir)
def test_arxiv_to_url(self):
for aid in test_aids:
fetch.arxiv_to_url(aid)
def test_fetch_command(self):
for aid in test_aids:
fetch.fetch_command(aid, "filename")
def test_untar_command(self):
fetch.untar_command("fake.tar")
def test_gunzip_command(self):
fetch.gunzip_command("fake.gz")
def test_latex_file_name(self):
for aid in test_aids:
fetch.latex_file_name(aid)
def test_latex_file_path(self):
for aid in test_aids:
fetch.latex_file_path(aid)
def test_file_name_base(self):
for aid in test_aids:
fetch.file_name_base(aid)
def test_source_file_extension(self):
for aid in test_aids:
fetch.source_file_extension(aid)
def test_source_file_exists(self):
for aid in test_aids:
fetch.source_file_exists(aid)
def test_source_file_name(self):
for aid in test_aids:
fetch.source_file_name(aid)
def test_source_file_path(self):
for aid in test_aids:
fetch.source_file_path(aid)
def test_source_file_path_without_extension(self):
for aid in test_aids:
fetch.source_file_path_without_extension(aid)
def test_file_type_string(self):
fetch.file_type_string(test_file)
def test_is_tar(self):
fetch.is_tar(test_file)
def test_is_gzip(self):
fetch.is_gzip(test_file)
def test_is_pdf(self):
fetch.is_pdf(test_file)
def test_is_tex(self):
fetch.is_tex(test_file)
def test_is_other(self):
fetch.is_other(test_file)
@unittest.skipIf(not network_tests, "Skipping network tests.")
def test_fetch_source_and_latex(self):
        # this exercises fetch.source, fetch.all_source, fetch.latex, and
        # fetch.all_latex
fetch.all_source(test_aids, delay=test_delay, force=True)
fetch.all_latex(test_aids)
class UpdateTest(unittest.TestCase):
@unittest.skipIf(not network_tests, "Skipping network tests.")
def test_fetch_rss(self):
update.fetch_rss()
@unittest.skipIf(not network_tests, "Skipping network tests.")
def test_parse_rss(self):
update.parse_rss()
class UtilTest(unittest.TestCase):
def test_remember_cwd(self):
cwd = os.getcwd()
with util.remember_cwd():
os.chdir("..")
self.assertEqual(os.getcwd(), cwd)
def test_can_uncan_file_object(self):
obj = [1,2,3]
tf = tempfile.TemporaryFile()
util.can(obj, tf)
tf.seek(0)
self.assertEqual(util.uncan(tf), obj)
def test_can_uncan_file_name(self):
obj = [1,2,3]
tf = tempfile.NamedTemporaryFile()
util.can(obj, tf.name)
tf.seek(0)
self.assertEqual(util.uncan(tf.name), obj)
class ArxivIdTest(unittest.TestCase):
def test_old(self):
# good ids
self.assertTrue(arxiv_id.is_old('astro-ph/1234567'))
self.assertTrue(arxiv_id.is_old('astro-ph/1234567v1'))
self.assertTrue(arxiv_id.is_old('astro-ph/1234567v12'))
# too short
self.assertFalse(arxiv_id.is_old('astro-ph/123456'))
self.assertFalse(arxiv_id.is_old('astro-ph/1234567v'))
# too long
self.assertFalse(arxiv_id.is_old('astro-ph/12345678'))
# wrong letter
self.assertFalse(arxiv_id.is_old('astro-ph/1234567a1'))
# junk at start
self.assertFalse(arxiv_id.is_old('astro-ph/a1234567'))
self.assertFalse(arxiv_id.is_old('astro-ph/a1234567v1'))
self.assertFalse(arxiv_id.is_old('astro-ph/a1234567v12'))
# junk at end
self.assertFalse(arxiv_id.is_old('astro-ph/1234567a'))
self.assertFalse(arxiv_id.is_old('astro-ph/1234567v1a'))
self.assertFalse(arxiv_id.is_old('astro-ph/1234567v12a'))
# two versions
self.assertFalse(arxiv_id.is_old('astro-ph/1234567v1v2'))
# No archive name
self.assertFalse(arxiv_id.is_old('/1234567v1v2'))
# No slash
self.assertFalse(arxiv_id.is_old('astro-ph1234567v1v2'))
def test_old_id_parse(self):
self.assertEqual(arxiv_id.archive('astro-ph/1234567v12'), 'astro-ph')
self.assertEqual(arxiv_id.yymm('astro-ph/1234567v12'), '1234')
self.assertEqual(arxiv_id.number('astro-ph/1234567v12'), '567')
self.assertEqual(arxiv_id.version('astro-ph/1234567v12'), 'v12')
self.assertEqual(arxiv_id.version('astro-ph/1234567'), '')
def test_new_id_parse(self):
self.assertEqual(arxiv_id.archive('1234.5678v12'), '')
self.assertEqual(arxiv_id.yymm('1234.5678v12'), '1234')
self.assertEqual(arxiv_id.number('1234.5678v12'), '5678')
self.assertEqual(arxiv_id.version('1234.5678v12'), 'v12')
self.assertEqual(arxiv_id.version('1234.5678'), '')
def test_is_new(self):
# good ids
self.assertTrue(arxiv_id.is_new('1234.5678'))
self.assertTrue(arxiv_id.is_new('1234.5678v1'))
self.assertTrue(arxiv_id.is_new('1234.5678v12'))
# wrong delimiter
self.assertTrue(arxiv_id.is_new('1234a5678'))
# too short
self.assertFalse(arxiv_id.is_new('123.5678'))
self.assertFalse(arxiv_id.is_new('1234.567'))
self.assertFalse(arxiv_id.is_new('1234.5678v'))
# too long
self.assertFalse(arxiv_id.is_new('1234.56788'))
# wrong letter
self.assertFalse(arxiv_id.is_new('1234.5678a1'))
# junk at start
self.assertFalse(arxiv_id.is_new('a1234.5678'))
self.assertFalse(arxiv_id.is_new('a1234.5678v1'))
sel
|
andreoliw/clitoolkit
|
clit/git.py
|
Python
|
bsd-3-clause
| 899
| 0
|
# -*- coding: utf-8 -*-
"""Git tools."""
from shlex import split
from plumbum import ProcessExecutionError
from plumbum.cmd import git
DEVELOPMENT_BRANCH = "develop"
def run_git(*args, dry_run=False, quiet=False):
"""Run a git command, print it before executing and capture the output."""
command = git[split(" ".join(args))]
    if not quiet:
print("{}{}".format("[DRY-RUN] " if dry_run else "", command))
if dry_run:
return ""
rv = command()
if not quiet and rv:
print(rv)
return rv
def branch_exists(branch):
"""Return True if the branch exists."""
try:
run_git("rev-parse --verify {}".format(branch), quiet=True)
return True
except ProcessExecutionError:
return False
def get_current_branch():
"""Get the current branch name."""
return run_git("rev-parse --abbrev-ref HEAD", quiet=True).strip()
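# --- Editor's usage sketch (not part of the original file): dry_run only prints
# the command, so the first call is safe anywhere; the branch helpers assume the
# working directory is a git repository.
if __name__ == "__main__":
    run_git("status --short", dry_run=True)
    if branch_exists(DEVELOPMENT_BRANCH):
        print("current branch:", get_current_branch())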
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractDevastatranslationsWordpressCom.py
|
Python
|
bsd-3-clause
| 578
| 0.032872
|
def extractDevastatranslationsWordpressCom(item):
'''
Parser for 'devastatranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
    for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
pblottiere/QGIS
|
tests/src/python/test_qgstextblock.py
|
Python
|
gpl-2.0
| 2,937
| 0
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsTextBlock.
Run with: ctest -V -R QgsTextBlock
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '12/05/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
import qgis # NOQA
from qgis.core import (
QgsTextBlock,
QgsTextFragment,
QgsStringUtils
)
from qgis.testing import start_app, unittest
start_app()
class TestQgsTextBlock(unittest.TestCase):
def testConstructors(self):
# empty
block = QgsTextBlock()
self.assertEqual(len(block), 0)
# single fragment block
fragment = QgsTextFragment('ludicrous gibs!')
block = QgsTextBlock(fragment)
self.assertEqual(len(block), 1)
self.assertEqual(block[0].text(), fragment.text())
self.assertEqual(block.toPlainText(), 'ludicrous gibs!')
def testAppend(self):
block = QgsTextBlock()
self.assertEqual(len(block), 0)
frag = QgsTextFragment('a')
block.append(frag)
self.assertEqual(len(block), 1)
self.assertEqual(block[0].text(), 'a')
        frag = QgsTextFragment('b')
block.append(frag)
        self.assertEqual(len(block), 2)
self.assertEqual(block[0].text(), 'a')
self.assertEqual(block[1].text(), 'b')
self.assertEqual(block.toPlainText(), 'ab')
def testAt(self):
block = QgsTextBlock()
block.append(QgsTextFragment('a'))
block.append(QgsTextFragment('b'))
self.assertEqual(len(block), 2)
self.assertEqual(block.at(0).text(), 'a')
self.assertEqual(block.at(1).text(), 'b')
with self.assertRaises(KeyError):
block.at(2)
with self.assertRaises(KeyError):
block.at(-1)
self.assertEqual(block[0].text(), 'a')
self.assertEqual(block[1].text(), 'b')
with self.assertRaises(IndexError):
_ = block[2]
self.assertEqual(block[-1].text(), 'b')
self.assertEqual(block[-2].text(), 'a')
def testClear(self):
block = QgsTextBlock()
block.append(QgsTextFragment('a'))
block.append(QgsTextFragment('b'))
self.assertEqual(len(block), 2)
self.assertFalse(block.empty())
block.clear()
self.assertEqual(len(block), 0)
self.assertTrue(block.empty())
def testCapitalize(self):
fragment = QgsTextFragment('ludicrous gibs!')
block = QgsTextBlock(fragment)
block.append(QgsTextFragment('another part'))
block.applyCapitalization(QgsStringUtils.TitleCase)
self.assertEqual(block.toPlainText(), 'Ludicrous Gibs!Another Part')
if __name__ == '__main__':
unittest.main()
|
ohaut/ohaut-core
|
ohaut/exceptions.py
|
Python
|
gpl-3.0
| 46
| 0
|
class InvalidDeviceType(Exception):
    pass
|
willingc/portal
|
systers_portal/common/tests/test_templatetags.py
|
Python
|
gpl-2.0
| 328
| 0
|
from django.test import TestCase
from common.templatetags.verbose_name import verbose_name
from users.models import SystersUser
class TemplateTagsTestCase(TestCase):
def test_verbose_names(self):
"""Test verbose_name template tag"""
        self.assertEqual(verbose_name(SystersUser, "homepage_url"), "Homepage")
|
pekrau/userman
|
userman/dump.py
|
Python
|
mit
| 3,017
| 0.000994
|
" Userman: Dump the database into a JSON file."
import json
import tarfile
from cStringIO import StringIO
from userman import utils
from userman import constants
def dump(db, filename):
"""Dump contents of the database to a tar file, optionally compressed.
Return the number of items, and the number of attachment files dumped."""
count_items = 0
count_files = 0
if filename.endswith('.gz'):
mode = 'w:gz'
elif filename.endswith('.bz2'):
mode = 'w:bz2'
else:
mode = 'w'
outfile = tarfile.open(filename, mode=mode)
for key in db:
if not constants.IUID_RX.match(key): continue
doc = db[key]
del doc['_rev']
info = tarfile.TarInfo(doc['_id'])
data = json.dumps(doc)
info.size = len(data)
outfile.addfile(info, StringIO(data))
count_items += 1
for attname in doc.get('_attachments', dict()):
info = tarfile.TarInfo("{0}_att/{1}".format(doc['_id'], attname))
attfile = db.get_attachment(doc, attname)
data = attfile.read()
attfile.close()
info.size = len(data)
outfile.addfile(info, StringIO(data))
count_files += 1
outfile.close()
return count_items, count_files
def undump(db, filename):
"""Reverse of dump; load all items from a tar file.
Items are just added to the database, ignoring existing items."""
count_items = 0
count_files = 0
attachments = dict()
infile = tarfile.open(filename, mode='r')
for item in infile:
itemfile = infile.extractfile(item)
itemdata = itemfile.read()
itemfile.close()
if item.name in attachments:
# This relies on an attachment being after its item in the tarfile.
db.put_attachment(doc, itemdata, **attachments.pop(item.name))
count_files += 1
else:
doc = json.loads(itemdata)
# If the user document already exists, do not load again.
if doc[constants.DB_DOCTYPE] == constants.USER:
rows = db.view('user/email', key=doc['email'])
if len(list(rows)) != 0: continue
atts = doc.pop('_attachments', dict())
db.save(doc)
count_items += 1
for attname, attinfo in atts.items():
key = "{0}_att/{1}".format(doc['_id'], attname)
attachments[key] = dict(filename=attname,
content_type=attinfo['content_type'])
infile.close()
return count_items, count_files
if __name__ == '__main__':
import sys
try:
utils.load_settings(filepath=sys.argv[1])
except IndexError:
utils.load_settings()
db = utils.get_db()
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
filename = 'dump.tar.gz'
count_items, count_files = dump(db, filename)
print 'dumped', count_items, 'items and', count_files, 'files to', filename
|
googleinterns/e2e-convrec
|
trainer/preprocessing.py
|
Python
|
apache-2.0
| 5,116
| 0.007428
|
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing and Infeeding Tools For Redial and MovieLens Data."""
import functools
import json
from absl import logging
import tensorflow.compat.v1 as tf
from trainer import constants
def rd_jsonl_to_tsv(in_fname, out_fname):
"""Converts the redial jsonl to a tsv."""
logging.info("Reading: %s", in_fname)
def fix_spacing(text):
"""Removes extra spaces."""
# Remove incorrect spacing around punctuation.
text = text.replace(" ,", ",").replace(" .", ".").replace(" %", "%")
text = text.replace(" - ", "-").replace(" : ", ":").replace(" / ", "/")
text = text.replace("( ", "(").replace(" )", ")")
text = text.replace("`` ", "\"").replace(" ''", "\"")
text = text.replace(" 's", "'s").replace("s ' ", "s' ")
return text
count = 0
with tf.io.gfile.GFile(in_fname, "rb") as infile,\
      tf.io.gfile.GFile(out_fname, "w") as outfile:
for line in infile:
ex = json.loads(line)
conversation = fix_spacing(ex["conversation"])
response = fix_spacing(ex["response"])
# Write this line as <conversation>\t<response>
      outfile.write("%s\t%s\n" % (conversation, response))
count += 1
tf.logging.log_every_n(
tf.logging.INFO,
"Wrote %d examples to %s." % (count, out_fname),
1000)
return count
def generic_dataset_fn(split, path, reverse=False, shuffle_files=False):
"""Returns a tf dataset of (conversation, response) pairs for redial."""
# We only have one file for each split.
del shuffle_files
# Load lines from the text file as examples.
ds = tf.data.TextLineDataset(path[split])
# Split each "<input>\t<target>" example into
# a (input, target) tuple.
ds = ds.map(
functools.partial(tf.io.decode_csv, record_defaults=["", ""],
field_delim="\t", use_quote_delim=False),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# reverse if necessary
if reverse:
ds = ds.map(lambda *ex: ex[::-1])
# Map each tuple to a {"inputs": ... "targets": ...} dict.
ds = ds.map(lambda *ex: dict(zip(["inputs", "targets"], ex)))
return ds
def generic_preprocessor(ds, label):
"""Prepares text for input into model."""
def normalize_text(text):
"""Lowercase and remove quotes from a TensorFlow string."""
text = tf.strings.lower(text)
return text
def to_inputs_and_targets(ex):
"""apply preprocessing functions and add task label."""
return {
"inputs":
tf.strings.join(
[label, normalize_text(ex["inputs"])]),
"targets": normalize_text(ex["targets"])
}
return ds.map(to_inputs_and_targets,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
def dataset_fn_wrapper(dataset):
"""Returns the dataset function for the desired dataset.
Args:
dataset: a string representing the desired dataset/task
(rd_recommendations, ml_sequences, ml_tags_normal, ml_tags_reversed,
ml_tags_masked, probe_1, or probe_2)
Returns:
a function that can be passed in as a T5 dataset function
(split, shufffle_files) -> tf.data.dataset
"""
path = {
"rd_recommendations": constants.RD_TSV_PATH,
"ml_sequences": constants.ML_SEQ_TSV_PATH,
"ml_tags_normal": constants.ML_TAGS_TSV_PATH,
"ml_tags_reversed": constants.ML_TAGS_TSV_PATH,
"ml_reviews": constants.ML_REVIEWS_TSV_PATH,
"probe_1": constants.PROBE_1_TSV_PATH,
"probe_1_sequences": constants.PROBE_1_SEQ_TSV_PATH,
"probe_2": constants.PROBE_2_TSV_PATH,
"probe_3": constants.PROBE_3_TSV_PATH,
"probe_4": constants.PROBE_4_TSV_PATH
}[dataset]
reverse = dataset == "ml_tags_reversed"
return lambda split, shuffle_files=False: generic_dataset_fn(split,
path,
reverse,
shuffle_files)
def preprocessor_wrapper(task):
"""Returns the preprocessing function for the desired task.
Args:
task: a string representing the desired task (rd_recommendations,
ml_sequences, ml_tags)
Returns:
a function that can be passed in as a T5 dataset function
(tf.data.dataset) -> tf.data.dataset
"""
label = {
"rd_recommendations": "redial conversation: ",
"ml_sequences": "movielens sequence: ",
"ml_tags": "movielens tags: ",
"ml_reviews": "movielens review: "
}[task]
return lambda ds: generic_preprocessor(ds, label)
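# --- Editor's sketch (not part of the original file): how the two wrappers above
# compose.  The TSV paths come from trainer.constants and the "train" split key is
# assumed, so this only runs in an environment where those files exist.
if __name__ == "__main__":
    dataset_fn = dataset_fn_wrapper("rd_recommendations")
    preprocess = preprocessor_wrapper("rd_recommendations")
    ds = preprocess(dataset_fn("train"))
    print(ds)   # a tf.data.Dataset of {"inputs": ..., "targets": ...} dicts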
|
modoboa/modoboa-amavis
|
modoboa_amavis/modo_extension.py
|
Python
|
mit
| 1,395
| 0
|
# -*- coding: utf-8 -*-
"""
Amavis management frontend.
Provides:
* SQL quarantine management
* Per-domain settings
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy
from modoboa.admin.models import Domain
from modoboa.core.extensions import ModoExtension, exts_pool
from modoboa.parameters import tools as param_tools
from . import __version__, forms
from .lib import create_user_and_policy, create_user_and_use_policy
class Amavis(ModoExtension):
"""The Amavis extension."""
name = "modoboa_amavis"
label = ugettext_lazy("Amavis frontend")
version = __version__
description = ugettext_lazy("Simple amavis management frontend")
url = "quarantine"
available_for_topredirection = True
def load(self):
param_tools.registry.add("global", forms.ParametersForm, "Amavi
|
s")
param_tools.registry.add(
"user", forms.UserSettings, ugettext_lazy("Quarantine"))
def load_initial_data(self):
"""Create records for existing domains and co
|
."""
for dom in Domain.objects.all():
policy = create_user_and_policy("@{0}".format(dom.name))
for domalias in dom.domainalias_set.all():
domalias_pattern = "@{0}".format(domalias.name)
create_user_and_use_policy(domalias_pattern, policy)
exts_pool.register_extension(Amavis)
|
mcxiaoke/python-labs
|
scripts/wechat_upload.py
|
Python
|
apache-2.0
| 8,692
| 0.00115
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2018-01-26
from __future__ import print_function, unicode_literals, absolute_import
import json
import sys
import os
import re
import time
import shutil
import random
import mimetypes
import imghdr
import traceback
import json
import redis
import logging
import requests
from requests.exceptions import RequestException
TYPE_CAT = 'cats'
TYPE_DOG = 'dogs'
TYPE_OTHER = 'others'
SOURCE_ROOT = os.path.join('..', 'images')
TWO_HOUR_EXPIRE = 60*60*2 # in seconds
MEDIA_ID_EXPIRE = TWO_HOUR_EXPIRE * 35 # in seconds
ACCESS_TOKEN_KEY = 'wechat:token:v1:%s'
MEDIA_ID_KEY = 'wechat:media_ids:v1:%s'
MEDIA_ID_OUTPUT = 'data'
MEDIA_ID_USER_KEY = 'wechat:media_ids:user:v1:%s:%s'
MEDIA_ID_FILE = 'media_ids_v1_%s.txt'
UPLOAD_IMAGE_URL = 'https://api.weixin.qq.com/cgi-bin/media/upload?access_token=%s&type=image'
GET_TOKEN_URL = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s'
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('MediaStore')
def get_wechat_access_token(app_id, app_secret):
url = GET_TOKEN_URL % (app_id, app_secret)
logger.info('get_wechat_access_token url=%s' % url)
response = requests.get(url)
response.encoding = 'utf-8'
logger.info('get_wechat_access_token result=%s' % response.json())
return response.json()['access_token']
class MediaStore(object):
_redis = redis.StrictRedis(decode_responses=True)
def __init__(self, name, app_id, app_secret, r=_redis, expire=MEDIA_ID_EXPIRE):
assert name, 'name can not be None'
assert app_id, 'app_id can not be None'
assert app_secret, 'app_secret can not be None'
self.name = name
self.app_id = app_id
self.app_secret = app_secret
self.expire = expire
self.r = r
logger.debug('__init__ name=%s app_id=%s, app_secret=%s' %
(name, app_id, app_secret))
def _get_media_key(self, type_name=''):
return MEDIA_ID_KEY % type_name
def _get_media_file(self, type_name=''):
return os.path.join(MEDIA_ID_OUTPUT, MEDIA_ID_FILE % type_name)
def _get_user_key(self, user_id, type_name=''):
return MEDIA_ID_USER_KEY % (type_name, user_id)
def _get_access_token(self):
token = self.r.get(ACCESS_TOKEN_KEY % self.app_id)
if not token:
token = get_wechat_access_token(self.app_id, self.app_secret)
logger.info('get_wechat_access_token token=%s' % token)
if token:
self.r.set(ACCESS_TOKEN_KEY % self.app_id, token)
self.r.expire(ACCESS_TOKEN_KEY % self.app_id, TWO_HOUR_EXPIRE)
return token
def clear_media_ids(self, type_name=''):
logger.info('clear_media_ids type=%s' % type_name)
self.r.delete(self._get_media_key(type_name))
def save_media_ids(self, media_ids, type_name='', replace=True):
if media_ids:
with open(self._get_media_file(type_name), 'w') as f:
f.write('\n'.join(media_ids))
key = self._get_media_key(type_name)
if replace:
self.r.delete(key)
rt = self.r.sadd(key, *media_ids)
self.r.expire(key, self.expire)
logger.info('save_media_ids %s media ids saved %s' %
(self.media_ids_length(type_name), rt))
return media_ids
def upload_image(self, filepath):
token = self._get_access_token()
if not token:
raise IOError('token is None')
url = UPLOAD_IMAGE_URL % token
files = {'media': open(filepath, 'rb')}
try:
response = requests.post(url, files=files)
            response.encoding = 'utf-8'
return response.json()['media_id']
except RequestException as e:
logger.error('upload_image error=%s' % e)
def upload_images(self, source_dir, type_name='', max_count=100):
if not source_dir or not os.path.isdir(source_dir):
return
logger.info('upload_images [%s] for type [%s]' % (source_dir, type_name))
names = os.listdir(source_dir)
        if len(names) > max_count:
names = random.sample(names, max_count)
count = 0
mids = []
for name in names:
filepath = os.path.join(source_dir, name)
filepath = os.path.abspath(filepath)
mime_type, _ = mimetypes.guess_type(name)
if mime_type not in ['image/jpeg', 'image/png', 'image/gif']:
logger.warning('upload_images invalid=%s' % filepath)
continue
logger.info('upload_images file=%s' % filepath)
media_id = self.upload_image(filepath)
if media_id:
logger.info('upload_images result=%s' % media_id)
mids.append(media_id)
count += 1
if count > max_count:
break
self.save_media_ids(mids, type_name)
def random_user_media_id(self, user_id=None, type_name=''):
if not user_id:
return self.random_media_id(type_name)
media_key = self._get_media_key(type_name)
user_key = self._get_user_key(user_id, type_name)
mids = self.r.sdiff(media_key, user_key)
mid = None
if mids:
mid = random.choice(list(mids))
if mid:
self.r.sadd(user_key, mid)
self.r.expire(user_key, self.expire)
if not mid:
self.r.delete(user_key)
mid = self.random_media_id(type_name)
logger.debug('random_user_media_id user_id=%s result=%s' %
(user_id, mid))
return mid
def all_media_ids(self, type_name=''):
return self.r.smembers(self._get_media_key(type_name))
def media_ids_length(self, type_name=''):
return self.r.scard(self._get_media_key(type_name))
def random_media_id(self, type_name=''):
return self.r.srandmember(self._get_media_key(type_name))
from config import WECHAT_APPID, WECHAT_APPSECRET, WECHAT2_APPID, WECHAT2_APPSECRET
store1 = MediaStore('Cat', WECHAT_APPID, WECHAT_APPSECRET)
store2 = MediaStore('Miu', WECHAT2_APPID, WECHAT2_APPSECRET)
def update_app(store, root=SOURCE_ROOT):
for type_name in (TYPE_CAT, TYPE_DOG, TYPE_OTHER):
source_dir = os.path.join(root, type_name)
store.upload_images(source_dir, type_name)
def update_all(root=SOURCE_ROOT):
check_all(root)
update_app(store1, root)
update_app(store2, root)
def check_all(root=SOURCE_ROOT):
for type_name in (TYPE_CAT, TYPE_DOG, TYPE_OTHER):
source_dir = os.path.abspath(os.path.join(root, type_name))
if not os.path.exists(source_dir):
            print('ERROR: check_all source dir [%s] does not exist' % source_dir)
exit(1)
if not os.path.isdir(source_dir):
            print('ERROR: check_all source dir [%s] is not a directory' % source_dir)
exit(2)
if not os.listdir(source_dir):
print('ERROR: check_all source dir [%s] is empty' % source_dir)
exit(2)
    print('all directories exist, check passed.')
def test_all():
for store in [store1, store2]:
for type_name in (TYPE_CAT, TYPE_DOG, TYPE_OTHER):
print('\n[Store:%s] found %s values for type %s, read test:'
% (store.name, store.media_ids_length(type_name), type_name))
            for i in range(0, 10):
                print(store.random_user_media_id('test', type_name))
            for i in range(0, 10):
                assert store.random_user_media_id('test', type_name), 'No media id found'
            assert store.random_media_id(type_name), 'No media id found'
print('all tests passed.')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
prog='wechat_uploader', description='WeChat Images Uploader v0.1.0')
parser.add_argument('-c', '--check', action="store_true",
help='check source dir')
    parser.add_argument('-t', '--test', action="store_true",
                        help='run read tests on stored media ids')
    # The original snippet is truncated here; a minimal, assumed completion:
    args = parser.parse_args()
    if args.check:
        check_all()
    if args.test:
        test_all()
|
mookrs/fanpy
|
tests/test_sanity.py
|
Python
|
mit
| 1,473
| 0.000679
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import random
import pickle
import json
from fanpy import Fanfou, FanfouHTTPError, NoAuth
from fanpy.api import FanfouDictResponse, FanfouListResponse, POST_ACTIONS, method_for_uri
noauth = NoAuth()
fanfou_na = Fanfou(auth=noauth)
AZaz = 'abcdefghijklmnopqrstuvwxyz1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_random_str():
return ''.join(random.choice(AZaz) for _ in range(10))
def test_FanfouHTTPError_raised_for_invalid_oauth():
test_passed = False
try:
fanfou_na.statuses.mentions()
except FanfouHTTPError:
test_passed = True
assert test_passed
def test_pickle_ability():
res = FanfouDictResponse({'a': 'b'})
p = pickle.dumps(res)
res2 = pickle.loads(p)
assert res == res2
assert res2['a'] == 'b'
res = FanfouListResponse([1, 2, 3])
p = pickle.dumps(res)
res2 = pickle.loads(p)
assert res == res2
assert res2[2] == 3
def test_json_ability():
res = FanfouDictResponse({'a': 'b'})
p = json.dumps(res)
res2 = json.loads(p)
assert res == res2
assert res2['a'] == 'b'
res = FanfouListResponse([1, 2, 3])
p = json.dumps(res)
res2 = json.loads(p)
assert res == res2
assert res2[2] == 3
def test_method_for_uri():
for action in POST_ACTIONS:
assert method_for_uri(get_random_str() + '/' + action) == 'POST'
assert method_for_uri('statuses/home_timeline') == 'GET'
|
particl/particl-core
|
test/functional/feature_part_smsgpaidfee_ext.py
|
Python
|
mit
| 4,260
| 0.003286
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Particl Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_particl import (
ParticlTestFramework,
isclose,
)
from test_framework.messages import COIN
class SmsgPaidFeeExtTest(ParticlTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-debug', '-noacceptnonstdtxn', '-reservebalance=10000000', '-txindex'] for i in range(self.num_nodes)]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
self.connect_nodes(0, 1)
self.sync_all()
def run_test(self):
nodes = self.nodes
self.import_genesis_coins_a(nodes[0])
self.import_genesis_coins_b(nodes[1])
        address0 = nodes[0].getnewaddress()
address1 = nodes[1].getnewaddress()
nodes[0].smsgaddlocaladdress(address0)
nodes[1].smsgaddaddress(address0, nodes[0].smsglocalkeys()['wallet_keys'][0]['public_key'])
text = 'Some text to test'
ro = nodes[1].smsgsend(address1, address0, text, True, 10, True)
assert(ro['result'] == 'Not Sent.')
assert(isclose(ro['fee'], 0.00159000))
assert(nodes[0].smsggetfeerate() == 50000)
assert(nodes[1].smsggetfeerate() == 50000)
ro = nodes[0].walletsettings('stakingoptions', {'smsgfeeratetarget' : 0.001})
assert(float(ro['stakingoptions']['smsgfeeratetarget']) == 0.001)
self.stakeBlocks(49)
assert(nodes[0].smsggetfeerate() == 50000)
ro = nodes[1].smsgsend(address1, address0, text, True, 10)
assert(ro['result'] == 'Sent.')
assert('msgid' in ro)
assert('txid' in ro)
assert(isclose(ro['fee'], 0.00159000))
self.stakeBlocks(1)
assert(nodes[0].smsggetfeerate() == 61939)
ro = nodes[1].smsgsend(address1, address0, text, True, 10, True)
assert(ro['result'] == 'Not Sent.')
assert(isclose(ro['fee'], 0.00189080))
ro = nodes[1].getblock(nodes[1].getblockhash(50), 2)
assert(ro['tx'][0]['vout'][0]['smsgfeerate'] * COIN == 61939)
assert(ro['tx'][0]['vout'][0]['smsgdifficulty'] == '1f0fffff')
ro = nodes[0].walletsettings('stakingoptions', {'smsgdifficultytarget' : '000000000000bfffffffffffffffffffffffffffffffffffffffffffffffffff', 'smsgfeeratetarget' : 0.001})
assert(float(ro['stakingoptions']['smsgfeeratetarget']) == 0.001)
assert(ro['stakingoptions']['smsgdifficultytarget'] == '000000000000bfffffffffffffffffffffffffffffffffffffffffffffffffff')
self.sync_all()
self.stakeBlocks(1)
assert(nodes[0].smsggetfeerate() == 61939)
ro = nodes[1].getrawtransaction(nodes[1].getblockreward(51)['coinstake'], True)
block_51_smsgfeerate = ro['vout'][0]['smsgfeerate'] * COIN
block_51_smsgdifficulty = int(ro['vout'][0]['smsgdifficulty'], 16)
assert(block_51_smsgfeerate > 61939)
assert(block_51_smsgdifficulty < 0x1f0fffff)
self.waitForSmsgExchange(1, 1, 0)
ro = nodes[0].smsginbox('all')
assert(len(ro['messages']) == 1)
assert(ro['messages'][0]['text'] == text)
self.log.info('Verify node settings survive a restart')
self.stop_node(0)
self.start_node(0, self.extra_args[0] + ['-wallet=default_wallet',])
self.connect_nodes(0, 1)
ro = nodes[0].walletsettings('stakingoptions')
assert(float(ro['stakingoptions']['smsgfeeratetarget']) == 0.001)
assert(ro['stakingoptions']['smsgdifficultytarget'] == '000000000000bfffffffffffffffffffffffffffffffffffffffffffffffffff')
self.stakeBlocks(1)
ro = nodes[1].getblock(nodes[1].getblockhash(52), 2)
assert(ro['tx'][0]['vout'][0]['smsgfeerate'] * COIN > block_51_smsgfeerate)
assert(int(ro['tx'][0]['vout'][0]['smsgdifficulty'], 16) < block_51_smsgdifficulty)
if __name__ == '__main__':
SmsgPaidFeeExtTest().main()
|
benob/chainer
|
chainer/functions/array/transpose_sequence.py
|
Python
|
mit
| 1,673
| 0.000598
|
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _transpose(xs, length):
xp = cuda.get_array_module(*xs)
lengths = numpy.zeros(length, dtype='i')
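    # xs is assumed to be sorted by decreasing length (see the
    # transpose_sequence docstring), so after this loop lengths[p] counts the
    # sequences that are long enough to have an element at position p.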
for i, x in enumerate(xs):
lengths[0:len(x)] = i + 1
dtype = xs[0].dtype
unit = xs[0].shape[1:]
outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])
for i, x in enumerate(xs):
for p, xi in enumerate(x):
outs[p][i] = xi
return outs
class TransposeSequence(function.Function):
"""Function that transposes a list of Variables."""
def check_type_forward(self, xs_type):
for p, n in zip(xs_type, xs_type[1:]):
type_check.expect(
p.shape[0] >= n.shape[0],
p.shape[1:] == n.shape[1:],
)
def forward(self, xs):
if len(xs) == 0:
return ()
return _transpose(xs, len(xs[0]))
def backward(self, xs, gs):
return _transpose(gs, len(xs))
def transpose_sequence(xs):
"""Transpose a list of Variables.
    This function transposes a list of :class:`~chainer.Variable` s and returns
a list of :class:`Variable` s.
    For example, a user gives ``[(0, 1, 2, 3), (4, 5), (6)]``, and the function
returns ``[(0, 4, 6), (1, 5), (2), (3)]``.
    Note that the given list needs to be sorted in descending order of
    the lengths of the :class:`~chainer.Variable` s.
Args:
xs (list of ~chainer.Variable): Variables to transpose.
Returns:
tuple or Variable: Transposed list.
"""
ys = TransposeSequence()(*xs)
if not isinstance(ys, tuple):
ys = (ys,)
return ys
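# A small usage sketch (not part of the original module), mirroring the
# docstring example above; assumes numpy and this chainer build are available.
if __name__ == '__main__':
    import numpy as np
    from chainer import Variable
    xs = [Variable(np.array([0, 1, 2, 3], dtype=np.float32)),
          Variable(np.array([4, 5], dtype=np.float32)),
          Variable(np.array([6], dtype=np.float32))]
    # Regroups elements by position: [0 4 6], [1 5], [2], [3]
    for y in transpose_sequence(xs):
        print(y.data)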
|
hubblestack/hubble-salt
|
hubblestack_nova/win_pkg.py
|
Python
|
apache-2.0
| 8,165
| 0.003062
|
# -*- encoding: utf-8 -*-
'''
:maintainer: HubbleStack
:maturity: 2016.7.0
:platform: Windows
:requires: SaltStack
'''
from __future__ import absolute_import
import copy
import fnmatch
import logging
import salt.utils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
from distutils.version import LooseVersion
log = logging.getLogger(__name__)
__virtualname__ = 'win_pkg'
def __virtual__():
if not salt.utils.platform.is_windows():
return False, 'This audit module only runs on windows'
return True
def apply_labels(__data__, labels):
'''
Filters out the tests whose label doesn't match the labels given when running audit and returns a new data structure with only labelled tests.
'''
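    # Shape consumed here, as produced by _merge_yaml below (names are
    # illustrative only):
    #   {'win_pkg': {'whitelist': [{'some_pkg': {'labels': ['CIS'], ...}}],
    #                'blacklist': [...]}}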
labelled_data = {}
if labels:
labelled_data[__virtualname__] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in __data__.get(__virtualname__, {}):
labelled_test_cases=[]
for test_case in __data__[__virtualname__].get(topkey, []):
# each test case is a dictionary with just one key-val pair. key=test name, val=test data, description etc
if isinstance(test_case, dict) and test_case:
test_case_body = test_case.get(next(iter(test_case)))
if set(labels).issubset(set(test_case_body.get('labels',[]))):
                            labelled_test_cases.append(test_case)
labelled_data[__virtualname__][topkey]=labelled_test_cases
else:
labelled_data = __data__
return labelled_data
def audit(data_list, tags, labels, debug=False, **kwargs):
'''
    Audits the locally installed packages (via pkg.list_pkgs) against the
    whitelist/blacklist rules in the CIS yaml profiles passed in as data_list
'''
__data__ = {}
try:
__pkgdata__ = __salt__['pkg.list_pkgs']()
except CommandExecutionError:
__salt__['pkg.refresh_db']()
__pkgdata__ = __salt__['pkg.list_pkgs']()
for profile, data in data_list:
_merge_yaml(__data__, data, profile)
__data__ = apply_labels(__data__, labels)
__tags__ = _get_tags(__data__)
if debug:
log.debug('package audit __data__:')
log.debug(__data__)
log.debug('package audit __tags__:')
log.debug(__tags__)
ret = {'Success': [], 'Failure': [], 'Controlled': []}
for tag in __tags__:
if fnmatch.fnmatch(tag, tags):
for tag_data in __tags__[tag]:
if 'control' in tag_data:
ret['Controlled'].append(tag_data)
continue
name = tag_data['name']
audit_type = tag_data['type']
match_output = tag_data['match_output'].lower()
# Blacklisted audit (do not include)
if 'blacklist' in audit_type:
if name not in __pkgdata__:
ret['Success'].append(tag_data)
else:
tag_data['failure_reason'] = "Blacklisted package '{0}' is installed " \
"on the system".format(name)
ret['Failure'].append(tag_data)
# Whitelisted audit (must include)
if 'whitelist' in audit_type:
if name in __pkgdata__:
audit_value = __pkgdata__[name]
tag_data['found_value'] = audit_value
secret = _translate_value_type(audit_value, tag_data['value_type'], match_output)
if secret:
ret['Success'].append(tag_data)
else:
tag_data['failure_reason'] = "Version '{0}({1}) of the requisite" \
" package '{2}' is not installed on" \
" the system".format(match_output,
tag_data['value_type'],
name)
ret['Failure'].append(tag_data)
else:
tag_data['failure_reason'] = "Version '{0}({1}) of the requisite package" \
" '{2}' is not installed on the system" \
.format(match_output, tag_data['value_type'], name)
ret['Failure'].append(tag_data)
return ret
def _merge_yaml(ret, data, profile=None):
'''
    Merge two yaml dicts together at the win_pkg:blacklist and
    win_pkg:whitelist level
'''
if __virtualname__ not in ret:
ret[__virtualname__] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in data.get(__virtualname__, {}):
if topkey not in ret[__virtualname__]:
ret[__virtualname__][topkey] = []
for key, val in data[__virtualname__][topkey].iteritems():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret[__virtualname__][topkey].append({key: val})
return ret
def _get_tags(data):
'''
Retrieve all the tags for this distro from the yaml
'''
ret = {}
distro = __grains__.get('osfullname')
for toplist, toplevel in data.get(__virtualname__, {}).iteritems():
        # win_pkg:whitelist
for audit_dict in toplevel:
for audit_id, audit_data in audit_dict.iteritems():
                # win_pkg:whitelist:PasswordComplexity
                tags_dict = audit_data.get('data', {})
                # win_pkg:whitelist:PasswordComplexity:data
tags = None
for osfinger in tags_dict:
if osfinger == '*':
continue
osfinger_list = [finger.strip() for finger in osfinger.split(',')]
for osfinger_glob in osfinger_list:
if fnmatch.fnmatch(distro, osfinger_glob):
tags = tags_dict.get(osfinger)
break
if tags is not None:
break
# If we didn't find a match, check for a '*'
if tags is None:
tags = tags_dict.get('*', [])
                # win_pkg:whitelist:PasswordComplexity:data:Windows 2012
if isinstance(tags, dict):
# malformed yaml, convert to list of dicts
tmp = []
for name, tag in tags.iteritems():
tmp.append({name: tag})
tags = tmp
for item in tags:
for name, tag in item.iteritems():
tag_data = {}
# Whitelist could have a dictionary, not a string
if isinstance(tag, dict):
tag_data = copy.deepcopy(tag)
tag = tag_data.pop('tag')
if tag not in ret:
ret[tag] = []
formatted_data = {'name': name,
'tag': tag,
                                      'module': 'win_pkg',
'type': toplist}
formatted_data.update(tag_data)
formatted_data.update(audit_data)
formatted_data.pop('data')
ret[tag].append(formatted_data)
return ret
def _translate_value_type(current, value, evaluator):
if 'equal' in value.lower() and LooseVersion(current) == LooseVersion(evaluator):
return True
if 'less' in value.lower() and LooseVersion(current) <= LooseVersion(evaluator):
return True
if 'more' in value.lower() and LooseVersion(current) >= LooseVersion(evaluator):
return True
return False
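# Illustrative behaviour of _translate_value_type (version strings chosen for
# this example only): 'equal', 'less' and 'more' select ==, <= and >=
# comparisons on LooseVersion objects, e.g.
#     _translate_value_type('7.1.2', 'more', '7.0')   -> True   (7.1.2 >= 7.0)
#     _translate_value_type('7.1.2', 'less', '7.0')   -> False
#     _translate_value_type('7.0', 'equal', '7.0')    -> True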
|