import json
import logging
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.utils.translation import ugettext as _
from desktop.lib.django_util import JsonResponse, render
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.rest.http_client import RestException
from libsolr.api import SolrApi
from indexer.management.commands import indexer_setup
from search.api import _guess_gap, _zoom_range_facet, _new_range_facet
from search.conf import SOLR_URL
from search.data_export import download as export_download
from search.decorators import allow_owner_only, allow_viewer_only
from search.management.commands import search_setup
from search.models import Collection, augment_solr_response, augment_solr_exception, pairwise2
from search.search_controller import SearchController
LOG = logging.getLogger(__name__)
def index(request):
hue_collections = SearchController(request.user).get_search_collections()
collection_id = request.GET.get('collection')
if not hue_collections or not collection_id:
return admin_collections(request, True)
try:
collection = hue_collections.get(id=collection_id)
except Exception, e:
raise PopupException(e, title=_("Dashboard does not exist or you don't have the permission to access it."))
query = {'qs': [{'q': ''}], 'fqs': [], 'start': 0}
return render('search.mako', request, {
'collection': collection,
'query': query,
'initial': json.dumps({'collections': [], 'layout': []}),
'is_owner': request.user == collection.owner
})
def new_search(request):
collections = SearchController(request.user).get_all_indexes()
if not collections:
return no_collections(request)
collection = Collection(name=collections[0], label=collections[0])
query = {'qs': [{'q': ''}], 'fqs': [], 'start': 0}
return render('search.mako', request, {
'collection': collection,
'query': query,
'initial': json.dumps({
'collections': collections,
'layout': [
{"size":2,"rows":[{"widgets":[]}],"drops":["temp"],"klass":"card card-home card-column span2"},
{"size":10,"rows":[{"widgets":[
{"size":12,"name":"Filter Bar","widgetType":"filter-widget",
"properties":{},"offset":0,"isLoading":True,"klass":"card card-widget span12"}]},
{"widgets":[
{"size":12,"name":"Grid Results","widgetType":"resultset-widget",
"properties":{},"offset":0,"isLoading":True,"klass":"card card-widget span12"}]}],
"drops":["temp"],"klass":"card card-home card-column span10"},
]
}),
'is_owner': True
})
def browse(request, name):
collections = SearchController(request.user).get_all_indexes()
if not collections:
return no_collections(request)
collection = Collection(name=name, label=name)
query = {'qs': [{'q': ''}], 'fqs': [], 'start': 0}
return render('search.mako', request, {
'collection': collection,
'query': query,
'initial': json.dumps({
'autoLoad': True,
'collections': collections,
'layout': [
{"size":12,"rows":[{"widgets":[
{"size":12,"name":"Grid Results","id":"52f07188-f30f-1296-2450-f77e02e1a5c0","widgetType":"resultset-widget",
"properties":{},"offset":0,"isLoading":True,"klass":"card card-widget span12"}]}],
"drops":["temp"],"klass":"card card-home card-column span10"}
]
}),
'is_owner': True
})
@allow_viewer_only
def search(request):
response = {}
collection = json.loads(request.POST.get('collection', '{}'))
query = json.loads(request.POST.get('query', '{}'))
query['download'] = 'download' in request.POST
if collection:
try:
response = SolrApi(SOLR_URL.get(), request.user).query(collection, query)
response = augment_solr_response(response, collection, query)
except RestException, e:
try:
response['error'] = json.loads(e.message)['error']['msg']
except:
response['error'] = force_unicode(str(e))
except Exception, e:
raise PopupException(e, title=_('Error while accessing Solr'))
else:
response['error'] = _('There is no collection to search.')
if 'error' in response:
augment_solr_exception(response, collection)
return JsonResponse(response)
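# Illustrative request sketch (hypothetical field values; the payload shape is
# taken from the handlers above): the client posts 'collection' and 'query' as
# JSON-encoded form fields, e.g.
#   collection = '{"name": "logs", "facets": [], "template": {"extracode": ""}}'
#   query      = '{"qs": [{"q": ""}], "fqs": [], "start": 0}'
# An additional 'download' field in the POST marks the query as a download.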
@allow_owner_only
def save(request):
response = {'status': -1}
collection = json.loads(request.POST.get('collection', '{}'))
layout = json.loads(request.POST.get('layout', '{}'))
collection['template']['extracode'] = escape(collection['template']['extracode'])
if collection:
if collection['id']:
hue_collection = Collection.objects.get(id=collection['id'])
else:
hue_collection = Collection.objects.create2(name=collection['name'], label=collection['label'], owner=request.user)
hue_collection.update_properties({'collection': collection})
hue_collection.update_properties({'layout': layout})
hue_collection.name = collection['name']
hue_collection.label = collection['label']
hue_collection.enabled = collection['enabled']
hue_collection.save()
response['status'] = 0
response['id'] = hue_collection.id
response['message'] = _('Page saved!')
else:
response['message'] = _('There is no collection to search.')
return JsonResponse(response)
@allow_viewer_only
def download(request):
try:
file_format = 'csv' if 'csv' in request.POST else 'xls' if 'xls' in request.POST else 'json'
response = search(request)
if file_format == 'json':
docs = json.loads(response.content)['response']['docs']
resp = JsonResponse(docs, safe=False)
resp['Content-Disposition'] = 'attachment; filename=%s.%s' % ('query_result', file_format)
return resp
else:
collection = json.loads(request.POST.get('collection', '{}'))
return export_download(json.loads(response.content), file_format, collection)
except Exception, e:
raise PopupException(_("Could not download search results: %s") % e)
def no_collections(request):
return render('no_collections.mako', request, {})
def admin_collections(request, is_redirect=False):
existing_hue_collections = SearchController(request.user).get_search_collections()
if request.GET.get('format') == 'json':
collections = []
for collection in existing_hue_collections:
massaged_collection = {
'id': collection.id,
'name': collection.name,
'label': collection.label,
'enabled': collection.enabled,
'isCoreOnly': collection.is_core_only,
'absoluteUrl': collection.get_absolute_url(),
'owner': collection.owner and collection.owner.username,
'isOwner': collection.owner == request.user or request.user.is_superuser
}
collections.append(massaged_collection)
return JsonResponse(collections, safe=False)
return render('admin_collections.mako', request, {
'existing_hue_collections': existing_hue_collections,
'is_redirect': is_redirect
})
def admin_collection_delete(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
collections = json.loads(request.POST.get('collections'))
searcher = SearchController(request.user)
response = {
'result': searcher.delete_collections([collection['id'] for collection in collections])
}
return JsonResponse(response)
@allow_owner_only
def admin_collection_copy(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
collections = json.loads(request.POST.get('collections'))
searcher = SearchController(request.user)
response = {
'result': searcher.copy_collections([collection['id'] for collection in collections])
}
return JsonResponse(response)
def query_suggest(request, collection_id, query=""):
hue_collection = Collection.objects.get(id=collection_id)
result = {'status': -1, 'message': 'Error'}
solr_query = {}
solr_query['collection'] = hue_collection.name
solr_query['q'] = query
try:
response = SolrApi(SOLR_URL.get(), request.user).suggest(solr_query, hue_collection)
result['message'] = response
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def index_fields_dynamic(request):
result = {'status': -1, 'message': 'Error'}
try:
name = request.POST['name']
hue_collection = Collection(name=name, label=name)
dynamic_fields = SolrApi(SOLR_URL.get(), request.user).luke(hue_collection.name)
result['message'] = ''
result['fields'] = [Collection._make_field(name, properties)
for name, properties in dynamic_fields['fields'].iteritems() if 'dynamicBase' in properties]
result['gridlayout_header_fields'] = [Collection._make_gridlayout_header_field({'name': name}, True)
for name, properties in dynamic_fields['fields'].iteritems() if 'dynamicBase' in properties]
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
@allow_viewer_only
def get_document(request):
result = {'status': -1, 'message': 'Error'}
try:
collection = json.loads(request.POST.get('collection', '{}'))
doc_id = request.POST.get('id')
if doc_id:
result['doc'] = SolrApi(SOLR_URL.get(), request.user).get(collection['name'], doc_id)
if result['doc']['doc']:
result['status'] = 0
result['message'] = ''
else:
result['status'] = 1
result['message'] = _('No document was returned by Solr.')
else:
result['message'] = _('This document does not have any index id.')
result['status'] = 1
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
@allow_viewer_only
def get_stats(request):
result = {'status': -1, 'message': 'Error'}
try:
collection = json.loads(request.POST.get('collection', '{}'))
query = json.loads(request.POST.get('query', '{}'))
analysis = json.loads(request.POST.get('analysis', '{}'))
field = analysis['name']
facet = analysis['stats']['facet']
result['stats'] = SolrApi(SOLR_URL.get(), request.user).stats(collection['name'], [field], query, facet)
result['status'] = 0
result['message'] = ''
except Exception, e:
result['message'] = unicode(str(e), "utf8")
if 'not currently supported' in result['message']:
result['status'] = 1
result['message'] = _('This field does not support stats')
return JsonResponse(result)
@allow_viewer_only
def get_terms(request):
result = {'status': -1, 'message': 'Error'}
try:
collection = json.loads(request.POST.get('collection', '{}'))
analysis = json.loads(request.POST.get('analysis', '{}'))
field = analysis['name']
properties = {
'terms.limit': 25,
'terms.prefix': analysis['terms']['prefix']
# lower
# limit
# mincount
# maxcount
}
result['terms'] = SolrApi(SOLR_URL.get(), request.user).terms(collection['name'], field, properties)
result['terms'] = pairwise2(field, [], result['terms']['terms'][field])
result['status'] = 0
result['message'] = ''
except Exception, e:
result['message'] = unicode(str(e), "utf8")
if 'not currently supported' in result['message']:
result['status'] = 1
result['message'] = _('This field does not support stats')
return JsonResponse(result)
@allow_viewer_only
def get_timeline(request):
result = {'status': -1, 'message': 'Error'}
try:
collection = json.loads(request.POST.get('collection', '{}'))
query = json.loads(request.POST.get('query', '{}'))
facet = json.loads(request.POST.get('facet', '{}'))
qdata = json.loads(request.POST.get('qdata', '{}'))
multiQ = request.POST.get('multiQ', 'query')
if multiQ == 'query':
label = qdata['q']
query['qs'] = [qdata]
elif facet['type'] == 'range':
_prop = filter(lambda prop: prop['from'] == qdata, facet['properties'])[0]
label = '%(from)s - %(to)s ' % _prop
facet_id = facet['id']
# Only care about our current field:value filter
for fq in query['fqs']:
if fq['id'] == facet_id:
fq['properties'] = [_prop]
else:
label = qdata
facet_id = facet['id']
# Only care about our current field:value filter
for fq in query['fqs']:
if fq['id'] == facet_id:
fq['filter'] = [{'value': qdata, 'exclude': False}]
# Remove other facets from collection for speed
collection['facets'] = filter(lambda f: f['widgetType'] == 'histogram-widget', collection['facets'])
response = SolrApi(SOLR_URL.get(), request.user).query(collection, query)
response = augment_solr_response(response, collection, query)
label += ' (%s) ' % response['response']['numFound']
result['series'] = {'label': label, 'counts': response['normalized_facets'][0]['counts']}
result['status'] = 0
result['message'] = ''
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
@allow_viewer_only
def new_facet(request):
result = {'status': -1, 'message': 'Error'}
try:
collection = json.loads(request.POST.get('collection', '{}'))
facet_id = request.POST['id']
facet_label = request.POST['label']
facet_field = request.POST['field']
widget_type = request.POST['widget_type']
result['message'] = ''
result['facet'] = _create_facet(collection, request.user, facet_id, facet_label, facet_field, widget_type)
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def _create_facet(collection, user, facet_id, facet_label, facet_field, widget_type):
properties = {
'sort': 'desc',
'canRange': False,
'stacked': False,
'limit': 10,
'mincount': 0,
'isDate': False
}
if widget_type in ('tree-widget', 'heatmap-widget', 'map-widget'):
facet_type = 'pivot'
else:
solr_api = SolrApi(SOLR_URL.get(), user)
range_properties = _new_range_facet(solr_api, collection, facet_field, widget_type)
if range_properties:
facet_type = 'range'
properties.update(range_properties)
properties['initial_gap'] = properties['gap']
properties['initial_start'] = properties['start']
properties['initial_end'] = properties['end']
elif widget_type == 'hit-widget':
facet_type = 'query'
else:
facet_type = 'field'
if widget_type in ('tree-widget', 'heatmap-widget', 'map-widget'):
properties['mincount'] = 1
properties['facets'] = []
properties['facets_form'] = {'field': '', 'mincount': 1, 'limit': 5}
if widget_type == 'map-widget':
properties['scope'] = 'world'
properties['limit'] = 100
else:
properties['scope'] = 'stack' if widget_type == 'heatmap-widget' else 'tree'
return {
'id': facet_id,
'label': facet_label,
'field': facet_field,
'type': facet_type,
'widgetType': widget_type,
'properties': properties
}
@allow_viewer_only
def get_range_facet(request):
result = {'status': -1, 'message': ''}
try:
collection = json.loads(request.POST.get('collection', '{}'))
facet = json.loads(request.POST.get('facet', '{}'))
action = request.POST.get('action', 'select')
solr_api = SolrApi(SOLR_URL.get(), request.user)
if action == 'select':
properties = _guess_gap(solr_api, collection, facet, facet['properties']['start'], facet['properties']['end'])
else:
properties = _zoom_range_facet(solr_api, collection, facet) # Zoom out
result['properties'] = properties
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def get_collection(request):
result = {'status': -1, 'message': ''}
try:
name = request.POST['name']
collection = Collection(name=name, label=name)
collection_json = collection.get_c(request.user)
result['collection'] = json.loads(collection_json)
result['status'] = 0
except Exception, e:
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def get_collections(request):
result = {'status': -1, 'message': ''}
try:
show_all = json.loads(request.POST.get('show_all'))
result['collection'] = SearchController(request.user).get_all_indexes(show_all=show_all)
result['status'] = 0
except Exception, e:
if 'does not have privileges' in str(e):
result['status'] = 0
result['collection'] = [json.loads(request.POST.get('collection'))['name']]
else:
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def install_examples(request):
result = {'status': -1, 'message': ''}
if not request.user.is_superuser:
raise PopupException(_("You must be a superuser."))
if request.method != 'POST':
result['message'] = _('A POST request is required.')
else:
try:
search_setup.Command().handle_noargs()
indexer_setup.Command().handle_noargs()
result['status'] = 0
except Exception, e:
LOG.exception(e)
result['message'] = str(e)
return JsonResponse(result)
| {
"content_hash": "f5100a6eb3231bf044842afb2e34543c",
"timestamp": "",
"source": "github",
"line_count": 551,
"max_line_length": 134,
"avg_line_length": 31.765880217785845,
"alnum_prop": 0.6380049134434097,
"repo_name": "nvoron23/hue",
"id": "54498844a1be8b35e670fc57754c9e1e72f3a9e1",
"size": "18295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/search/src/search/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1712779"
},
{
"name": "C++",
"bytes": "178518"
},
{
"name": "CSS",
"bytes": "415919"
},
{
"name": "Emacs Lisp",
"bytes": "12145"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Groff",
"bytes": "14877"
},
{
"name": "HTML",
"bytes": "21211447"
},
{
"name": "Java",
"bytes": "3080564"
},
{
"name": "JavaScript",
"bytes": "2547814"
},
{
"name": "Makefile",
"bytes": "87389"
},
{
"name": "Mako",
"bytes": "2041625"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "161801"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Prolog",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "31227804"
},
{
"name": "Scala",
"bytes": "75705"
},
{
"name": "Shell",
"bytes": "41224"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "129526"
},
{
"name": "Thrift",
"bytes": "100994"
},
{
"name": "XSLT",
"bytes": "342237"
}
],
"symlink_target": ""
} |
from ducktape.services.service import Service
from ducktape.utils.util import wait_until
from config import KafkaConfig
from kafkatest.services.kafka import config_property
from kafkatest.services.kafka.version import TRUNK
from kafkatest.services.kafka.directory import kafka_dir, KAFKA_TRUNK
from kafkatest.services.monitor.jmx import JmxMixin
from kafkatest.services.security.security_config import SecurityConfig
from kafkatest.services.security.minikdc import MiniKdc
import json
import re
import signal
import subprocess
import time
class KafkaService(JmxMixin, Service):
logs = {
"kafka_log": {
"path": "/mnt/kafka.log",
"collect_default": True},
"kafka_operational_logs": {
"path": "/mnt/kafka-operational-logs",
"collect_default": True},
"kafka_data": {
"path": "/mnt/kafka-data-logs",
"collect_default": False}
}
def __init__(self, context, num_nodes, zk, security_protocol=SecurityConfig.PLAINTEXT, interbroker_security_protocol=SecurityConfig.PLAINTEXT,
sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI, topics=None, version=TRUNK, quota_config=None, jmx_object_names=None, jmx_attributes=[]):
"""
:type context
:type zk: ZookeeperService
:type topics: dict
"""
Service.__init__(self, context, num_nodes)
JmxMixin.__init__(self, num_nodes, jmx_object_names, jmx_attributes)
self.log_level = "DEBUG"
self.zk = zk
self.quota_config = quota_config
self.security_protocol = security_protocol
self.interbroker_security_protocol = interbroker_security_protocol
self.sasl_mechanism = sasl_mechanism
self.topics = topics
for node in self.nodes:
node.version = version
node.config = KafkaConfig(**{config_property.BROKER_ID: self.idx(node)})
@property
def security_config(self):
return SecurityConfig(self.security_protocol, self.interbroker_security_protocol, sasl_mechanism=self.sasl_mechanism)
def start(self):
if self.security_config.has_sasl_kerberos:
self.minikdc = MiniKdc(self.context, self.nodes)
self.minikdc.start()
else:
self.minikdc = None
Service.start(self)
# Create topics if necessary
if self.topics is not None:
for topic, topic_cfg in self.topics.items():
if topic_cfg is None:
topic_cfg = {}
topic_cfg["topic"] = topic
self.create_topic(topic_cfg)
def prop_file(self, node):
cfg = KafkaConfig(**node.config)
cfg[config_property.ADVERTISED_HOSTNAME] = node.account.hostname
cfg[config_property.ZOOKEEPER_CONNECT] = self.zk.connect_setting()
# TODO - clean up duplicate configuration logic
prop_file = cfg.render()
prop_file += self.render('kafka.properties', node=node, broker_id=self.idx(node),
security_config=self.security_config,
interbroker_security_protocol=self.interbroker_security_protocol,
sasl_mechanism=self.sasl_mechanism)
return prop_file
def start_cmd(self, node):
cmd = "export JMX_PORT=%d; " % self.jmx_port
cmd += "export LOG_DIR=/mnt/kafka-operational-logs/; "
cmd += "export KAFKA_OPTS=%s; " % self.security_config.kafka_opts
cmd += "/opt/" + kafka_dir(node) + "/bin/kafka-server-start.sh /mnt/kafka.properties 1>> /mnt/kafka.log 2>> /mnt/kafka.log &"
return cmd
def start_node(self, node):
prop_file = self.prop_file(node)
self.logger.info("kafka.properties:")
self.logger.info(prop_file)
node.account.create_file("/mnt/kafka.properties", prop_file)
self.security_config.setup_node(node)
cmd = self.start_cmd(node)
self.logger.debug("Attempting to start KafkaService on %s with command: %s" % (str(node.account), cmd))
with node.account.monitor_log("/mnt/kafka.log") as monitor:
node.account.ssh(cmd)
monitor.wait_until("Kafka Server.*started", timeout_sec=30, err_msg="Kafka server didn't finish startup")
self.start_jmx_tool(self.idx(node), node)
if len(self.pids(node)) == 0:
raise Exception("No process ids recorded on node %s" % str(node))
def pids(self, node):
"""Return process ids associated with running processes on the given node."""
try:
cmd = "ps ax | grep -i kafka | grep java | grep -v grep | awk '{print $1}'"
pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
return pid_arr
except (subprocess.CalledProcessError, ValueError) as e:
return []
def signal_node(self, node, sig=signal.SIGTERM):
pids = self.pids(node)
for pid in pids:
node.account.signal(pid, sig)
def signal_leader(self, topic, partition=0, sig=signal.SIGTERM):
leader = self.leader(topic, partition)
self.signal_node(leader, sig)
def stop_node(self, node, clean_shutdown=True):
pids = self.pids(node)
sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL
for pid in pids:
node.account.signal(pid, sig, allow_fail=False)
wait_until(lambda: len(self.pids(node)) == 0, timeout_sec=20, err_msg="Kafka node failed to stop")
def clean_node(self, node):
JmxMixin.clean_node(self, node)
self.security_config.clean_node(node)
node.account.kill_process("kafka", clean_shutdown=False, allow_fail=True)
node.account.ssh("rm -rf /mnt/*", allow_fail=False)
def create_topic(self, topic_cfg, node=None):
"""Run the admin tool create topic command.
Specifying a node is optional, and may be useful if different kafka nodes
have different versions and we care where the command gets run.
If the node is not specified, the command runs from self.nodes[0].
"""
if node is None:
node = self.nodes[0]
self.logger.info("Creating topic %s with settings %s", topic_cfg["topic"], topic_cfg)
cmd = "/opt/%s/bin/kafka-topics.sh " % kafka_dir(node)
cmd += "--zookeeper %(zk_connect)s --create --topic %(topic)s --partitions %(partitions)d --replication-factor %(replication)d" % {
'zk_connect': self.zk.connect_setting(),
'topic': topic_cfg.get("topic"),
'partitions': topic_cfg.get('partitions', 1),
'replication': topic_cfg.get('replication-factor', 1)
}
if "configs" in topic_cfg.keys() and topic_cfg["configs"] is not None:
for config_name, config_value in topic_cfg["configs"].items():
cmd += " --config %s=%s" % (config_name, str(config_value))
self.logger.info("Running topic creation command...\n%s" % cmd)
node.account.ssh(cmd)
time.sleep(1)
self.logger.info("Checking to see if topic was properly created...\n%s" % cmd)
for line in self.describe_topic(topic_cfg["topic"]).split("\n"):
self.logger.info(line)
def describe_topic(self, topic, node=None):
if node is None:
node = self.nodes[0]
cmd = "/opt/%s/bin/kafka-topics.sh --zookeeper %s --topic %s --describe" % \
(kafka_dir(node), self.zk.connect_setting(), topic)
output = ""
for line in node.account.ssh_capture(cmd):
output += line
return output
def verify_reassign_partitions(self, reassignment, node=None):
"""Run the reassign partitions admin tool in "verify" mode
"""
if node is None:
node = self.nodes[0]
json_file = "/tmp/%s_reassign.json" % str(time.time())
# reassignment to json
json_str = json.dumps(reassignment)
json_str = json.dumps(json_str)
# create command
cmd = "echo %s > %s && " % (json_str, json_file)
cmd += "/opt/%s/bin/kafka-reassign-partitions.sh " % kafka_dir(node)
cmd += "--zookeeper %s " % self.zk.connect_setting()
cmd += "--reassignment-json-file %s " % json_file
cmd += "--verify "
cmd += "&& sleep 1 && rm -f %s" % json_file
# send command
self.logger.info("Verifying parition reassignment...")
self.logger.debug(cmd)
output = ""
for line in node.account.ssh_capture(cmd):
output += line
self.logger.debug(output)
if re.match(".*is in progress.*", output) is not None:
return False
return True
def execute_reassign_partitions(self, reassignment, node=None):
"""Run the reassign partitions admin tool in "verify" mode
"""
if node is None:
node = self.nodes[0]
json_file = "/tmp/%s_reassign.json" % str(time.time())
# reassignment to json
json_str = json.dumps(reassignment)
json_str = json.dumps(json_str)
# create command
cmd = "echo %s > %s && " % (json_str, json_file)
cmd += "/opt/%s/bin/kafka-reassign-partitions.sh " % kafka_dir(node)
cmd += "--zookeeper %s " % self.zk.connect_setting()
cmd += "--reassignment-json-file %s " % json_file
cmd += "--execute"
cmd += " && sleep 1 && rm -f %s" % json_file
# send command
self.logger.info("Executing parition reassignment...")
self.logger.debug(cmd)
output = ""
for line in node.account.ssh_capture(cmd):
output += line
self.logger.debug("Verify partition reassignment:")
self.logger.debug(output)
def restart_node(self, node, clean_shutdown=True):
"""Restart the given node."""
self.stop_node(node, clean_shutdown)
self.start_node(node)
def leader(self, topic, partition=0):
""" Get the leader replica for the given topic and partition.
"""
kafka_dir = KAFKA_TRUNK
cmd = "/opt/%s/bin/kafka-run-class.sh kafka.tools.ZooKeeperMainWrapper -server %s " %\
(kafka_dir, self.zk.connect_setting())
cmd += "get /brokers/topics/%s/partitions/%d/state" % (topic, partition)
self.logger.debug(cmd)
node = self.zk.nodes[0]
self.logger.debug("Querying zookeeper to find leader replica for topic %s: \n%s" % (cmd, topic))
partition_state = None
for line in node.account.ssh_capture(cmd):
# loop through all lines in the output, but only hold on to the first match
if partition_state is None:
match = re.match("^({.+})$", line)
if match is not None:
partition_state = match.groups()[0]
if partition_state is None:
raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))
partition_state = json.loads(partition_state)
self.logger.info(partition_state)
leader_idx = int(partition_state["leader"])
self.logger.info("Leader for topic %s and partition %d is now: %d" % (topic, partition, leader_idx))
return self.get_node(leader_idx)
def bootstrap_servers(self):
"""Return comma-delimited list of brokers in this cluster formatted as HOSTNAME1:PORT1,HOSTNAME:PORT2,...
This is the format expected by many config files.
"""
return ','.join([node.account.hostname + ":9092" for node in self.nodes])
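# Usage sketch (hypothetical ducktape test code; hostnames and topic settings
# are illustrative, and `test_context`/`zk` are assumed to already exist):
#
#   kafka = KafkaService(test_context, num_nodes=3, zk=zk,
#                        topics={"test-topic": {"partitions": 1,
#                                               "replication-factor": 1}})
#   kafka.start()
#   servers = kafka.bootstrap_servers()  # e.g. "host1:9092,host2:9092,host3:9092"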
| {
"content_hash": "2d2c9e6e6e82cd334f8cb0c0781d1d9a",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 159,
"avg_line_length": 40.175257731958766,
"alnum_prop": 0.6042254725857498,
"repo_name": "racker/kafka",
"id": "27530f0c4fffe32ab0d45594a64923f39bd41abc",
"size": "12472",
"binary": false,
"copies": "1",
"ref": "refs/heads/maas-dev",
"path": "tests/kafkatest/services/kafka/kafka.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "20094"
},
{
"name": "HTML",
"bytes": "5443"
},
{
"name": "Java",
"bytes": "2783531"
},
{
"name": "Python",
"bytes": "267872"
},
{
"name": "Scala",
"bytes": "2772471"
},
{
"name": "Shell",
"bytes": "43076"
},
{
"name": "XSLT",
"bytes": "7116"
}
],
"symlink_target": ""
} |
import sys
from collections import namedtuple
Segment = namedtuple('Segment', 'start end')
def optimal_points(segments):
# Greedy: sort segments by right endpoint; the right end of the first
# uncovered segment covers every segment that contains that point.
points = []
for s in sorted(segments, key=lambda seg: seg.end):
if not points or s.start > points[-1]:
points.append(s.end)
return points
if __name__ == '__main__':
input = sys.stdin.read()
n, *data = map(int, input.split())
segments = list(map(lambda x: Segment(x[0], x[1]), zip(data[::2], data[1::2])))
points = optimal_points(segments)
print(len(points))
for p in points:
print(p, end=' ')
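# Worked example (stdin format: n, then n "start end" pairs):
#   input : 3
#           1 3
#           2 5
#           3 6
#   output: 1
#           3
# Sorted by right endpoint the segments are (1,3), (2,5), (3,6); the single
# point 3 lies inside all three, so one point suffices.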
| {
"content_hash": "ffb4b61cc64528a6f7d0786707cfb808",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 83,
"avg_line_length": 26.38095238095238,
"alnum_prop": 0.6064981949458483,
"repo_name": "oy-vey/algorithms-and-data-structures",
"id": "fabdedd54a265341b578e464deb816c8f4fd63ae",
"size": "569",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "1-AlgorithmicToolbox/Week3/covering_segments/covering_segments.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "509"
},
{
"name": "Python",
"bytes": "46535"
},
{
"name": "Shell",
"bytes": "1375"
}
],
"symlink_target": ""
} |
from . import database
from . import config
from . import simpcoins
| {
"content_hash": "895b1d9e2898275eb314b25bc8c81dca",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 23,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.7794117647058824,
"repo_name": "IsmaelRLG/simpbot",
"id": "a9f1bab5a677b7ffb6bb959556a31453ef690677",
"size": "232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extra/simpcoins/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "310649"
}
],
"symlink_target": ""
} |
from github import Github
import datetime
from employee import EmployCommit
import sys
# input_username = input("Github username: ")
# input_password = input("password: ")
# Accept the credentials as command-line arguments
input_username = sys.argv[1]
input_password = sys.argv[2]
github_obj = Github(input_username, input_password)
contributions = {}
repo_name = input("repo: ")
repo_obj = github_obj.get_user().get_repo(repo_name)
print('In', repo_obj.name, ',we need to choose a branch: ')
branch_name = input("branch: ")
commites = repo_obj.get_commits(sha=branch_name,
since=datetime.datetime.now() - datetime.timedelta(days=7),
until=datetime.datetime.now()
)
for commit in commites:
if commit.author is not None:
author_name = commit.author.name
commit_dic = {
"author": commit.author.name,
"sha": commit.sha,
"time": commit.last_modified,
"url": commit.html_url,
"message": commit.commit.message,
}
else:
author_name = "None"
commit_dic = {
"author": "None",
"sha": commit.sha,
"time": commit.last_modified,
"url": commit.html_url,
"message": commit.commit.message,
}
if author_name in contributions:
contributions[author_name].add_commits_tot()
contributions[author_name].add_commit(commit_dic)
else:
contributions[author_name] = EmployCommit(name=author_name,
commits_tot=1,
commits=[commit_dic])
for key in contributions:
contributions[key].show_commit_tot()
contributions[key].write_2_md()
print("\n")
| {
"content_hash": "82bb8aae1c3358080dde4d64ec0547d6",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 91,
"avg_line_length": 30.440677966101696,
"alnum_prop": 0.5629175946547884,
"repo_name": "rabitdash/practice",
"id": "05f315b9c31e3c6be24ac45d3580fd196fe6795b",
"size": "1833",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python-pj/repobot/main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8563"
},
{
"name": "C++",
"bytes": "42312"
},
{
"name": "HTML",
"bytes": "208"
},
{
"name": "Java",
"bytes": "1409"
},
{
"name": "JavaScript",
"bytes": "835"
},
{
"name": "Lua",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "699"
},
{
"name": "Python",
"bytes": "152045"
},
{
"name": "Shell",
"bytes": "2102"
}
],
"symlink_target": ""
} |
from os.path import abspath, join, dirname
from preggy import expect
import mock
# from tornado.concurrent import Future
import tornado.web
from tests.base import PythonTestCase
import thumbor.loaders.strict_https_loader as loader
from thumbor.context import Context
from thumbor.config import Config
from thumbor.loaders import LoaderResult
fixture_for = lambda filename: abspath(join(dirname(__file__), 'fixtures', filename))
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write('Hello')
class EchoUserAgentHandler(tornado.web.RequestHandler):
def get(self):
self.write(self.request.headers['User-Agent'])
class HandlerMock(object):
def __init__(self, headers):
self.request = RequestMock(headers)
class RequestMock(object):
def __init__(self, headers):
self.headers = headers
class ResponseMock:
def __init__(self, error=None, content_type=None, body=None, code=None):
self.error = error
self.code = code
self.time_info = None
self.headers = {
'Content-Type': 'image/jpeg'
}
if content_type:
self.headers['Content-Type'] = content_type
self.body = body
class ReturnContentTestCase(PythonTestCase):
def test_return_none_on_error(self):
response_mock = ResponseMock(error='Error', code=599)
callback_mock = mock.Mock()
ctx = Context(None, None, None)
loader.return_contents(response_mock, 'some-url', callback_mock, ctx)
result = callback_mock.call_args[0][0]
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_be_null()
expect(result.successful).to_be_false()
def test_return_body_if_valid(self):
response_mock = ResponseMock(body='body', code=200)
callback_mock = mock.Mock()
ctx = Context(None, None, None)
loader.return_contents(response_mock, 'some-url', callback_mock, ctx)
result = callback_mock.call_args[0][0]
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_equal('body')
def test_return_upstream_error_on_body_none(self):
response_mock = ResponseMock(body=None, code=200)
callback_mock = mock.Mock()
ctx = Context(None, None, None)
loader.return_contents(response_mock, 'some-url', callback_mock, ctx)
result = callback_mock.call_args[0][0]
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_be_null()
expect(result.successful).to_be_false()
expect(result.error).to_equal(LoaderResult.ERROR_UPSTREAM)
def test_return_upstream_error_on_body_empty(self):
response_mock = ResponseMock(body='', code=200)
callback_mock = mock.Mock()
ctx = Context(None, None, None)
loader.return_contents(response_mock, 'some-url', callback_mock, ctx)
result = callback_mock.call_args[0][0]
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_be_null()
expect(result.successful).to_be_false()
expect(result.error).to_equal(LoaderResult.ERROR_UPSTREAM)
class ValidateUrlTestCase(PythonTestCase):
def test_with_allowed_sources(self):
config = Config()
config.ALLOWED_SOURCES = ['s.glbimg.com']
ctx = Context(None, config, None)
expect(
loader.validate(
ctx,
'http://www.google.com/logo.jpg'
)
).to_be_false()
expect(
loader.validate(
ctx,
'http://s2.glbimg.com/logo.jpg'
)
).to_be_false()
expect(
loader.validate(
ctx,
'/glob=:sfoir%20%20%3Co-pmb%20%20%20%20_%20%20%20%200%20%20g.-%3E%3Ca%20hplass='
)
).to_be_false()
expect(
loader.validate(ctx, 'https://s.glbimg.com/logo.jpg')).to_be_true()
def test_without_allowed_sources(self):
config = Config()
config.ALLOWED_SOURCES = []
ctx = Context(None, config, None)
is_valid = loader.validate(ctx, 'https://www.google.com/logo.jpg')
expect(is_valid).to_be_true()
is_valid = loader.validate(ctx, 'http://www.google.com/logo.jpg')
expect(is_valid).to_be_false()
class NormalizeUrlTestCase(PythonTestCase):
def test_should_normalize_url(self):
expect(loader._normalize_url('https://some.url')).to_equal('https://some.url')
expect(loader._normalize_url('some.url')).to_equal('https://some.url')
def test_should_normalize_url_but_keep_quotes_after_the_domain(self):
for url in ['https://some.url/my image', 'some.url/my%20image']:
expect(loader._normalize_url(url)).to_equal('https://some.url/my%20image')
def test_should_normalize_quoted_url(self):
url = 'https%3A//www.google.ca/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png'
expected = 'https://www.google.ca/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png'
result = loader._normalize_url(url)
expect(result).to_equal(expected)
| {
"content_hash": "bb98f0eab67b40768fe8422f52415f9a",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 102,
"avg_line_length": 34.62,
"alnum_prop": 0.6325823223570191,
"repo_name": "2947721120/thumbor",
"id": "27ad951b765fc184783a4d7bc9101c3bde27eda0",
"size": "5444",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/loaders/test_strict_https_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "58039"
},
{
"name": "HTML",
"bytes": "1737"
},
{
"name": "JavaScript",
"bytes": "409"
},
{
"name": "Makefile",
"bytes": "2194"
},
{
"name": "Python",
"bytes": "542599"
}
],
"symlink_target": ""
} |
r"""
Configuration options which may be set on the command line or in config files.
The schema for each option is defined using the Opt sub-classes, e.g.:
::
common_opts = [
cfg.StrOpt('bind_host',
default='0.0.0.0',
help='IP address to listen on'),
cfg.IntOpt('bind_port',
default=9292,
help='Port number to listen on')
]
Options can be strings, integers, floats, booleans, lists or 'multi strings'::
enabled_apis_opt = cfg.ListOpt('enabled_apis',
default=['ec2', 'osapi_compute'],
help='List of APIs to enable by default')
DEFAULT_EXTENSIONS = [
'nova.api.openstack.compute.contrib.standard_extensions'
]
osapi_compute_extension_opt = cfg.MultiStrOpt('osapi_compute_extension',
default=DEFAULT_EXTENSIONS)
Option schemas are registered with the config manager at runtime, but before
the option is referenced::
class ExtensionManager(object):
enabled_apis_opt = cfg.ListOpt(...)
def __init__(self, conf):
self.conf = conf
self.conf.register_opt(enabled_apis_opt)
...
def _load_extensions(self):
for ext_factory in self.conf.osapi_compute_extension:
....
A common usage pattern is for each option schema to be defined in the module or
class which uses the option::
opts = ...
def add_common_opts(conf):
conf.register_opts(opts)
def get_bind_host(conf):
return conf.bind_host
def get_bind_port(conf):
return conf.bind_port
An option may optionally be made available via the command line. Such options
must be registered with the config manager before the command line is parsed (for
the purposes of --help and CLI arg validation)::
cli_opts = [
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output'),
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output'),
]
def add_common_opts(conf):
conf.register_cli_opts(cli_opts)
The config manager has two CLI options defined by default, --config-file
and --config-dir::
class ConfigOpts(object):
def __call__(self, ...):
opts = [
MultiStrOpt('config-file',
...),
StrOpt('config-dir',
...),
]
self.register_cli_opts(opts)
Option values are parsed from any supplied config files using
openstack.common.iniparser. If none are specified, a default set is used
e.g. glance-api.conf and glance-common.conf::
glance-api.conf:
[DEFAULT]
bind_port = 9292
glance-common.conf:
[DEFAULT]
bind_host = 0.0.0.0
Option values in config files override those on the command line. Config files
are parsed in order, with values in later files overriding those in earlier
files.
The parsing of CLI args and config files is initiated by invoking the config
manager e.g.::
conf = ConfigOpts()
conf.register_opt(BoolOpt('verbose', ...))
conf(sys.argv[1:])
if conf.verbose:
...
Options can be registered as belonging to a group::
rabbit_group = cfg.OptGroup(name='rabbit',
title='RabbitMQ options')
rabbit_host_opt = cfg.StrOpt('host',
default='localhost',
help='IP/hostname to listen on'),
rabbit_port_opt = cfg.IntOpt('port',
default=5672,
help='Port number to listen on')
def register_rabbit_opts(conf):
conf.register_group(rabbit_group)
# options can be registered under a group in either of these ways:
conf.register_opt(rabbit_host_opt, group=rabbit_group)
conf.register_opt(rabbit_port_opt, group='rabbit')
If no group attributes are required other than the group name, the group
need not be explicitly registered, e.g.::
def register_rabbit_opts(conf):
# The group will automatically be created, equivalent to calling::
# conf.register_group(OptGroup(name='rabbit'))
conf.register_opt(rabbit_port_opt, group='rabbit')
If no group is specified, options belong to the 'DEFAULT' section of config
files::
glance-api.conf:
[DEFAULT]
bind_port = 9292
...
[rabbit]
host = localhost
port = 5672
use_ssl = False
userid = guest
password = guest
virtual_host = /
Command-line options in a group are automatically prefixed with the
group name::
--rabbit-host localhost --rabbit-port 9999
Option values in the default group are referenced as attributes/properties on
the config manager; groups are also attributes on the config manager, with
attributes for each of the options associated with the group::
server.start(app, conf.bind_port, conf.bind_host, conf)
self.connection = kombu.connection.BrokerConnection(
hostname=conf.rabbit.host,
port=conf.rabbit.port,
...)
Option values may reference other values using PEP 292 string substitution::
opts = [
cfg.StrOpt('state_path',
default=os.path.join(os.path.dirname(__file__), '../'),
help='Top-level directory for maintaining nova state'),
cfg.StrOpt('sqlite_db',
default='nova.sqlite',
help='file name for sqlite'),
cfg.StrOpt('sql_connection',
default='sqlite:///$state_path/$sqlite_db',
help='connection string for sql database'),
]
Note that interpolation can be avoided by using '$$'.
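For example (a minimal sketch; the option name and value are illustrative),
doubling the dollar sign yields a literal '$' in the resolved value::
opts = [
cfg.StrOpt('price_format',
default='$$%.2f',
help='Resolves to the literal value "$%.2f"'),
]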
Options may be declared as required so that an error is raised if the user
does not supply a value for the option.
Options may be declared as secret so that their values are not leaked into
log files::
opts = [
cfg.StrOpt('s3_store_access_key', secret=True),
cfg.StrOpt('s3_store_secret_key', secret=True),
...
]
This module also contains a global instance of the CommonConfigOpts class
in order to support a common usage pattern in OpenStack::
from nova.openstack.common import cfg
opts = [
cfg.StrOpt('bind_host', default='0.0.0.0'),
cfg.IntOpt('bind_port', default=9292),
]
CONF = cfg.CONF
CONF.register_opts(opts)
def start(server, app):
server.start(app, CONF.bind_port, CONF.bind_host)
Positional command line arguments are supported via a 'positional' Opt
constructor argument::
>>> CONF.register_cli_opt(MultiStrOpt('bar', positional=True))
True
>>> CONF(['a', 'b'])
>>> CONF.bar
['a', 'b']
It is also possible to use argparse "sub-parsers" to parse additional
command line arguments using the SubCommandOpt class:
>>> def add_parsers(subparsers):
... list_action = subparsers.add_parser('list')
... list_action.add_argument('id')
...
>>> CONF.register_cli_opt(SubCommandOpt('action', handler=add_parsers))
True
>>> CONF(['list', '10'])
>>> CONF.action.name, CONF.action.id
('list', '10')
"""
import argparse
import collections
import copy
import functools
import glob
import os
import string
import sys
from nova.openstack.common import iniparser
class Error(Exception):
"""Base class for cfg exceptions."""
def __init__(self, msg=None):
self.msg = msg
def __str__(self):
return self.msg
class ArgsAlreadyParsedError(Error):
"""Raised if a CLI opt is registered after parsing."""
def __str__(self):
ret = "arguments already parsed"
if self.msg:
ret += ": " + self.msg
return ret
class NoSuchOptError(Error, AttributeError):
"""Raised if an opt which doesn't exist is referenced."""
def __init__(self, opt_name, group=None):
self.opt_name = opt_name
self.group = group
def __str__(self):
if self.group is None:
return "no such option: %s" % self.opt_name
else:
return "no such option in group %s: %s" % (self.group.name,
self.opt_name)
class NoSuchGroupError(Error):
"""Raised if a group which doesn't exist is referenced."""
def __init__(self, group_name):
self.group_name = group_name
def __str__(self):
return "no such group: %s" % self.group_name
class DuplicateOptError(Error):
"""Raised if multiple opts with the same name are registered."""
def __init__(self, opt_name):
self.opt_name = opt_name
def __str__(self):
return "duplicate option: %s" % self.opt_name
class RequiredOptError(Error):
"""Raised if an option is required but no value is supplied by the user."""
def __init__(self, opt_name, group=None):
self.opt_name = opt_name
self.group = group
def __str__(self):
if self.group is None:
return "value required for option: %s" % self.opt_name
else:
return "value required for option: %s.%s" % (self.group.name,
self.opt_name)
class TemplateSubstitutionError(Error):
"""Raised if an error occurs substituting a variable in an opt value."""
def __str__(self):
return "template substitution error: %s" % self.msg
class ConfigFilesNotFoundError(Error):
"""Raised if one or more config files are not found."""
def __init__(self, config_files):
self.config_files = config_files
def __str__(self):
return ('Failed to read some config files: %s' %
string.join(self.config_files, ','))
class ConfigFileParseError(Error):
"""Raised if there is an error parsing a config file."""
def __init__(self, config_file, msg):
self.config_file = config_file
self.msg = msg
def __str__(self):
return 'Failed to parse %s: %s' % (self.config_file, self.msg)
class ConfigFileValueError(Error):
"""Raised if a config file value does not match its opt type."""
pass
def _fixpath(p):
"""Apply tilde expansion and absolutization to a path."""
return os.path.abspath(os.path.expanduser(p))
def _get_config_dirs(project=None):
"""Return a list of directors where config files may be located.
:param project: an optional project name
If a project is specified, following directories are returned::
~/.${project}/
~/
/etc/${project}/
/etc/
Otherwise, these directories::
~/
/etc/
"""
cfg_dirs = [
_fixpath(os.path.join('~', '.' + project)) if project else None,
_fixpath('~'),
os.path.join('/etc', project) if project else None,
'/etc'
]
return filter(bool, cfg_dirs)
def _search_dirs(dirs, basename, extension=""):
"""Search a list of directories for a given filename.
Iterates over the supplied directories, returning the first file
found with the supplied name and extension.
:param dirs: a list of directories
:param basename: the filename, e.g. 'glance-api'
:param extension: the file extension, e.g. '.conf'
:returns: the path to a matching file, or None
"""
for d in dirs:
path = os.path.join(d, '%s%s' % (basename, extension))
if os.path.exists(path):
return path
def find_config_files(project=None, prog=None, extension='.conf'):
"""Return a list of default configuration files.
:param project: an optional project name
:param prog: the program name, defaulting to the basename of sys.argv[0]
:param extension: the type of the config file
We default to two config files: [${project}.conf, ${prog}.conf]
And we look for those config files in the following directories::
~/.${project}/
~/
/etc/${project}/
/etc/
We return an absolute path for (at most) one of each the default config
files, for the topmost directory it exists in.
For example, if project=foo, prog=bar and /etc/foo/foo.conf, /etc/bar.conf
and ~/.foo/bar.conf all exist, then we return ['/etc/foo/foo.conf',
'~/.foo/bar.conf']
If no project name is supplied, we only look for ${prog}.conf.
"""
if prog is None:
prog = os.path.basename(sys.argv[0])
cfg_dirs = _get_config_dirs(project)
config_files = []
if project:
config_files.append(_search_dirs(cfg_dirs, project, extension))
config_files.append(_search_dirs(cfg_dirs, prog, extension))
return filter(bool, config_files)
def _is_opt_registered(opts, opt):
"""Check whether an opt with the same name is already registered.
The same opt may be registered multiple times, with only the first
registration having any effect. However, it is an error to attempt
to register a different opt with the same name.
:param opts: the set of opts already registered
:param opt: the opt to be registered
:returns: True if the opt was previously registered, False otherwise
:raises: DuplicateOptError if a naming conflict is detected
"""
if opt.dest in opts:
if opts[opt.dest]['opt'] != opt:
raise DuplicateOptError(opt.name)
return True
else:
return False
def set_defaults(opts, **kwargs):
for opt in opts:
if opt.dest in kwargs:
opt.default = kwargs[opt.dest]
break
class Opt(object):
"""Base class for all configuration options.
An Opt object has no public methods, but has a number of public string
properties:
name:
the name of the option, which may include hyphens
dest:
the (hyphen-less) ConfigOpts property which contains the option value
short:
a single character CLI option name
default:
the default value of the option
positional:
True if the option is a positional CLI argument
metavar:
the name shown as the argument to a CLI option in --help output
help:
a string explaining how the option's value is used
"""
multi = False
def __init__(self, name, dest=None, short=None, default=None,
positional=False, metavar=None, help=None,
secret=False, required=False, deprecated_name=None):
"""Construct an Opt object.
The only required parameter is the option's name. However, it is
common to also supply a default and help string for all options.
:param name: the option's name
:param dest: the name of the corresponding ConfigOpts property
:param short: a single character CLI option name
:param default: the default value of the option
:param positional: True if the option is a positional CLI argument
:param metavar: the option argument to show in --help
:param help: an explanation of how the option is used
:param secret: true iff the value should be obfuscated in log output
:param required: true iff a value must be supplied for this option
:param deprecated_name: deprecated name option. Acts like an alias
"""
self.name = name
if dest is None:
self.dest = self.name.replace('-', '_')
else:
self.dest = dest
self.short = short
self.default = default
self.positional = positional
self.metavar = metavar
self.help = help
self.secret = secret
self.required = required
if deprecated_name is not None:
self.deprecated_name = deprecated_name.replace('-', '_')
else:
self.deprecated_name = None
def __ne__(self, another):
return vars(self) != vars(another)
def _get_from_config_parser(self, cparser, section):
"""Retrieves the option value from a MultiConfigParser object.
This is the method ConfigOpts uses to look up the option value from
config files. Most opt types override this method in order to perform
type appropriate conversion of the returned value.
:param cparser: a ConfigParser object
:param section: a section name
"""
return self._cparser_get_with_deprecated(cparser, section)
def _cparser_get_with_deprecated(self, cparser, section):
"""If cannot find option as dest try deprecated_name alias."""
if self.deprecated_name is not None:
return cparser.get(section, [self.dest, self.deprecated_name])
return cparser.get(section, [self.dest])
def _add_to_cli(self, parser, group=None):
"""Makes the option available in the command line interface.
This is the method ConfigOpts uses to add the opt to the CLI interface
as appropriate for the opt type. Some opt types may extend this method,
others may just extend the helper methods it uses.
:param parser: the CLI option parser
:param group: an optional OptGroup object
"""
container = self._get_argparse_container(parser, group)
kwargs = self._get_argparse_kwargs(group)
prefix = self._get_argparse_prefix('', group)
self._add_to_argparse(container, self.name, self.short, kwargs, prefix,
self.positional, self.deprecated_name)
def _add_to_argparse(self, container, name, short, kwargs, prefix='',
positional=False, deprecated_name=None):
"""Add an option to an argparse parser or group.
:param container: an argparse._ArgumentGroup object
:param name: the opt name
:param short: the short opt name
:param kwargs: the keyword arguments for add_argument()
:param prefix: an optional prefix to prepend to the opt name
:param positional: whether the option is a positional CLI argument
:raises: DuplicateOptError if a naming conflict is detected
"""
def hyphen(arg):
return arg if not positional else ''
args = [hyphen('--') + prefix + name]
if short:
args.append(hyphen('-') + short)
if deprecated_name:
args.append(hyphen('--') + prefix + deprecated_name)
try:
container.add_argument(*args, **kwargs)
except argparse.ArgumentError as e:
raise DuplicateOptError(e)
def _get_argparse_container(self, parser, group):
"""Returns an argparse._ArgumentGroup.
:param parser: an argparse.ArgumentParser
:param group: an (optional) OptGroup object
:returns: an argparse._ArgumentGroup if group is given, else parser
"""
if group is not None:
return group._get_argparse_group(parser)
else:
return parser
def _get_argparse_kwargs(self, group, **kwargs):
"""Build a dict of keyword arguments for argparse's add_argument().
Most opt types extend this method to customize the behaviour of the
options added to argparse.
:param group: an optional group
:param kwargs: optional keyword arguments to add to
:returns: a dict of keyword arguments
"""
if not self.positional:
dest = self.dest
if group is not None:
dest = group.name + '_' + dest
kwargs['dest'] = dest
else:
kwargs['nargs'] = '?'
kwargs.update({'default': None,
'metavar': self.metavar,
'help': self.help, })
return kwargs
def _get_argparse_prefix(self, prefix, group):
"""Build a prefix for the CLI option name, if required.
CLI options in a group are prefixed with the group's name in order
to avoid conflicts between similarly named options in different
groups.
:param prefix: an existing prefix to append to (e.g. 'no' or '')
:param group: an optional OptGroup object
:returns: a CLI option prefix including the group name, if appropriate
"""
if group is not None:
return group.name + '-' + prefix
else:
return prefix
class StrOpt(Opt):
"""
String opts do not have their values transformed and are returned as
str objects.
"""
pass
class BoolOpt(Opt):
"""
Bool opts are set to True or False on the command line using --optname or
--nooptname respectively.
In config files, boolean values are case insensitive and can be set using
1/0, yes/no, true/false or on/off.
"""
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def __init__(self, *args, **kwargs):
if 'positional' in kwargs:
raise ValueError('positional boolean args not supported')
super(BoolOpt, self).__init__(*args, **kwargs)
def _get_from_config_parser(self, cparser, section):
"""Retrieve the opt value as a boolean from ConfigParser."""
def convert_bool(v):
value = self._boolean_states.get(v.lower())
if value is None:
raise ValueError('Unexpected boolean value %r' % v)
return value
return [convert_bool(v) for v in
self._cparser_get_with_deprecated(cparser, section)]
def _add_to_cli(self, parser, group=None):
"""Extends the base class method to add the --nooptname option."""
super(BoolOpt, self)._add_to_cli(parser, group)
self._add_inverse_to_argparse(parser, group)
def _add_inverse_to_argparse(self, parser, group):
"""Add the --nooptname option to the option parser."""
container = self._get_argparse_container(parser, group)
kwargs = self._get_argparse_kwargs(group, action='store_false')
prefix = self._get_argparse_prefix('no', group)
kwargs["help"] = "The inverse of --" + self.name
self._add_to_argparse(container, self.name, None, kwargs, prefix,
self.positional, self.deprecated_name)
def _get_argparse_kwargs(self, group, action='store_true', **kwargs):
"""Extends the base argparse keyword dict for boolean options."""
kwargs = super(BoolOpt, self)._get_argparse_kwargs(group, **kwargs)
# metavar has no effect for BoolOpt
if 'metavar' in kwargs:
del kwargs['metavar']
if action != 'store_true':
action = 'store_false'
kwargs['action'] = action
return kwargs
class IntOpt(Opt):
"""Int opt values are converted to integers using the int() builtin."""
def _get_from_config_parser(self, cparser, section):
"""Retrieve the opt value as a integer from ConfigParser."""
return [int(v) for v in self._cparser_get_with_deprecated(cparser,
section)]
def _get_argparse_kwargs(self, group, **kwargs):
"""Extends the base argparse keyword dict for integer options."""
return super(IntOpt,
self)._get_argparse_kwargs(group, type=int, **kwargs)
class FloatOpt(Opt):
"""Float opt values are converted to floats using the float() builtin."""
def _get_from_config_parser(self, cparser, section):
"""Retrieve the opt value as a float from ConfigParser."""
return [float(v) for v in
self._cparser_get_with_deprecated(cparser, section)]
def _get_argparse_kwargs(self, group, **kwargs):
"""Extends the base argparse keyword dict for float options."""
return super(FloatOpt, self)._get_argparse_kwargs(group,
type=float, **kwargs)
class ListOpt(Opt):
"""
List opt values are simple string values separated by commas. The opt value
is a list containing these strings.
"""
class _StoreListAction(argparse.Action):
"""
An argparse action for parsing an option value into a list.
"""
def __call__(self, parser, namespace, values, option_string=None):
if values is not None:
values = [a.strip() for a in values.split(',')]
setattr(namespace, self.dest, values)
def _get_from_config_parser(self, cparser, section):
"""Retrieve the opt value as a list from ConfigParser."""
return [[a.strip() for a in v.split(',')] for v in
self._cparser_get_with_deprecated(cparser, section)]
def _get_argparse_kwargs(self, group, **kwargs):
"""Extends the base argparse keyword dict for list options."""
return Opt._get_argparse_kwargs(self,
group,
action=ListOpt._StoreListAction,
**kwargs)
class MultiStrOpt(Opt):
"""
Multistr opt values are string opts which may be specified multiple times.
The opt value is a list containing all the string values specified.
"""
multi = True
def _get_argparse_kwargs(self, group, **kwargs):
"""Extends the base argparse keyword dict for multi str options."""
kwargs = super(MultiStrOpt, self)._get_argparse_kwargs(group)
if not self.positional:
kwargs['action'] = 'append'
else:
kwargs['nargs'] = '*'
return kwargs
def _cparser_get_with_deprecated(self, cparser, section):
"""If cannot find option as dest try deprecated_name alias."""
if self.deprecated_name is not None:
return cparser.get(section, [self.dest, self.deprecated_name],
multi=True)
return cparser.get(section, [self.dest], multi=True)
class SubCommandOpt(Opt):
"""
Sub-command options allow argparse sub-parsers to be used to parse
additional command line arguments.
The handler argument to the SubCommandOpt constructor is a callable
which is supplied an argparse subparsers object. Use this handler
callable to add sub-parsers.
The opt value is a SubCommandAttr object with the name of the chosen
sub-parser stored in the 'name' attribute and the values of other
sub-parser arguments available as additional attributes.
"""
def __init__(self, name, dest=None, handler=None,
title=None, description=None, help=None):
"""Construct an sub-command parsing option.
This behaves similarly to other Opt sub-classes but adds a
'handler' argument. The handler is a callable which is supplied
a subparsers object when invoked. The add_parser() method on
this subparsers object can be used to register parsers for
sub-commands.
:param name: the option's name
:param dest: the name of the corresponding ConfigOpts property
:param title: title of the sub-commands group in help output
:param description: description of the group in help output
:param help: a help string giving an overview of available sub-commands
"""
super(SubCommandOpt, self).__init__(name, dest=dest, help=help)
self.handler = handler
self.title = title
self.description = description
def _add_to_cli(self, parser, group=None):
"""Add argparse sub-parsers and invoke the handler method."""
dest = self.dest
if group is not None:
dest = group.name + '_' + dest
subparsers = parser.add_subparsers(dest=dest,
title=self.title,
description=self.description,
help=self.help)
if self.handler is not None:
self.handler(subparsers)
class OptGroup(object):
"""
Represents a group of opts.
CLI opts in the group are automatically prefixed with the group name.
Each group corresponds to a section in config files.
An OptGroup object has no public methods, but has a number of public string
properties:
name:
the name of the group
title:
the group title as displayed in --help
help:
the group description as displayed in --help
"""
def __init__(self, name, title=None, help=None):
"""Constructs an OptGroup object.
:param name: the group name
:param title: the group title for --help
:param help: the group description for --help
"""
self.name = name
if title is None:
self.title = "%s options" % title
else:
self.title = title
self.help = help
self._opts = {} # dict of dicts of (opt:, override:, default:)
self._argparse_group = None
def _register_opt(self, opt, cli=False):
"""Add an opt to this group.
:param opt: an Opt object
:param cli: whether this is a CLI option
:returns: False if previously registered, True otherwise
:raises: DuplicateOptError if a naming conflict is detected
"""
if _is_opt_registered(self._opts, opt):
return False
self._opts[opt.dest] = {'opt': opt, 'cli': cli}
return True
def _unregister_opt(self, opt):
"""Remove an opt from this group.
:param opt: an Opt object
"""
if opt.dest in self._opts:
del self._opts[opt.dest]
def _get_argparse_group(self, parser):
"""Build an argparse._ArgumentGroup for this group."""
if self._argparse_group is None:
self._argparse_group = parser.add_argument_group(self.title,
self.help)
return self._argparse_group
def _clear(self):
"""Clear this group's option parsing state."""
self._argparse_group = None
class ParseError(iniparser.ParseError):
def __init__(self, msg, lineno, line, filename):
super(ParseError, self).__init__(msg, lineno, line)
self.filename = filename
def __str__(self):
return 'at %s:%d, %s: %r' % (self.filename, self.lineno,
self.msg, self.line)
class ConfigParser(iniparser.BaseParser):
def __init__(self, filename, sections):
super(ConfigParser, self).__init__()
self.filename = filename
self.sections = sections
self.section = None
def parse(self):
with open(self.filename) as f:
return super(ConfigParser, self).parse(f)
def new_section(self, section):
self.section = section
self.sections.setdefault(self.section, {})
def assignment(self, key, value):
if not self.section:
raise self.error_no_section()
self.sections[self.section].setdefault(key, [])
self.sections[self.section][key].append('\n'.join(value))
def parse_exc(self, msg, lineno, line=None):
return ParseError(msg, lineno, line, self.filename)
def error_no_section(self):
return self.parse_exc('Section must be started before assignment',
self.lineno)
class MultiConfigParser(object):
def __init__(self):
self.parsed = []
def read(self, config_files):
read_ok = []
for filename in config_files:
sections = {}
parser = ConfigParser(filename, sections)
try:
parser.parse()
except IOError:
continue
self.parsed.insert(0, sections)
read_ok.append(filename)
return read_ok
def get(self, section, names, multi=False):
rvalue = []
for sections in self.parsed:
if section not in sections:
continue
for name in names:
if name in sections[section]:
if multi:
rvalue = sections[section][name] + rvalue
else:
return sections[section][name]
if multi and rvalue != []:
return rvalue
raise KeyError
class ConfigOpts(collections.Mapping):
"""
Config options which may be set on the command line or in config files.
ConfigOpts is a configuration option manager with APIs for registering
option schemas, grouping options, parsing option values and retrieving
the values of options.
"""
def __init__(self):
"""Construct a ConfigOpts object."""
self._opts = {} # dict of dicts of (opt:, override:, default:)
self._groups = {}
self._args = None
self._oparser = None
self._cparser = None
self._cli_values = {}
self.__cache = {}
self._config_opts = []
def _pre_setup(self, project, prog, version, usage, default_config_files):
"""Initialize a ConfigCliParser object for option parsing."""
if prog is None:
prog = os.path.basename(sys.argv[0])
if default_config_files is None:
default_config_files = find_config_files(project, prog)
self._oparser = argparse.ArgumentParser(prog=prog, usage=usage)
self._oparser.add_argument('--version',
action='version',
version=version)
return prog, default_config_files
def _setup(self, project, prog, version, usage, default_config_files):
"""Initialize a ConfigOpts object for option parsing."""
self._config_opts = [
MultiStrOpt('config-file',
default=default_config_files,
metavar='PATH',
help='Path to a config file to use. Multiple config '
'files can be specified, with values in later '
'files taking precedence. The default files '
'used are: %s' % (default_config_files, )),
StrOpt('config-dir',
metavar='DIR',
help='Path to a config directory to pull *.conf '
'files from. This file set is sorted, so as to '
'provide a predictable parse order if individual '
'options are over-ridden. The set is parsed after '
'the file(s), if any, specified via --config-file, '
'hence over-ridden options in the directory take '
'precedence.'),
]
self.register_cli_opts(self._config_opts)
self.project = project
self.prog = prog
self.version = version
self.usage = usage
self.default_config_files = default_config_files
def __clear_cache(f):
@functools.wraps(f)
def __inner(self, *args, **kwargs):
if kwargs.pop('clear_cache', True):
self.__cache.clear()
return f(self, *args, **kwargs)
return __inner
def __call__(self,
args=None,
project=None,
prog=None,
version=None,
usage=None,
default_config_files=None):
"""Parse command line arguments and config files.
Calling a ConfigOpts object causes the supplied command line arguments
and config files to be parsed, causing opt values to be made available
as attributes of the object.
The object may be called multiple times, each time causing the previous
set of values to be overwritten.
Automatically registers the --config-file option with either a supplied
list of default config files, or a list from find_config_files().
If the --config-dir option is set, any *.conf files from this
directory are pulled in, after all the file(s) specified by the
--config-file option.
:param args: command line arguments (defaults to sys.argv[1:])
:param project: the toplevel project name, used to locate config files
:param prog: the name of the program (defaults to sys.argv[0] basename)
:param version: the program version (for --version)
:param usage: a usage string (%prog will be expanded)
:param default_config_files: config files to use by default
:returns: the list of arguments left over after parsing options
:raises: SystemExit, ConfigFilesNotFoundError, ConfigFileParseError,
RequiredOptError, DuplicateOptError
"""
self.clear()
prog, default_config_files = self._pre_setup(project,
prog,
version,
usage,
default_config_files)
self._setup(project, prog, version, usage, default_config_files)
self._cli_values = self._parse_cli_opts(args)
self._parse_config_files()
self._check_required_opts()
def __getattr__(self, name):
"""Look up an option value and perform string substitution.
:param name: the opt name (or 'dest', more precisely)
:returns: the option value (after string substitution) or a GroupAttr
:raises: NoSuchOptError, ConfigFileValueError, TemplateSubstitutionError
"""
return self._get(name)
def __getitem__(self, key):
"""Look up an option value and perform string substitution."""
return self.__getattr__(key)
def __contains__(self, key):
"""Return True if key is the name of a registered opt or group."""
return key in self._opts or key in self._groups
def __iter__(self):
"""Iterate over all registered opt and group names."""
for key in self._opts.keys() + self._groups.keys():
yield key
def __len__(self):
"""Return the number of options and option groups."""
return len(self._opts) + len(self._groups)
def reset(self):
"""Clear the object state and unset overrides and defaults."""
self._unset_defaults_and_overrides()
self.clear()
@__clear_cache
def clear(self):
"""Clear the state of the object to before it was called.
Any subparsers added using the add_cli_subparsers() will also be
removed as a side-effect of this method.
"""
self._args = None
self._cli_values.clear()
self._oparser = argparse.ArgumentParser()
self._cparser = None
self.unregister_opts(self._config_opts)
for group in self._groups.values():
group._clear()
@__clear_cache
def register_opt(self, opt, group=None, cli=False):
"""Register an option schema.
Registering an option schema makes any option value which is previously
or subsequently parsed from the command line or config files available
as an attribute of this object.
:param opt: an instance of an Opt sub-class
:param cli: whether this is a CLI option
:param group: an optional OptGroup object or group name
:return: False if the opt was already registered, True otherwise
:raises: DuplicateOptError
"""
if group is not None:
group = self._get_group(group, autocreate=True)
return group._register_opt(opt, cli)
if _is_opt_registered(self._opts, opt):
return False
self._opts[opt.dest] = {'opt': opt, 'cli': cli}
return True
@__clear_cache
def register_opts(self, opts, group=None):
"""Register multiple option schemas at once."""
for opt in opts:
self.register_opt(opt, group, clear_cache=False)
@__clear_cache
def register_cli_opt(self, opt, group=None):
"""Register a CLI option schema.
CLI option schemas must be registered before the command line and
config files are parsed. This is to ensure that all CLI options are
shown in --help and that option validation works as expected.
:param opt: an instance of an Opt sub-class
:param group: an optional OptGroup object or group name
:return: False if the opt was already registered, True otherwise
:raises: DuplicateOptError, ArgsAlreadyParsedError
"""
if self._args is not None:
raise ArgsAlreadyParsedError("cannot register CLI option")
return self.register_opt(opt, group, cli=True, clear_cache=False)
@__clear_cache
def register_cli_opts(self, opts, group=None):
"""Register multiple CLI option schemas at once."""
for opt in opts:
self.register_cli_opt(opt, group, clear_cache=False)
def register_group(self, group):
"""Register an option group.
An option group must be registered before options can be registered
with the group.
:param group: an OptGroup object
"""
if group.name in self._groups:
return
self._groups[group.name] = copy.copy(group)
@__clear_cache
def unregister_opt(self, opt, group=None):
"""Unregister an option.
:param opt: an Opt object
:param group: an optional OptGroup object or group name
:raises: ArgsAlreadyParsedError, NoSuchGroupError
"""
if self._args is not None:
raise ArgsAlreadyParsedError("reset before unregistering options")
if group is not None:
self._get_group(group)._unregister_opt(opt)
elif opt.dest in self._opts:
del self._opts[opt.dest]
@__clear_cache
def unregister_opts(self, opts, group=None):
"""Unregister multiple CLI option schemas at once."""
for opt in opts:
self.unregister_opt(opt, group, clear_cache=False)
def import_opt(self, name, module_str, group=None):
"""Import an option definition from a module.
Import a module and check that a given option is registered.
This is intended for use with global configuration objects
like cfg.CONF where modules commonly register options with
CONF at module load time. If one module requires an option
defined by another module it can use this method to explicitly
declare the dependency.
:param name: the name/dest of the opt
:param module_str: the name of a module to import
:param group: an optional OptGroup object or group name
:raises: NoSuchOptError, NoSuchGroupError
"""
__import__(module_str)
self._get_opt_info(name, group)
@__clear_cache
def set_override(self, name, override, group=None):
"""Override an opt value.
Override the command line, config file and default values of a
given option.
:param name: the name/dest of the opt
:param override: the override value
:param group: an optional OptGroup object or group name
:raises: NoSuchOptError, NoSuchGroupError
"""
opt_info = self._get_opt_info(name, group)
opt_info['override'] = override
@__clear_cache
def set_default(self, name, default, group=None):
"""Override an opt's default value.
Override the default value of a given option. A command line or
config file value will still take precedence over this default.
:param name: the name/dest of the opt
:param default: the default value
:param group: an optional OptGroup object or group name
:raises: NoSuchOptError, NoSuchGroupError
"""
opt_info = self._get_opt_info(name, group)
opt_info['default'] = default
@__clear_cache
def clear_override(self, name, group=None):
"""Clear an override an opt value.
Clear a previously set override of the command line, config file
and default values of a given option.
:param name: the name/dest of the opt
:param group: an optional OptGroup object or group name
:raises: NoSuchOptError, NoSuchGroupError
"""
opt_info = self._get_opt_info(name, group)
opt_info.pop('override', None)
@__clear_cache
def clear_default(self, name, group=None):
"""Clear an override an opt's default value.
Clear a previously set override of the default value of given option.
:param name: the name/dest of the opt
:param group: an optional OptGroup object or group name
:raises: NoSuchOptError, NoSuchGroupError
"""
opt_info = self._get_opt_info(name, group)
opt_info.pop('default', None)
def _all_opt_infos(self):
"""A generator function for iteration opt infos."""
for info in self._opts.values():
yield info, None
for group in self._groups.values():
for info in group._opts.values():
yield info, group
def _all_cli_opts(self):
"""A generator function for iterating CLI opts."""
for info, group in self._all_opt_infos():
if info['cli']:
yield info['opt'], group
def _unset_defaults_and_overrides(self):
"""Unset any default or override on all options."""
for info, group in self._all_opt_infos():
info.pop('default', None)
info.pop('override', None)
def find_file(self, name):
"""Locate a file located alongside the config files.
Search for a file with the supplied basename in the directories
which we have already loaded config files from and other known
configuration directories.
The directory, if any, supplied by the config_dir option is
searched first. Then the config_file option is iterated over
and each of the base directories of the config_files values
are searched. Failing both of these, the standard directories
searched by the module-level find_config_files() function are
used. The first matching file is returned.
:param name: the basename of the file, e.g. 'policy.json'
:returns: the path to a matching file, or None
"""
dirs = []
if self.config_dir:
dirs.append(_fixpath(self.config_dir))
for cf in reversed(self.config_file):
dirs.append(os.path.dirname(_fixpath(cf)))
dirs.extend(_get_config_dirs(self.project))
return _search_dirs(dirs, name)
def log_opt_values(self, logger, lvl):
"""Log the value of all registered opts.
It's often useful for an app to log its configuration to a log file at
startup for debugging. This method dumps the entire config state to
the supplied logger at a given log level.
:param logger: a logging.Logger object
:param lvl: the log level (e.g. logging.DEBUG) arg to logger.log()
"""
logger.log(lvl, "*" * 80)
logger.log(lvl, "Configuration options gathered from:")
logger.log(lvl, "command line args: %s", self._args)
logger.log(lvl, "config files: %s", self.config_file)
logger.log(lvl, "=" * 80)
def _sanitize(opt, value):
"""Obfuscate values of options declared secret"""
return value if not opt.secret else '*' * len(str(value))
for opt_name in sorted(self._opts):
opt = self._get_opt_info(opt_name)['opt']
logger.log(lvl, "%-30s = %s", opt_name,
_sanitize(opt, getattr(self, opt_name)))
for group_name in self._groups:
group_attr = self.GroupAttr(self, self._get_group(group_name))
for opt_name in sorted(self._groups[group_name]._opts):
opt = self._get_opt_info(opt_name, group_name)['opt']
logger.log(lvl, "%-30s = %s",
"%s.%s" % (group_name, opt_name),
_sanitize(opt, getattr(group_attr, opt_name)))
logger.log(lvl, "*" * 80)
def print_usage(self, file=None):
"""Print the usage message for the current program."""
self._oparser.print_usage(file)
def print_help(self, file=None):
"""Print the help message for the current program."""
self._oparser.print_help(file)
def _get(self, name, group=None):
if isinstance(group, OptGroup):
key = (group.name, name)
else:
key = (group, name)
try:
return self.__cache[key]
except KeyError:
value = self._substitute(self._do_get(name, group))
self.__cache[key] = value
return value
def _do_get(self, name, group=None):
"""Look up an option value.
:param name: the opt name (or 'dest', more precisely)
:param group: an OptGroup
:returns: the option value, or a GroupAttr object
:raises: NoSuchOptError, NoSuchGroupError, ConfigFileValueError,
TemplateSubstitutionError
"""
if group is None and name in self._groups:
return self.GroupAttr(self, self._get_group(name))
info = self._get_opt_info(name, group)
opt = info['opt']
if isinstance(opt, SubCommandOpt):
return self.SubCommandAttr(self, group, opt.dest)
if 'override' in info:
return info['override']
values = []
if self._cparser is not None:
section = group.name if group is not None else 'DEFAULT'
try:
value = opt._get_from_config_parser(self._cparser, section)
except KeyError:
pass
except ValueError as ve:
raise ConfigFileValueError(str(ve))
else:
if not opt.multi:
# No need to continue since the last value wins
return value[-1]
values.extend(value)
name = name if group is None else group.name + '_' + name
value = self._cli_values.get(name)
if value is not None:
if not opt.multi:
return value
# argparse ignores default=None for nargs='*'
if opt.positional and not value:
value = opt.default
return value + values
if values:
return values
if 'default' in info:
return info['default']
return opt.default
def _substitute(self, value):
"""Perform string template substitution.
Substitute any template variables (e.g. $foo, ${bar}) in the supplied
string value(s) with opt values.
:param value: the string value, or list of string values
:returns: the substituted string(s)
"""
if isinstance(value, list):
return [self._substitute(i) for i in value]
elif isinstance(value, str):
tmpl = string.Template(value)
return tmpl.safe_substitute(self.StrSubWrapper(self))
else:
return value
def _get_group(self, group_or_name, autocreate=False):
"""Looks up a OptGroup object.
Helper function to return an OptGroup given a parameter which can
either be the group's name or an OptGroup object.
The OptGroup object returned is from the internal dict of OptGroup
objects, which will be a copy of any OptGroup object that users of
the API have access to.
:param group_or_name: the group's name or the OptGroup object itself
:param autocreate: whether to auto-create the group if it's not found
:raises: NoSuchGroupError
"""
group = group_or_name if isinstance(group_or_name, OptGroup) else None
group_name = group.name if group else group_or_name
if group_name not in self._groups:
if group is not None or not autocreate:
raise NoSuchGroupError(group_name)
self.register_group(OptGroup(name=group_name))
return self._groups[group_name]
def _get_opt_info(self, opt_name, group=None):
"""Return the (opt, override, default) dict for an opt.
:param opt_name: an opt name/dest
:param group: an optional group name or OptGroup object
:raises: NoSuchOptError, NoSuchGroupError
"""
if group is None:
opts = self._opts
else:
group = self._get_group(group)
opts = group._opts
if opt_name not in opts:
raise NoSuchOptError(opt_name, group)
return opts[opt_name]
def _parse_config_files(self):
"""Parse the config files from --config-file and --config-dir.
:raises: ConfigFilesNotFoundError, ConfigFileParseError
"""
config_files = list(self.config_file)
if self.config_dir:
config_dir_glob = os.path.join(self.config_dir, '*.conf')
config_files += sorted(glob.glob(config_dir_glob))
config_files = [_fixpath(p) for p in config_files]
self._cparser = MultiConfigParser()
try:
read_ok = self._cparser.read(config_files)
except iniparser.ParseError as pe:
raise ConfigFileParseError(pe.filename, str(pe))
if read_ok != config_files:
not_read_ok = filter(lambda f: f not in read_ok, config_files)
raise ConfigFilesNotFoundError(not_read_ok)
def _check_required_opts(self):
"""Check that all opts marked as required have values specified.
:raises: RequiredOptError
"""
for info, group in self._all_opt_infos():
opt = info['opt']
if opt.required:
if ('default' in info or 'override' in info):
continue
if self._get(opt.dest, group) is None:
raise RequiredOptError(opt.name, group)
def _parse_cli_opts(self, args):
"""Parse command line options.
Initializes the command line option parser and parses the supplied
command line arguments.
:param args: the command line arguments
:returns: a dict of parsed option values
:raises: SystemExit, DuplicateOptError
"""
self._args = args
for opt, group in self._all_cli_opts():
opt._add_to_cli(self._oparser, group)
return vars(self._oparser.parse_args(args))
class GroupAttr(collections.Mapping):
"""
A helper class representing the option values of a group as a mapping
and attributes.
"""
def __init__(self, conf, group):
"""Construct a GroupAttr object.
:param conf: a ConfigOpts object
:param group: an OptGroup object
"""
self._conf = conf
self._group = group
def __getattr__(self, name):
"""Look up an option value and perform template substitution."""
return self._conf._get(name, self._group)
def __getitem__(self, key):
"""Look up an option value and perform string substitution."""
return self.__getattr__(key)
def __contains__(self, key):
"""Return True if key is the name of a registered opt or group."""
return key in self._group._opts
def __iter__(self):
"""Iterate over all registered opt and group names."""
for key in self._group._opts.keys():
yield key
def __len__(self):
"""Return the number of options and option groups."""
return len(self._group._opts)
class SubCommandAttr(object):
"""
A helper class representing the name and arguments of an argparse
sub-parser.
"""
def __init__(self, conf, group, dest):
"""Construct a SubCommandAttr object.
:param conf: a ConfigOpts object
:param group: an OptGroup object
:param dest: the name of the sub-parser
"""
self._conf = conf
self._group = group
self._dest = dest
def __getattr__(self, name):
"""Look up a sub-parser name or argument value."""
if name == 'name':
name = self._dest
if self._group is not None:
name = self._group.name + '_' + name
return self._conf._cli_values[name]
if name in self._conf:
raise DuplicateOptError(name)
try:
return self._conf._cli_values[name]
except KeyError:
raise NoSuchOptError(name)
class StrSubWrapper(object):
"""
A helper class exposing opt values as a dict for string substitution.
"""
def __init__(self, conf):
"""Construct a StrSubWrapper object.
:param conf: a ConfigOpts object
"""
self.conf = conf
def __getitem__(self, key):
"""Look up an opt value from the ConfigOpts object.
:param key: an opt name
:returns: an opt value
:raises: TemplateSubstitutionError if attribute is a group
"""
value = getattr(self.conf, key)
if isinstance(value, self.conf.GroupAttr):
raise TemplateSubstitutionError(
'substituting group %s not supported' % key)
return value
class CommonConfigOpts(ConfigOpts):
DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
BoolOpt('debug',
short='d',
default=False,
help='Print debugging output'),
BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output'),
]
logging_cli_opts = [
StrOpt('log-config',
metavar='PATH',
help='If this option is specified, the logging configuration '
'file specified is used and overrides any other logging '
'options specified. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
StrOpt('log-format',
default=DEFAULT_LOG_FORMAT,
metavar='FORMAT',
help='A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'Default: %(default)s'),
StrOpt('log-date-format',
default=DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If not set, logging will go to stdout.'),
StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The directory to keep log files in '
'(will be prepended to --log-file)'),
BoolOpt('use-syslog',
default=False,
help='Use syslog for logging.'),
StrOpt('syslog-log-facility',
default='LOG_USER',
help='syslog facility to receive log lines')
]
def __init__(self):
super(CommonConfigOpts, self).__init__()
self.register_cli_opts(self.common_cli_opts)
self.register_cli_opts(self.logging_cli_opts)
CONF = CommonConfigOpts()
| {
"content_hash": "346ebed6a7efc2450af2ca9321308175",
"timestamp": "",
"source": "github",
"line_count": 1771,
"max_line_length": 79,
"avg_line_length": 34.04573687182383,
"alnum_prop": 0.5921552367526329,
"repo_name": "fajoy/nova",
"id": "ad1f2a8a69e1ade29f21a12e899372ddce97ac9b",
"size": "60946",
"binary": false,
"copies": "2",
"ref": "refs/heads/grizzly-2",
"path": "nova/openstack/common/cfg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7567423"
},
{
"name": "Shell",
"bytes": "15428"
}
],
"symlink_target": ""
} |
from bs4 import BeautifulSoup
try:
# For Python 3.0 and later
from urllib.request import urlopen
from urllib.error import URLError
from urllib.error import HTTPError
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen
from urllib2 import URLError
from urllib2 import HTTPError
try:
# For Python 3.0 and later
from urllib.parse import urlparse
except ImportError:
# Fall back to Python 2's urlparse
from urlparse import urlparse
try:
from simplejson import loads, dumps
except ImportError:
from json import loads, dumps
TITLE = "title"
NAME = "name"
ITEMPROP = "itemprop"
URL = "url"
SECURE_URL = "secure_url"
HEIGHT = "height"
WIDTH = "width"
HREF_PROPERTY = "href"
META_TAG = "meta"
LINK_TAG = "link"
SOURCE = "source"
IMAGE = "image"
VIDEO = "video"
TYPE = "type"
CONTENT = "content"
PROPERTY = "property"
DESCRIPTION = "description"
KEYWORDS = "keywords"
THEME_COLOR = "theme-color"
OG = "og:"
DEFAULT_HTML_PARSER = "html5lib"
DEFAULT_HTML5_VIDEO_EMBED = "text/html"
INFORMATION_SPACE = "www."
HTTP_PROTOCOL = "http"
HTTP_PROTOCOL_NORMAL = "http://"
SECURE_HTTP_PROTOCOL = "https://"
class SimpleScraper():
"""docstring for SimpleScraper"""
def get_scraped_data(self, link_to_scrap):
try:
result = {}
if link_to_scrap == "":
return {
"error": "Did not get a valid link"
}
try:
if (link_to_scrap.find(INFORMATION_SPACE) == -1 and link_to_scrap.find(HTTP_PROTOCOL) == -1):
link_to_scrap = HTTP_PROTOCOL_NORMAL + INFORMATION_SPACE + link_to_scrap
requestResult = self.__get_request_content(link_to_scrap)
# try secure protocol
request_code = requestResult.getcode()
if request_code < 200 or request_code > 400:
link_to_scrap = SECURE_HTTP_PROTOCOL + INFORMATION_SPACE + link_to_scrap
requestResult = self.__get_request_content(link_to_scrap)
elif (link_to_scrap.find(HTTP_PROTOCOL) == -1):
link_to_scrap = HTTP_PROTOCOL_NORMAL + link_to_scrap
requestResult = self.__get_request_content(link_to_scrap)
# try secure protocol
request_code = requestResult.getcode()
if request_code < 200 or request_code > 400:
link_to_scrap = SECURE_HTTP_PROTOCOL + link_to_scrap
requestResult = self.__get_request_content(link_to_scrap)
else:
requestResult = self.__get_request_content(link_to_scrap)
except Exception as e:
return {
"error": "cannot scrap the provided url",
"reason": e.args[0]
}
request_code = requestResult.getcode()
if request_code >= 200 and request_code <= 400:
page = requestResult.read()
soup = BeautifulSoup(page, DEFAULT_HTML_PARSER)
all_meta_tags = soup.find_all(META_TAG)
all_link_tags = soup.find_all(LINK_TAG, {"rel": "canonical"})
default_title = soup.find(TITLE)
for tag in all_meta_tags:
result = self.__verifyTagName(result, tag)
if TITLE not in result and default_title is not None:
result[TITLE] = default_title.contents[0]
result = self.__verifyTagOpenGraph(result, all_meta_tags)
for tag in all_link_tags:
href = tag.get(HREF_PROPERTY)
if href is not None:
if HTTP_PROTOCOL in href:
result[URL] = href
if URL not in result:
result[URL] = link_to_scrap
result[SOURCE] = urlparse(link_to_scrap).netloc
if IMAGE in result:
if result[IMAGE].find(HTTP_PROTOCOL) == -1:
result[IMAGE] = HTTP_PROTOCOL_NORMAL + result[SOURCE] + result[IMAGE]
return result
except Exception as e:
return {
"error": "cannot scrap the provided url",
"reason": e.args[0]
}
def __get_request_content(self, link):
try:
return urlopen(link)
except URLError as e:
raise Exception (
"cannot get url content %s" % str(e.reason)
)
except HTTPError as e:
raise Exception (
"cannot make http request %s" % str(e.reason)
)
def __verifyTagName(self, result, tag):
tag_content = tag.get(CONTENT)
tag_to_search = tag.get(NAME)
if tag_to_search is None:
tag_to_search = tag.get(PROPERTY)
if tag_to_search is None:
tag_to_search = tag.get(ITEMPROP)
if tag_to_search is not None and tag_content is not None:
if TITLE == tag_to_search.lower() and TITLE not in result:
result[TITLE] = tag_content
if DESCRIPTION == tag_to_search.lower() and DESCRIPTION not in result:
result[DESCRIPTION] = tag_content
if IMAGE == tag_to_search.lower() and IMAGE not in result:
result[IMAGE] = tag_content
return result
def __verifyTagOpenGraph(self, result, all_tags):
open_graph_objects = {}
searching_iter_name = first_sub_element = last_sub_element = last_element = None
for index, tag in enumerate(all_tags):
tag_content = tag.get(CONTENT)
tag_to_search = tag.get(PROPERTY)
if tag_to_search is None:
tag_to_search = tag.get(NAME)
if tag_to_search is None:
tag_to_search = tag.get(ITEMPROP)
if tag_to_search is not None:
if OG in tag_to_search:
first_iteration = tag_to_search.find(":")
second_iteration = tag_to_search.find(":", first_iteration + 1)
if second_iteration == -1:
tag_og_title = tag_to_search.find(TITLE, first_iteration)
if TITLE not in result and tag_og_title != -1 and tag_to_search is not None:
result[TITLE] = tag_content
tag_og_description = tag_to_search.find(DESCRIPTION, first_iteration)
if DESCRIPTION not in result and tag_og_description != -1 and tag_to_search is not None:
result[DESCRIPTION] = tag_content
tag_og_image = tag_to_search.find(IMAGE, first_iteration)
if IMAGE not in result and tag_og_image != -1 and tag_to_search is not None:
result[IMAGE] = tag_content
if tag_og_title != -1 or tag_og_description != -1 or tag_og_image != -1:
open_graph_objects[tag_to_search[first_iteration + 1:]] = tag_content
else:
iter_name = tag_to_search[first_iteration + 1:second_iteration]
if searching_iter_name is None:
searching_iter_name = iter_name
open_graph_objects[searching_iter_name] = []
if iter_name != searching_iter_name:
searching_iter_name = first_sub_element = last_element = last_sub_element = None
else:
sub_element = tag_to_search[second_iteration + 1:]
if first_sub_element is None:
first_sub_element = sub_element
actual_object = {}
actual_object[first_sub_element] = tag_content
elif first_sub_element == sub_element:
open_graph_objects[searching_iter_name].append(actual_object)
actual_object = {}
actual_object[first_sub_element] = tag_content
last_sub_element = last_element
last_element = None
else:
if last_element == last_sub_element and last_sub_element is not None and last_element is not None:
open_graph_objects[searching_iter_name].append(actual_object)
first_sub_element = sub_element
actual_object = {}
actual_object[first_sub_element] = tag_content
else:
last_element = sub_element
actual_object[sub_element] = tag_content
# check for youtube og video properties for embed iframe
if VIDEO in open_graph_objects:
for elem in open_graph_objects[VIDEO]:
if TYPE in elem:
if elem[TYPE] == DEFAULT_HTML5_VIDEO_EMBED:
if SECURE_URL in elem:
iframe = '<iframe src="%s"' % elem[SECURE_URL]
if HEIGHT in elem:
iframe = iframe + ' height="%s"' % elem[HEIGHT]
if WIDTH in elem:
iframe = iframe + ' width="%s"' % elem[WIDTH]
iframe = iframe + '></iframe>'
result["iframe"] = iframe
elif URL in elem:
iframe = "<iframe src=" + elem[URL]
if HEIGHT in elem:
iframe = iframe + ' height="%s"' % elem[HEIGHT]
if WIDTH in elem:
iframe = iframe + ' width="%s"' % elem[WIDTH]
iframe = iframe + '></iframe>'
result["iframe"] = iframe
return result
| {
"content_hash": "4283734c1555c07130f2c4da0cbb3d6a",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 130,
"avg_line_length": 44.65948275862069,
"alnum_prop": 0.5016890261557765,
"repo_name": "ROZ32/pythonScraper",
"id": "1754a55a6a75bd7d4354215f384ecdb620509284",
"size": "10361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simplescraper/scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4408"
},
{
"name": "Python",
"bytes": "15068"
}
],
"symlink_target": ""
} |
import argparse
import shlex
class ArgHandler(object):
def __init__(self, line):
self.parser = argparse.ArgumentParser(description='SQLCell arguments')
self.parser.add_argument(
"-e", "--engine",
help='Engine param, specify your connection string: --engine=postgresql://user:password@localhost:5432/mydatabase',
required=False
)
self.parser.add_argument(
"-es", "--engines",
help='add new engines to be aliased and stored for future use without having to specify entire connection string.',
required=False, default=False, action="store_true"
)
self.parser.add_argument(
"-v", "--var",
help='Variable name to write output to: --var=foo',
required=False
)
self.parser.add_argument(
"-bg", "--background",
help='whether to run query in background or not: --background runs in background',
required=False, default=False, action="store_true"
)
self.parser.add_argument(
"-k", "--hook",
help='define shortcuts with the --hook param',
required=False, default=False, action="store_true"
)
self.parser.add_argument(
"-r", "--refresh",
help='refresh engines/hooks by specifying --refresh flag',
required=False, default=False, action="store_true"
)
self.args = self.parser.parse_args(shlex.split(line))
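# A minimal usage sketch (the connection string is illustrative):
#
#   handler = ArgHandler('--engine=postgresql://user:pass@localhost:5432/db --var=df')
#   handler.args.engine   # 'postgresql://user:pass@localhost:5432/db'
#   handler.args.var      # 'df'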
| {
"content_hash": "9924559a024cc5da99c9915e29375777",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 128,
"avg_line_length": 41.486486486486484,
"alnum_prop": 0.5771986970684039,
"repo_name": "tmthyjames/SQLCell",
"id": "97a4102a79e23575b7a5b079032881f68e13106a",
"size": "1535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqlcell/args.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14519"
}
],
"symlink_target": ""
} |
import Plugin
from common import Task
import Trace
import StringUtils
class TextExporterPlugin(Plugin.ExporterPlugin):
"""
Text trace file writer.
"""
formatName = "text"
def saveTrace(self, trace, traceFile, truncateValues = True, includeSensorData = False):
try:
library = self.analyzer.project.targets["code"].library
except (AttributeError, KeyError):
library = None
task = Task.startTask("text-export", "Formatting text", len(trace.events))
truncateValues = self.analyzer.parseBoolean(truncateValues)
includeSensorData = self.analyzer.parseBoolean(includeSensorData)
maxValueLength = 1024 * 1024
# Describe instrumentation sensors
if includeSensorData:
for name, sensor in sorted(trace.sensors.items()):
print >>traceFile, "%010d %010d @inst %s: %s" % (0, 0, name, sensor.description)
for event in trace.events:
try:
function = self.analyzer.lookupFunction(event)
except:
function = None
# Print out any associated instrumentation data
if includeSensorData:
for key, value in sorted(event.sensorData.items()):
if value:
print >>traceFile, "%010d %010d @inst %s = %s" % (event.seq, event.time, key, value)
# Print out any modified arrays that are not actual parameters for the event.
for array in event.modifiedArrays:
for value in event.values.values():
if isinstance(value, Trace.Array) and value.id == array.id:
break
else:
if truncateValues:
text = StringUtils.ellipsis(array, maxLength = maxValueLength)
else:
text = array
print >>traceFile, "%010d %010d @array 0x%x = %s" % (event.seq, event.time, array.id, text)
args = []
# Print out the parameters
for name, value in event.values.items():
if not name:
continue
if function and library:
value = StringUtils.decorateValue(library, function, name, value)
if truncateValues:
value = StringUtils.ellipsis(value, maxLength = maxValueLength)
args += ["%s=%s" % (name, value)]
print >>traceFile, "%010d %010d %s (%s)" % (event.seq, event.time, event.name, ", ".join(args)),
if None in event.values:
print >>traceFile, "-> %s" % event.values[None], "+%d" % event.duration
else:
print >>traceFile, "+%d" % event.duration
task.step()
| {
"content_hash": "a94aec7b98ff00db06be68d8047c4d28",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 102,
"avg_line_length": 35.945205479452056,
"alnum_prop": 0.5926067073170732,
"repo_name": "skyostil/tracy",
"id": "9e6ac2396f91ea2ad2390a465f24a723872a8546",
"size": "3727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/analyzer/plugins/core/TextFormat.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "952865"
},
{
"name": "C++",
"bytes": "165814"
},
{
"name": "Prolog",
"bytes": "554"
},
{
"name": "Python",
"bytes": "1384305"
},
{
"name": "Shell",
"bytes": "4482"
}
],
"symlink_target": ""
} |
"""
This is our basic test running framework based on Twisted's Trial.
Usage Examples:
# to run all the tests
python run_tests.py
# to run a specific test suite imported here
python run_tests.py NodeConnectionTestCase
# to run a specific test imported here
python run_tests.py NodeConnectionTestCase.test_reboot
# to run some test suites elsewhere
python run_tests.py nova.tests.node_unittest
python run_tests.py nova.tests.node_unittest.NodeConnectionTestCase
Due to our use of multiprocessing, we frequently get some ignorable
'Interrupted system call' exceptions after test completion.
"""
import __main__
import sys
from nova import vendor
from twisted.scripts import trial as trial_script
from nova import flags
from nova import twistd
from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *
from nova.tests.keeper_unittest import *
from nova.tests.network_unittest import *
from nova.tests.node_unittest import *
from nova.tests.objectstore_unittest import *
from nova.tests.storage_unittest import *
from nova.tests.users_unittest import *
from nova.tests.datastore_unittest import *
FLAGS = flags.FLAGS
if __name__ == '__main__':
OptionsClass = twistd.WrapTwistedOptions(trial_script.Options)
config = OptionsClass()
argv = config.parseOptions()
FLAGS.verbose = True
# TODO(termie): these should make a call instead of doing work on import
if FLAGS.fake_tests:
from nova.tests.fake_flags import *
else:
from nova.tests.real_flags import *
if len(argv) == 1 and len(config['tests']) == 0:
# If no tests were specified run the ones imported in this file
# NOTE(termie): "tests" is not a flag, just some Trial related stuff
config['tests'].update(['__main__'])
elif len(config['tests']):
# If we specified tests check first whether they are in __main__
for arg in config['tests']:
key = arg.split('.')[0]
if hasattr(__main__, key):
config['tests'].remove(arg)
config['tests'].add('__main__.%s' % arg)
trial_script._initialDebugSetup(config)
trialRunner = trial_script._makeRunner(config)
suite = trial_script._getSuite(config)
if config['until-failure']:
test_result = trialRunner.runUntilFailure(suite)
else:
test_result = trialRunner.run(suite)
if config.tracer:
sys.settrace(None)
results = config.tracer.results()
results.write_results(show_missing=1, summary=False,
coverdir=config.coverdir)
sys.exit(not test_result.wasSuccessful())
| {
"content_hash": "07c224903ad8b937071ee3a447fe58ee",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 76,
"avg_line_length": 32.144578313253014,
"alnum_prop": 0.6829085457271364,
"repo_name": "movmov/cc",
"id": "886ab4bd0bc5495e3529100404c6d277ffbaecc1",
"size": "3322",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "run_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Country'
db.create_table('geo_country', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=120)),
('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=2)),
('mapbox_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40)),
))
db.send_create_signal('geo', ['Country'])
# Adding model 'Region'
db.create_table('geo_region', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=120)),
('mapbox_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geo.Country'])),
))
db.send_create_signal('geo', ['Region'])
# Adding model 'City'
db.create_table('geo_city', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=120)),
('mapbox_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40)),
('region', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geo.Region'], null=True, blank=True)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geo.Country'])),
('lat', self.gf('django.db.models.fields.FloatField')()),
('lng', self.gf('django.db.models.fields.FloatField')()),
('w', self.gf('django.db.models.fields.FloatField')(default=None, null=True, blank=True)),
('s', self.gf('django.db.models.fields.FloatField')(default=None, null=True, blank=True)),
('e', self.gf('django.db.models.fields.FloatField')(default=None, null=True, blank=True)),
('n', self.gf('django.db.models.fields.FloatField')(default=None, null=True, blank=True)),
))
db.send_create_signal('geo', ['City'])
# Adding model 'Geocoding'
db.create_table('geo_geocoding', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('country', self.gf('django.db.models.fields.CharField')(default='', max_length=50)),
('region', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
('city', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
('geo_country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geo.Country'])),
('geo_region', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geo.Region'], null=True, on_delete=models.SET_NULL, blank=True)),
('geo_city', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['geo.City'], null=True, on_delete=models.SET_NULL, blank=True)),
))
db.send_create_signal('geo', ['Geocoding'])
def backwards(self, orm):
# Deleting model 'Country'
db.delete_table('geo_country')
# Deleting model 'Region'
db.delete_table('geo_region')
# Deleting model 'City'
db.delete_table('geo_city')
# Deleting model 'Geocoding'
db.delete_table('geo_geocoding')
models = {
'geo.city': {
'Meta': {'object_name': 'City'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.Country']"}),
'e': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {}),
'lng': ('django.db.models.fields.FloatField', [], {}),
'mapbox_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'n': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.Region']", 'null': 'True', 'blank': 'True'}),
's': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'w': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'geo.country': {
'Meta': {'object_name': 'Country'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapbox_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '120'})
},
'geo.geocoding': {
'Meta': {'object_name': 'Geocoding'},
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'geo_city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.City']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'geo_country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.Country']"}),
'geo_region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.Region']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'geo.region': {
'Meta': {'object_name': 'Region'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapbox_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '120'})
}
}
complete_apps = ['geo'] | {
"content_hash": "b0c030157127e16c169b2abcf2de7751",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 171,
"avg_line_length": 60.767857142857146,
"alnum_prop": 0.5650896267998825,
"repo_name": "hoosteeno/mozillians",
"id": "085a96fff3ce48ce2dee70d6958ea2079e169507",
"size": "6830",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mozillians/geo/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1986"
},
{
"name": "CSS",
"bytes": "205336"
},
{
"name": "HTML",
"bytes": "160325"
},
{
"name": "JavaScript",
"bytes": "90367"
},
{
"name": "Makefile",
"bytes": "478"
},
{
"name": "Python",
"bytes": "8289816"
},
{
"name": "Shell",
"bytes": "7758"
}
],
"symlink_target": ""
} |
""" Sahana Eden Messaging Model
@copyright: 2009-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3ChannelModel",
"S3MessageModel",
"S3MessageAttachmentModel",
"S3EmailModel",
"S3MCommonsModel",
"S3ParsingModel",
"S3RSSModel",
"S3SMSModel",
"S3SMSOutboundModel",
"S3MessageSubscriptionModel",
"S3TropoModel",
"S3TwilioModel",
"S3TwitterModel",
"S3TwitterSearchModel",
"S3XFormsModel",
"S3BaseStationModel",
"msg_search_subscription_notifications",
]
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# Compact JSON encoding
SEPARATORS = (",", ":")
# =============================================================================
class S3ChannelModel(S3Model):
"""
Messaging Channels
- all Inbound & Outbound channels for messages are instances of this
super-entity
"""
names = ["msg_channel",
"msg_channel_limit",
"msg_channel_status",
"msg_channel_id",
"msg_channel_enable",
"msg_channel_disable",
"msg_channel_enable_interactive",
"msg_channel_disable_interactive",
"msg_channel_onaccept",
]
def model(self):
T = current.T
db = current.db
define_table = self.define_table
#----------------------------------------------------------------------
# Super entity: msg_channel
#
channel_types = Storage(msg_email_channel = T("Email (Inbound)"),
# @ToDo:
#msg_facebook_channel = T("Facebook"),
msg_mcommons_channel = T("Mobile Commons (Inbound)"),
msg_rss_channel = T("RSS Feed"),
msg_sms_modem_channel = T("SMS Modem"),
msg_sms_webapi_channel = T("SMS WebAPI (Outbound)"),
msg_sms_smtp_channel = T("SMS via SMTP (Outbound)"),
msg_tropo_channel = T("Tropo"),
msg_twilio_channel = T("Twilio (Inbound)"),
msg_twitter_channel = T("Twitter"),
)
tablename = "msg_channel"
self.super_entity(tablename, "channel_id",
channel_types,
Field("name",
#label = T("Name"),
),
Field("description",
#label = T("Description"),
),
Field("enabled", "boolean",
#label = T("Enabled?")
#represent = s3_yes_no_represent,
),
# @ToDo: Indicate whether channel can be used for Inbound or Outbound
#Field("inbound", "boolean",
# label = T("Inbound?")),
#Field("outbound", "boolean",
# label = T("Outbound?")),
)
# @todo: make lazy_table
table = db[tablename]
table.instance_type.readable = True
# Reusable Field
channel_id = S3ReusableField("channel_id", "reference %s" % tablename,
label = T("Channel"),
requires = IS_EMPTY_OR(
IS_ONE_OF_EMPTY(db, "msg_channel.id")),
represent = S3Represent(lookup=tablename),
ondelete = "SET NULL")
self.add_components(tablename,
msg_channel_status = "channel_id",
)
# ---------------------------------------------------------------------
# Channel Limit
# Used to limit the number of emails sent from the system
# - works by simply recording an entry for the timestamp to be checked against
#
# - currently just used by msg.send_email()
#
tablename = "msg_channel_limit"
define_table(tablename,
# @ToDo: Make it per-channel
#channel_id(),
*s3_timestamp())
# ---------------------------------------------------------------------
# Channel Status
# Used to record errors encountered in the Channel
#
tablename = "msg_channel_status"
define_table(tablename,
channel_id(),
Field("status",
#label = T("Status")
#represent = s3_yes_no_represent,
),
*s3_meta_fields())
# ---------------------------------------------------------------------
return dict(msg_channel_id = channel_id,
msg_channel_enable = self.channel_enable,
msg_channel_disable = self.channel_disable,
msg_channel_enable_interactive = self.channel_enable_interactive,
msg_channel_disable_interactive = self.channel_disable_interactive,
msg_channel_onaccept = self.channel_onaccept,
msg_channel_poll = self.channel_poll,
)
# -----------------------------------------------------------------------------
@staticmethod
def channel_enable(tablename, channel_id):
"""
Enable a Channel
- Schedule a Poll for new messages
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
s3db = current.s3db
table = s3db.table(tablename)
record = db(table.channel_id == channel_id).select(table.id, # needed for update_record
table.enabled,
limitby=(0, 1),
).first()
if not record.enabled:
# Flag it as enabled
# Update Instance
record.update_record(enabled = True)
# Update Super
s3db.update_super(table, record)
# Do we have an existing Task?
ttable = db.scheduler_task
args = '["%s", %s]' % (tablename, channel_id)
query = ((ttable.function_name == "msg_poll") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby=(0, 1)).first()
if exists:
return "Channel already enabled"
else:
current.s3task.schedule_task("msg_poll",
args=[tablename, channel_id],
period=300, # seconds
timeout=300, # seconds
repeats=0 # unlimited
)
return "Channel enabled"
# -----------------------------------------------------------------------------
@staticmethod
def channel_enable_interactive(r, **attr):
"""
Enable a Channel
- Schedule a Poll for new messages
S3Method for interactive requests
"""
tablename = r.tablename
result = current.s3db.msg_channel_enable(tablename, r.record.channel_id)
current.session.confirmation = result
fn = tablename.split("_", 1)[1]
redirect(URL(f=fn))
# -----------------------------------------------------------------------------
@staticmethod
def channel_disable(tablename, channel_id):
"""
Disable a Channel
- Remove schedule for Polling for new messages
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
s3db = current.s3db
table = s3db.table(tablename)
record = db(table.channel_id == channel_id).select(table.id, # needed for update_record
table.enabled,
limitby=(0, 1),
).first()
if record.enabled:
# Flag it as disabled
# Update Instance
record.update_record(enabled = False)
# Update Super
s3db.update_super(table, record)
# Do we have an existing Task?
ttable = db.scheduler_task
args = '["%s", %s]' % (tablename, channel_id)
query = ((ttable.function_name == "msg_poll") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby=(0, 1)).first()
if exists:
# Disable all
db(query).update(status="STOPPED")
return "Channel disabled"
else:
return "Channel already disabled"
# -----------------------------------------------------------------------------
@staticmethod
def channel_disable_interactive(r, **attr):
"""
Disable a Channel
- Remove schedule for Polling for new messages
S3Method for interactive requests
"""
tablename = r.tablename
result = current.s3db.msg_channel_disable(tablename, r.record.channel_id)
current.session.confirmation = result
fn = tablename.split("_", 1)[1]
redirect(URL(f=fn))
# -------------------------------------------------------------------------
@staticmethod
def channel_onaccept(form):
"""
Process the Enabled Flag
"""
if form.record:
# Update form
# Process only if changed
if form.record.enabled and not form.vars.enabled:
current.s3db.msg_channel_disable(form.table._tablename,
form.vars.channel_id)
elif form.vars.enabled and not form.record.enabled:
current.s3db.msg_channel_enable(form.table._tablename,
form.vars.channel_id)
else:
# Create form
# Process only if enabled
if form.vars.enabled:
current.s3db.msg_channel_enable(form.table._tablename,
form.vars.channel_id)
# -----------------------------------------------------------------------------
@staticmethod
def channel_poll(r, **attr):
"""
Poll a Channel for new messages
S3Method for interactive requests
"""
tablename = r.tablename
current.s3task.async("msg_poll", args=[tablename, r.record.channel_id])
current.session.confirmation = \
current.T("The poll request has been submitted, so new messages should appear shortly - refresh to see them")
if tablename == "msg_email_channel":
fn = "email_inbox"
elif tablename == "msg_mcommons_channel":
fn = "sms_inbox"
elif tablename == "msg_rss_channel":
fn = "rss"
elif tablename == "msg_twilio_channel":
fn = "sms_inbox"
elif tablename == "msg_twitter_channel":
fn = "twitter_inbox"
else:
return "Unsupported channel: %s" % tablename
redirect(URL(f=fn))
# =============================================================================
class S3MessageModel(S3Model):
"""
Messages
"""
names = ["msg_message",
"msg_message_id",
"msg_message_represent",
"msg_outbox",
]
def model(self):
T = current.T
db = current.db
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
configure = self.configure
define_table = self.define_table
# Message priority
msg_priority_opts = {3 : T("High"),
2 : T("Medium"),
1 : T("Low"),
}
# ---------------------------------------------------------------------
# Message Super Entity - all Inbound & Outbound Messages
#
message_types = Storage(msg_email = T("Email"),
msg_rss = T("RSS"),
msg_sms = T("SMS"),
msg_twitter = T("Twitter"),
msg_twitter_result = T("Twitter Search Results"),
)
tablename = "msg_message"
self.super_entity(tablename, "message_id",
message_types,
# Knowing which Channel Incoming Messages
# came in on allows correlation to Outbound
# messages (campaign_message, deployment_alert, etc)
self.msg_channel_id(),
s3_datetime(default="now"),
Field("body", "text",
label = T("Message"),
),
Field("from_address",
label = T("From"),
),
Field("to_address",
label = T("To"),
),
Field("inbound", "boolean",
default = False,
represent = lambda direction: \
(direction and [T("In")] or \
[T("Out")])[0],
label = T("Direction")),
)
# @todo: make lazy_table
table = db[tablename]
table.instance_type.readable = True
table.instance_type.writable = True
configure(tablename,
list_fields = ["instance_type",
"from_address",
"to_address",
"body",
"inbound",
],
)
# Reusable Field
message_represent = S3Represent(lookup=tablename, fields=["body"])
message_id = S3ReusableField("message_id", "reference %s" % tablename,
requires = IS_EMPTY_OR(
IS_ONE_OF_EMPTY(db, "msg_message.id")),
represent = message_represent,
ondelete = "RESTRICT")
self.add_components(tablename,
msg_attachment = "message_id",
deploy_response = "message_id",
)
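# Illustrative sketch (not part of the original module): instance tables such
# as msg_email link into this super entity via super_link("message_id",
# "msg_message"), so a cross-type query only needs the shared table, e.g.:
#
#   mtable = current.s3db.msg_message
#   emails = current.db(mtable.instance_type == "msg_email").select(mtable.body)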
# ---------------------------------------------------------------------
# Outbound Messages
#
# Show only the supported messaging methods
MSG_CONTACT_OPTS = current.msg.MSG_CONTACT_OPTS
# Maximum number of retries to send a message
MAX_SEND_RETRIES = current.deployment_settings.get_msg_max_send_retries()
# Valid message outbox statuses
MSG_STATUS_OPTS = {1 : T("Unsent"),
2 : T("Sent"),
3 : T("Draft"),
4 : T("Invalid"),
5 : T("Failed"),
}
opt_msg_status = S3ReusableField("status", "integer",
notnull=True,
requires = IS_IN_SET(MSG_STATUS_OPTS,
zero=None),
default = 1,
label = T("Status"),
represent = lambda opt: \
MSG_STATUS_OPTS.get(opt,
UNKNOWN_OPT))
# Outbox - needs to be separate from Message since a single message
# sent needs different outbox entries for each recipient
tablename = "msg_outbox"
define_table(tablename,
# FK not instance
message_id(),
# Person/Group to send the message out to:
self.super_link("pe_id", "pr_pentity"),
# If set used instead of picking up from pe_id:
Field("address"),
Field("contact_method", length=32,
requires = IS_IN_SET(MSG_CONTACT_OPTS,
zero=None),
default = "EMAIL",
label = T("Contact Method"),
represent = lambda opt: \
MSG_CONTACT_OPTS.get(opt, UNKNOWN_OPT)),
opt_msg_status(),
# Used to loop through a PE to get its members
Field("system_generated", "boolean",
default=False),
# Give up if we can't send after MAX_SEND_RETRIES
Field("retries", "integer",
default=MAX_SEND_RETRIES,
readable=False,
writable=False),
*s3_meta_fields())
configure(tablename,
list_fields = ["id",
"message_id",
"pe_id",
"status",
],
orderby = "msg_outbox.created_on desc",
)
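# Illustrative sketch (not part of the original module): because the outbox is
# per-recipient, fanning out a single message is just one msg_outbox row per
# target pe_id (the ids below are hypothetical):
#
#   otable = current.s3db.msg_outbox
#   for pe_id in recipient_pe_ids:
#       otable.insert(message_id = message_id,
#                     pe_id = pe_id,
#                     contact_method = "EMAIL")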
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict(msg_message_id = message_id,
msg_message_represent = message_represent,
)
# =============================================================================
class S3MessageAttachmentModel(S3Model):
"""
Message Attachments
- link table between msg_message & doc_document
"""
names = ["msg_attachment",
]
def model(self):
# ---------------------------------------------------------------------
#
tablename = "msg_attachment"
self.define_table(tablename,
# FK not instance
self.msg_message_id(),
self.doc_document_id(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict()
# =============================================================================
class S3EmailModel(S3ChannelModel):
"""
Email
InBound Channels
Outbound Email is currently handled via deployment_settings
InBox/OutBox
"""
names = ["msg_email_channel",
"msg_email",
]
def model(self):
T = current.T
configure = self.configure
define_table = self.define_table
set_method = self.set_method
super_link = self.super_link
# ---------------------------------------------------------------------
# Email Inbound Channels
#
tablename = "msg_email_channel"
define_table(tablename,
# Instance
super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("enabled", "boolean",
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("server"),
Field("protocol",
requires = IS_IN_SET(["imap", "pop3"],
zero=None)),
Field("use_ssl", "boolean"),
Field("port", "integer"),
Field("username"),
Field("password", "password", length=64,
readable = False,
requires=IS_NOT_EMPTY()),
# Set true to delete messages from the remote
# inbox after fetching them.
Field("delete_from_server", "boolean"),
*s3_meta_fields())
configure(tablename,
onaccept = self.msg_channel_onaccept,
super_entity = "msg_channel",
)
set_method("msg", "email_channel",
method="enable",
action=self.msg_channel_enable_interactive)
set_method("msg", "email_channel",
method="disable",
action=self.msg_channel_disable_interactive)
set_method("msg", "email_channel",
method="poll",
action=self.msg_channel_poll)
# ---------------------------------------------------------------------
# Email Messages: InBox & Outbox
#
sender = current.deployment_settings.get_mail_sender()
tablename = "msg_email"
define_table(tablename,
# Instance
super_link("message_id", "msg_message"),
self.msg_channel_id(),
s3_datetime(default="now"),
Field("subject", length=78, # RFC 2822
label = T("Subject")
),
Field("body", "text",
label = T("Message")
),
Field("from_address", #notnull=True,
default = sender,
label = T("Sender"),
requires = IS_EMAIL()
),
Field("to_address",
label = T("To"),
requires = IS_EMAIL()
),
Field("raw", "text",
readable = False,
writable = False,
label = T("Message Source")
),
Field("inbound", "boolean",
default = False,
represent = lambda direction: \
(direction and [T("In")] or [T("Out")])[0],
label = T("Direction")
),
*s3_meta_fields())
configure(tablename,
orderby = "msg_email.date desc",
super_entity = "msg_message",
)
# Components
self.add_components(tablename,
# Used to link to custom tab deploy_response_select_mission:
deploy_mission = {"name": "select",
"link": "deploy_response",
"joinby": "message_id",
"key": "mission_id",
"autodelete": False,
},
)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3MCommonsModel(S3ChannelModel):
"""
Mobile Commons Inbound SMS Settings
- Outbound can use Web API
"""
names = ["msg_mcommons_channel",
]
def model(self):
#T = current.T
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
tablename = "msg_mcommons_channel"
define_table(tablename,
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("enabled", "boolean",
#label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("campaign_id", length=128, unique=True,
requires=IS_NOT_EMPTY()),
Field("url",
default = \
"https://secure.mcommons.com/api/messages",
requires = IS_URL()
),
Field("username",
requires=IS_NOT_EMPTY()),
Field("password", "password",
readable = False,
requires=IS_NOT_EMPTY()),
Field("query"),
Field("timestmp", "datetime",
writable=False),
*s3_meta_fields())
self.configure(tablename,
super_entity = "msg_channel",
onaccept = self.msg_channel_onaccept,
)
set_method("msg", "mcommons_channel",
method="enable",
action=self.msg_channel_enable_interactive)
set_method("msg", "mcommons_channel",
method="disable",
action=self.msg_channel_disable_interactive)
set_method("msg", "mcommons_channel",
method="poll",
action=self.msg_channel_poll)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3ParsingModel(S3Model):
"""
Message Parsing Model
"""
names = ["msg_parser",
"msg_parsing_status",
"msg_session",
"msg_keyword",
"msg_sender",
"msg_parser_enabled",
"msg_parser_enable",
"msg_parser_disable",
"msg_parser_enable_interactive",
"msg_parser_disable_interactive",
]
def model(self):
T = current.T
define_table = self.define_table
set_method = self.set_method
channel_id = self.msg_channel_id
message_id = self.msg_message_id
# ---------------------------------------------------------------------
# Link between Message Channels and Parsers in parser.py
#
tablename = "msg_parser"
define_table(tablename,
# Source
channel_id(ondelete = "CASCADE"),
Field("function_name",
label = T("Parser")),
Field("enabled", "boolean",
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
*s3_meta_fields())
self.configure(tablename,
onaccept = self.msg_parser_onaccept,
)
set_method("msg", "parser",
method="enable",
action=self.parser_enable_interactive)
set_method("msg", "parser",
method="disable",
action=self.parser_disable_interactive)
set_method("msg", "parser",
method="parse",
action=self.parser_parse)
# ---------------------------------------------------------------------
# Message parsing status
# - component to core msg_message table
#
tablename = "msg_parsing_status"
define_table(tablename,
# Component, not Instance
message_id(ondelete = "CASCADE"),
# Source
channel_id(ondelete = "CASCADE"),
Field("is_parsed", "boolean",
default = False,
represent = lambda parsed: \
(parsed and [T("Parsed")] or \
[T("Not Parsed")])[0],
label = T("Parsing Status")),
message_id("reply_id",
label = T("Reply"),
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Login sessions for Message Parsing
# - links a from_address with a login until expiry
#
tablename = "msg_session"
define_table(tablename,
Field("from_address"),
Field("email"),
Field("created_datetime", "datetime",
default = current.request.utcnow),
Field("expiration_time", "integer"),
Field("is_expired", "boolean",
default = False),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Keywords for Message Parsing
#
tablename = "msg_keyword"
define_table(tablename,
Field("keyword",
label=T("Keyword")),
# @ToDo: Move this to a link table
self.event_incident_type_id(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Senders for Message Parsing
# - whitelist / blacklist / prioritise
#
tablename = "msg_sender"
define_table(tablename,
Field("sender",
label=T("Sender")),
# @ToDo: Make pe_id work for this
#self.super_link("pe_id", "pr_pentity"),
Field("priority", "integer",
label=T("Priority")),
*s3_meta_fields())
# ---------------------------------------------------------------------
return dict(msg_parser_enabled = self.parser_enabled,
msg_parser_enable = self.parser_enable,
msg_parser_disable = self.parser_disable,
)
# -----------------------------------------------------------------------------
@staticmethod
def parser_parse(r, **attr):
"""
Parse unparsed messages
S3Method for interactive requests
"""
record = r.record
current.s3task.async("msg_parse", args=[record.channel_id, record.function_name])
current.session.confirmation = \
current.T("The parse request has been submitted")
redirect(URL(f="parser"))
# -------------------------------------------------------------------------
@staticmethod
def parser_enabled(channel_id):
"""
Helper function to see if there is a Parser connected to a Channel
- used to determine whether to populate the msg_parsing_status table
"""
table = current.s3db.msg_parser
record = current.db(table.channel_id == channel_id).select(table.enabled,
limitby=(0, 1),
).first()
if record and record.enabled:
return True
else:
return False
# -------------------------------------------------------------------------
@staticmethod
def parser_enable(id):
"""
Enable a Parser
- Connect a Parser to a Channel
CLI API for shell scripts & to be called by S3Method
@ToDo: Ensure only 1 Parser is connected to any Channel at a time
"""
db = current.db
s3db = current.s3db
table = s3db.msg_parser
record = db(table.id == id).select(table.id, # needed for update_record
table.enabled,
table.channel_id,
table.function_name,
limitby=(0, 1),
).first()
if not record.enabled:
# Flag it as enabled
record.update_record(enabled = True)
channel_id = record.channel_id
function_name = record.function_name
# Do we have an existing Task?
ttable = db.scheduler_task
args = '[%s, "%s"]' % (channel_id, function_name)
query = ((ttable.function_name == "msg_parse") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby=(0, 1)).first()
if exists:
return "Parser already enabled"
else:
current.s3task.schedule_task("msg_parse",
args=[channel_id, function_name],
period=300, # seconds
timeout=300, # seconds
repeats=0 # unlimited
)
return "Parser enabled"
# -------------------------------------------------------------------------
@staticmethod
def parser_enable_interactive(r, **attr):
"""
Enable a Parser
- Connect a Parser to a Channel
S3Method for interactive requests
"""
result = current.s3db.msg_parser_enable(r.id)
current.session.confirmation = result
redirect(URL(f="parser"))
# -------------------------------------------------------------------------
@staticmethod
def parser_disable(id):
"""
Disable a Parser
- Disconnect a Parser from a Channel
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
s3db = current.s3db
table = s3db.msg_parser
record = db(table.id == id).select(table.id, # needed for update_record
table.enabled,
table.channel_id,
table.function_name,
limitby=(0, 1),
).first()
if record.enabled:
# Flag it as disabled
record.update_record(enabled = False)
# Do we have an existing Task?
ttable = db.scheduler_task
args = '[%s, "%s"]' % (record.channel_id, record.function_name)
query = ((ttable.function_name == "msg_parse") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby=(0, 1)).first()
if exists:
# Disable all
db(query).update(status="STOPPED")
return "Parser disabled"
else:
return "Parser already disabled"
# -------------------------------------------------------------------------
@staticmethod
def parser_disable_interactive(r, **attr):
"""
Disable a Parser
- Disconnect a Parser from a Channel
S3Method for interactive requests
"""
result = current.s3db.msg_parser_disable(r.id)
current.session.confirmation = result
redirect(URL(f="parser"))
# -------------------------------------------------------------------------
@staticmethod
def msg_parser_onaccept(form):
"""
Process the Enabled Flag
"""
if form.record:
# Update form
# Process only if changed
if form.record.enabled and not form.vars.enabled:
current.s3db.msg_parser_disable(form.vars.id)
elif form.vars.enabled and not form.record.enabled:
current.s3db.msg_parser_enable(form.vars.id)
else:
# Create form
# Process only if enabled
if form.vars.enabled:
current.s3db.msg_parser_enable(form.vars.id)
# =============================================================================
class S3RSSModel(S3ChannelModel):
"""
RSS channel
"""
names = ["msg_rss_channel",
"msg_rss"
]
def model(self):
T = current.T
define_table = self.define_table
set_method = self.set_method
super_link = self.super_link
# ---------------------------------------------------------------------
# RSS Settings for an account
#
tablename = "msg_rss_channel"
define_table(tablename,
# Instance
super_link("channel_id", "msg_channel"),
Field("name", length=255, unique=True,
label = T("Name"),
),
Field("description",
label = T("Description"),
),
Field("enabled", "boolean",
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("url",
label = T("URL"),
requires = IS_URL(),
),
s3_datetime(label = T("Last Polled"),
writable = False
),
Field("etag",
label = T("ETag"),
writable = False
),
*s3_meta_fields())
self.configure(tablename,
list_fields = ["name",
"description",
"enabled",
"url",
"date",
"channel_status.status",
],
onaccept = self.msg_channel_onaccept,
super_entity = "msg_channel",
)
set_method("msg", "rss_channel",
method="enable",
action=self.msg_channel_enable_interactive)
set_method("msg", "rss_channel",
method="disable",
action=self.msg_channel_disable_interactive)
set_method("msg", "rss_channel",
method="poll",
action=self.msg_channel_poll)
# ---------------------------------------------------------------------
# RSS Feed Posts
#
tablename = "msg_rss"
define_table(tablename,
# Instance
super_link("message_id", "msg_message"),
self.msg_channel_id(),
s3_datetime(default="now",
label = T("Published on"),
),
Field("title",
label = T("Title"),
),
Field("body", "text",
label = T("Content"),
),
Field("from_address",
label = T("Link"),
),
# http://pythonhosted.org/feedparser/reference-feed-author_detail.html
Field("author",
label = T("Author"),
),
# http://pythonhosted.org/feedparser/reference-entry-tags.html
Field("tags", "list:string",
label = T("Tags"),
),
self.gis_location_id(),
# Just present for Super Entity
Field("inbound", "boolean",
default = True,
readable = False,
writable = False,
),
*s3_meta_fields())
self.configure(tablename,
deduplicate = self.msg_rss_duplicate,
list_fields = ["channel_id",
"title",
"from_address",
"date",
"body"
],
super_entity = current.s3db.msg_message,
)
# ---------------------------------------------------------------------
return dict()
# ---------------------------------------------------------------------
@staticmethod
def msg_rss_duplicate(item):
"""
Import item deduplication, match by link (from_address)
@param item: the S3ImportItem instance
"""
if item.tablename == "msg_rss":
table = item.table
from_address = item.data.get("from_address")
query = (table.from_address == from_address)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3SMSModel(S3Model):
"""
SMS: Short Message Service
These can be received through a number of different gateways
- MCommons
- Modem (@ToDo: Restore this)
- Tropo
- Twilio
"""
names = ["msg_sms",
]
def model(self):
#T = current.T
# ---------------------------------------------------------------------
# SMS Messages: InBox & Outbox
#
tablename = "msg_sms"
self.define_table(tablename,
# Instance
self.super_link("message_id", "msg_message"),
self.msg_channel_id(),
s3_datetime(default="now"),
Field("body", "text",
# Allow multi-part SMS
#length = 160,
#label = T("Message"),
),
Field("from_address",
#label = T("Sender"),
),
Field("to_address",
#label = T("To"),
),
Field("inbound", "boolean",
default = False,
#represent = lambda direction: \
# (direction and [T("In")] or \
# [T("Out")])[0],
#label = T("Direction")),
),
# Used e.g. for Clickatell
Field("remote_id",
#label = T("Remote ID"),
),
*s3_meta_fields())
self.configure(tablename,
super_entity = "msg_message",
)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3SMSOutboundModel(S3Model):
"""
SMS: Short Message Service
- Outbound Channels
These can be sent through a number of different gateways
- Modem
- SMTP
- Tropo
- Web API (inc Clickatell, MCommons, mVaayoo)
"""
names = ["msg_sms_outbound_gateway",
"msg_sms_modem_channel",
"msg_sms_webapi_channel",
"msg_sms_smtp_channel",
]
def model(self):
#T = current.T
configure = self.configure
define_table = self.define_table
# ---------------------------------------------------------------------
# SMS Outbound Gateway
# - select which gateway is in active use
#
tablename = "msg_sms_outbound_gateway"
define_table(tablename,
Field("outgoing_sms_handler", length=32,
requires = IS_IN_SET(current.msg.GATEWAY_OPTS,
zero=None)),
# @ToDo: Allow selection of different gateways based on Organisation/Branch
#self.org_organisation_id(),
# @ToDo: Allow addition of relevant country code (currently in deployment_settings)
#Field("default_country_code", "integer",
# default=44),
*s3_meta_fields())
# ---------------------------------------------------------------------
# SMS Modem Channel
#
tablename = "msg_sms_modem_channel"
define_table(tablename,
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
# Nametag to remember account - To be used later
#Field("account_name"),
Field("modem_port"),
Field("modem_baud", "integer",
default = 115200,
),
Field("enabled", "boolean",
default = True,
),
Field("max_length", "integer",
default = 160,
),
*s3_meta_fields())
configure(tablename,
super_entity = "msg_channel",
)
# ---------------------------------------------------------------------
# Settings for Web API services
#
# @ToDo: Simplified dropdown of services which prepopulates entries & provides nice prompts for the config options
# + Advanced mode for raw access to real fields
#
tablename = "msg_sms_webapi_channel"
define_table(tablename,
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("url",
requires = IS_URL(),
default = "https://api.clickatell.com/http/sendmsg" # Clickatell
#default = "https://secure.mcommons.com/api/send_message" # Mobile Commons
),
Field("parameters",
default="user=yourusername&password=yourpassword&api_id=yourapiid" # Clickatell
#default = "campaign_id=yourid" # Mobile Commons
),
Field("message_variable", "string",
requires = IS_NOT_EMPTY(),
default = "text" # Clickatell
#default = "body" # Mobile Commons
),
Field("to_variable", "string",
requires = IS_NOT_EMPTY(),
default = "to" # Clickatell
#default = "phone_number" # Mobile Commons
),
Field("max_length", "integer",
default = 480, # Clickatell concat 3
),
# If using HTTP Auth (e.g. Mobile Commons)
Field("username"),
Field("password"),
Field("enabled", "boolean",
default = True),
*s3_meta_fields())
configure(tablename,
super_entity = "msg_channel",
)
# ---------------------------------------------------------------------
# SMS via SMTP Channel
#
tablename = "msg_sms_smtp_channel"
define_table(tablename,
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("address", length=64,
requires=IS_NOT_EMPTY()),
Field("subject", length=64),
Field("enabled", "boolean",
default = True),
Field("max_length", "integer",
default = 160,
),
*s3_meta_fields())
configure(tablename,
super_entity = "msg_channel",
)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3MessageSubscriptionModel(S3Model):
"""
Handle Subscription
- currently this is just for Saved Searches
@ToDo: Deprecate (replaced by s3notify)
"""
names = ["msg_subscription"]
def model(self):
T = current.T
auth = current.auth
# @ToDo: Use msg.CONTACT_OPTS
msg_subscription_mode_opts = {1:T("Email"),
#2:T("SMS"),
#3:T("Email and SMS")
}
# @ToDo: Move this to being a component of the Saved Search
# (so that each search can have its own subscription options)
# @ToDo: Make Conditional
# @ToDo: CRUD Strings
tablename = "msg_subscription"
self.define_table(tablename,
Field("user_id", "integer",
default = auth.user_id,
requires = IS_NOT_IN_DB(current.db,
"msg_subscription.user_id"),
readable = False,
writable = False
),
Field("subscribe_mode", "integer",
default = 1,
represent = lambda opt: \
msg_subscription_mode_opts.get(opt, None),
readable = False,
requires = IS_IN_SET(msg_subscription_mode_opts,
zero=None)
),
Field("subscription_frequency",
requires = IS_IN_SET(["daily",
"weekly",
"monthly"]),
default = "daily",
),
self.pr_person_id(label = T("Person"),
default = auth.s3_logged_in_person()),
*s3_meta_fields())
self.configure("msg_subscription",
list_fields=["subscribe_mode",
"subscription_frequency"])
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3TropoModel(S3Model):
"""
Tropo can be used to send & receive SMS, Twitter & XMPP
https://www.tropo.com
"""
names = ["msg_tropo_channel",
"msg_tropo_scratch",
]
def model(self):
#T = current.T
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
# Tropo Channels
#
tablename = "msg_tropo_channel"
define_table(tablename,
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("enabled", "boolean",
#label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("token_messaging"),
#Field("token_voice"),
*s3_meta_fields())
self.configure(tablename,
super_entity = "msg_channel",
)
set_method("msg", "tropo_channel",
method="enable",
action=self.msg_channel_enable_interactive)
set_method("msg", "tropo_channel",
method="disable",
action=self.msg_channel_disable_interactive)
set_method("msg", "tropo_channel",
method="poll",
action=self.msg_channel_poll)
# ---------------------------------------------------------------------
# Tropo Scratch pad for outbound messaging
#
tablename = "msg_tropo_scratch"
define_table(tablename,
Field("row_id", "integer"),
Field("message_id", "integer"),
Field("recipient"),
Field("message"),
Field("network"),
)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3TwilioModel(S3ChannelModel):
"""
Twilio Inbound SMS channel
"""
names = ["msg_twilio_channel",
"msg_twilio_sid",
]
def model(self):
#T = current.T
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
# Twilio Channels
#
tablename = "msg_twilio_channel"
define_table(tablename,
# Instance
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("enabled", "boolean",
#label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("account_name", length=255, unique=True),
Field("url",
default = \
"https://api.twilio.com/2010-04-01/Accounts"
),
Field("account_sid", length=64,
requires = IS_NOT_EMPTY()),
Field("auth_token", "password", length=64,
readable = False,
requires = IS_NOT_EMPTY()),
*s3_meta_fields())
self.configure(tablename,
super_entity = "msg_channel",
onaccept = self.msg_channel_onaccept,
)
set_method("msg", "twilio_channel",
method="enable",
action=self.msg_channel_enable_interactive)
set_method("msg", "twilio_channel",
method="disable",
action=self.msg_channel_disable_interactive)
set_method("msg", "twilio_channel",
method="poll",
action=self.msg_channel_poll)
# ---------------------------------------------------------------------
# Twilio Message extensions
# - store message sid to know which ones we've already downloaded
#
tablename = "msg_twilio_sid"
define_table(tablename,
# Component not Instance
self.msg_message_id(ondelete = "CASCADE"),
Field("sid"),
*s3_meta_fields())
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3TwitterModel(S3Model):
names = ["msg_twitter_channel",
"msg_twitter",
]
def model(self):
T = current.T
db = current.db
configure = self.configure
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
# Twitter Channel
#
tablename = "msg_twitter_channel"
define_table(tablename,
#Instance
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("enabled", "boolean",
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("twitter_account"),
Field("consumer_key", "password"),
Field("consumer_secret", "password"),
Field("access_token", "password"),
Field("access_token_secret", "password"),
*s3_meta_fields())
configure(tablename,
super_entity = "msg_channel",
onaccept = self.msg_channel_onaccept,
#onvalidation = self.twitter_channel_onvalidation
)
set_method("msg", "twitter_channel",
method="enable",
action=self.msg_channel_enable_interactive)
set_method("msg", "twitter_channel",
method="disable",
action=self.msg_channel_disable_interactive)
set_method("msg", "twitter_channel",
method="poll",
action=self.msg_channel_poll)
# ---------------------------------------------------------------------
# Twitter Messages: InBox & Outbox
#
tablename = "msg_twitter"
define_table(tablename,
# Instance
self.super_link("message_id", "msg_message"),
self.msg_channel_id(),
s3_datetime(default="now",
label = T("Posted on"),
),
Field("body", length=140,
label = T("Message"),
),
Field("from_address", #notnull=True,
label = T("From"),
requires = IS_NOT_EMPTY(),
represent = self.twitter_represent,
),
Field("to_address",
label = T("To"),
represent = self.twitter_represent,
),
Field("inbound", "boolean",
default = False,
represent = lambda direction: \
(direction and [T("In")] or \
[T("Out")])[0],
label = T("Direction"),
),
Field("msg_id", # Twitter Message ID
readable = False,
writable = False,
),
*s3_meta_fields())
configure(tablename,
super_entity = "msg_message",
#orderby=~table.priority,
list_fields=["id",
#"priority",
#"category",
"body",
"from_address",
"date",
#"location_id",
],
)
# ---------------------------------------------------------------------
return dict()
# -------------------------------------------------------------------------
@staticmethod
def twitter_represent(nickname, show_link=True):
"""
Represent a Twitter account
"""
if not nickname:
return current.messages["NONE"]
db = current.db
s3db = current.s3db
table = s3db.pr_contact
query = (table.contact_method == "TWITTER") & \
(table.value == nickname)
row = db(query).select(table.pe_id,
limitby=(0, 1)).first()
if row:
repr = s3db.pr_pentity_represent(row.pe_id)
if show_link:
# Assume person
ptable = s3db.pr_person
row = db(ptable.pe_id == row.pe_id).select(ptable.id,
limitby=(0, 1)).first()
if row:
link = URL(c="pr", f="person", args=[row.id])
return A(repr, _href=link)
return repr
else:
return nickname
# -------------------------------------------------------------------------
@staticmethod
def twitter_channel_onvalidation(form):
"""
Complete oauth: take tokens from session + pin from form,
and do the 2nd API call to Twitter
"""
T = current.T
session = current.session
settings = current.deployment_settings.msg
s3 = session.s3
vars = form.vars
if vars.pin and s3.twitter_request_key and s3.twitter_request_secret:
try:
import tweepy
except:
raise HTTP(501, body=T("Can't import tweepy"))
oauth = tweepy.OAuthHandler(settings.twitter_oauth_consumer_key,
settings.twitter_oauth_consumer_secret)
oauth.set_request_token(s3.twitter_request_key,
s3.twitter_request_secret)
try:
oauth.get_access_token(vars.pin)
vars.oauth_key = oauth.access_token.key
vars.oauth_secret = oauth.access_token.secret
twitter = tweepy.API(oauth)
vars.twitter_account = twitter.me().screen_name
vars.pin = "" # we won't need it anymore
return
except tweepy.TweepError:
session.error = T("Settings were reset because authenticating with Twitter failed")
# Either user asked to reset, or error - clear everything
for k in ["oauth_key", "oauth_secret", "twitter_account"]:
vars[k] = None
for k in ["twitter_request_key", "twitter_request_secret"]:
s3[k] = ""
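# Illustrative sketch (not part of the original module): the first half of the
# PIN-based OAuth dance that produces the request token consumed above, using
# the pre-3.0 tweepy API this code assumes:
#
#   import tweepy
#   oauth = tweepy.OAuthHandler(consumer_key, consumer_secret)
#   auth_url = oauth.get_authorization_url()   # user visits this URL & gets a PIN
#   s3.twitter_request_key = oauth.request_token.key
#   s3.twitter_request_secret = oauth.request_token.secret
#   # the PIN entered in the form is then passed to oauth.get_access_token()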
# =============================================================================
class S3TwitterSearchModel(S3ChannelModel):
"""
Twitter Searches
- results can be fed to KeyGraph
"""
names = ["msg_twitter_search",
"msg_twitter_result",
]
def model(self):
T = current.T
db = current.db
configure = self.configure
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
# Twitter Search Query
#
tablename = "msg_twitter_search"
define_table(tablename,
Field("keywords", "text",
label = T("Keywords"),
),
Field("lang",
# Set in controller
#default = current.response.s3.language,
label = T("Language"),
),
Field("count", "integer",
default = 100,
label = T("# Results per query"),
),
Field("include_entities", "boolean",
default = False,
label = T("Include Entity Information?"),
represent = s3_yes_no_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Entity Information"),
T("This is required if analyzing with KeyGraph."))),
),
# @ToDo: Rename or even move to Component Table
Field("is_processed", "boolean",
default = False,
label = T("Processed with KeyGraph?"),
represent = s3_yes_no_represent,
),
Field("is_searched", "boolean",
default = False,
label = T("Searched?"),
represent = s3_yes_no_represent,
),
*s3_meta_fields())
configure(tablename,
list_fields = ["keywords",
"lang",
"count",
#"include_entities",
],
)
# Reusable Query ID
represent = S3Represent(lookup=tablename, fields=["keywords"])
search_id = S3ReusableField("search_id", "reference %s" % tablename,
label = T("Search Query"),
requires = IS_EMPTY_OR(
IS_ONE_OF_EMPTY(db, "msg_twitter_search.id")
),
represent = represent,
ondelete = "CASCADE")
set_method("msg", "twitter_search",
method="poll",
action=self.twitter_search_poll)
set_method("msg", "twitter_search",
method="keygraph",
action=self.twitter_keygraph)
set_method("msg", "twitter_result",
method="timeline",
action=self.twitter_timeline)
# ---------------------------------------------------------------------
# Twitter Search Results
#
# @ToDo: Store the places mentioned in the Tweet as linked Locations
#
tablename = "msg_twitter_result"
define_table(tablename,
# Instance
self.super_link("message_id", "msg_message"),
# Just present for Super Entity
#self.msg_channel_id(),
search_id(),
s3_datetime(default="now",
label = T("Tweeted on"),
),
Field("tweet_id",
label = T("Tweet ID")),
Field("lang",
label = T("Language")),
Field("from_address",
label = T("Tweeted by")),
Field("body",
label = T("Tweet")),
# @ToDo: Populate from Parser
#Field("category",
# writable = False,
# label = T("Category"),
# ),
#Field("priority", "integer",
# writable = False,
# label = T("Priority"),
# ),
self.gis_location_id(),
# Just present for Super Entity
#Field("inbound", "boolean",
# default = True,
# readable = False,
# writable = False,
# ),
*s3_meta_fields())
configure(tablename,
super_entity = "msg_message",
#orderby=~table.priority,
list_fields = [#"category",
#"priority",
"body",
"from_address",
"date",
"location_id",
],
)
# ---------------------------------------------------------------------
return dict()
# -----------------------------------------------------------------------------
@staticmethod
def twitter_search_poll(r, **attr):
"""
Perform a Search of Twitter
S3Method for interactive requests
"""
id = r.id
tablename = r.tablename
current.s3task.async("msg_twitter_search", args=[id])
current.session.confirmation = \
current.T("The search request has been submitted, so new messages should appear shortly - refresh to see them")
# Filter results to this Search
redirect(URL(f="twitter_result",
vars={"~.search_id": id}))
# -----------------------------------------------------------------------------
@staticmethod
def twitter_keygraph(r, **attr):
"""
Process Search Results with KeyGraph
S3Method for interactive requests
"""
tablename = r.tablename
current.s3task.async("msg_process_keygraph", args=[r.id])
current.session.confirmation = \
current.T("The search results are now being processed with KeyGraph")
# @ToDo: Link to KeyGraph results
redirect(URL(f="twitter_result"))
# =============================================================================
@staticmethod
def twitter_timeline(r, **attr):
"""
Display the Tweets on a Simile Timeline
http://www.simile-widgets.org/wiki/Reference_Documentation_for_Timeline
"""
if r.representation == "html" and r.name == "twitter_result":
response = current.response
s3 = response.s3
appname = r.application
# Add core Simile Code
s3.scripts.append("/%s/static/scripts/simile/timeline/timeline-api.js" % appname)
# Add our control script
if s3.debug:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.js" % appname)
else:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.min.js" % appname)
# Add our data
# @ToDo: Make this the initial data & then collect extra via REST with a stylesheet
# add in JS using S3.timeline.eventSource.addMany(events) where events is a []
if r.record:
# Single record
rows = [r.record]
else:
# Multiple records
# @ToDo: Load all records & sort to closest in time
# http://stackoverflow.com/questions/7327689/how-to-generate-a-sequence-of-future-datetimes-in-python-and-determine-nearest-d
rows = r.resource.select(["date", "body"], limit=2000, as_rows=True)
data = {"dateTimeFormat": "iso8601",
}
now = r.utcnow
tl_start = tl_end = now
events = []
import re
for row in rows:
# Dates
start = row.date or ""
if start:
if start < tl_start:
tl_start = start
if start > tl_end:
tl_end = start
start = start.isoformat()
title = (re.sub(r"(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)|RT", "", row.body))
if len(title) > 30:
title = title[:30]
events.append({"start": start,
"title": title,
"description": row.body,
})
data["events"] = events
data = json.dumps(data, separators=SEPARATORS)
code = "".join((
'''S3.timeline.data=''', data, '''
S3.timeline.tl_start="''', tl_start.isoformat(), '''"
S3.timeline.tl_end="''', tl_end.isoformat(), '''"
S3.timeline.now="''', now.isoformat(), '''"
'''))
# Control our code in static/scripts/S3/s3.timeline.js
s3.js_global.append(code)
# Create the DIV
item = DIV(_id="s3timeline", _class="s3-timeline")
output = dict(item=item)
# Maintain RHeader for consistency
if attr.get("rheader"):
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
output["title"] = current.T("Twitter Timeline")
response.view = "timeline.html"
return output
else:
r.error(405, current.ERROR.BAD_METHOD)
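# Illustrative sketch (not part of the original module): the payload handed to
# static/scripts/S3/s3.timeline.js above has this shape (values hypothetical):
#
#   {"dateTimeFormat": "iso8601",
#    "events": [{"start": "2013-06-01T12:00:00",
#                "title": "Flooding reported in ...",
#                "description": "Flooding reported in ... #eden"}]}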
# =============================================================================
class S3XFormsModel(S3Model):
"""
XForms are used by the ODK Collect mobile client
http://eden.sahanafoundation.org/wiki/BluePrint/Mobile#Android
"""
names = ["msg_xforms_store"]
def model(self):
#T = current.T
# ---------------------------------------------------------------------
# SMS store for persistence and scratch pad for combining incoming xform chunks
tablename = "msg_xforms_store"
self.define_table(tablename,
Field("sender", length=20),
Field("fileno", "integer"),
Field("totalno", "integer"),
Field("partno", "integer"),
Field("message", length=160)
)
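# Illustrative sketch (not part of the original module): reassembling a chunked
# submission would amount to selecting all parts for one (sender, fileno) pair
# and concatenating them in partno order, e.g.:
#
#   table = current.s3db.msg_xforms_store
#   query = (table.sender == sender) & (table.fileno == fileno)
#   rows = current.db(query).select(orderby=table.partno)
#   xform = "".join(row.message for row in rows)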
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3BaseStationModel(S3Model):
"""
Base Stations (Cell Towers) are a type of Site
@ToDo: Calculate Coverage from Antenna Height, Radio Power and Terrain
- see RadioMobile
"""
names = ["msg_basestation"]
def model(self):
T = current.T
define_table = self.define_table
# ---------------------------------------------------------------------
# Base Stations (Cell Towers)
#
tablename = "msg_basestation"
define_table(tablename,
self.super_link("site_id", "org_site"),
Field("name", notnull=True,
length=64, # Mayon Compatibility
label=T("Name")),
Field("code", length=10, # Mayon compatibility
label=T("Code"),
# Deployments that don't want site codes can hide them
#readable=False,
#writable=False,
# @ToDo: Deployment Setting to add validator to make these unique
),
self.org_organisation_id(
label = T("Operator"),
#widget=S3OrganisationAutocompleteWidget(default_from_profile=True),
requires = self.org_organisation_requires(required=True,
updateable=True),
),
self.gis_location_id(),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_BASE = T("Create Base Station")
current.response.s3.crud_strings[tablename] = Storage(
label_create=T("Create Base Station"),
title_display=T("Base Station Details"),
title_list=T("Base Stations"),
title_update=T("Edit Base Station"),
title_upload=T("Import Base Stations"),
title_map=T("Map of Base Stations"),
label_list_button=T("List Base Stations"),
label_delete_button=T("Delete Base Station"),
msg_record_created=T("Base Station added"),
msg_record_modified=T("Base Station updated"),
msg_record_deleted=T("Base Station deleted"),
msg_list_empty=T("No Base Stations currently registered"))
self.configure(tablename,
deduplicate = self.msg_basestation_duplicate,
super_entity = "org_site",
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict()
# ---------------------------------------------------------------------
@staticmethod
def msg_basestation_duplicate(item):
"""
Import item deduplication, match by name
(Adding location_id doesn't seem to be a good idea)
@param item: the S3ImportItem instance
"""
if item.tablename == "msg_basestation":
table = item.table
name = "name" in item.data and item.data.name
query = (table.name.lower() == name.lower())
#location_id = None
# if "location_id" in item.data:
# location_id = item.data.location_id
## This doesn't find deleted records:
# query = query & (table.location_id == location_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
# if duplicate is None and location_id:
## Search for deleted basestations with this name
# query = (table.name.lower() == name.lower()) & \
# (table.deleted == True)
# row = db(query).select(table.id, table.deleted_fk,
# limitby=(0, 1)).first()
# if row:
# fkeys = json.loads(row.deleted_fk)
# if "location_id" in fkeys and \
# str(fkeys["location_id"]) == str(location_id):
# duplicate = row
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
def msg_search_subscription_notifications(frequency):
"""
Send Notifications for all Subscriptions
- run by Scheduler (models/tasks.py)
@ToDo: Deprecate - replaced by Notifications
"""
s3db = current.s3db
table = s3db.pr_saved_search
if frequency not in dict(table.notification_frequency.requires.options()):
return
db = current.db
searches = db(table.notification_frequency == frequency).select()
if not searches:
return
import urlparse
from urllib import urlencode
from uuid import uuid4
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
loads = json.loads
from gluon.tools import fetch
msg = current.msg
settings = current.deployment_settings
public_url = settings.get_base_public_url()
system_name_short = settings.get_system_name_short()
def send(search, message):
if not message:
return
# Send the email
msg.send_by_pe_id(search.pe_id,
subject="%s Search Notification %s" % \
(system_name_short, search.name),
message=message)
for search in searches:
# Fetch the latest records from the search
# search.url has no host
search_url = "%s%s" % (public_url, search.url)
# Create a temporary token for this search
# that will be used when impersonating users
auth_token = uuid4()
search.update_record(auth_token=auth_token)
# Commit so that when we request via HTTP we'll see the change
db.commit()
# Parsed URL, break up the URL into its components
purl = list(urlparse.urlparse(search_url))
if search.notification_batch:
# Send all records in a single notification
# query string parameters to be added to the search URL
page_qs_parms = {
"search_subscription": auth_token,
"%s.modified_on__ge" % (search.resource_name): search.last_checked,
"format": "email",
}
# Turn the parameter list into a URL query string
page_qs = urlencode(page_qs_parms)
# Put the URL back together
page_url = urlparse.urlunparse(
[
purl[0], # scheme
purl[1], # netloc
purl[2], # path
purl[3], # params
"&".join([purl[4], page_qs]), # query
purl[5], # fragment
]
)
message = fetch(page_url)
# Send the email
send(search, message)
else:
# Not batch
# query string parameters to be added to the search URL
page_qs_parms = {
"search_subscription": auth_token,
"%s.modified_on__ge" % (search.resource_name): search.last_checked,
"format": "json",
}
# Turn the parameter list into a URL query string
page_qs = urlencode(page_qs_parms)
# Put the URL back together
page_url = urlparse.urlunparse(
[
purl[0], # scheme
purl[1], # netloc
purl[2], # path
purl[3], # params
"&".join([purl[4], page_qs]), # query
purl[5], # fragment
]
)
# Fetch the record list as json
json_string = fetch(page_url)
if json_string:
records = loads(json_string)
for record in records:
email_qs = urlencode(
{
"search_subscription": auth_token,
"format": "email",
"%s.id__eq" % search.resource_name: record["id"],
}
)
email_url = urlparse.urlunparse(
[
purl[0], # scheme
purl[1], # netloc
purl[2], # path
purl[3], # params
email_qs, # query
purl[5], # fragment
]
)
message = fetch(email_url)
# Send the email
send(search, message)
# Update the saved searches to indicate they've just been checked
# & revoke the temporary token
query = (table.notification_frequency == frequency) & \
(table.deleted != True)
db(query).update(last_checked=datetime.datetime.utcnow(),
auth_token=None,
)
# Explicitly commit
db.commit()
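# Illustrative sketch (not part of the original function): the URL rewriting
# used above, i.e. appending extra query-string parameters to an existing URL
# with the Python 2 stdlib, in isolation:
#
#   import urlparse
#   from urllib import urlencode
#   purl = list(urlparse.urlparse("http://host/eden/search?x=1"))
#   purl[4] = "&".join([purl[4], urlencode({"format": "email"})])
#   page_url = urlparse.urlunparse(purl)  # -> http://host/eden/search?x=1&format=email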
# END =========================================================================
| {
"content_hash": "7c8b9bbeba6531bf28ac67a7e5ad1b83",
"timestamp": "",
"source": "github",
"line_count": 2273,
"max_line_length": 141,
"avg_line_length": 38.54025516937968,
"alnum_prop": 0.3987808497522888,
"repo_name": "code-for-india/sahana_shelter_worldbank",
"id": "b33d713d517b41717e924886151f40d6364995d3",
"size": "87627",
"binary": false,
"copies": "1",
"ref": "refs/heads/hackathon",
"path": "modules/s3db/msg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1214342"
},
{
"name": "JavaScript",
"bytes": "16755282"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "27298931"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "2245739"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
import pdb
from .models import *
from authentication.serializers import AccountSerializer
class ImageSerializer(serializers.ModelSerializer):
image = serializers.ImageField(max_length=128, use_url=True,)
class Meta:
model = Image
fields = ('title', 'image', 'brew', 'brewery')
def create(self, validated_data):
return Image.objects.create(**validated_data)
class BrewDateSerializer(serializers.ModelSerializer):
class Meta:
model = BrewDate
fields = ('brew', 'date', 'activity')
def create(self, validated_data):
return BrewDate.objects.create(**validated_data)
class BrewerySerializer(serializers.ModelSerializer):
class Meta:
model = Brewery
fields = ('id', 'name', 'description')
read_only_fields = ('id',)
def create(self, validated_data):
return Brewery.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.name = validated_data.get('name', instance.name)
instance.description = validated_data.get('description', instance.description)
instance.save()
return instance
def get_validation_exclusions(self, *args, **kwargs):
exclusions = super(BrewerySerializer, self).get_validation_exclusions()
return exclusions + ['brewery','user']
class BrewerSerializer(serializers.ModelSerializer):
brewery = BrewerySerializer(read_only=True, required=False)
user = AccountSerializer(read_only=True, required=False)
class Meta:
model = Brewer
fields = ('id', 'brewery', 'user')
read_only_fields = ('id',)
def create(self, validated_data):
return Brewer.objects.create(**validated_data)
def get_validation_exclusions(self, *args, **kwargs):
exclusions = super(BrewerSerializer, self).get_validation_exclusions()
return exclusions + ['brewery','user']
class BrewSerializer(serializers.ModelSerializer):
brewer = BrewerSerializer(read_only=True, required=False)
images = ImageSerializer(read_only=True, many=True, required=False)
class Meta:
model = Brew
fields = ('id', 'name', 'description', 'style', 'type', 'abv', 'brewer', 'images')
read_only_fields = ('id',)
def create(self, validated_data):
return Brew.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.name = validated_data.get('name', instance.name)
instance.description = validated_data.get('description', instance.description)
instance.save()
return instance
def get_validation_exclusions(self, *args, **kwargs):
exclusions = super(BrewSerializer, self).get_validation_exclusions()
return exclusions + ['brewer']
class KeywordSerializer(serializers.ModelSerializer):
class Meta:
model = Keyword
fields = ('id', 'tasting', 'category', 'key')
read_only_fields = ('id',)
def create(self, validated_data):
return Keyword.objects.create(**validated_data)
class TastingSerializer(serializers.ModelSerializer):
brew = BrewSerializer(required=False, read_only=True)
user = AccountSerializer(required=False, read_only=True)
keywords = KeywordSerializer(many=True, required=False, read_only=True)
class Meta:
model = Tasting
fields = ('id', 'brew', 'user', 'appearance', 'smell', 'taste', 'mouthfeel', 'overall', 'keywords')
read_only_fields = ('id',)
extra_kwargs = {'brew_id': {'write_only': True}}
def create(self, validated_data):
pdb.set_trace()
brew_id = validated_data.pop('brew_id')
brew = Brew.objects.get(id=brew_id)
return Tasting.objects.create(brew=brew, **validated_data)
def update(self, instance, validated_data):
instance.appearance = validated_data.get('appearance', instance.appearance)
instance.smell = validated_data.get('smell', instance.smell)
instance.taste = validated_data.get('taste', instance.taste)
instance.mouthfeel = validated_data.get('mouthfeel', instance.mouthfeel)
instance.overall = validated_data.get('overall', instance.overall)
instance.keywords = validated_data.get('keywords', instance.keywords)
instance.save()
return instance
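# Illustrative usage sketch (not part of the original file): how these
# ModelSerializers are typically driven from a DRF view; the field values are
# hypothetical.
#
#   serializer = BrewerySerializer(data={"name": "Test Brewery",
#                                        "description": "A demo entry"})
#   if serializer.is_valid():
#       brewery = serializer.save()   # dispatches to create() above
#
#   serializer = BrewerySerializer(brewery, data={"name": "Renamed"},
#                                  partial=True)
#   if serializer.is_valid():
#       brewery = serializer.save()   # dispatches to update() above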
| {
"content_hash": "d8a10cdcc2c2431daf5432dbade55dbc",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 107,
"avg_line_length": 37.47107438016529,
"alnum_prop": 0.6444640494044993,
"repo_name": "dmham86/CrowdBrew",
"id": "8773cabffe9d9ff434e2a3ae134e285846790166",
"size": "4534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crowd_brew/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4638"
},
{
"name": "HTML",
"bytes": "33262"
},
{
"name": "JavaScript",
"bytes": "76522"
},
{
"name": "Python",
"bytes": "49368"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
} |
import logging
import json
from nose.tools import assert_true, assert_equal, assert_false
from django.utils.encoding import smart_str
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
import hadoop
from desktop.lib.django_test_util import make_logged_in_client, assert_equal_mod_whitespace
from desktop.lib.test_utils import add_permission, grant_access
from useradmin.models import HuePermission, GroupPermission, group_has_permission
from beeswax.conf import BROWSE_PARTITIONED_TABLE_LIMIT
from beeswax.views import collapse_whitespace
from beeswax.test_base import make_query, wait_for_query_to_finish, verify_history, get_query_server_config, fetch_query_result_data
from beeswax.models import QueryHistory
from beeswax.server import dbms
from beeswax.test_base import BeeswaxSampleProvider
LOG = logging.getLogger(__name__)
def _make_query(client, query, submission_type="Execute",
udfs=None, settings=None, resources=[],
wait=False, name=None, desc=None, local=True,
is_parameterized=True, max=30.0, database='default', email_notify=False, **kwargs):
"""Wrapper around the real make_query"""
res = make_query(client, query, submission_type,
udfs, settings, resources,
wait, name, desc, local, is_parameterized, max, database, email_notify, **kwargs)
# Should be in the history if it's submitted.
if submission_type == 'Execute':
fragment = collapse_whitespace(smart_str(query[:20]))
verify_history(client, fragment=fragment)
return res
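# Illustrative usage sketch (not part of the original file): tests drive the
# wrapper the same way the real tests below do, e.g. with a hypothetical query:
#
#   resp = _make_query(self.client, "SELECT foo FROM test", database=self.db_name)
#   resp = wait_for_query_to_finish(self.client, resp, max=30.0)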
class TestMetastoreWithHadoop(BeeswaxSampleProvider):
requires_hadoop = True
def setUp(self):
user = User.objects.get(username='test')
self.db = dbms.get(user, get_query_server_config())
add_permission("test", "test", "write", "metastore")
def test_basic_flow(self):
# Default database should exist
response = self.client.get("/metastore/databases")
assert_true(self.db_name in response.context["databases"])
# Table should have been created
response = self.client.get("/metastore/tables/")
assert_equal(200, response.status_code)
# Switch databases
response = self.client.get("/metastore/tables/%s" % self.db_name)
assert_true("test" in response.context["tables"])
# Should default to "default" database
response = self.client.get("/metastore/tables/not_there")
assert_equal(200, response.status_code)
# And have detail
response = self.client.get("/metastore/table/%s/test" % self.db_name)
assert_true("foo" in response.content)
assert_true("SerDe Library" in response.content, response.content)
# Remember the number of history items. Use a generic fragment 'test' to pass verification.
history_cnt = verify_history(self.client, fragment='test')
# Show table data.
response = self.client.get("/metastore/table/%s/test/read" % self.db_name, follow=True)
response = self.client.get(reverse("beeswax:api_watch_query_refresh_json", kwargs={'id': response.context['query'].id}), follow=True)
response = wait_for_query_to_finish(self.client, response, max=30.0)
# Note that it may not return all rows at once. But we expect at least 10.
results = fetch_query_result_data(self.client, response)
assert_true(len(results['results']) > 0)
# This should NOT go into the query history.
assert_equal(verify_history(self.client, fragment='test'), history_cnt, 'Implicit queries should not be saved in the history')
def test_describe_view(self):
resp = self.client.get('/metastore/table/%s/myview' % self.db_name)
assert_equal(None, resp.context['sample'])
assert_true(resp.context['table'].is_view)
assert_true("View" in resp.content)
assert_true("Drop View" in resp.content)
# Breadcrumbs
assert_true(self.db_name in resp.content)
assert_true("myview" in resp.content)
def test_describe_partitions(self):
response = self.client.get("/metastore/table/%s/test_partitions" % self.db_name)
assert_true("Show Partitions (2)" in response.content, response.content)
response = self.client.get("/metastore/table/%s/test_partitions/partitions" % self.db_name, follow=True)
assert_true("baz_one" in response.content)
assert_true("boom_two" in response.content)
assert_true("baz_foo" in response.content)
assert_true("boom_bar" in response.content)
# Breadcrumbs
assert_true(self.db_name in response.content)
assert_true("test_partitions" in response.content)
assert_true("partitions" in response.content)
# Not partitioned
response = self.client.get("/metastore/table/%s/test/partitions" % self.db_name, follow=True)
assert_true("is not partitioned." in response.content)
def test_describe_partitioned_table_with_limit(self):
# Limit to 90
finish = BROWSE_PARTITIONED_TABLE_LIMIT.set_for_testing("90")
try:
response = self.client.get("/metastore/table/%s/test_partitions" % self.db_name)
assert_true("0x%x" % 89 in response.content, response.content)
assert_false("0x%x" % 90 in response.content, response.content)
finally:
finish()
def test_read_partitions(self):
response = self.client.get("/metastore/table/%s/test_partitions/partitions/0/read" % self.db_name, follow=True)
response = self.client.get(reverse("beeswax:api_watch_query_refresh_json", kwargs={'id': response.context['query'].id}), follow=True)
response = wait_for_query_to_finish(self.client, response, max=30.0)
results = fetch_query_result_data(self.client, response)
assert_true(len(results['results']) > 0, results)
def test_browse_partition(self):
response = self.client.get("/metastore/table/%s/test_partitions/partitions/1/browse" % self.db_name, follow=True)
filebrowser_path = reverse("filebrowser.views.view", kwargs={'path': '%s/baz_foo/boom_bar' % self.cluster.fs_prefix})
assert_equal(response.request['PATH_INFO'], filebrowser_path)
def test_drop_multi_tables(self):
hql = """
CREATE TABLE test_drop_1 (a int);
CREATE TABLE test_drop_2 (a int);
CREATE TABLE test_drop_3 (a int);
"""
resp = _make_query(self.client, hql, database=self.db_name)
resp = wait_for_query_to_finish(self.client, resp, max=30.0)
# Drop them
resp = self.client.get('/metastore/tables/drop/%s' % self.db_name, follow=True)
assert_true('want to delete' in resp.content, resp.content)
resp = self.client.post('/metastore/tables/drop/%s' % self.db_name, {u'table_selection': [u'test_drop_1', u'test_drop_2', u'test_drop_3']})
assert_equal(resp.status_code, 302)
def test_drop_multi_databases(self):
db1 = '%s_test_drop_1' % self.db_name
db2 = '%s_test_drop_2' % self.db_name
db3 = '%s_test_drop_3' % self.db_name
try:
hql = """
CREATE DATABASE %(db1)s;
CREATE DATABASE %(db2)s;
CREATE DATABASE %(db3)s;
""" % {'db1': db1, 'db2': db2, 'db3': db3}
resp = _make_query(self.client, hql)
resp = wait_for_query_to_finish(self.client, resp, max=30.0)
# Drop them
resp = self.client.get('/metastore/databases/drop', follow=True)
assert_true('want to delete' in resp.content, resp.content)
resp = self.client.post('/metastore/databases/drop', {u'database_selection': [db1, db2, db3]})
assert_equal(resp.status_code, 302)
finally:
make_query(self.client, 'DROP DATABASE IF EXISTS %(db)s' % {'db': db1}, wait=True)
make_query(self.client, 'DROP DATABASE IF EXISTS %(db)s' % {'db': db2}, wait=True)
make_query(self.client, 'DROP DATABASE IF EXISTS %(db)s' % {'db': db3}, wait=True)
def test_load_data(self):
"""
Test load data queries.
These require Hadoop, because they ask the metastore
about whether a table is partitioned.
"""
# Check that view works
resp = self.client.get("/metastore/table/%s/test/load" % self.db_name, follow=True)
assert_true('Path' in resp.content)
data_path = '%(prefix)s/tmp/foo' % {'prefix': self.cluster.fs_prefix}
# Try the submission
self.client.post("/metastore/table/%s/test/load" % self.db_name, {'path': data_path, 'overwrite': True}, follow=True)
query = QueryHistory.objects.latest('id')
assert_equal_mod_whitespace("LOAD DATA INPATH '%(data_path)s' OVERWRITE INTO TABLE `%(db)s`.`test`" % {'data_path': data_path, 'db': self.db_name}, query.query)
resp = self.client.post("/metastore/table/%s/test/load" % self.db_name, {'path': data_path, 'overwrite': False}, follow=True)
query = QueryHistory.objects.latest('id')
assert_equal_mod_whitespace("LOAD DATA INPATH '%(data_path)s' INTO TABLE `%(db)s`.`test`" % {'data_path': data_path, 'db': self.db_name}, query.query)
# Try it with partitions
resp = self.client.post("/metastore/table/%s/test_partitions/load" % self.db_name, {'path': data_path, 'partition_0': "alpha", 'partition_1': "beta"}, follow=True)
query = QueryHistory.objects.latest('id')
assert_equal_mod_whitespace(query.query, "LOAD DATA INPATH '%(data_path)s' INTO TABLE `%(db)s`.`test_partitions` PARTITION (baz='alpha', boom='beta')" % {'data_path': data_path, 'db': self.db_name})
def test_has_write_access_frontend(self):
client = make_logged_in_client(username='write_access_frontend', groupname='write_access_frontend', is_superuser=False)
grant_access("write_access_frontend", "write_access_frontend", "metastore")
user = User.objects.get(username='write_access_frontend')
def check(client, assertz):
response = client.get("/metastore/databases")
assertz("Drop</button>" in response.content, response.content)
assertz("Create a new database" in response.content, response.content)
response = client.get("/metastore/tables/")
assertz("Drop</button>" in response.content, response.content)
assertz("Create a new table" in response.content, response.content)
check(client, assert_false)
# Add access
group, created = Group.objects.get_or_create(name='write_access_frontend')
perm, created = HuePermission.objects.get_or_create(app='metastore', action='write')
GroupPermission.objects.get_or_create(group=group, hue_permission=perm)
check(client, assert_true)
def test_has_write_access_backend(self):
client = make_logged_in_client(username='write_access_backend', groupname='write_access_backend', is_superuser=False)
grant_access("write_access_backend", "write_access_backend", "metastore")
grant_access("write_access_backend", "write_access_backend", "beeswax")
user = User.objects.get(username='write_access_backend')
    resp = _make_query(client, 'CREATE TABLE test_perm_1 (a int);', database=self.db_name) # Only fails if Sentry were in use and it did not grant SELECT to the user
resp = wait_for_query_to_finish(client, resp, max=30.0)
def check(client, http_codes):
resp = client.get('/metastore/tables/drop/%s' % self.db_name)
assert_true(resp.status_code in http_codes, resp.content)
resp = client.post('/metastore/tables/drop/%s' % self.db_name, {u'table_selection': [u'test_perm_1']})
assert_true(resp.status_code in http_codes, resp.content)
check(client, [301]) # Denied
# Add access
group, created = Group.objects.get_or_create(name='write_access_backend')
perm, created = HuePermission.objects.get_or_create(app='metastore', action='write')
GroupPermission.objects.get_or_create(group=group, hue_permission=perm)
check(client, [200, 302]) # Ok
| {
"content_hash": "e002820d2775a90e19a76c9bd54a4ca9",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 202,
"avg_line_length": 45.76679841897233,
"alnum_prop": 0.6911650401589083,
"repo_name": "javachengwc/hue",
"id": "0fb586efe559940fec7cf46c157ff62407e6552d",
"size": "12395",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/metastore/src/metastore/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13685"
},
{
"name": "C",
"bytes": "2391760"
},
{
"name": "C++",
"bytes": "177090"
},
{
"name": "CSS",
"bytes": "423513"
},
{
"name": "Emacs Lisp",
"bytes": "12145"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Groff",
"bytes": "14877"
},
{
"name": "HTML",
"bytes": "21113316"
},
{
"name": "Java",
"bytes": "133906"
},
{
"name": "JavaScript",
"bytes": "2805608"
},
{
"name": "Makefile",
"bytes": "93726"
},
{
"name": "Mako",
"bytes": "2163111"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "32856304"
},
{
"name": "Scala",
"bytes": "159673"
},
{
"name": "Shell",
"bytes": "51345"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "Thrift",
"bytes": "101737"
},
{
"name": "XSLT",
"bytes": "357625"
}
],
"symlink_target": ""
} |
import logging
from telemetry.util import perf_tests_helper
from telemetry.util import statistics
from telemetry.value import improvement_direction
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
from telemetry.web_perf.metrics import rendering_stats
from telemetry.web_perf.metrics import timeline_based_metric
NOT_ENOUGH_FRAMES_MESSAGE = (
'Not enough frames for smoothness metrics (at least two are required).\n'
'Issues that have caused this in the past:\n'
'- Browser bugs that prevents the page from redrawing\n'
'- Bugs in the synthetic gesture code\n'
'- Page and benchmark out of sync (e.g. clicked element was renamed)\n'
'- Pages that render extremely slow\n'
'- Pages that can\'t be scrolled')
class SmoothnessMetric(timeline_based_metric.TimelineBasedMetric):
"""Computes metrics that measure smoothness of animations over given ranges.
Animations are typically considered smooth if the frame rates are close to
60 frames per second (fps) and uniformly distributed over the sequence. To
determine if a timeline range contains a smooth animation, we update the
results object with several representative metrics:
frame_times: A list of raw frame times
mean_frame_time: The arithmetic mean of frame times
percentage_smooth: Percentage of frames that were hitting 60 FPS.
frame_time_discrepancy: The absolute discrepancy of frame timestamps
mean_pixels_approximated: The mean percentage of pixels approximated
queueing_durations: The queueing delay between compositor & main threads
Note that if any of the interaction records provided to AddResults have less
than 2 frames, we will return telemetry values with None values for each of
the smoothness metrics. Similarly, older browsers without support for
tracking the BeginMainFrame events will report a ListOfScalarValues with a
None value for the queueing duration metric.
"""
def __init__(self):
super(SmoothnessMetric, self).__init__()
def AddResults(self, model, renderer_thread, interaction_records, results):
self.VerifyNonOverlappedRecords(interaction_records)
renderer_process = renderer_thread.parent
stats = rendering_stats.RenderingStats(
renderer_process, model.browser_process, model.surface_flinger_process,
[r.GetBounds() for r in interaction_records])
has_surface_flinger_stats = model.surface_flinger_process is not None
self._PopulateResultsFromStats(results, stats, has_surface_flinger_stats)
def _PopulateResultsFromStats(self, results, stats,
has_surface_flinger_stats):
page = results.current_page
values = [
self._ComputeQueueingDuration(page, stats),
self._ComputeFrameTimeDiscrepancy(page, stats),
self._ComputeMeanPixelsApproximated(page, stats),
self._ComputeMeanPixelsCheckerboarded(page, stats)
]
values += self._ComputeLatencyMetric(page, stats, 'input_event_latency',
stats.input_event_latency)
values += self._ComputeLatencyMetric(page, stats, 'scroll_update_latency',
stats.scroll_update_latency)
values.append(self._ComputeFirstGestureScrollUpdateLatencies(page, stats))
values += self._ComputeFrameTimeMetric(page, stats)
if has_surface_flinger_stats:
values += self._ComputeSurfaceFlingerMetric(page, stats)
for v in values:
results.AddValue(v)
def _HasEnoughFrames(self, list_of_frame_timestamp_lists):
"""Whether we have collected at least two frames in every timestamp list."""
return all(len(s) >= 2 for s in list_of_frame_timestamp_lists)
@staticmethod
def _GetNormalizedDeltas(data, refresh_period, min_normalized_delta=None):
deltas = [t2 - t1 for t1, t2 in zip(data, data[1:])]
    if min_normalized_delta is not None:
deltas = [d for d in deltas
if d / refresh_period >= min_normalized_delta]
return (deltas, [delta / refresh_period for delta in deltas])
@staticmethod
def _JoinTimestampRanges(frame_timestamps):
"""Joins ranges of timestamps, adjusting timestamps to remove deltas
between the start of a range and the end of the prior range.
"""
timestamps = []
for timestamp_range in frame_timestamps:
if len(timestamps) == 0:
timestamps.extend(timestamp_range)
else:
for i in range(1, len(timestamp_range)):
timestamps.append(timestamps[-1] +
timestamp_range[i] - timestamp_range[i-1])
return timestamps
def _ComputeSurfaceFlingerMetric(self, page, stats):
jank_count = None
avg_surface_fps = None
max_frame_delay = None
frame_lengths = None
none_value_reason = None
if self._HasEnoughFrames(stats.frame_timestamps):
timestamps = self._JoinTimestampRanges(stats.frame_timestamps)
frame_count = len(timestamps)
milliseconds = timestamps[-1] - timestamps[0]
min_normalized_frame_length = 0.5
frame_lengths, normalized_frame_lengths = \
self._GetNormalizedDeltas(timestamps, stats.refresh_period,
min_normalized_frame_length)
if len(frame_lengths) < frame_count - 1:
logging.warning('Skipping frame lengths that are too short.')
frame_count = len(frame_lengths) + 1
if len(frame_lengths) == 0:
        raise Exception('No valid frame lengths found.')
_, normalized_changes = \
self._GetNormalizedDeltas(frame_lengths, stats.refresh_period)
jankiness = [max(0, round(change)) for change in normalized_changes]
pause_threshold = 20
jank_count = sum(1 for change in jankiness
if change > 0 and change < pause_threshold)
avg_surface_fps = int(round((frame_count - 1) * 1000.0 / milliseconds))
max_frame_delay = round(max(normalized_frame_lengths))
frame_lengths = normalized_frame_lengths
else:
none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
return (
scalar.ScalarValue(
page, 'avg_surface_fps', 'fps', avg_surface_fps,
description='Average frames per second as measured by the '
'platform\'s SurfaceFlinger.',
none_value_reason=none_value_reason,
improvement_direction=improvement_direction.UP),
scalar.ScalarValue(
page, 'jank_count', 'janks', jank_count,
description='Number of changes in frame rate as measured by the '
'platform\'s SurfaceFlinger.',
none_value_reason=none_value_reason,
improvement_direction=improvement_direction.DOWN),
scalar.ScalarValue(
page, 'max_frame_delay', 'vsyncs', max_frame_delay,
description='Largest frame time as measured by the platform\'s '
'SurfaceFlinger.',
none_value_reason=none_value_reason,
improvement_direction=improvement_direction.DOWN),
list_of_scalar_values.ListOfScalarValues(
page, 'frame_lengths', 'vsyncs', frame_lengths,
description='Frame time in vsyncs as measured by the platform\'s '
'SurfaceFlinger.',
none_value_reason=none_value_reason,
improvement_direction=improvement_direction.DOWN)
)
def _ComputeLatencyMetric(self, page, stats, name, list_of_latency_lists):
"""Returns Values for the mean and discrepancy for given latency stats."""
mean_latency = None
latency_discrepancy = None
none_value_reason = None
if self._HasEnoughFrames(stats.frame_timestamps):
latency_list = perf_tests_helper.FlattenList(list_of_latency_lists)
if len(latency_list) == 0:
return ()
mean_latency = round(statistics.ArithmeticMean(latency_list), 3)
latency_discrepancy = (
round(statistics.DurationsDiscrepancy(latency_list), 4))
else:
none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
return (
scalar.ScalarValue(
page, 'mean_%s' % name, 'ms', mean_latency,
description='Arithmetic mean of the raw %s values' % name,
none_value_reason=none_value_reason,
improvement_direction=improvement_direction.DOWN),
scalar.ScalarValue(
page, '%s_discrepancy' % name, 'ms', latency_discrepancy,
description='Discrepancy of the raw %s values' % name,
none_value_reason=none_value_reason,
improvement_direction=improvement_direction.DOWN)
)
def _ComputeFirstGestureScrollUpdateLatencies(self, page, stats):
"""Returns a ListOfScalarValuesValues of gesture scroll update latencies.
Returns a Value for the first gesture scroll update latency for each
interaction record in |stats|.
"""
none_value_reason = None
first_gesture_scroll_update_latencies = [round(latencies[0], 4)
for latencies in stats.gesture_scroll_update_latency
if len(latencies)]
if (not self._HasEnoughFrames(stats.frame_timestamps) or
not first_gesture_scroll_update_latencies):
first_gesture_scroll_update_latencies = None
none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
return list_of_scalar_values.ListOfScalarValues(
page, 'first_gesture_scroll_update_latency', 'ms',
first_gesture_scroll_update_latencies,
description='First gesture scroll update latency measures the time it '
'takes to process the very first gesture scroll update '
'input event. The first scroll gesture can often get '
'delayed by work related to page loading.',
none_value_reason=none_value_reason,
improvement_direction=improvement_direction.DOWN)
def _ComputeQueueingDuration(self, page, stats):
"""Returns a Value for the frame queueing durations."""
queueing_durations = None
none_value_reason = None
if 'frame_queueing_durations' in stats.errors:
none_value_reason = stats.errors['frame_queueing_durations']
elif self._HasEnoughFrames(stats.frame_timestamps):
queueing_durations = perf_tests_helper.FlattenList(
stats.frame_queueing_durations)
if len(queueing_durations) == 0:
queueing_durations = None
none_value_reason = 'No frame queueing durations recorded.'
else:
none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
return list_of_scalar_values.ListOfScalarValues(
page, 'queueing_durations', 'ms', queueing_durations,
description='The frame queueing duration quantifies how out of sync '
'the compositor and renderer threads are. It is the amount '
'of wall time that elapses between a '
'ScheduledActionSendBeginMainFrame event in the compositor '
'thread and the corresponding BeginMainFrame event in the '
'main thread.',
none_value_reason=none_value_reason,
improvement_direction=improvement_direction.DOWN)
def _ComputeFrameTimeMetric(self, page, stats):
"""Returns Values for the frame time metrics.
This includes the raw and mean frame times, as well as the percentage of
frames that were hitting 60 fps.
"""
frame_times = None
mean_frame_time = None
percentage_smooth = None
none_value_reason = None
if self._HasEnoughFrames(stats.frame_timestamps):
frame_times = perf_tests_helper.FlattenList(stats.frame_times)
mean_frame_time = round(statistics.ArithmeticMean(frame_times), 3)
# We use 17ms as a somewhat looser threshold, instead of 1000.0/60.0.
smooth_threshold = 17.0
smooth_count = sum(1 for t in frame_times if t < smooth_threshold)
percentage_smooth = float(smooth_count) / len(frame_times) * 100.0
else:
none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
return (
list_of_scalar_values.ListOfScalarValues(
page, 'frame_times', 'ms', frame_times,
description='List of raw frame times, helpful to understand the '
'other metrics.',
none_value_reason=none_value_reason,
improvement_direction=improvement_direction.DOWN),
scalar.ScalarValue(
page, 'mean_frame_time', 'ms', mean_frame_time,
description='Arithmetic mean of frame times.',
none_value_reason=none_value_reason,
improvement_direction=improvement_direction.DOWN),
scalar.ScalarValue(
page, 'percentage_smooth', 'score', percentage_smooth,
description='Percentage of frames that were hitting 60 fps.',
none_value_reason=none_value_reason,
improvement_direction=improvement_direction.DOWN)
)
def _ComputeFrameTimeDiscrepancy(self, page, stats):
"""Returns a Value for the absolute discrepancy of frame time stamps."""
frame_discrepancy = None
none_value_reason = None
if self._HasEnoughFrames(stats.frame_timestamps):
frame_discrepancy = round(statistics.TimestampsDiscrepancy(
stats.frame_timestamps), 4)
else:
none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
return scalar.ScalarValue(
page, 'frame_time_discrepancy', 'ms', frame_discrepancy,
description='Absolute discrepancy of frame time stamps, where '
'discrepancy is a measure of irregularity. It quantifies '
'the worst jank. For a single pause, discrepancy '
'corresponds to the length of this pause in milliseconds. '
'Consecutive pauses increase the discrepancy. This metric '
'is important because even if the mean and 95th '
'percentile are good, one long pause in the middle of an '
'interaction is still bad.',
none_value_reason=none_value_reason,
improvement_direction=improvement_direction.DOWN)
def _ComputeMeanPixelsApproximated(self, page, stats):
"""Add the mean percentage of pixels approximated.
This looks at tiles which are missing or of low or non-ideal resolution.
"""
mean_pixels_approximated = None
none_value_reason = None
if self._HasEnoughFrames(stats.frame_timestamps):
mean_pixels_approximated = round(statistics.ArithmeticMean(
perf_tests_helper.FlattenList(
stats.approximated_pixel_percentages)), 3)
else:
none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
return scalar.ScalarValue(
page, 'mean_pixels_approximated', 'percent', mean_pixels_approximated,
description='Percentage of pixels that were approximated '
'(checkerboarding, low-resolution tiles, etc.).',
none_value_reason=none_value_reason,
improvement_direction=improvement_direction.DOWN)
def _ComputeMeanPixelsCheckerboarded(self, page, stats):
"""Add the mean percentage of pixels checkerboarded.
This looks at tiles which are only missing.
It does not take into consideration tiles which are of low or
non-ideal resolution.
"""
mean_pixels_checkerboarded = None
none_value_reason = None
if self._HasEnoughFrames(stats.frame_timestamps):
if rendering_stats.CHECKERBOARDED_PIXEL_ERROR in stats.errors:
none_value_reason = stats.errors[
rendering_stats.CHECKERBOARDED_PIXEL_ERROR]
else:
mean_pixels_checkerboarded = round(statistics.ArithmeticMean(
perf_tests_helper.FlattenList(
stats.checkerboarded_pixel_percentages)), 3)
else:
none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
return scalar.ScalarValue(
page, 'mean_pixels_checkerboarded', 'percent',
mean_pixels_checkerboarded,
description='Percentage of pixels that were checkerboarded.',
none_value_reason=none_value_reason,
improvement_direction=improvement_direction.DOWN)
| {
"content_hash": "e3aebfc3dee7212c70180998ea759186",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 80,
"avg_line_length": 46.45639534883721,
"alnum_prop": 0.6759902384081097,
"repo_name": "Chilledheart/chromium",
"id": "62201809c70989862110298158335aaa3fdf4125",
"size": "16144",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/web_perf/metrics/smoothness.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "37073"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9549264"
},
{
"name": "C++",
"bytes": "246614934"
},
{
"name": "CSS",
"bytes": "941919"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27365379"
},
{
"name": "Java",
"bytes": "15257671"
},
{
"name": "JavaScript",
"bytes": "20820575"
},
{
"name": "Makefile",
"bytes": "70983"
},
{
"name": "Objective-C",
"bytes": "1798644"
},
{
"name": "Objective-C++",
"bytes": "10138304"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "180150"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "494625"
},
{
"name": "Python",
"bytes": "8581270"
},
{
"name": "Shell",
"bytes": "485812"
},
{
"name": "Standard ML",
"bytes": "5106"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
"""
Weibo OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/weibo.html
"""
from social.backends.oauth import BaseOAuth2
class WeiboOAuth2(BaseOAuth2):
"""Weibo (of sina) OAuth authentication backend"""
name = 'weibo'
ID_KEY = 'uid'
AUTHORIZATION_URL = 'https://api.weibo.com/oauth2/authorize'
REQUEST_TOKEN_URL = 'https://api.weibo.com/oauth2/request_token'
ACCESS_TOKEN_URL = 'https://api.weibo.com/oauth2/access_token'
ACCESS_TOKEN_METHOD = 'POST'
REDIRECT_STATE = False
EXTRA_DATA = [
('id', 'id'),
('name', 'username'),
('profile_image_url', 'profile_image_url'),
('gender', 'gender')
]
def get_user_details(self, response):
"""Return user details from Weibo. API URL is:
https://api.weibo.com/2/users/show.json/?uid=<UID>&access_token=<TOKEN>
"""
if self.setting('DOMAIN_AS_USERNAME'):
username = response.get('domain', '')
else:
username = response.get('name', '')
fullname, first_name, last_name = self.get_user_names(
first_name=response.get('screen_name', '')
)
return {'username': username,
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
return self.get_json('https://api.weibo.com/2/users/show.json',
params={'access_token': access_token,
'uid': kwargs['response']['uid']})
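# A minimal configuration sketch (hypothetical Django settings; the key/secret
# values are placeholders and the setting names are assumed to follow the usual
# SOCIAL_AUTH_<BACKEND_NAME>_* convention derived from name = 'weibo'):
#
#   AUTHENTICATION_BACKENDS = (
#       'social.backends.weibo.WeiboOAuth2',
#       'django.contrib.auth.backends.ModelBackend',
#   )
#   SOCIAL_AUTH_WEIBO_KEY = '<app key>'
#   SOCIAL_AUTH_WEIBO_SECRET = '<app secret>'
#   # Optional: use the Weibo domain instead of the screen name as the username.
#   SOCIAL_AUTH_WEIBO_DOMAIN_AS_USERNAME = True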
| {
"content_hash": "a14a5f10557c5aec3fddcda5f56f4794",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 36.97674418604651,
"alnum_prop": 0.569811320754717,
"repo_name": "frankier/python-social-auth",
"id": "6082ae9d349eb98687153ce7fb976e70f71bea01",
"size": "1661",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "social/backends/weibo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "54"
},
{
"name": "Makefile",
"bytes": "4735"
},
{
"name": "Python",
"bytes": "601246"
},
{
"name": "Shell",
"bytes": "122"
}
],
"symlink_target": ""
} |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import TestSCons
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
test.write("wrapper.py",
"""import os
import sys
open('%s', 'wb').write("wrapper.py\\n")
os.system(" ".join(sys.argv[1:]))
""" % test.workpath('wrapper.out').replace('\\', '\\\\'))
test.write('SConstruct', """
foo = Environment()
link = foo.subst("$LINK")
bar = Environment(LINK = r'%(_python_)s wrapper.py ' + link)
foo.Program(target = 'foo', source = 'foo.c')
bar.Program(target = 'bar', source = 'bar.c')
""" % locals())
test.write('foo.c', r"""
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
printf("foo.c\n");
exit (0);
}
""")
test.write('bar.c', r"""
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
printf("foo.c\n");
exit (0);
}
""")
test.run(arguments = 'foo' + _exe)
test.fail_test(os.path.exists(test.workpath('wrapper.out')))
test.run(arguments = 'bar' + _exe)
test.fail_test(test.read('wrapper.out') != "wrapper.py\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "3f724d7c97ad624f41fb0faa08c0ef2d",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 61,
"avg_line_length": 19.5,
"alnum_prop": 0.5944055944055944,
"repo_name": "Distrotech/scons",
"id": "25d9efb89675bd0503575661bc7d73b6bb90a7a6",
"size": "2389",
"binary": false,
"copies": "5",
"ref": "refs/heads/distrotech-scons",
"path": "test/LINK/LINK.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259"
},
{
"name": "JavaScript",
"bytes": "17316"
},
{
"name": "Perl",
"bytes": "45214"
},
{
"name": "Python",
"bytes": "12517068"
},
{
"name": "Shell",
"bytes": "20589"
}
],
"symlink_target": ""
} |
import pytest
from haralyzer import HarPage, HarParser, HarEntry
from haralyzer.compat import iteritems
from haralyzer.errors import PageNotFoundError
import re
import six
BAD_PAGE_ID = 'sup_dawg'
PAGE_ID = 'page_3'
def test_init(har_data):
"""
Test the object loading
"""
with pytest.raises(ValueError):
assert HarPage(PAGE_ID)
init_data = har_data('humanssuck.net.har')
# Throws PageNotFoundException with bad page ID
with pytest.raises(PageNotFoundError):
assert HarPage(BAD_PAGE_ID, har_data=init_data)
# Make sure it can load with either har_data or a parser
page = HarPage(PAGE_ID, har_data=init_data)
assert isinstance(page, HarPage)
assert repr(page) == "ID: page_3, URL: http://humanssuck.net/"
parser = HarParser(init_data)
page = HarPage(PAGE_ID, har_parser=parser)
assert isinstance(page, HarPage)
assert len(page.entries) == 4
# Make sure that the entries are actually in order. Going a little bit
# old school here.
for index in range(0, len(page.entries)):
if index != len(page.entries) - 1:
current_date = page.entries[index].startTime
next_date = page.entries[index + 1].startTime
assert current_date <= next_date
def test_no_title(har_data):
"""
A page with no title should set the title property as an empty string
instead of throwing an exception.
"""
init_data = har_data('no_title.har')
page = HarPage(PAGE_ID, har_data=init_data)
assert page.title == ''
def test_filter_entries(har_data):
"""
Tests ability to filter entries, with or without regex
"""
init_data = har_data('humanssuck.net.har')
page = HarPage(PAGE_ID, har_data=init_data)
# Filter by request type only
entries = page.filter_entries(request_type='.*ET')
assert len(entries) == 4
for entry in entries:
assert entry.request.method == entry["request"]["method"] == 'GET'
# Filter by request type and content_type
entries = page.filter_entries(request_type='.*ET', content_type='image.*')
assert len(entries) == 1
for entry in entries:
assert entry.request.method == entry["request"]["method"] == 'GET'
for header in entry.request.headers:
if header['name'] == 'Content-Type':
assert re.search('image.*', header['value'])
# Filter by request type, content type, and status code
entries = page.filter_entries(request_type='.*ET', content_type='image.*',
status_code='2.*')
assert len(entries) == 1
for entry in entries:
assert entry.request.method == entry["request"]["method"] == 'GET'
assert re.search('2.*', str(entry.response.status))
for header in entry.response.headers:
if header['name'] == 'Content-Type':
assert re.search('image.*', header['value'])
for header in entry["response"]["headers"]:
if header['name'] == 'Content-Type':
assert re.search('image.*', header['value'])
entries = page.filter_entries(request_type='.*ST')
assert len(entries) == 0
entries = page.filter_entries(request_type='.*ET', content_type='video.*')
assert len(entries) == 0
entries = page.filter_entries(request_type='.*ET', content_type='image.*',
status_code='3.*')
assert len(entries) == 0
def test_filter_entries_load_time(har_data):
"""
Tests ability to filter entries by load time
"""
init_data = har_data('humanssuck.net_duplicate_url.har')
page = HarPage(PAGE_ID, har_data=init_data)
entries = page.filter_entries(load_time__gt=100)
assert len(entries) == 4
entries = page.filter_entries(load_time__gt=300)
assert len(entries) == 3
entries = page.filter_entries(load_time__gt=500)
assert len(entries) == 0
def test_get_load_time(har_data):
"""
Tests HarPage.get_load_time()
"""
init_data = har_data('humanssuck.net.har')
page = HarPage(PAGE_ID, har_data=init_data)
assert page.get_load_time(request_type='GET') == 463
assert page.get_load_time(request_type='GET', asynchronous=False) == 843
assert page.get_load_time(content_type='image.*') == 304
assert page.get_load_time(status_code='2.*') == 463
def test_entries(har_data):
init_data = har_data('humanssuck.net.har')
page = HarPage(PAGE_ID, har_data=init_data)
for entry in page.entries:
assert entry.pageref == entry["pageref"] == page.page_id
@pytest.mark.skipif(six.PY3, reason="Runs with Python 2")
def test_iteration_python2(har_data):
init_data = har_data('humanssuck.net.har')
page = HarPage(PAGE_ID, har_data=init_data)
entries = [x for x in page]
assert len(entries) == 4
iter_object = iter(page)
assert str(next(iter_object)) == 'HarEntry for http://humanssuck.net/'
assert str(next(iter_object)) == 'HarEntry for http://humanssuck.net/test.css'
assert str(next(iter_object)) == 'HarEntry for http://humanssuck.net/screen_login.gif'
assert str(next(iter_object)) == 'HarEntry for http://humanssuck.net/jquery-1.7.1.min.js'
with pytest.raises(StopIteration):
assert next(iter_object)
@pytest.mark.skipif(six.PY2, reason="Runs with Python 3")
def test_iteration_python3(har_data):
init_data = har_data('humanssuck.net.har')
page = HarPage(PAGE_ID, har_data=init_data)
entries = [x for x in page]
assert len(entries) == 4
assert str(next(page)) == 'HarEntry for http://humanssuck.net/'
assert str(next(page)) == 'HarEntry for http://humanssuck.net/test.css'
assert str(next(page)) == 'HarEntry for http://humanssuck.net/screen_login.gif'
assert str(next(page)) == 'HarEntry for http://humanssuck.net/jquery-1.7.1.min.js'
with pytest.raises(StopIteration):
assert next(page)
def test_file_types(har_data):
"""
Test file type properties
"""
init_data = har_data('cnn.har')
page = HarPage(PAGE_ID, har_data=init_data)
file_types = {'image_files': ['image'], 'css_files': ['css'],
'js_files': ['javascript'], 'audio_files': ['audio'],
'video_files': ['video', 'flash'], 'text_files': ['text'],
'html_files': ['html']}
for k, v in iteritems(file_types):
for asset in getattr(page, k, None):
assert _correct_file_type(asset, v)
def test_request_types(har_data):
"""
Test request type filters
"""
init_data = har_data('humanssuck.net.har')
page = HarPage(PAGE_ID, har_data=init_data)
# Check request type lists
for req in page.get_requests:
assert req.request.method == req["request"]["method"] == 'GET'
for req in page.post_requests:
assert req.request.method == req["request"]["method"] == 'POST'
def test_sizes_trans(har_data):
init_data = har_data('cnn-chrome.har')
page = HarPage('page_1', har_data=init_data)
assert page.page_size_trans == 2609508
assert page.text_size_trans == 569814
assert page.css_size_trans == 169573
assert page.js_size_trans == 1600321
assert page.image_size_trans == 492950
# TODO - Get test data for audio and video
assert page.audio_size_trans == 0
assert page.video_size_trans == 0
def test_sizes(har_data):
init_data = har_data('humanssuck.net.har')
page = HarPage(PAGE_ID, har_data=init_data)
assert page.page_size == 62204
assert page.text_size == 246
assert page.css_size == 8
assert page.js_size == 38367
assert page.image_size == 23591
# TODO - Get test data for audio and video
assert page.audio_size == 0
assert page.video_size == 0
def test_load_times(har_data):
"""
This whole test really needs better sample data. I need to make a
web page with like 2-3 of each asset type to really test the load times.
"""
init_data = har_data('humanssuck.net.har')
page = HarPage(PAGE_ID, har_data=init_data)
# Check initial page load
assert page.actual_page.request.url == 'http://humanssuck.net/'
# Check initial page load times
assert page.initial_load_time == 153
assert page.content_load_time == 543
# Check content type browser (async) load times
assert page.image_load_time == 304
assert page.css_load_time == 76
assert page.js_load_time == 310
assert page.html_load_time == 153
assert page.page_load_time == 567
# TODO - Need to get sample data for these types
assert page.audio_load_time == 0
assert page.video_load_time == 0
def test_time_to_first_byte(har_data):
"""
Tests that TTFB is correctly reported as a property of the page.
"""
init_data = har_data('humanssuck.net.har')
page = HarPage(PAGE_ID, har_data=init_data)
assert page.time_to_first_byte == 153
def test_hostname(har_data):
"""
Makes sure that the correct hostname is returned.
"""
init_data = har_data('humanssuck.net.har')
page = HarPage(PAGE_ID, har_data=init_data)
assert page.hostname == 'humanssuck.net'
def test_url(har_data):
"""
Makes sure that the correct URL is returned.
"""
init_data = har_data('humanssuck.net.har')
page = HarPage(PAGE_ID, har_data=init_data)
assert page.url == 'http://humanssuck.net/'
def _correct_file_type(entry, file_types):
for header in entry.response.headers:
if header['name'] == 'Content-Type':
return any(ft in header['value'] for ft in file_types)
def test_duplicate_urls_count(har_data):
"""
Makes sure that the correct number of urls that appear more than once in har is displayed.
"""
init_data = har_data('humanssuck.net_duplicate_url.har')
page = HarPage(PAGE_ID, har_data=init_data)
assert page.duplicate_url_request == {'http://humanssuck.net/jquery-1.7.1.min.js': 2}
| {
"content_hash": "f596488f717d3853211f8050c52e38e6",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 94,
"avg_line_length": 34.60489510489511,
"alnum_prop": 0.6405981610589068,
"repo_name": "mrname/haralyzer",
"id": "c1ee06c2e4469c810cd8d6a217951314ca3b51c9",
"size": "9897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_page.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "429"
},
{
"name": "Python",
"bytes": "48790"
}
],
"symlink_target": ""
} |
from rest_framework.generics import RetrieveUpdateDestroyAPIView, ListCreateAPIView
from rest_framework.response import Response
from django.http import Http404
from django.utils.encoding import iri_to_uri
class JSONRootSingleModelView(RetrieveUpdateDestroyAPIView):
'''
allows GET, PUT, and DELETE
GET returns a single instance of the view's model with the json data
nested inside the view's jsonroot
'''
#retrieve is called by self.get and returns json with self.jsonroot
def retrieve(self, request, *args, **kwargs):
json_response = {}
self.object = self.get_object()
serial_data = self.get_serializer(self.object).data
json_response[self.jsonroot] = serial_data
return Response(json_response)
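# A minimal subclass sketch (hypothetical model/serializer names; `jsonroot` is
# the only attribute this view itself requires, the rest is standard DRF
# generic-view configuration):
#
#   class ArticleDetailView(JSONRootSingleModelView):
#       queryset = Article.objects.all()
#       serializer_class = ArticleSerializer
#       jsonroot = 'article'
#
#   # GET /articles/1/ would then respond with {"article": {...serialized fields...}}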
class JSONRootMultipleModelView(ListCreateAPIView):
'''
allows GET, POST
    GET returns a list of the view's model that may vary depending on the queryset.
    list() overrides the method inherited from django's MultipleObjectMixin;
    it allows request.META.get('QUERY_STRING') to be evaluated and converted
    to a query that is run against the database.
    N.B. setting a queryset attribute on the view will raise an error
'''
def list(self, request, *args, **kwargs):
json_plural_response = {}
#TODO
#this tester is here because of occasional issues w/ QUERY_STRING
tester = iri_to_uri(request.META.get('QUERY_STRING', ''))
queryset = self.get_queryset()
self.object_list = self.filter_queryset(queryset)
#code from DJRF List mixin that checks if empty querysets are allowed
allow_empty = self.get_allow_empty()
if not allow_empty and not self.object_list:
class_name = self.__class__.__name__
error_msg = self.empty_error % {'class_name': class_name}
raise Http404(error_msg)
#CODE FOR PAGINATION HAS BEEN REMOVED FOR THE MOMENT
#MAY ADD IT BACK AT A LATER TIME IF DESIRED
serial_data = self.get_serializer(self.object_list).data
json_plural_response[self.jsonroot] = serial_data
return Response(json_plural_response)
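# A similar sketch for the list view (again hypothetical names; per the docstring
# above, override get_queryset() rather than setting a queryset attribute):
#
#   class ArticleListView(JSONRootMultipleModelView):
#       serializer_class = ArticleSerializer
#       jsonroot = 'articles'
#       def get_queryset(self):
#           return Article.objects.all()
#
#   # GET /articles/ would then respond with {"articles": [...]}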
| {
"content_hash": "3e4b6260f60d799321e2dd062859e44c",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 83,
"avg_line_length": 43.58695652173913,
"alnum_prop": 0.7301745635910224,
"repo_name": "4South/django-on-fire",
"id": "69f872929d8e935ce4048fd3667cf75df2eb1686",
"size": "2005",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_on_fire/jsonrootviews.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "10606"
}
],
"symlink_target": ""
} |
from rest_framework.routers import DefaultRouter
from . import views
router = DefaultRouter()
router.register(r'protonflux', views.ProtonfluxViewSet)
router.register(r'electronflux', views.ElectronfluxViewSet)
router.register(r'xrayflux', views.XrayfluxViewSet)
router.register(r'sunspot', views.SunspotViewSet)
router.register(r'ptypes', views.PtypeViewSet)
router.register(r'etypes', views.EtypeViewSet)
router.register(r'xtypes', views.XtypeViewSet)
router.register(r'sunspottypes', views.SunspottypeViewSet)
router.register(r'sunspotregion', views.SunspotregionViewSet)
router.register(r'alerttypes', views.AlerttypeViewSet)
router.register(r'alerts', views.AlertViewSet)
router.register(r'channeltypes', views.ChanneltypeViewSet)
router.register(r'imagechannels', views.ImagechannelViewSet)
router.register(r'solarradiationtypes', views.SolarradiationtypeViewSet)
router.register(r'solarradiation', views.SolarradiationViewSet)
router.register(r'radioblackout', views.RadioblackoutViewSet)
router.register(r'radioblackouttype', views.RadioblackouttypeViewSet)
router.register(r'geomagactivity', views.GeomagactivityViewSet)
router.register(r'solarwind', views.SolarwindViewSet)
router.register(r'forecastrationale', views.ForecastrationaleViewSet)
| {
"content_hash": "38a0331a4601cb1ff909ef7083c5299a",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 72,
"avg_line_length": 48.30769230769231,
"alnum_prop": 0.8407643312101911,
"repo_name": "diacritica/spaceweather",
"id": "91590446ec8873656b91f5515ca53fb1efa51fae",
"size": "1256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/spaceweather/core/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "62344"
},
{
"name": "Shell",
"bytes": "2601"
}
],
"symlink_target": ""
} |
import py
from rpython.jit.metainterp.history import ConstInt, INT, FLOAT
from rpython.jit.backend.llsupport.regalloc import FrameManager, LinkedList
from rpython.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan
from rpython.jit.metainterp.resoperation import InputArgInt, InputArgRef,\
InputArgFloat
def newboxes(*values):
return [InputArgInt(v) for v in values]
def newrefboxes(count):
return [InputArgRef() for _ in range(count)]
def boxes_and_longevity(num):
res = []
longevity = {}
for i in range(num):
box = InputArgInt(0)
res.append(box)
longevity[box] = (0, 1)
return res, longevity
class FakeReg(object):
def __init__(self, i):
self.n = i
def __repr__(self):
return 'r%d' % self.n
r0, r1, r2, r3 = [FakeReg(i) for i in range(4)]
regs = [r0, r1, r2, r3]
class RegisterManager(BaseRegMan):
all_regs = regs
def convert_to_imm(self, v):
return v
class FakeFramePos(object):
def __init__(self, pos, box_type):
self.pos = pos
self.box_type = box_type
def __repr__(self):
return 'FramePos<%d,%s>' % (self.pos, self.box_type)
def __eq__(self, other):
return self.pos == other.pos and self.box_type == other.box_type
def __ne__(self, other):
return not self == other
class TFrameManagerEqual(FrameManager):
def frame_pos(self, i, box_type):
return FakeFramePos(i, box_type)
def frame_size(self, box_type):
return 1
def get_loc_index(self, loc):
assert isinstance(loc, FakeFramePos)
return loc.pos
class TFrameManager(FrameManager):
def frame_pos(self, i, box_type):
return FakeFramePos(i, box_type)
def frame_size(self, box_type):
if box_type == FLOAT:
return 2
else:
return 1
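    # Floats report frame_size == 2, i.e. they occupy two consecutive frame
    # slots; test_frame_manager_basic below checks that such locations are
    # handed out at even indices only.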
def get_loc_index(self, loc):
assert isinstance(loc, FakeFramePos)
return loc.pos
class MockAsm(object):
def __init__(self):
self.moves = []
def regalloc_mov(self, from_loc, to_loc):
self.moves.append((from_loc, to_loc))
class TestRegalloc(object):
def test_freeing_vars(self):
b0, b1, b2 = newboxes(0, 0, 0)
longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)}
rm = RegisterManager(longevity)
rm.next_instruction()
for b in b0, b1, b2:
rm.try_allocate_reg(b)
rm._check_invariants()
assert len(rm.free_regs) == 1
assert len(rm.reg_bindings) == 3
rm.possibly_free_vars([b0, b1, b2])
assert len(rm.free_regs) == 1
assert len(rm.reg_bindings) == 3
rm._check_invariants()
rm.next_instruction()
rm.possibly_free_vars([b0, b1, b2])
rm._check_invariants()
assert len(rm.free_regs) == 2
assert len(rm.reg_bindings) == 2
rm._check_invariants()
rm.next_instruction()
rm.possibly_free_vars([b0, b1, b2])
rm._check_invariants()
assert len(rm.free_regs) == 4
assert len(rm.reg_bindings) == 0
def test_register_exhaustion(self):
boxes, longevity = boxes_and_longevity(5)
rm = RegisterManager(longevity)
rm.next_instruction()
for b in boxes[:len(regs)]:
assert rm.try_allocate_reg(b)
assert rm.try_allocate_reg(boxes[-1]) is None
rm._check_invariants()
def test_need_lower_byte(self):
boxes, longevity = boxes_and_longevity(5)
b0, b1, b2, b3, b4 = boxes
class XRegisterManager(RegisterManager):
no_lower_byte_regs = [r2, r3]
rm = XRegisterManager(longevity)
rm.next_instruction()
loc0 = rm.try_allocate_reg(b0, need_lower_byte=True)
assert loc0 not in XRegisterManager.no_lower_byte_regs
loc = rm.try_allocate_reg(b1, need_lower_byte=True)
assert loc not in XRegisterManager.no_lower_byte_regs
loc = rm.try_allocate_reg(b2, need_lower_byte=True)
assert loc is None
loc = rm.try_allocate_reg(b0, need_lower_byte=True)
assert loc is loc0
rm._check_invariants()
def test_specific_register(self):
boxes, longevity = boxes_and_longevity(5)
rm = RegisterManager(longevity)
rm.next_instruction()
loc = rm.try_allocate_reg(boxes[0], selected_reg=r1)
assert loc is r1
loc = rm.try_allocate_reg(boxes[1], selected_reg=r1)
assert loc is None
rm._check_invariants()
loc = rm.try_allocate_reg(boxes[0], selected_reg=r1)
assert loc is r1
loc = rm.try_allocate_reg(boxes[0], selected_reg=r2)
assert loc is r2
rm._check_invariants()
def test_force_allocate_reg(self):
boxes, longevity = boxes_and_longevity(5)
b0, b1, b2, b3, b4 = boxes
fm = TFrameManager()
class XRegisterManager(RegisterManager):
no_lower_byte_regs = [r2, r3]
rm = XRegisterManager(longevity,
frame_manager=fm,
assembler=MockAsm())
rm.next_instruction()
loc = rm.force_allocate_reg(b0)
assert isinstance(loc, FakeReg)
loc = rm.force_allocate_reg(b1)
assert isinstance(loc, FakeReg)
loc = rm.force_allocate_reg(b2)
assert isinstance(loc, FakeReg)
loc = rm.force_allocate_reg(b3)
assert isinstance(loc, FakeReg)
loc = rm.force_allocate_reg(b4)
assert isinstance(loc, FakeReg)
# one of those should be now somewhere else
locs = [rm.loc(b) for b in boxes]
used_regs = [loc for loc in locs if isinstance(loc, FakeReg)]
assert len(used_regs) == len(regs)
loc = rm.force_allocate_reg(b0, need_lower_byte=True)
assert isinstance(loc, FakeReg)
assert loc not in [r2, r3]
rm._check_invariants()
def test_make_sure_var_in_reg(self):
boxes, longevity = boxes_and_longevity(5)
fm = TFrameManager()
rm = RegisterManager(longevity, frame_manager=fm,
assembler=MockAsm())
rm.next_instruction()
# allocate a stack position
b0, b1, b2, b3, b4 = boxes
sp = fm.loc(b0)
assert sp.pos == 0
loc = rm.make_sure_var_in_reg(b0)
assert isinstance(loc, FakeReg)
rm._check_invariants()
def test_force_result_in_reg_1(self):
b0, b1 = newboxes(0, 0)
longevity = {b0: (0, 1), b1: (1, 3)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
# first path, var is already in reg and dies
loc0 = rm.force_allocate_reg(b0)
rm._check_invariants()
rm.next_instruction()
loc = rm.force_result_in_reg(b1, b0)
assert loc is loc0
assert len(asm.moves) == 0
rm._check_invariants()
def test_force_result_in_reg_2(self):
b0, b1 = newboxes(0, 0)
longevity = {b0: (0, 2), b1: (1, 3)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
loc0 = rm.force_allocate_reg(b0)
rm._check_invariants()
rm.next_instruction()
loc = rm.force_result_in_reg(b1, b0)
assert loc is loc0
assert rm.loc(b0) is not loc0
assert len(asm.moves) == 1
rm._check_invariants()
def test_force_result_in_reg_3(self):
b0, b1, b2, b3, b4 = newboxes(0, 0, 0, 0, 0)
longevity = {b0: (0, 2), b1: (0, 2), b3: (0, 2), b2: (0, 2), b4: (1, 3)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
for b in b0, b1, b2, b3:
rm.force_allocate_reg(b)
assert not len(rm.free_regs)
rm._check_invariants()
rm.next_instruction()
rm.force_result_in_reg(b4, b0)
rm._check_invariants()
assert len(asm.moves) == 1
def test_force_result_in_reg_4(self):
b0, b1 = newboxes(0, 0)
longevity = {b0: (0, 1), b1: (0, 1)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
fm.loc(b0)
rm.force_result_in_reg(b1, b0)
rm._check_invariants()
loc = rm.loc(b1)
assert isinstance(loc, FakeReg)
loc = rm.loc(b0)
assert isinstance(loc, FakeFramePos)
assert len(asm.moves) == 1
def test_bogus_make_sure_var_in_reg(self):
b0, = newboxes(0)
longevity = {b0: (0, 1)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
# invalid call to make_sure_var_in_reg(): box unknown so far
py.test.raises(KeyError, rm.make_sure_var_in_reg, b0)
def test_return_constant(self):
asm = MockAsm()
boxes, longevity = boxes_and_longevity(5)
fm = TFrameManager()
rm = RegisterManager(longevity, assembler=asm,
frame_manager=fm)
rm.next_instruction()
loc = rm.return_constant(ConstInt(1), selected_reg=r1)
assert loc is r1
loc = rm.return_constant(ConstInt(1), selected_reg=r1)
assert loc is r1
loc = rm.return_constant(ConstInt(1))
assert isinstance(loc, ConstInt)
for box in boxes[:-1]:
rm.force_allocate_reg(box)
assert len(asm.moves) == 2 # Const(1) -> r1, twice
assert len(rm.reg_bindings) == 4
rm._check_invariants()
def test_force_result_in_reg_const(self):
boxes, longevity = boxes_and_longevity(2)
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm,
assembler=asm)
rm.next_instruction()
c = ConstInt(0)
rm.force_result_in_reg(boxes[0], c)
rm._check_invariants()
def test_loc_of_const(self):
rm = RegisterManager({})
rm.next_instruction()
assert isinstance(rm.loc(ConstInt(1)), ConstInt)
def test_call_support(self):
class XRegisterManager(RegisterManager):
save_around_call_regs = [r1, r2]
def call_result_location(self, v):
return r1
fm = TFrameManager()
asm = MockAsm()
boxes, longevity = boxes_and_longevity(5)
rm = XRegisterManager(longevity, frame_manager=fm,
assembler=asm)
for b in boxes[:-1]:
rm.force_allocate_reg(b)
rm.before_call()
assert len(rm.reg_bindings) == 2
assert fm.get_frame_depth() == 2
assert len(asm.moves) == 2
rm._check_invariants()
rm.after_call(boxes[-1])
assert len(rm.reg_bindings) == 3
rm._check_invariants()
def test_call_support_save_all_regs(self):
class XRegisterManager(RegisterManager):
save_around_call_regs = [r1, r2]
def call_result_location(self, v):
return r1
fm = TFrameManager()
asm = MockAsm()
boxes, longevity = boxes_and_longevity(5)
rm = XRegisterManager(longevity, frame_manager=fm,
assembler=asm)
for b in boxes[:-1]:
rm.force_allocate_reg(b)
rm.before_call(save_all_regs=True)
assert len(rm.reg_bindings) == 0
assert fm.get_frame_depth() == 4
assert len(asm.moves) == 4
rm._check_invariants()
rm.after_call(boxes[-1])
assert len(rm.reg_bindings) == 1
rm._check_invariants()
def test_different_frame_width(self):
class XRegisterManager(RegisterManager):
pass
fm = TFrameManager()
b0 = InputArgInt()
longevity = {b0: (0, 1)}
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
f0 = InputArgFloat()
longevity = {f0: (0, 1)}
xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm)
xrm.loc(f0)
rm.loc(b0)
assert fm.get_frame_depth() == 3
def test_spilling(self):
b0, b1, b2, b3, b4, b5 = newboxes(0, 1, 2, 3, 4, 5)
longevity = {b0: (0, 3), b1: (0, 3), b3: (0, 5), b2: (0, 2), b4: (1, 4), b5: (1, 3)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
for b in b0, b1, b2, b3:
rm.force_allocate_reg(b)
assert len(rm.free_regs) == 0
rm.next_instruction()
loc = rm.loc(b3)
spilled = rm.force_allocate_reg(b4)
assert spilled is loc
spilled2 = rm.force_allocate_reg(b5)
assert spilled2 is loc
rm._check_invariants()
def test_hint_frame_locations_1(self):
for hint_value in range(11):
b0, = newboxes(0)
fm = TFrameManager()
fm.hint_frame_pos[b0] = hint_value
blist = newboxes(*range(10))
for b1 in blist:
fm.loc(b1)
for b1 in blist:
fm.mark_as_free(b1)
assert fm.get_frame_depth() == 10
loc = fm.loc(b0)
if hint_value < 10:
expected = hint_value
else:
expected = 0
assert fm.get_loc_index(loc) == expected
assert fm.get_frame_depth() == 10
def test_linkedlist(self):
class Loc(object):
def __init__(self, pos, size, tp):
self.pos = pos
self.size = size
self.tp = tp
class FrameManager(object):
@staticmethod
def get_loc_index(item):
return item.pos
@staticmethod
def frame_pos(pos, tp):
if tp == 13:
size = 2
else:
size = 1
return Loc(pos, size, tp)
fm = FrameManager()
l = LinkedList(fm)
l.append(1, Loc(1, 1, 0))
l.append(1, Loc(4, 1, 0))
l.append(1, Loc(2, 1, 0))
l.append(1, Loc(0, 1, 0))
assert l.master_node.val == 0
assert l.master_node.next.val == 1
assert l.master_node.next.next.val == 2
assert l.master_node.next.next.next.val == 4
assert l.master_node.next.next.next.next is None
item = l.pop(1, 0)
assert item.pos == 0
item = l.pop(1, 0)
assert item.pos == 1
item = l.pop(1, 0)
assert item.pos == 2
item = l.pop(1, 0)
assert item.pos == 4
assert l.pop(1, 0) is None
l.append(1, Loc(1, 1, 0))
l.append(1, Loc(5, 1, 0))
l.append(1, Loc(2, 1, 0))
l.append(1, Loc(0, 1, 0))
item = l.pop(2, 13)
assert item.tp == 13
assert item.pos == 0
assert item.size == 2
assert l.pop(2, 0) is None # 2 and 4
l.append(1, Loc(4, 1, 0))
item = l.pop(2, 13)
assert item.pos == 4
assert item.size == 2
assert l.pop(1, 0).pos == 2
assert l.pop(1, 0) is None
l.append(2, Loc(1, 2, 0))
# this will not work because the result will be odd
assert l.pop(2, 13) is None
l.append(1, Loc(3, 1, 0))
item = l.pop(2, 13)
assert item.pos == 2
assert item.tp == 13
assert item.size == 2
def test_frame_manager_basic_equal(self):
b0, b1 = newboxes(0, 1)
fm = TFrameManagerEqual()
loc0 = fm.loc(b0)
assert fm.get_loc_index(loc0) == 0
#
assert fm.get(b1) is None
loc1 = fm.loc(b1)
assert fm.get_loc_index(loc1) == 1
assert fm.get(b1) == loc1
#
loc0b = fm.loc(b0)
assert loc0b == loc0
#
fm.loc(InputArgInt())
assert fm.get_frame_depth() == 3
#
f0 = InputArgFloat()
locf0 = fm.loc(f0)
assert fm.get_loc_index(locf0) == 3
assert fm.get_frame_depth() == 4
#
f1 = InputArgFloat()
locf1 = fm.loc(f1)
assert fm.get_loc_index(locf1) == 4
assert fm.get_frame_depth() == 5
fm.mark_as_free(b1)
assert fm.freelist
b2 = InputArgInt()
fm.loc(b2) # should be in the same spot as b1 before
assert fm.get(b1) is None
assert fm.get(b2) == loc1
fm.mark_as_free(b0)
p0 = InputArgRef()
ploc = fm.loc(p0)
assert fm.get_loc_index(ploc) == 0
assert fm.get_frame_depth() == 5
assert ploc != loc1
p1 = InputArgRef()
p1loc = fm.loc(p1)
assert fm.get_loc_index(p1loc) == 5
assert fm.get_frame_depth() == 6
fm.mark_as_free(p0)
p2 = InputArgRef()
p2loc = fm.loc(p2)
assert p2loc == ploc
assert len(fm.freelist) == 0
for box in fm.bindings.keys():
fm.mark_as_free(box)
fm.bind(InputArgRef(), FakeFramePos(3, 'r'))
assert len(fm.freelist) == 6
def test_frame_manager_basic(self):
b0, b1 = newboxes(0, 1)
fm = TFrameManager()
loc0 = fm.loc(b0)
assert fm.get_loc_index(loc0) == 0
#
assert fm.get(b1) is None
loc1 = fm.loc(b1)
assert fm.get_loc_index(loc1) == 1
assert fm.get(b1) == loc1
#
loc0b = fm.loc(b0)
assert loc0b == loc0
#
fm.loc(InputArgInt())
assert fm.get_frame_depth() == 3
#
f0 = InputArgFloat()
locf0 = fm.loc(f0)
        # floats take two frame slots, so their location index can't be odd
assert fm.get_loc_index(locf0) == 4
assert fm.get_frame_depth() == 6
#
f1 = InputArgFloat()
locf1 = fm.loc(f1)
assert fm.get_loc_index(locf1) == 6
assert fm.get_frame_depth() == 8
fm.mark_as_free(b1)
assert fm.freelist
b2 = InputArgInt()
fm.loc(b2) # should be in the same spot as b1 before
assert fm.get(b1) is None
assert fm.get(b2) == loc1
fm.mark_as_free(b0)
p0 = InputArgRef()
ploc = fm.loc(p0)
assert fm.get_loc_index(ploc) == 0
assert fm.get_frame_depth() == 8
assert ploc != loc1
p1 = InputArgRef()
p1loc = fm.loc(p1)
assert fm.get_loc_index(p1loc) == 3
assert fm.get_frame_depth() == 8
fm.mark_as_free(p0)
p2 = InputArgRef()
p2loc = fm.loc(p2)
assert p2loc == ploc
assert len(fm.freelist) == 0
fm.mark_as_free(b2)
f3 = InputArgFloat()
fm.mark_as_free(p2)
floc = fm.loc(f3)
assert fm.get_loc_index(floc) == 0
for box in fm.bindings.keys():
fm.mark_as_free(box)
| {
"content_hash": "104b09c0841b448a2d3d8ad983d60447",
"timestamp": "",
"source": "github",
"line_count": 570,
"max_line_length": 92,
"avg_line_length": 33.52982456140351,
"alnum_prop": 0.5444746755964839,
"repo_name": "jptomo/rpython-lang-scheme",
"id": "297dc2a32713b8b60eaaa62b7c3e70adb5832671",
"size": "19112",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rpython/jit/backend/llsupport/test/test_regalloc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "335765"
},
{
"name": "C++",
"bytes": "12638"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "Makefile",
"bytes": "6988"
},
{
"name": "Objective-C",
"bytes": "1907"
},
{
"name": "Python",
"bytes": "16129160"
},
{
"name": "Scheme",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "721"
},
{
"name": "VimL",
"bytes": "1107"
}
],
"symlink_target": ""
} |
import datetime
import time
import redis
import logging
import functools
from django_sockjs_server.lib.config import SockJSServerSettings
def reconnect_wrapper(func):
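    """Retry the decorated RedisClient method on redis.ConnectionError,
    reconnecting via self.connect() and waiting 5 seconds between attempts."""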
@functools.wraps(func)
def myfunc(self, *args, **kwargs):
while True:
try:
return func(self, *args, **kwargs)
except redis.ConnectionError:
self.logger.info('django-sockjs-server(RedisClient): error connect, wait 5 sec')
self.connect()
time.sleep(5)
return myfunc
class RedisClient(object):
def __init__(self):
self.config = SockJSServerSettings()
self.connect_tries = 0
self.connecting = False
self.last_reconnect = None
self.connect()
self.logger = logging.getLogger(__name__)
def get_uptime(self):
if self.last_reconnect:
return (datetime.datetime.now() - self.last_reconnect).seconds
def connect(self):
if not self.connecting:
self.connecting = True
self.connect_tries += 1
while self.connecting:
try:
self.redis = redis.StrictRedis(
host=self.config.redis_host,
port=self.config.redis_port,
db=self.config.redis_db,
password=self.config.redis_password
)
except redis.ConnectionError:
self.logger.info('django-sockjs-server(RedisClient): error connect, wait 5 sec')
time.sleep(5)
else:
self.connecting = False
self.last_reconnect = datetime.datetime.now()
else:
self.logger.info('django-sockjs-server(RedisClient): already connected')
def get_real_key(self, key):
return self.config.redis_prefix + key
def log(self, *args):
formatters = "%s " * len(args)
format_string = "django-sockjs-server(RedisClient): " + formatters
self.logger.debug(format_string % args)
@reconnect_wrapper
def lpush(self, key, *args, **kwargs):
self.log('lpush', key, args, kwargs)
return self.redis.lpush(self.get_real_key(key), *args, **kwargs)
@reconnect_wrapper
def lrange(self, key, *args, **kwargs):
self.log('lrange', key, args, kwargs)
return self.redis.lrange(self.get_real_key(key), *args, **kwargs)
@reconnect_wrapper
def lrem(self, key, num, value):
self.log('lrem', key, num, value)
return self.redis.lrem(self.get_real_key(key), num, value)
redis_client = RedisClient()
| {
"content_hash": "c070b3410eb40fe74f41a35ff486d7db",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 100,
"avg_line_length": 33.5875,
"alnum_prop": 0.5723855601042054,
"repo_name": "alfss/django-sockjs-server",
"id": "26bb83292806da9f91fcbf9104353d44f7d566f4",
"size": "2687",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_sockjs_server/lib/redis_client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1272"
},
{
"name": "JavaScript",
"bytes": "160"
},
{
"name": "Python",
"bytes": "34478"
}
],
"symlink_target": ""
} |
import unittest
from wikisync.plugin import WikiSyncEnvironment
from wikisync.model import WikiSyncDao
from trac.test import EnvironmentStub
from pkg_resources import resource_filename
class WikiSyncModelTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub()
self.plugin = WikiSyncEnvironment(self.env)
self.plugin.upgrade_environment(self.env.get_db_cnx())
self.dao = WikiSyncDao(self.env)
file = resource_filename(__name__, "data.sql")
@self.env.with_transaction()
def do_save(db):
cursor = db.cursor()
with open(file, "rb") as f:
for sql in f:
if sql:
cursor.execute(sql)
def test_sync_wiki_data(self):
self.dao.sync_wiki_data()
for name in ["CamelCase", "InterMapTxt", "NewPage", "WikiRestructuredTextLinks"]:
self.assertTrue(self.dao.find(name) is not None)
def test_all(self):
results = [item for item in self.dao.all()]
self.assertEqual(len(results), 3)
def test_validate(self):
item = self.dao.factory()
self.assertRaises(AssertionError, item.validate)
item.replace(name="Test").validate()
def test_read(self):
item = self.dao.find("Test1")
self.assertTrue(item is not None)
self.assertEqual(self.dao.find("Unknown"), None)
def test_create(self):
self.dao.create(
self.dao.factory(
name="Test2",
remote_version=2
)
)
db = self.env.get_read_db()
cursor = db.cursor()
cursor.execute("""
SELECT name, remote_version FROM wikisync
WHERE name='Test2'
""")
results = cursor.fetchall()
self.assertEqual(len(results), 1)
self.assertEqual(results[0], (u'Test2', 2))
def test_delete(self):
item = self.dao.find("Test1")
self.assertTrue(item is not None)
self.dao.delete(item)
self.assertTrue(self.dao.find("Test1") is None)
def test_update(self):
item = self.dao.find("Test1")
item = item.replace(sync_remote_version=1, remote_version=11)
self.dao.update(item)
db = self.env.get_read_db()
cursor = db.cursor()
cursor.execute("""
SELECT sync_remote_version, remote_version FROM wikisync
WHERE name='Test1'
""")
results = cursor.fetchall()
self.assertEqual(len(results), 1)
self.assertEqual(results[0], (1, 11))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(WikiSyncModelTestCase, 'test'))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite") | {
"content_hash": "c338933a617957980bae0ed946dfb889",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 89,
"avg_line_length": 33.083333333333336,
"alnum_prop": 0.5851025548758546,
"repo_name": "ivanchoo/TracWikiSync",
"id": "e4dfc0f3d19610f5103375517d67181e79c50dad",
"size": "2803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wikisync/tests/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "23793"
},
{
"name": "Python",
"bytes": "46769"
}
],
"symlink_target": ""
} |
import unittest
import jmespath
from parameterized import parameterized
from tests.charts.helm_template_generator import render_chart
class IngressWebTest(unittest.TestCase):
def test_should_pass_validation_with_just_ingress_enabled_v1(self):
render_chart(
values={"ingress": {"web": {"enabled": True}}},
show_only=["templates/webserver/webserver-ingress.yaml"],
) # checks that no validation exception is raised
def test_should_pass_validation_with_just_ingress_enabled_v1beta1(self):
render_chart(
values={"ingress": {"web": {"enabled": True}}},
show_only=["templates/webserver/webserver-ingress.yaml"],
kubernetes_version='1.16.0',
) # checks that no validation exception is raised
def test_should_allow_more_than_one_annotation(self):
docs = render_chart(
values={"ingress": {"web": {"enabled": True, "annotations": {"aa": "bb", "cc": "dd"}}}},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert {"aa": "bb", "cc": "dd"} == jmespath.search("metadata.annotations", docs[0])
def test_should_set_ingress_class_name(self):
docs = render_chart(
values={"ingress": {"web": {"enabled": True, "ingressClassName": "foo"}}},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert "foo" == jmespath.search("spec.ingressClassName", docs[0])
def test_should_ingress_hosts_objs_have_priority_over_host(self):
docs = render_chart(
values={
"ingress": {
"web": {
"enabled": True,
"tls": {"enabled": True, "secretName": "oldsecret"},
"hosts": [
{"name": "*.a-host", "tls": {"enabled": True, "secretName": "newsecret1"}},
{"name": "b-host", "tls": {"enabled": True, "secretName": "newsecret2"}},
{"name": "c-host", "tls": {"enabled": True, "secretName": "newsecret1"}},
{"name": "d-host", "tls": {"enabled": False, "secretName": ""}},
{"name": "e-host"},
],
"host": "old-host",
},
}
},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert ["*.a-host", "b-host", "c-host", "d-host", "e-host"] == jmespath.search(
"spec.rules[*].host", docs[0]
)
assert [
{"hosts": ["*.a-host"], "secretName": "newsecret1"},
{"hosts": ["b-host"], "secretName": "newsecret2"},
{"hosts": ["c-host"], "secretName": "newsecret1"},
] == jmespath.search("spec.tls[*]", docs[0])
def test_should_ingress_hosts_strs_have_priority_over_host(self):
docs = render_chart(
values={
"ingress": {
"web": {
"enabled": True,
"tls": {"enabled": True, "secretName": "secret"},
"hosts": ["*.a-host", "b-host", "c-host", "d-host"],
"host": "old-host",
},
}
},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert ["*.a-host", "b-host", "c-host", "d-host"] == jmespath.search("spec.rules[*].host", docs[0])
assert [
{"hosts": ["*.a-host", "b-host", "c-host", "d-host"], "secretName": "secret"}
] == jmespath.search("spec.tls[*]", docs[0])
def test_should_ingress_deprecated_host_and_top_level_tls_still_work(self):
docs = render_chart(
values={
"ingress": {
"web": {
"enabled": True,
"tls": {"enabled": True, "secretName": "supersecret"},
"host": "old-host",
},
}
},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert (
["old-host"]
== jmespath.search("spec.rules[*].host", docs[0])
== jmespath.search("spec.tls[0].hosts", docs[0])
)
def test_should_ingress_host_entry_not_exist(self):
docs = render_chart(
values={
"ingress": {
"web": {
"enabled": True,
}
}
},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert not jmespath.search("spec.rules[*].host", docs[0])
@parameterized.expand(
[
(None, None, False),
(None, False, False),
(None, True, True),
(False, None, False),
(True, None, True),
(False, True, True), # We will deploy it if _either_ are true
(True, False, True),
]
)
def test_ingress_created(self, global_value, web_value, expected):
values = {"ingress": {}}
if global_value is not None:
values["ingress"]["enabled"] = global_value
if web_value is not None:
values["ingress"]["web"] = {"enabled": web_value}
if values["ingress"] == {}:
del values["ingress"]
docs = render_chart(values=values, show_only=["templates/webserver/webserver-ingress.yaml"])
assert expected == (1 == len(docs))
| {
"content_hash": "31d04cc5b5b5d46b146522da26de8122",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 107,
"avg_line_length": 40.78832116788321,
"alnum_prop": 0.4808518253400143,
"repo_name": "danielvdende/incubator-airflow",
"id": "93b51e44dc8f19e7db93f68dbd881d874a924830",
"size": "6374",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/charts/test_ingress_web.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
from .models import Post
class PostSerializers(serializers.HyperlinkedModelSerializer):
class Meta:
model = Post
fields = ('url', 'title', 'content', 'timestamp', 'updated', 'media_url', 'user')
| {
"content_hash": "da0c0bc3e2d61ef1973bc39df141a8a3",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 89,
"avg_line_length": 36.42857142857143,
"alnum_prop": 0.6980392156862745,
"repo_name": "divyamodi128/django_comments",
"id": "2322d04b7d9cd5b7a22c2c5733274bc784a0ffd6",
"size": "255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "posts/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2408"
},
{
"name": "HTML",
"bytes": "10186"
},
{
"name": "JavaScript",
"bytes": "707"
},
{
"name": "Python",
"bytes": "24645"
}
],
"symlink_target": ""
} |
"""This is the Volume Slicer Actor, to visualize a 3D volumetric image
of the head as slices.
Copyright (c) 2012-2014, Emanuele Olivetti and Eleftherios Garyfallidis
Distributed under the BSD 3-clause license. See COPYING.txt.
"""
import numpy as np
from fos import Window, Scene
from fos.actor.slicer import Slicer
from pyglet.gl import *
from fos.coords import rotation_matrix, from_matvec
from fos import Init, Run
from PySide.QtCore import Qt
import copy
class Guillotine(Slicer):
""" Volume Slicer Actor
Notes
------
Coordinate Systems
http://www.grahamwideman.com/gw/brain/orientation/orientterms.htm
http://www.slicer.org/slicerWiki/index.php/Coordinate_systems
http://eeg.sourceforge.net/mri_orientation_notes.html
"""
def __init__(self, name, data, affine,
convention='RAS', look='anteriorz+'):
""" Volume Slicer that supports medical conventions
Parameters
----------
name : str
data : array, shape (X, Y, Z) or (X, Y, Z, 3) or (X, Y, Z, 4)
affine : array, shape (4, 4)
convention : str,
'RAS' for neurological,
'LAS' for radiological (default)
look : str,
'anteriorz+' look in the subject from the front
"""
data[np.isnan(data)] = 0
data = np.interp(data, [data.min(), data.max()], [0, 255])
data = data.astype(np.ubyte)
"""
if convention == 'RAS' and look == 'anteriorz+':
axis = np.array([1, 0, 0.])
theta = -90.
post_mat = from_matvec(rotation_matrix(axis, theta))
axis = np.array([0, 0, 1.])
theta = -90.
post_mat = np.dot(
from_matvec(rotation_matrix(axis, theta)),
post_mat)
"""
post_mat = np.eye(4)
super(Guillotine, self).__init__(name, data, affine, convention, post_mat)
def right2left(self, step):
if self.i + step < self.I:
self.slice_i(self.i + step)
else:
self.slice_i(self.I - 1)
def left2right(self, step):
if self.i - step >= 0:
self.slice_i(self.i - step)
else:
self.slice_i(0)
def inferior2superior(self, step):
if self.k + step < self.K:
self.slice_k(self.k + step)
else:
self.slice_k(self.K - 1)
def superior2inferior(self, step):
if self.k - step >= 0:
self.slice_k(self.k - step)
else:
self.slice_k(0)
def anterior2posterior(self, step):
if self.j + step < self.J:
self.slice_j(self.j + step)
else:
self.slice_j(self.J - 1)
def posterior2anterior(self, step):
if self.j - step >= 0:
self.slice_j(self.j - step)
else:
self.slice_j(0)
def reset_slices(self):
self.slice_i(self.I / 2)
self.slice_j(self.J / 2)
self.slice_k(self.K / 2)
def slices_ijk(self, i, j, k):
self.slice_i(i)
self.slice_j(j)
self.slice_k(k)
def show_coronal(self, bool=True):
self.show_k = bool
def show_axial(self, bool=True):
self.show_i = bool
def show_saggital(self, bool=True):
self.show_j = bool
def show_all(self, bool=True):
self.show_i = bool
self.show_j = bool
self.show_k = bool
def process_messages(self, messages):
msg = messages['key_pressed']
#print 'Processing messages in actor', self.name,
#' key_press message ', msg
if msg is not None:
self.process_keys(msg, None)
def process_keys(self, symbol, modifiers):
"""Bind actions to key press.
"""
if symbol == Qt.Key_Left:
print 'Left'
if self.i < self.data.shape[0]:
self.slice_i(self.i+1)
else:
self.slice_i(0)
if symbol == Qt.Key_Right:
print 'Right'
if self.i >=0:
self.slice_i(self.i-1)
else:
self.slice_i(self.data.shape[0]-1)
if symbol == Qt.Key_Up:
print 'Superior'
if self.k < self.data.shape[2]:
self.slice_k(self.k+1)
else:
self.slice_k(0)
if symbol == Qt.Key_Down:
print 'Inferior'
if self.k >= 0:
self.slice_k(self.k-1)
else:
self.slice_k(self.data.shape[2]-1)
if symbol == Qt.Key_PageUp:
print 'Anterior'
if self.j < self.data.shape[1]:
self.slice_j(self.j+1)
else:
self.slice_j(0)
if symbol == Qt.Key_PageDown:
print 'Posterior'
if self.j >= 0:
self.slice_j(self.j-1)
else:
self.slice_j(self.data.shape[1]-1)
if symbol == Qt.Key_0:
self.show_i = not self.show_i
self.show_j = not self.show_j
self.show_k = not self.show_k
if symbol == Qt.Key_1:
self.show_i = not self.show_i
if symbol == Qt.Key_2:
self.show_j = not self.show_j
if symbol == Qt.Key_3:
self.show_k = not self.show_k
# if symbol == Qt.Key_R:
# self.slice_i(self.I / 2)
# self.slice_j(self.J / 2)
# self.slice_k(self.K / 2)
def anteriorzplus(xyz):
axis = np.array([1, 0, 0.])
theta = -90.
post_mat = from_matvec(rotation_matrix(axis, theta))
axis = np.array([0, 1, 0])
theta = 180.
post_mat = np.dot(
from_matvec(rotation_matrix(axis, theta)),
post_mat)
return np.dot(post_mat[:3, :3], xyz.T).T
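# Minimal usage sketch (illustrative only -- the file name and the nibabel
# calls below are assumptions, not part of this module):
#   import nibabel as nib
#   img = nib.load('T1.nii.gz')
#   guil = Guillotine('volume', img.get_data(), img.get_affine())
#   guil.reset_slices()   # centre the three slicing planes
#   guil.show_all(True)   # display the i, j and k planes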
| {
"content_hash": "3b4b2988e3bc5e3245d19a580e76bb71",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 82,
"avg_line_length": 28.50467289719626,
"alnum_prop": 0.5042622950819672,
"repo_name": "Paolopost/tractome",
"id": "01a7d7abbde5fd3e60d6c1d871d980f09ba787ff",
"size": "6125",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "guillotine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "161366"
}
],
"symlink_target": ""
} |
from robofab.world import CurrentFont,AllFonts,CurrentGlyph
fonts=AllFonts()
forigen=CurrentFont()
sel=forigen.selection
def copiaWidth(myWidth,gtarget):
anchoFinal=gtarget.width
anchoActual=myWidth
anchoDif=anchoActual-anchoFinal
anchoSide=anchoDif/2
gtarget.leftMargin=gtarget.leftMargin+anchoSide
gtarget.rightMargin=gtarget.rightMargin+anchoSide
gtarget.width=myWidth
print str(myWidth) + " > " + str(gtarget.width)
def inconsistent(values):
result = False
for i in range(len(values)):
if i > 0:
if values[i-1] < values[i]:
result = True
return result
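# Illustrative note (not part of the original script): a sidebearing list is
# flagged as inconsistent whenever a value increases from one master to the
# next (in AllFonts() order), e.g.
#   inconsistent([60, 55, 50])  ->  False   (monotonically non-increasing)
#   inconsistent([60, 55, 58])  ->  True    (58 > 55)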
incList=[]
for gname in sel:
lsb = []
rsb = []
width = []
for f in fonts:
if gname in f:
destino=f[gname]
lsb.append(destino.leftMargin)
rsb.append(destino.rightMargin)
width.append(destino.width)
#destino.mark=origen.mark
#copiaWidth(origen.width,destino)
print gname
if inconsistent(lsb) or inconsistent(rsb):
print 'INCONSISTENT'
incList.append(gname)
print "LSB"
print lsb
print "RSB"
print rsb
print "WIDTH"
print width
print ""
#f.update()
output=''
print 'Weight Inconsistencies: '
for item in incList:
output += '/'+item+' '
print output
print 'Done.'
| {
"content_hash": "637d58c02da64e975bc1f11f1d13f769",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 59,
"avg_line_length": 20.155172413793103,
"alnum_prop": 0.7279726261762189,
"repo_name": "huertatipografica/huertatipografica-fl-scripts",
"id": "18340268ff0e36b14ffbae9fa7f6f11d4205d3b2",
"size": "1241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AT_Information/AT_SelectedSBValuesFonts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "97792"
}
],
"symlink_target": ""
} |
import unittest
import json
from data_migrator.emitters.base import BaseEmitter
from data_migrator.emitters import CSVEmitter, JSONEmitter
from data_migrator.emitters import MySQLEmitter, UpdateEmitter
from data_migrator.models import Model, StringField
from data_migrator.exceptions import DefinitionException
class EmitterModel(Model):
a = StringField(pos=0, key=True)
b = StringField(pos=1)
o1 = EmitterModel(a="hello", b="world").save()
o2 = EmitterModel(a="goodbye", b="cruel world").save()
class EmitterHeaderModel(Model):
b = StringField(pos=1)
a = StringField(pos=0)
class Meta:
prefix = ["hello", "world"]
table_name = 'test'
class TestEmitterBase(unittest.TestCase):
def test_base_default(self):
'''Base Emitter defaults'''
b = BaseEmitter(manager=EmitterModel.objects)
self.assertEqual(b.filename(), "emittermodel.txt")
def test_base_fileext(self):
'''Base Emitter with an explicit file extension'''
b = BaseEmitter(manager=EmitterModel.objects, extension='.sql')
self.assertEqual(b.filename(), "emittermodel.sql")
def test_base_fileext_dot(self):
'''Base Emitter with an extension given without the leading dot'''
b = BaseEmitter(manager=EmitterModel.objects, extension='sql')
self.assertEqual(b.filename(), "emittermodel.sql")
class MySQLEmitterBase(unittest.TestCase):
def test_start(self):
e = MySQLEmitter(manager=EmitterModel.objects)
self.assertEqual(len(EmitterModel.objects), 2)
self.assertGreater(len(e.preamble(headers=['hello'])), 0)
def test_header(self):
e = MySQLEmitter(manager=EmitterHeaderModel.objects)
h = e.preamble(["first line"])
self.assertIn("hello", h)
self.assertIn("world", h)
def test_emit(self):
e = MySQLEmitter(manager=EmitterHeaderModel.objects)
o = EmitterModel.objects.all()
self.assertEqual(e.emit(o[0]), ['INSERT INTO `test` (`b`, `a`) VALUES ("world", "hello");'])
self.assertEqual(e.emit(o[1]), ['INSERT INTO `test` (`b`, `a`) VALUES ("cruel world", "goodbye");'])
class CSVEmitterBase(unittest.TestCase):
def test_header(self):
e = CSVEmitter(manager=EmitterHeaderModel.objects)
h = e.preamble()
self.assertIn('b, a', h)
def test_emit(self):
e = CSVEmitter(manager=EmitterHeaderModel.objects)
o = EmitterModel.objects.all()
self.assertEqual(e.emit(o[0]), ['"world", "hello"'])
self.assertEqual(e.emit(o[1]), ['"cruel world", "goodbye"'])
class JSONEmitterBase(unittest.TestCase):
def test_emit(self):
e = JSONEmitter(manager=EmitterHeaderModel.objects)
o = EmitterModel.objects.all()
self.assertDictEqual(json.loads(e.emit(o[0])[0]), {"a": "hello", "b": "world"})
self.assertDictEqual(json.loads(e.emit(o[1])[0]), {"a": "goodbye", "b": "cruel world"})
class UpdateEmitterBase(unittest.TestCase):
def test_emit(self):
e = UpdateEmitter(manager=EmitterModel.objects)
o = EmitterModel.objects.all()
self.assertEqual(e.emit(o[0]), ['UPDATE `emittermodel` SET `b` = "world" WHERE `a` = "hello";'])
self.assertEqual(e.emit(o[1]), ['UPDATE `emittermodel` SET `b` = "cruel world" WHERE `a` = "goodbye";'])
def test_fail(self):
self.assertRaises(DefinitionException, UpdateEmitter, manager=EmitterHeaderModel.objects)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "55154db68417adbb746e7f0b37dd7048",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 112,
"avg_line_length": 37.31521739130435,
"alnum_prop": 0.6562773084765511,
"repo_name": "schubergphilis/data-migrator",
"id": "c9dd65b07a8325c6c0d3c087b33a238b06162ffa",
"size": "3480",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_emitter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1780"
},
{
"name": "Python",
"bytes": "107709"
}
],
"symlink_target": ""
} |
import hmac
import os
import re
import jinja2
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
autoescape=True)
secret = "OhCanada"
def jinja_render_str(template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def make_secure_val(val):
return '%s|%s' % (val, hmac.new(secret, val).hexdigest())
def check_secure_val(secure_val):
val = secure_val.split('|')[0]
if secure_val == make_secure_val(val):
return val
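# Illustrative round trip (assumption, not in the original module):
#   cookie_val = make_secure_val('alice')    # -> 'alice|<hmac-md5 hexdigest>'
#   check_secure_val(cookie_val)             # -> 'alice'
#   check_secure_val('alice|forged')         # -> None (signature mismatch)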
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
def valid_username(username):
return username and USER_RE.match(username)
PWD_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
return password and PWD_RE.match(password)
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
return not email or EMAIL_RE.match(email)
| {
"content_hash": "dbf4f575e4822f6f844fefe5035547ec",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 76,
"avg_line_length": 20.48936170212766,
"alnum_prop": 0.6469366562824507,
"repo_name": "vincentwang901130/Multi-User-Blog",
"id": "4ffca03f136961d2f8a4a02ff3054e1e96a739e7",
"size": "963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "623"
},
{
"name": "HTML",
"bytes": "10198"
},
{
"name": "Python",
"bytes": "20438"
}
],
"symlink_target": ""
} |
import pytest
import time
import os
from contextlib import contextmanager
from .test_base_class import TestBaseClass
test_memleak = int(os.environ.get("TEST_MEMLEAK", 0))
if test_memleak != 1:
if pytest.__version__ < "3.0.0":
pytest.skip("Skip memleak tests")
else:
pytestmark = pytest.mark.skip
test_memleak_loop = int(os.environ.get("TEST_MEMLEAK_LOOP", 1))
@contextmanager
def open_as_connection(config):
"""
Context manager to let us open aerospike connections with
specified config
"""
as_connection = TestBaseClass.get_new_connection(config)
# Connection is setup, so yield it
yield as_connection
# close the connection
as_connection.close()
# adds cls.connection_config to this class
@pytest.mark.usefixtures("connection_config")
class TestConnectLeak(object):
def test_connect_leak(self):
"""
Invoke connect() with positive parameters.
"""
# first_ref_count = sys.gettotalrefcount()
# last_ref_count = first_ref_count
# print("-----start gettotalrefcount: " + str(first_ref_count))
config = self.connection_config.copy()
i = 0
while i < test_memleak_loop:
with open_as_connection(config) as client:
assert client is not None
assert client.is_connected()
time.sleep(0.1)
i = i + 1
# last_ref_count = sys.gettotalrefcount()
# print("-----outstanding gettotalrefcount: " + str(last_ref_count-first_ref_count))
| {
"content_hash": "1ba8183ec0aec04ef5889f8aaa93e769",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 92,
"avg_line_length": 29.22641509433962,
"alnum_prop": 0.6371852808263396,
"repo_name": "aerospike/aerospike-client-python",
"id": "59d6781618fbfd73d9dc6705fd9cd6e29ce7cc2c",
"size": "1574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/new_tests/test_connect_memleak.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1347774"
},
{
"name": "Lua",
"bytes": "6526"
},
{
"name": "Python",
"bytes": "2103805"
},
{
"name": "Shell",
"bytes": "6716"
}
],
"symlink_target": ""
} |
"""
This is the main ``urlconf`` for Mezzanine - it sets up patterns for
all the various Mezzanine apps, third-party apps like Grappelli and
filebrowser.
"""
from __future__ import unicode_literals
from future.builtins import str
from django.conf.urls import include, url
from django.contrib.sitemaps.views import sitemap
from django.views.i18n import javascript_catalog
from django.http import HttpResponse
from mezzanine.conf import settings
from mezzanine.core.sitemaps import DisplayableSitemap
urlpatterns = []
# JavaScript localization feature
js_info_dict = {'domain': 'django'}
urlpatterns += [
url(r'^jsi18n/(?P<packages>\S+?)/$', javascript_catalog, js_info_dict),
]
if settings.DEBUG and "debug_toolbar" in settings.INSTALLED_APPS:
try:
import debug_toolbar
except ImportError:
pass
else:
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
# Django's sitemap app.
if "django.contrib.sitemaps" in settings.INSTALLED_APPS:
sitemaps = {"sitemaps": {"all": DisplayableSitemap}}
urlpatterns += [
url("^sitemap\.xml$", sitemap, sitemaps),
]
# Return a robots.txt that disallows all spiders when DEBUG is True.
if getattr(settings, "DEBUG", False):
urlpatterns += [
url("^robots.txt$", lambda r: HttpResponse("User-agent: *\nDisallow: /",
content_type="text/plain")),
]
# Miscellanous Mezzanine patterns.
urlpatterns += [
url("^", include("mezzanine.core.urls")),
url("^", include("mezzanine.generic.urls")),
]
# Mezzanine's Accounts app
if "mezzanine.accounts" in settings.INSTALLED_APPS:
# We don't define a URL prefix here such as /account/ since we want
# to honour the LOGIN_* settings, which Django has prefixed with
# /account/ by default. So those settings are used in accounts.urls
urlpatterns += [
url("^", include("mezzanine.accounts.urls")),
]
# Mezzanine's Blog app.
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
BLOG_SLUG = settings.BLOG_SLUG.rstrip("/") + "/"
blog_patterns = [
url("^%s" % BLOG_SLUG, include("mezzanine.blog.urls")),
]
urlpatterns += blog_patterns
# Mezzanine's Pages app.
PAGES_SLUG = ""
if "mezzanine.pages" in settings.INSTALLED_APPS:
# No BLOG_SLUG means catch-all patterns belong to the blog,
# so give pages their own prefix and inject them before the
# blog urlpatterns.
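# For example (illustrative): with BLOG_SLUG = "" the blog owns the "^"
# catch-all, so pages end up served under "/pages/<slug>/" (or whatever
# PAGES_SLUG resolves to) instead of at the site root.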
if blog_installed and not BLOG_SLUG.rstrip("/"):
PAGES_SLUG = getattr(settings, "PAGES_SLUG", "pages").strip("/") + "/"
blog_patterns_start = urlpatterns.index(blog_patterns[0])
urlpatterns[blog_patterns_start:len(blog_patterns)] = [
url("^%s" % str(PAGES_SLUG), include("mezzanine.pages.urls")),
]
else:
urlpatterns += [
url("^", include("mezzanine.pages.urls")),
]
| {
"content_hash": "37002b7fbf069d81c432833fb5a21306",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 80,
"avg_line_length": 32.72222222222222,
"alnum_prop": 0.6553480475382003,
"repo_name": "viaregio/mezzanine",
"id": "b6685160f6e7cd9e164d9e9b45bfbdad8e9d57be",
"size": "2945",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mezzanine/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "60127"
},
{
"name": "HTML",
"bytes": "89327"
},
{
"name": "JavaScript",
"bytes": "453729"
},
{
"name": "Python",
"bytes": "659594"
}
],
"symlink_target": ""
} |
import django
from django.core.urlresolvers import reverse
from django.forms import widgets
from django import http
from django.test.utils import override_settings
from mox3.mox import IsA # noqa
import six
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.project.volumes \
.volumes import tables
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
VOLUME_INDEX_URL = reverse('horizon:project:volumes:index')
VOLUME_VOLUMES_TAB_URL = reverse('horizon:project:volumes:volumes_tab')
SEARCH_OPTS = dict(status=api.cinder.VOLUME_STATE_AVAILABLE)
class VolumeViewTests(test.TestCase):
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume(self):
volume = self.cinder_volumes.first()
volume_type = self.volume_types.first()
az = self.cinder_availability_zones.first().zoneName
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'type': volume_type.name,
'size': 50,
'snapshot_source': '',
'availability_zone': az}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
formData['type'],
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=formData['availability_zone'],
source_volid=None)\
.AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_without_name(self):
volume = self.cinder_volumes.first()
volume_type = self.volume_types.first()
az = self.cinder_availability_zones.first().zoneName
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': '',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'type': volume_type.name,
'size': 50,
'snapshot_source': '',
'availability_zone': az}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
formData['type'],
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=formData['availability_zone'],
source_volid=None)\
.AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_dropdown(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'no_source_type',
'snapshot_source': self.cinder_volume_snapshots.first().id,
'image_source': self.images.first().id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_get',
'volume_get',
'volume_type_list'),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_snapshot(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'snapshot_source': snapshot.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
cinder.volume_get(IsA(http.HttpRequest), snapshot.volume_id).\
AndReturn(self.cinder_volumes.first())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=snapshot.id,
image_id=None,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get snapshot from url
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_get',
'volume_list',
'volume_type_list',
'availability_zone_list',
'volume_snapshot_get',
'volume_snapshot_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_volume(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A copy of a volume',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'volume_source',
'volume_source': volume.id}
cinder.volume_list(IsA(http.HttpRequest), search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volumes.list())
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_get(IsA(http.HttpRequest),
volume.id).AndReturn(self.cinder_volumes.first())
cinder.extension_supported(IsA(http.HttpRequest),
'AvailabilityZones').AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=None,
source_volid=volume.id).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
redirect_url = VOLUME_VOLUMES_TAB_URL
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(info=1)
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_snapshot_get',
'volume_get',
'volume_list',
'volume_type_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_snapshot_dropdown(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'snapshot_source',
'snapshot_source': snapshot.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=snapshot.id,
image_id=None,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get snapshot from dropdown list
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_snapshot_get',
'volume_type_list',
'volume_type_default',
'volume_get'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_snapshot_invalid_size(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 20, 'snapshot_source': snapshot.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_default(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
cinder.volume_get(IsA(http.HttpRequest), snapshot.volume_id).\
AndReturn(self.cinder_volumes.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
self.assertFormError(res, 'form', None,
"The volume size cannot be less than the "
"snapshot size (40GB)")
@test.create_stubs({cinder: ('volume_create',
'volume_type_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_image(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 200,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
image = self.images.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 40,
'type': '',
'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=image.id,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get image from url
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_type_list',
'volume_list',
'volume_snapshot_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',
'image_list_detailed'),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_image_dropdown(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 200,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
image = self.images.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 30,
'type': '',
'volume_source_type': 'image_source',
'snapshot_source': self.cinder_volume_snapshots.first().id,
'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)) \
.AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=image.id,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get image from dropdown list
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_type_list',
'volume_type_default',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',
'image_list_detailed'),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_image_under_image_size(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
image = self.images.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 1, 'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_default(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
# in django 1.6 filesizeformat replaces all spaces with
# non-breaking space characters
if django.VERSION >= (1, 6):
msg = (u"The volume size cannot be less than the "
u"image size (20.0\xa0GB)")
else:
msg = (u"The volume size cannot be less than the "
u"image size (20.0 GB)")
self.assertFormError(res, 'form', None, msg)
@test.create_stubs({cinder: ('volume_type_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',
'image_list_detailed'),
quotas: ('tenant_limit_usages',)})
def _test_create_volume_from_image_under_image_min_disk_size(self, image):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 5, 'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
self.assertFormError(res, 'form', None,
"The volume size cannot be less than the "
"image minimum disk size (30GB)")
def test_create_volume_from_image_under_image_min_disk_size(self):
image = self.images.get(name="protected_images")
image.min_disk = 30
self._test_create_volume_from_image_under_image_min_disk_size(image)
def test_create_volume_from_image_under_image_property_min_disk_size(self):
image = self.images.get(name="protected_images")
image.min_disk = 0
image.properties['min_disk'] = 30
self._test_create_volume_from_image_under_image_min_disk_size(image)
@test.create_stubs({cinder: ('volume_snapshot_list',
'volume_type_list',
'volume_type_default',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_gb_used_over_alloted_quota(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 80,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'This Volume Is Huge!',
'description': u'This is a volume that is just too big!',
'method': u'CreateForm',
'size': 5000}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_default(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
expected_error = [u'A volume of 5000GB cannot be created as you only'
' have 20GB of your quota available.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
@test.create_stubs({cinder: ('volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_number_over_alloted_quota(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': len(self.cinder_volumes.list())}
formData = {'name': u'Too Many...',
'description': u'We have no volumes left!',
'method': u'CreateForm',
'size': 10}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
expected_error = [u'You are already using all of your available'
' volumes.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_list',
'volume_snapshot_list',
'volume_backup_supported',
'volume_delete',),
api.nova: ('server_list',)})
def test_delete_volume(self):
volumes = self.cinder_volumes.list()
volume = self.cinder_volumes.first()
formData = {'action':
'volumes__delete__%s' % volume.id}
cinder.volume_backup_supported(IsA(http.HttpRequest)). \
MultipleTimes().AndReturn(True)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(volumes)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn([])
cinder.volume_delete(IsA(http.HttpRequest), volume.id)
api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn([self.servers.list(), False])
cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(volumes)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn([])
api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest)).MultipleTimes().\
AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
url = VOLUME_INDEX_URL
res = self.client.post(url, formData, follow=True)
self.assertIn("Scheduled deletion of Volume: Volume name",
[m.message for m in res.context['messages']])
@test.create_stubs({cinder: ('volume_get',
'tenant_absolute_limits')})
def test_delete_volume_with_snap_no_action_item(self):
volume = self.cinder_volumes.get(name='Volume name')
setattr(volume, 'has_snapshot', True)
limits = self.cinder_limits['absolute']
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.tenant_absolute_limits(IsA(http.HttpRequest)). \
MultipleTimes('limits').AndReturn(limits)
self.mox.ReplayAll()
url = (VOLUME_INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
self.assertNotContains(res, 'Delete Volume')
self.assertNotContains(res, 'delete')
@test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
@override_settings(OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point':
True})
def test_edit_attachments(self):
volume = self.cinder_volumes.first()
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
volume.attachments = [{'id': volume.id,
'volume_id': volume.id,
'volume_name': volume.name,
'instance': servers[0],
'device': '/dev/vdb',
'server_id': servers[0].id}]
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:attach',
args=[volume.id])
res = self.client.get(url)
msg = 'Volume %s on instance %s' % (volume.name, servers[0].name)
self.assertContains(res, msg)
# The instance the volume is already attached to is filtered out,
# so only the 'Choose Instance' placeholder choice should remain.
form = res.context['form']
self.assertEqual(len(form.fields['instance']._choices),
1)
self.assertEqual(res.status_code, 200)
self.assertTrue(isinstance(form.fields['device'].widget,
widgets.TextInput))
self.assertFalse(form.fields['device'].required)
@test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
@override_settings(OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point':
True})
def test_edit_attachments_auto_device_name(self):
volume = self.cinder_volumes.first()
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
volume.attachments = [{'id': volume.id,
'volume_id': volume.id,
'volume_name': volume.name,
'instance': servers[0],
'device': '',
'server_id': servers[0].id}]
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:attach',
args=[volume.id])
res = self.client.get(url)
form = res.context['form']
self.assertTrue(isinstance(form.fields['device'].widget,
widgets.TextInput))
self.assertFalse(form.fields['device'].required)
@test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
def test_edit_attachments_cannot_set_mount_point(self):
volume = self.cinder_volumes.first()
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:attach',
args=[volume.id])
res = self.client.get(url)
# Assert the device field is hidden.
form = res.context['form']
self.assertTrue(isinstance(form.fields['device'].widget,
widgets.HiddenInput))
@test.create_stubs({cinder: ('volume_get',),
api.nova: ('server_list',)})
def test_edit_attachments_attached_volume(self):
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
server = servers[0]
volume = self.cinder_volumes.list()[0]
cinder.volume_get(IsA(http.HttpRequest), volume.id) \
.AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([servers, False])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:attach',
args=[volume.id])
res = self.client.get(url)
self.assertEqual(res.context['form'].fields['instance']._choices[0][1],
"Select an instance")
self.assertEqual(len(res.context['form'].fields['instance'].choices),
2)
self.assertEqual(res.context['form'].fields['instance']._choices[1][0],
server.id)
self.assertEqual(res.status_code, 200)
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_get',)})
def test_create_snapshot_button_disabled_when_quota_exceeded(self):
limits = {'maxTotalSnapshots': 1}
limits['totalSnapshotsUsed'] = limits['maxTotalSnapshots']
volume = self.cinder_volumes.first()
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.tenant_absolute_limits(IsA(http.HttpRequest)).AndReturn(limits)
self.mox.ReplayAll()
create_link = tables.CreateSnapshot()
url = reverse(create_link.get_link_url(), args=[volume.id])
res_url = (VOLUME_INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(res_url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s (%s)" % (six.text_type(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' class=\"%s disabled\" "\
"id=\"volumes__row_%s__action_snapshots\">%s</a>" \
% (url, " ".join(classes), volume.id, link_name)
self.assertContains(
res, expected_string, html=True,
msg_prefix="The create snapshot button is not disabled")
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_list',
'volume_snapshot_list',
'volume_backup_supported',),
api.nova: ('server_list',)})
def test_create_button_disabled_when_quota_exceeded(self):
limits = self.cinder_limits['absolute']
limits['totalVolumesUsed'] = limits['maxTotalVolumes']
volumes = self.cinder_volumes.list()
api.cinder.volume_backup_supported(IsA(http.HttpRequest)). \
MultipleTimes().AndReturn(True)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn(volumes)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn([])
api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(limits)
self.mox.ReplayAll()
res = self.client.get(VOLUME_INDEX_URL)
self.assertTemplateUsed(res, 'project/volumes/index.html')
volumes = res.context['volumes_table'].data
self.assertItemsEqual(volumes, self.cinder_volumes.list())
create_link = tables.CreateVolume()
url = create_link.get_link_url()
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s (%s)" % (six.text_type(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' title='%s' class='%s disabled' "\
"id='volumes__action_create' data-update-url=" \
"'/project/volumes/?action=create&table=volumes'> "\
"<span class='fa fa-plus'></span>%s</a>" \
% (url, link_name, " ".join(classes), link_name)
self.assertContains(res, expected_string, html=True,
msg_prefix="The create button is not disabled")
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_get',),
api.nova: ('server_get',)})
def test_detail_view(self):
volume = self.cinder_volumes.first()
server = self.servers.first()
volume.attachments = [{"server_id": server.id}]
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res, "<h1>Volume Details: Volume name</h1>",
1, 200)
self.assertContains(res, "<dd>Volume name</dd>", 1, 200)
self.assertContains(res, "<dd>%s</dd>" % volume.id, 1, 200)
self.assertContains(res, "<dd>Available</dd>", 1, 200)
self.assertContains(res, "<dd>40 GB</dd>", 1, 200)
self.assertContains(res,
("<a href=\"/project/instances/1/\">%s</a>"
% server.name),
1,
200)
self.assertNoMessages()
@test.create_stubs({cinder: ('volume_get',
'volume_get_encryption_metadata'), })
def test_encryption_detail_view_encrypted(self):
enc_meta = self.cinder_volume_encryption.first()
volume = self.cinder_volumes.get(name='my_volume2')
cinder.volume_get_encryption_metadata(
IsA(http.HttpRequest), volume.id).AndReturn(enc_meta)
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:encryption_detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res,
"<h1>Volume Encryption Details: "
"%s</h1>" % volume.name,
1, 200)
self.assertContains(res, "<dd>%s</dd>" % volume.volume_type, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.provider, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.control_location, 1,
200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.cipher, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.key_size, 1, 200)
self.assertNoMessages()
@test.create_stubs({cinder: ('volume_get',
'volume_get_encryption_metadata'), })
def test_encryption_detail_view_unencrypted(self):
enc_meta = self.cinder_volume_encryption.list()[1]
volume = self.cinder_volumes.get(name='my_volume2')
cinder.volume_get_encryption_metadata(
IsA(http.HttpRequest), volume.id).AndReturn(enc_meta)
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:encryption_detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res,
"<h1>Volume Encryption Details: "
"%s</h1>" % volume.name,
1, 200)
self.assertContains(res, "<h3>Volume is Unencrypted</h3>", 1, 200)
self.assertNoMessages()
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_get',)})
def test_get_data(self):
volume = self.cinder_volumes.get(name='v2_volume')
volume._apiresource.name = ""
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
url = (VOLUME_INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
self.assertEqual(volume.name, volume.id)
@test.create_stubs({cinder: ('volume_get',)})
def test_detail_view_with_exception(self):
volume = self.cinder_volumes.first()
server = self.servers.first()
volume.attachments = [{"server_id": server.id}]
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndRaise(self.exceptions.cinder)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:detail',
args=[volume.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_update',
'volume_set_bootable',
'volume_get',)})
def test_update_volume(self):
volume = self.cinder_volumes.get(name="my_volume")
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_update(IsA(http.HttpRequest),
volume.id,
volume.name,
volume.description)
cinder.volume_set_bootable(IsA(http.HttpRequest),
volume.id,
False)
self.mox.ReplayAll()
formData = {'method': 'UpdateForm',
'name': volume.name,
'description': volume.description,
'bootable': False}
url = reverse('horizon:project:volumes:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_update',
'volume_set_bootable',
'volume_get',)})
def test_update_volume_without_name(self):
volume = self.cinder_volumes.get(name="my_volume")
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_update(IsA(http.HttpRequest),
volume.id,
'',
volume.description)
cinder.volume_set_bootable(IsA(http.HttpRequest),
volume.id,
False)
self.mox.ReplayAll()
formData = {'method': 'UpdateForm',
'name': '',
'description': volume.description,
'bootable': False}
url = reverse('horizon:project:volumes:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_update',
'volume_set_bootable',
'volume_get',)})
def test_update_volume_bootable_flag(self):
volume = self.cinder_bootable_volumes.get(name="my_volume")
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_update(IsA(http.HttpRequest),
volume.id,
volume.name,
'update bootable flag')
cinder.volume_set_bootable(IsA(http.HttpRequest),
volume.id,
True)
self.mox.ReplayAll()
formData = {'method': 'UpdateForm',
'name': volume.name,
'description': 'update bootable flag',
'bootable': True}
url = reverse('horizon:project:volumes:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_upload_to_image',
'volume_get')})
def test_upload_to_image(self):
volume = self.cinder_volumes.get(name='v2_volume')
loaded_resp = {'container_format': 'bare',
'disk_format': 'raw',
'id': '741fe2ac-aa2f-4cec-82a9-4994896b43fb',
'image_id': '2faa080b-dd56-4bf0-8f0a-0d4627d8f306',
'image_name': 'test',
'size': '2',
'status': 'uploading'}
form_data = {'id': volume.id,
'name': volume.name,
'image_name': 'testimage',
'force': True,
'container_format': 'bare',
'disk_format': 'raw'}
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_upload_to_image(
IsA(http.HttpRequest),
form_data['id'],
form_data['force'],
form_data['image_name'],
form_data['container_format'],
form_data['disk_format']).AndReturn(loaded_resp)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:upload_to_image',
args=[volume.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertMessageCount(info=1)
redirect_url = VOLUME_INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_get',
'volume_extend'),
quotas: ('tenant_limit_usages',)})
def test_extend_volume(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 120}
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndReturn(self.cinder_volumes.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_extend(IsA(http.HttpRequest),
volume.id,
formData['new_size']).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
redirect_url = VOLUME_INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_get',),
quotas: ('tenant_limit_usages',)})
def test_extend_volume_with_wrong_size(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 10}
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndReturn(self.cinder_volumes.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
self.assertFormErrors(res, 1,
"New size must be greater than "
"current size.")
@test.create_stubs({cinder: ('volume_get',
'tenant_absolute_limits')})
def test_retype_volume_supported_action_item(self):
volume = self.cinder_volumes.get(name='v2_volume')
limits = self.cinder_limits['absolute']
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes('limits').AndReturn(limits)
self.mox.ReplayAll()
url = (VOLUME_INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
self.assertContains(res, 'Change Volume Type')
self.assertContains(res, 'retype')
@test.create_stubs({cinder: ('volume_get',
'volume_retype',
'volume_type_list')})
def test_retype_volume(self):
volume = self.cinder_volumes.get(name='my_volume2')
volume_type = self.cinder_volume_types.get(name='vol_type_1')
form_data = {'id': volume.id,
'name': volume.name,
'volume_type': volume_type.name,
'migration_policy': 'on-demand'}
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_type_list(
IsA(http.HttpRequest)).AndReturn(self.cinder_volume_types.list())
cinder.volume_retype(
IsA(http.HttpRequest),
volume.id,
form_data['volume_type'],
form_data['migration_policy']).AndReturn(True)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:retype',
args=[volume.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redirect_url = VOLUME_INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
def test_encryption_false(self):
self._test_encryption(False)
def test_encryption_true(self):
self._test_encryption(True)
@test.create_stubs({cinder: ('volume_list',
'volume_snapshot_list',
'volume_backup_supported',
'tenant_absolute_limits'),
api.nova: ('server_list',)})
def _test_encryption(self, encryption):
volumes = self.volumes.list()
for volume in volumes:
volume.encrypted = encryption
limits = self.cinder_limits['absolute']
cinder.volume_backup_supported(IsA(http.HttpRequest))\
.MultipleTimes('backup_supported').AndReturn(False)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn(self.volumes.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn(self.cinder_volume_snapshots.list())
api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes('limits').AndReturn(limits)
self.mox.ReplayAll()
res = self.client.get(VOLUME_INDEX_URL)
rows = res.context['volumes_table'].get_rows()
if encryption:
column_value = 'Yes'
else:
column_value = 'No'
for row in rows:
self.assertEqual(row.cells['encryption'].data, column_value)
@test.create_stubs({cinder: ('volume_get',),
quotas: ('tenant_limit_usages',)})
def test_extend_volume_with_size_out_of_quota(self):
volume = self.volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 1000}
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndReturn(self.volumes.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
self.assertFormError(res, "form", "new_size",
"Volume cannot be extended to 1000GB as you only "
"have 80GB of your quota available.")
@test.create_stubs({cinder: ('volume_backup_supported',
'volume_list',
'volume_snapshot_list',
'tenant_absolute_limits'),
api.nova: ('server_list',)})
def test_create_transfer_availability(self):
limits = self.cinder_limits['absolute']
cinder.volume_backup_supported(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(False)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn(self.volumes.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn([])
api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(limits)
self.mox.ReplayAll()
res = self.client.get(VOLUME_INDEX_URL)
table = res.context['volumes_table']
# Verify that the create transfer action is present if and only if
# the volume is available
for vol in table.data:
actions = [a.name for a in table.get_row_actions(vol)]
self.assertEqual('create_transfer' in actions,
vol.status == 'available')
@test.create_stubs({cinder: ('transfer_create',)})
def test_create_transfer(self):
volumes = self.volumes.list()
volToTransfer = [v for v in volumes if v.status == 'available'][0]
formData = {'volume_id': volToTransfer.id,
'name': u'any transfer name'}
cinder.transfer_create(IsA(http.HttpRequest),
formData['volume_id'],
formData['name']).AndReturn(
self.cinder_volume_transfers.first())
self.mox.ReplayAll()
# Create a transfer for the first available volume
url = reverse('horizon:project:volumes:volumes:create_transfer',
args=[volToTransfer.id])
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
@test.create_stubs({cinder: ('volume_backup_supported',
'volume_list',
'volume_snapshot_list',
'transfer_delete',
'tenant_absolute_limits'),
api.nova: ('server_list',)})
def test_delete_transfer(self):
transfer = self.cinder_volume_transfers.first()
volumes = []
# Attach the volume transfer to the relevant volume
for v in self.cinder_volumes.list():
if v.id == transfer.volume_id:
v.status = 'awaiting-transfer'
v.transfer = transfer
volumes.append(v)
formData = {'action':
'volumes__delete_transfer__%s' % transfer.volume_id}
cinder.volume_backup_supported(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(False)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn(volumes)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn([])
cinder.transfer_delete(IsA(http.HttpRequest), transfer.id)
api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest)).MultipleTimes().\
AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
url = VOLUME_INDEX_URL
res = self.client.post(url, formData, follow=True)
self.assertNoFormErrors(res)
self.assertIn('Successfully deleted volume transfer "test transfer"',
[m.message for m in res.context['messages']])
@test.create_stubs({cinder: ('transfer_accept',)})
def test_accept_transfer(self):
transfer = self.cinder_volume_transfers.first()
cinder.transfer_accept(IsA(http.HttpRequest), transfer.id,
transfer.auth_key)
self.mox.ReplayAll()
formData = {'transfer_id': transfer.id, 'auth_key': transfer.auth_key}
url = reverse('horizon:project:volumes:volumes:accept_transfer')
res = self.client.post(url, formData, follow=True)
self.assertNoFormErrors(res)
| {
"content_hash": "5f3526f9522873a0ad0abd9bebcdf225",
"timestamp": "",
"source": "github",
"line_count": 1585,
"max_line_length": 79,
"avg_line_length": 44.333753943217665,
"alnum_prop": 0.528355320269251,
"repo_name": "damien-dg/horizon",
"id": "6972ed4f41954489d2f1fee199a9720ceb6631ec",
"size": "71033",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/volumes/volumes/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "105413"
},
{
"name": "HTML",
"bytes": "513351"
},
{
"name": "JavaScript",
"bytes": "955324"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4813652"
},
{
"name": "Shell",
"bytes": "18658"
}
],
"symlink_target": ""
} |
from discord.ext import commands
import discord
from ext_module import ExtModule
from ext_module import PmForbidden
class AdminCog:
"""This cog contains commands that can be used by the bots admin(s).
It does not contain commands that ban or kick a user or let the bot act as a server admin.
"""
def __init__(self, bot: commands.Bot, log_channel_id: int=None, activity: str=''):
"""The constructor for the AdminCog class, it assigns the important variables used by the commands below
Args:
bot: The bot the commands will be added to (commands.Bot)
log_channel_id: The id of the log_channel (int)
"""
self.bot = bot
self.log_channel_id = log_channel_id
self.send_log = None # will be assigned in the on_ready event
self.activity = activity
async def on_resumed(self):
"""Is called when the bot made a successfull reconnect, after disconnecting
"""
await self.send_log("Restarted successfully")
game = discord.Game(name=self.activity)
await self.bot.change_presence(game=game)
async def on_ready(self):
"""Is called when the bot is completely started up. Calls in this function need variables only a started bot can give.
"""
self.send_log = ExtModule.get_send_log(self)
game = discord.Game(name=self.activity)
await self.bot.change_presence(game=game)
async def on_guild_join(self, guild: discord.Guild):
"""Is called when the bot joins a new guild. Sends an informative message to the log_channel
Args:
guild: The guild which the bot joined on (discord.Guild)
"""
await self.send_log('Joined guild: ' + guild.name + '(' + str(guild.id) + ')')
async def on_guild_remove(self, guild: discord.Guild):
"""Is called when the bot leaves a guild. Sends an informative message to the log_channel
Args:
guild: The guild which was left by the bot (discord.Guild)
"""
await self.send_log('Left guild: ' + guild.name + '(' + str(guild.id) + ')')
@commands.command(name='serverlist',
aliases=['list'],
description='Prints a list of all the servers'
' this bot is a member of to the admin log_channel')
@ExtModule.is_admin()
@ExtModule.reaction_respond
async def serverlist(self, ctx: commands.Context):
"""This function sends a list with all the servers this bot is a member of to the self.log_channel
Args:
ctx: The context of the command, which is mandatory in rewrite
"""
_guild_names = 'List of all guilds: '
for guild in self.bot.guilds:
if len(_guild_names) + len(guild.name) > 1800:  # not exact; the ~200-char headroom below Discord's 2000-char message limit absorbs the overshoot
await self.send_log(_guild_names[:-2])
_guild_names = guild.name + '(' + str(guild.id) + ')' + ', '
else:
_guild_names = _guild_names + guild.name + '(' + str(guild.id) + ')' + ', '
await self.send_log(_guild_names[:-2])
@commands.command(name='leave',
description='(ID) || The bot will attempt to leave the server with the given ID.')
@ExtModule.is_admin()
@ExtModule.reaction_respond
async def leave(self, ctx: commands.Context, guild_id: int=None):
"""This commands makes the bot leave the server with the given ID
Args:
ctx: The context of the command, which is mandatory in rewrite (commands.Context)
guild_id: The id of the server, which will be left (int)
"""
guild = self.bot.get_guild(int(guild_id))
try:
await guild.leave()
except discord.HTTPException:
await self.send_log('Could not leave guild ' + guild.name)
raise discord.DiscordException
except AttributeError:
await self.send_log('Guild not found: ' + str(guild_id))
raise discord.DiscordException
else:
await self.send_log('Left guild successfully ' + guild.name)
@commands.command(name='sendtoall',
aliases=['send_to_all', 'send-to-all', 'broadcast'],
description='(textblock) || The bot will attempt to send the textblock to every server'
' it is a member of. Do NOT use for spamming purposes.')
@ExtModule.is_admin()
@ExtModule.reaction_respond
async def sendtoall(self, ctx: commands.Context, *args):
"""This command tries to send a message to all guilds this bot is a member of.
Args:
ctx: The context of the command, which is mandatory in rewrite (commands.Context)
args: The words of the message to be sent
"""
message = ''
for word in args:
message = message + str(word) + ' '
message = message[:-1]
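# Pick a destination channel per guild: default to the first text channel,
# then prefer the topmost channel with the largest number of members.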
for guild in self.bot.guilds:
_channel = guild.text_channels[0]
_maximum = max([len(channel.members) for channel in guild.text_channels])
for channel in guild.text_channels:
if len(channel.members) == _maximum:
_channel = channel
break # take the topmost channel with most members reading it
try:
await _channel.send(message)
except discord.Forbidden:
await self.send_log('Missing permissions for guild ' + guild.name)
except discord.HTTPException:
await self.send_log('Failed to send message to ' + guild.name + ' with a connection error')
else:
await self.send_log('Successfully sent the message to guild ' + guild.name)
@commands.command(name='adminhelp',
aliases=['admin-help', 'admin_help', 'helpadmin'],
description='Sends you the names, aliases and description of all commands per PM!')
@ExtModule.is_admin()
@ExtModule.reaction_respond
async def adminhelp(self, ctx: commands.Context):
"""This function sends a list of all the admin commands + aliases + description to the requester
Args:
ctx: The context of the command, which is mandatory in rewrite (commands.Context)
"""
_help_string = 'command name || (aliases): || arguments || help description\n'
for command in self.bot.commands:
if ExtModule.is_admin_predicate not in command.checks:
continue
_command_help = ExtModule._help(command)
if len(_help_string) + len(_command_help) > 1800:
try:
await ctx.message.author.send('```\n' + _help_string + '\n```')
except discord.DiscordException:
raise PmForbidden
_help_string = 'command name || (aliases): || help description\n\n' + _command_help
else:
_help_string = _help_string + '\n\n' + _command_help
try:
await ctx.author.send('```\n' + _help_string + '\n```')
except discord.DiscordException:
raise PmForbidden
@commands.command(name='change_activity',
aliases=['change_game'],
description='Changes the activity in the activity feed of the bot')
@ExtModule.is_admin()
@ExtModule.reaction_respond
async def change_activity(self, ctx: commands.Context, *args):
"""This function changes sets the activity in the activity feed of the bot to the words delivered in args
Args:
*args: The words of the activity
ctx: The context of the command, which is mandatory in rewrite
"""
activity = ''
for word in args:
activity = activity + ' ' + str(word)
game = discord.Game(name=activity)
await self.bot.change_presence(game=game)
self.activity = activity
await self.send_log('Changed activity to: ' + activity)
| {
"content_hash": "4522c5bf921f12a895c42aea583b88f6",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 126,
"avg_line_length": 48.1301775147929,
"alnum_prop": 0.5906073272682567,
"repo_name": "JonSnowWhite/discord-soundboard-bot",
"id": "c9f4d06b0cdb0ac4b5eb620146fe1e91f7275163",
"size": "8134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cogs/admin_cog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27153"
}
],
"symlink_target": ""
} |
from enum import Enum
class TierType(Enum):
consumption = "Consumption"
commitment_100_au_hours = "Commitment_100AUHours"
commitment_500_au_hours = "Commitment_500AUHours"
commitment_1000_au_hours = "Commitment_1000AUHours"
commitment_5000_au_hours = "Commitment_5000AUHours"
commitment_10000_au_hours = "Commitment_10000AUHours"
commitment_50000_au_hours = "Commitment_50000AUHours"
commitment_100000_au_hours = "Commitment_100000AUHours"
commitment_500000_au_hours = "Commitment_500000AUHours"
class FirewallState(Enum):
enabled = "Enabled"
disabled = "Disabled"
class FirewallAllowAzureIpsState(Enum):
enabled = "Enabled"
disabled = "Disabled"
class AADObjectType(Enum):
user = "User"
group = "Group"
service_principal = "ServicePrincipal"
class DataLakeAnalyticsAccountStatus(Enum):
failed = "Failed"
creating = "Creating"
running = "Running"
succeeded = "Succeeded"
patching = "Patching"
suspending = "Suspending"
resuming = "Resuming"
deleting = "Deleting"
deleted = "Deleted"
undeleting = "Undeleting"
canceled = "Canceled"
class DataLakeAnalyticsAccountState(Enum):
active = "Active"
suspended = "Suspended"
class SubscriptionState(Enum):
registered = "Registered"
suspended = "Suspended"
deleted = "Deleted"
unregistered = "Unregistered"
warned = "Warned"
class OperationOrigin(Enum):
user = "user"
system = "system"
usersystem = "user,system"
| {
"content_hash": "e4dc6ba1f0fc365ba1cf150cdb738cd0",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 59,
"avg_line_length": 21.785714285714285,
"alnum_prop": 0.6904918032786885,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "a69cfaa147fc05df7a3f1f59be95dbab7cd3b583",
"size": "1999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/account/models/data_lake_analytics_account_management_client_enums.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.utils import timezone
TIME_FORMAT_STRING = '%Y-%m%d-%H%M'
def get_datetime_string_for_file():
"""Get datetime string for file naming"""
return timezone.now().strftime(TIME_FORMAT_STRING)
def get_last_microsecond():
"""Used as a url param to clear any caching"""
return datetime.now().microsecond
def get_last_microsecond_url_param():
"""Used as a url param to clear any caching"""
return 'ts=%d' % datetime.now().microsecond
| {
"content_hash": "9cad593982ecbf6cb889743a313e2eb0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 54,
"avg_line_length": 30.9375,
"alnum_prop": 0.701010101010101,
"repo_name": "IQSS/geoconnect",
"id": "414ebae8bf4c395dfebec480ecda3fbfcdcb1302",
"size": "495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gc_apps/geo_utils/time_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "35936"
},
{
"name": "HTML",
"bytes": "83429"
},
{
"name": "JavaScript",
"bytes": "17942"
},
{
"name": "Python",
"bytes": "539011"
},
{
"name": "Shell",
"bytes": "1224"
}
],
"symlink_target": ""
} |
ModuleName = "fibaro_motion_sensor"
BATTERY_CHECK_INTERVAL = 10800 # How often to check battery (secs) = 3 hours
SENSOR_POLL_INTERVAL = 600 # How often to request sensor values = 10 mins
TIME_CUTOFF = 1800 # Data older than this is considered "stale"
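# Z-Wave command classes referenced below (decimal values, standard Z-Wave
# assignments): 48 = Sensor Binary, 49 = Sensor Multilevel,
# 112 = Configuration, 128 = Battery, 132 = Wakeup, 133 = Association.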
import sys
import time
import json
import os
from pprint import pprint
from cbcommslib import CbAdaptor
from cbconfig import *
from twisted.internet import threads
from twisted.internet import reactor
class Adaptor(CbAdaptor):
def __init__(self, argv):
self.status = "ok"
self.state = "stopped"
self.apps = {"binary_sensor": [],
"temperature": [],
"luminance": [],
"battery": [],
"connected": []}
self.lastTemperatureTime = 0
self.lastHumidityTime = 0
self.lastLuminanceTime = 0
self.lastBinaryTime = 0
self.lastBatteryTime = 0
# super's __init__ must be called:
#super(Adaptor, self).__init__(argv)
CbAdaptor.__init__(self, argv)
def setState(self, action):
#self.cbLog("debug", "setting state to: " + action)
# error is only ever set from the running state, so set back to running if error is cleared
if action == "error":
self.state == "error"
elif action == "clear_error":
self.state = "running"
else:
self.state = action
msg = {"id": self.id,
"status": "state",
"state": self.state}
self.sendManagerMessage(msg)
def sendCharacteristic(self, characteristic, data, timeStamp):
msg = {"id": self.id,
"content": "characteristic",
"characteristic": characteristic,
"data": data,
"timeStamp": timeStamp}
for a in self.apps[characteristic]:
self.sendMessage(msg, a)
def checkBattery(self):
self.cbLog("debug", "checkBattery")
cmd = {"id": self.id,
"request": "post",
"address": self.addr,
"instance": "0",
"commandClass": "128",
"action": "Get",
"value": ""
}
self.sendZwaveMessage(cmd)
reactor.callLater(BATTERY_CHECK_INTERVAL, self.checkBattery)
def pollSensors(self):
cmd = {"id": self.id,
"request": "post",
"address": self.addr,
"instance": "0",
"commandClass": "49",
"action": "Get",
"value": ""
}
self.sendZwaveMessage(cmd)
reactor.callLater(SENSOR_POLL_INTERVAL, self.pollSensors)
def forceInterview(self):
self.cbLog("debug", "forceInterview")
cmd = {"id": self.id,
"request": "force_interview",
"address": self.addr
}
self.sendZwaveMessage(cmd)
def checkConnected(self):
self.cbLog("debug", "checkConnected, updateTime: " + str(self.updateTime) + ", lastUpdateTime: " + str(self.lastUpdateTime))
if self.updateTime == self.lastUpdateTime:
self.connected = False
else:
self.connected = True
self.sendCharacteristic("connected", self.connected, time.time())
self.lastUpdateTime = self.updateTime
reactor.callLater(SENSOR_POLL_INTERVAL * 2, self.checkConnected)
def onZwaveMessage(self, message):
self.cbLog("debug", "onZwaveMessage, message: " + str(json.dumps(message, indent=4)))
if message["content"] == "init":
self.updateTime = 0
self.lastUpdateTime = time.time()
# Alarm command class
cmd = {"id": self.id,
"request": "get",
"address": self.addr,
"instance": "0",
"commandClass": "48",
"value": "1"
}
self.sendZwaveMessage(cmd)
# Temperature
cmd = {"id": self.id,
"request": "get",
"address": self.addr,
"instance": "0",
"commandClass": "49",
"value": "1"
}
self.sendZwaveMessage(cmd)
# luminance
cmd = {"id": self.id,
"request": "get",
"address": self.addr,
"instance": "0",
"commandClass": "49",
"value": "3"
}
self.sendZwaveMessage(cmd)
# Battery
cmd = {"id": self.id,
"request": "get",
"address": self.addr,
"instance": "0",
"commandClass": "128"
}
self.sendZwaveMessage(cmd)
# Associate PIR alarm with this controller
cmd = {"id": self.id,
"request": "post",
"address": self.addr,
"instance": "0",
"commandClass": "133",
"action": "Set",
"value": "1,1"
}
self.sendZwaveMessage(cmd)
# Associate temperature/luminance with this controller
cmd = {"id": self.id,
"request": "post",
"address": self.addr,
"instance": "0",
"commandClass": "133",
"action": "Set",
"value": "2,1"
}
self.sendZwaveMessage(cmd)
# Associate temperature/luminance with this controller
cmd = {"id": self.id,
"request": "post",
"address": self.addr,
"instance": "0",
"commandClass": "133",
"action": "Set",
"value": "3,1"
}
self.sendZwaveMessage(cmd)
# Turn off LED for motion
cmd = {"id": self.id,
"request": "post",
"address": self.addr,
"instance": "0",
"commandClass": "112",
"action": "Set",
"value": "80,0,1"
}
self.sendZwaveMessage(cmd)
# Turn off LED for tamper
cmd = {"id": self.id,
"request": "post",
"address": self.addr,
"instance": "0",
"commandClass": "112",
"action": "Set",
"value": "89,0,1"
}
self.sendZwaveMessage(cmd)
# Change motion cancellation delay from 30s to 60s
cmd = {"id": self.id,
"request": "post",
"address": self.addr,
"instance": "0",
"commandClass": "112",
"action": "Set",
"value": "6,60,2"
}
self.sendZwaveMessage(cmd)
# Wakeup every 5 minutes
cmd = {"id": self.id,
"request": "post",
"address": self.addr,
"instance": "0",
"commandClass": "132",
"action": "Set",
"value": "300,1"
}
self.sendZwaveMessage(cmd)
reactor.callLater(300, self.checkBattery)
reactor.callLater(30, self.pollSensors)
reactor.callLater(300, self.checkConnected)
elif message["content"] == "data":
try:
if message["commandClass"] == "49":
if message["value"] == "1":
temperature = message["data"]["val"]["value"]
updateTime = message["data"]["val"]["updateTime"]
# Only send if we don't already have an update from this time and the update is recent (not stale after restart)
if updateTime != self.lastTemperatureTime and time.time() - updateTime < TIME_CUTOFF:
self.cbLog("debug", "onZwaveMessage, temperature: " + str(temperature))
self.sendCharacteristic("temperature", temperature, updateTime)
self.lastTemperatureTime = updateTime
elif message["value"] == "3":
luminance = message["data"]["val"]["value"]
updateTime = message["data"]["val"]["updateTime"]
if updateTime != self.lastLuminanceTime and time.time() - updateTime < TIME_CUTOFF:
self.cbLog("debug", "onZwaveMessage, luminance: " + str(luminance))
self.sendCharacteristic("luminance", luminance, time.time())
self.lastLuminanceTime = updateTime
elif message["value"] == "5":
humidity = message["data"]["val"]["value"]
updateTime = message["data"]["val"]["updateTime"]
if updateTime != self.lastHumidityTime and time.time() - updateTime < TIME_CUTOFF:
self.cbLog("debug", "onZwaveMessage, humidity: " + str(humidity))
self.sendCharacteristic("humidity", humidity, time.time())
self.lastHumidityTime = updateTime
elif message["commandClass"] == "48":
if message["value"] == "1":
updateTime = message["data"]["level"]["updateTime"]
if updateTime != self.lastBinaryTime and time.time() - updateTime < TIME_CUTOFF:
if message["data"]["level"]["value"]:
b = "on"
else:
b = "off"
self.cbLog("debug", "onZwaveMessage, alarm: " + b)
self.sendCharacteristic("binary_sensor", b, time.time())
self.lastBinaryTime = updateTime
elif message["commandClass"] == "128":
updateTime = message["data"]["last"]["updateTime"]
if (updateTime != self.lastBatteryTime) and (time.time() - updateTime < TIME_CUTOFF):
battery = message["data"]["last"]["value"]
self.cbLog("debug", "battery: " + str(battery))
msg = {"id": self.id,
"status": "battery_level",
"battery_level": battery}
self.sendManagerMessage(msg)
self.sendCharacteristic("battery", battery, time.time())
self.lastBatteryTime = updateTime
self.updateTime = message["data"]["updateTime"]
except Exception as ex:
self.cbLog("warning", "onZwaveMessage, unexpected message: " + str(message))
self.cbLog("warning", "Exception: " + str(type(ex)) + str(ex.args))
def onAppInit(self, message):
self.cbLog("debug", "onAppInit, message: " + str(message))
resp = {"name": self.name,
"id": self.id,
"status": "ok",
"service": [{"characteristic": "binary_sensor", "interval": 0, "type": "pir"},
{"characteristic": "temperature", "interval": 600},
{"characteristic": "luminance", "interval": 600},
{"characteristic": "battery", "interval": 600},
{"characteristic": "connected", "interval": 600}],
"content": "service"}
self.sendMessage(resp, message["id"])
self.setState("running")
def onAppRequest(self, message):
# Switch off anything that already exists for this app
for a in self.apps:
if message["id"] in self.apps[a]:
self.apps[a].remove(message["id"])
# Now update details based on the message
for f in message["service"]:
if message["id"] not in self.apps[f["characteristic"]]:
self.apps[f["characteristic"]].append(message["id"])
self.cbLog("debug", "apps: " + str(self.apps))
def onAppCommand(self, message):
if "data" not in message:
self.cbLog("warning", "app message without data: " + str(message))
else:
self.cbLog("warning", "This is a sensor. Message not understood: " + str(message))
def onAction(self, action):
self.cbLog("debug", "onAction")
if action == "interview":
self.forceInterview()
else:
self.cbLog("warning", "onAction. Unrecognised action: " + str(action))
def onConfigureMessage(self, config):
#self.cbLog("debug", "onConfigureMessage, config: " + str(config))
"""Config is based on what apps are to be connected.
May be called again if there is a new configuration, which
could be because a new app has been added.
"""
self.setState("starting")
if __name__ == '__main__':
Adaptor(sys.argv)
| {
"content_hash": "d5547133fb3858b4b9ee9bbe4974b4c8",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 136,
"avg_line_length": 43.54807692307692,
"alnum_prop": 0.4678001030396703,
"repo_name": "ContinuumBridge/fibaro_motion_sensor",
"id": "5d92a16869535f953041467445381ba97e9b827d",
"size": "13739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fibaro_motion_sensor_a.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14059"
}
],
"symlink_target": ""
} |
"""
Shared utility functions which are not specific to any particular module.
"""
from __future__ import absolute_import
import contextlib
import copy
import inspect
import sys
from functools import wraps
import packaging.version
import six
from .environment import MYPY_RUNNING
# format: off
six.add_move(
six.MovedAttribute("Callable", "collections", "collections.abc")
) # type: ignore # noqa
from six.moves import Callable # type: ignore # isort:skip # noqa
# format: on
if MYPY_RUNNING:
from types import ModuleType
from typing import (
Any,
Dict,
Iterator,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
TShimmedPath = TypeVar("TShimmedPath")
TShimmedPathCollection = TypeVar("TShimmedPathCollection")
TShim = Union[TShimmedPath, TShimmedPathCollection]
TShimmedFunc = Union[TShimmedPath, TShimmedPathCollection, Callable, Type]
STRING_TYPES = (str,)
if sys.version_info < (3, 0):
STRING_TYPES = STRING_TYPES + (unicode,) # noqa:F821
class BaseMethod(Callable):
def __init__(self, func_base, name, *args, **kwargs):
# type: (Callable, str, Any, Any) -> None
self.func = func_base
self.__name__ = self.__qualname__ = name
def __call__(self, *args, **kwargs):
# type: (Any, Any) -> Any
return self.func(*args, **kwargs)
class BaseClassMethod(Callable):
def __init__(self, func_base, name, *args, **kwargs):
# type: (Callable, str, Any, Any) -> None
self.func = func_base
self.__name__ = self.__qualname__ = name
def __call__(self, cls, *args, **kwargs):
# type: (Type, Any, Any) -> Any
return self.func(*args, **kwargs)
def make_method(fn):
# type: (Callable) -> Callable
@wraps(fn)
def method_creator(*args, **kwargs):
# type: (Any, Any) -> Callable
return BaseMethod(fn, *args, **kwargs)
return method_creator
def make_classmethod(fn):
# type: (Callable) -> Callable
@wraps(fn)
def classmethod_creator(*args, **kwargs):
# type: (Any, Any) -> Callable
return classmethod(BaseClassMethod(fn, *args, **kwargs))
return classmethod_creator
def memoize(obj):
# type: (Any) -> Callable
cache = obj.cache = {}
@wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
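# Illustrative usage (hypothetical function): results are cached per
# stringified (args, kwargs), so repeated calls with equal arguments reuse
# the first result instead of recomputing it.
#
#     @memoize
#     def expensive_lookup(name, retries=3):
#         ...
#
#     expensive_lookup("pip")   # computed and cached
#     expensive_lookup("pip")   # served from the cache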
@memoize
def _parse(version):
# type: (str) -> Tuple[int, ...]
if isinstance(version, STRING_TYPES):
return tuple((int(i) for i in version.split(".")))
return version
@memoize
def parse_version(version):
# type: (str) -> packaging.version._BaseVersion
if not isinstance(version, STRING_TYPES):
raise TypeError("Can only derive versions from string, got {0!r}".format(version))
return packaging.version.parse(version)
@memoize
def split_package(module, subimport=None):
# type: (str, Optional[str]) -> Tuple[str, str]
"""
Used to determine what target to import.
Either splits off the final segment or uses the provided sub-import to return a
2-tuple of the import path and the target module or sub-path.
:param str module: A package to import from
:param Optional[str] subimport: A class, function, or subpackage to import
:return: A 2-tuple of the corresponding import package and sub-import path
:rtype: Tuple[str, str]
:Example:
>>> from pip_shims.utils import split_package
>>> split_package("pip._internal.req.req_install", subimport="InstallRequirement")
("pip._internal.req.req_install", "InstallRequirement")
>>> split_package("pip._internal.cli.base_command")
("pip._internal.cli", "base_command")
"""
package = None
if subimport:
package = subimport
else:
module, _, package = module.rpartition(".")
return module, package
def get_method_args(target_method):
# type: (Callable) -> Tuple[Callable, Optional[inspect.Arguments]]
"""
Returns the arguments for a callable.
:param Callable target_method: A callable to retrieve arguments for
:return: A 2-tuple of the original callable and its resulting arguments
:rtype: Tuple[Callable, Optional[inspect.Arguments]]
"""
inspected_args = None
try:
inspected_args = inspect.getargs(target_method.__code__)
except AttributeError:
target_func = getattr(target_method, "__func__", None)
if target_func is not None:
inspected_args = inspect.getargs(target_func.__code__)
else:
target_func = target_method
return target_func, inspected_args
def set_default_kwargs(basecls, method, *args, **default_kwargs):
# type: (Union[Type, ModuleType], Callable, Any, Any) -> Union[Type, ModuleType] # noqa
target_method = getattr(basecls, method, None)
if target_method is None:
return basecls
target_func, inspected_args = get_method_args(target_method)
if inspected_args is not None:
pos_args = inspected_args.args
else:
pos_args = []
# Spit back the base class if we can't find matching arguments
# to put defaults in place of
if not any(arg in pos_args for arg in list(default_kwargs.keys())):
return basecls
prepended_defaults = tuple() # type: Tuple[Any, ...]
# iterate from the function's argument order to make sure we fill this
# out in the correct order
for arg in args:
prepended_defaults += (arg,)
for arg in pos_args:
if arg in default_kwargs:
prepended_defaults = prepended_defaults + (default_kwargs[arg],)
if not prepended_defaults:
return basecls
if six.PY2 and inspect.ismethod(target_method):
new_defaults = prepended_defaults + target_func.__defaults__
target_method.__func__.__defaults__ = new_defaults
else:
new_defaults = prepended_defaults + target_method.__defaults__
target_method.__defaults__ = new_defaults
setattr(basecls, method, target_method)
return basecls
def ensure_function(parent, funcname, func):
# type: (Union[ModuleType, Type, Callable, Any], str, Callable) -> Callable
"""Given a module, a function name, and a function object, attaches the given
function to the module and ensures it is named properly according to the provided
argument
:param Any parent: The parent to attach the function to
:param str funcname: The name to give the function
:param Callable func: The function to rename and attach to **parent**
:returns: The function with its name, qualname, etc set to mirror **parent**
:rtype: Callable
"""
qualname = funcname
if parent is None:
parent = __module__ # type: ignore # noqa:F821
parent_is_module = inspect.ismodule(parent)
parent_is_class = inspect.isclass(parent)
module = None
if parent_is_module:
module = parent.__name__
elif parent_is_class:
qualname = "{0}.{1}".format(parent.__name__, qualname)
module = getattr(parent, "__module__", None)
else:
module = getattr(parent, "__module__", None)
try:
func.__name__ = funcname
except AttributeError:
if getattr(func, "__func__", None) is not None:
func = func.__func__
func.__name__ = funcname
func.__qualname__ = qualname
func.__module__ = module
return func
def add_mixin_to_class(basecls, mixins):
# type: (Type, List[Type]) -> Type
"""
Given a class, adds the provided mixin classes as base classes and returns a new class
:param Type basecls: An initial class to generate a new class from
:param List[Type] mixins: A list of mixins to add as base classes
:return: A new class with the provided mixins as base classes
:rtype: Type[basecls, *mixins]
"""
if not any(mixins):
return basecls
base_dict = basecls.__dict__.copy()
class_tuple = (basecls,) # type: Tuple[Type, ...]
for mixin in mixins:
if not mixin:
continue
mixin_dict = mixin.__dict__.copy()
base_dict.update(mixin_dict)
class_tuple = class_tuple + (mixin,)
base_dict.update(basecls.__dict__)
return type(basecls.__name__, class_tuple, base_dict)
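# Illustrative sketch (hypothetical classes): combine a base class with a
# mixin without mutating the original.
#
#     class AsDictMixin(object):
#         def as_dict(self):
#             return dict(self.__dict__)
#
#     Patched = add_mixin_to_class(SomeBaseClass, [AsDictMixin])
#
# The returned type has SomeBaseClass and AsDictMixin as bases and a merged
# class __dict__, so it gains as_dict() while SomeBaseClass stays untouched.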
def fallback_is_file_url(link):
# type: (Any) -> bool
return link.url.lower().startswith("file:")
def fallback_is_artifact(self):
# type: (Any) -> bool
return not getattr(self, "is_vcs", False)
def fallback_is_vcs(self):
# type: (Any) -> bool
return not getattr(self, "is_artifact", True)
def resolve_possible_shim(target):
# type: (TShimmedFunc) -> Optional[Union[Type, Callable]]
if target is None:
return target
if getattr(target, "shim", None):
return target.shim()
return target
@contextlib.contextmanager
def nullcontext(*args, **kwargs):
# type: (Any, Any) -> Iterator
try:
yield
finally:
pass
def has_property(target, name):
# type: (Any, str) -> bool
if getattr(target, name, None) is not None:
return True
return False
def apply_alias(imported, target, *aliases):
# type: (Union[ModuleType, Type, None], Any, Any) -> Any
"""
Given a target with attributes, point non-existent aliases at the first existing one
:param Union[ModuleType, Type] imported: A Module or Class base
:param Any target: The target which is a member of **imported** and will have aliases
:param str aliases: A list of aliases, the first found attribute will be the basis
for all non-existent names, which will be created as pointers
:return: The original target
:rtype: Any
"""
base_value = None # type: Optional[Any]
applied_aliases = set()
unapplied_aliases = set()
for alias in aliases:
if has_property(target, alias):
base_value = getattr(target, alias)
applied_aliases.add(alias)
else:
unapplied_aliases.add(alias)
is_callable = inspect.ismethod(base_value) or inspect.isfunction(base_value)
for alias in unapplied_aliases:
if is_callable:
func_copy = copy.deepcopy(base_value)
alias_value = ensure_function(imported, alias, func_copy)
else:
alias_value = base_value
setattr(target, alias, alias_value)
return target
def suppress_setattr(obj, attr, value, filter_none=False):
"""
Set an attribute, suppressing any exceptions and skipping the attempt on failure.
:param Any obj: Object to set the attribute on
:param str attr: The attribute name to set
:param Any value: The value to set the attribute to
:param bool filter_none: If True, skip setting the attribute when **value** is None; defaults to False
:return: Nothing
:rtype: None
:Example:
>>> class MyClass(object):
... def __init__(self, name):
... self.name = name
... self.parent = None
... def __repr__(self):
... return "<{0!r} instance (name={1!r}, parent={2!r})>".format(
... self.__class__.__name__, self.name, self.parent
... )
... def __str__(self):
... return self.name
>>> me = MyClass("Dan")
>>> dad = MyClass("John")
>>> grandfather = MyClass("Joe")
>>> suppress_setattr(dad, "parent", grandfather)
>>> dad
<'MyClass' instance (name='John', parent=<'MyClass' instance (name='Joe', parent=None
)>)>
>>> suppress_setattr(me, "parent", dad)
>>> me
<'MyClass' instance (name='Dan', parent=<'MyClass' instance (name='John', parent=<'My
Class' instance (name='Joe', parent=None)>)>)>
>>> suppress_setattr(me, "grandparent", grandfather)
>>> me
<'MyClass' instance (name='Dan', parent=<'MyClass' instance (name='John', parent=<'My
Class' instance (name='Joe', parent=None)>)>)>
"""
if filter_none and value is None:
return
try:
setattr(obj, attr, value)
except Exception: # noqa
pass
def get_allowed_args(fn_or_class):
# type: (Union[Callable, Type]) -> Tuple[List[str], Dict[str, Any]]
"""
Given a callable or a class, returns the arguments and default kwargs passed in.
:param Union[Callable, Type] fn_or_class: A function, method or class to inspect.
:return: A 2-tuple with a list of arguments and a dictionary of keywords mapped to
default values.
:rtype: Tuple[List[str], Dict[str, Any]]
"""
try:
signature = inspect.signature(fn_or_class)
except AttributeError:
import funcsigs
signature = funcsigs.signature(fn_or_class)
args = []
kwargs = {}
for arg, param in signature.parameters.items():
if (
param.kind in (param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY)
) and param.default is param.empty:
args.append(arg)
else:
kwargs[arg] = param.default if param.default is not param.empty else None
return args, kwargs
def call_function_with_correct_args(fn, **provided_kwargs):
# type: (Callable, Dict[str, Any]) -> Any
"""
Determines which arguments from **provided_kwargs** to call **fn** and calls it.
Consumes a list of allowed arguments (e.g. from :func:`~inspect.getargs()`) and
uses it to determine which of the arguments in the provided kwargs should be passed
through to the given callable.
:param Callable fn: A callable which has some dynamic arguments
:param Dict[str, Any] provided_kwargs: A mapping of candidate arguments; only
those matching the callable's signature are passed through
:return: The result of calling the function
:rtype: Any
"""
# signature = inspect.signature(fn)
args = []
kwargs = {}
func_args, func_kwargs = get_allowed_args(fn)
for arg in func_args:
args.append(provided_kwargs[arg])
for arg in func_kwargs:
if not provided_kwargs.get(arg):
continue
kwargs[arg] = provided_kwargs[arg]
return fn(*args, **kwargs)
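# Illustrative sketch (hypothetical callable): only arguments present in the
# callable's signature are forwarded and extras are dropped; note that falsy
# keyword values are also skipped by the provided_kwargs.get(arg) check above.
#
#     def build(name, retries=2):
#         return (name, retries)
#
#     call_function_with_correct_args(build, name="pip", retries=5, extra=True)
#     # -> ("pip", 5)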
def filter_allowed_args(fn, **provided_kwargs):
# type: (Callable, Dict[str, Any]) -> Tuple[List[Any], Dict[str, Any]]
"""
Given a function and a kwarg mapping, return only those kwargs used in the function.
:param Callable fn: A function to inspect
:param Dict[str, Any] kwargs: A mapping of kwargs to filter
:return: A new, filtered kwarg mapping
:rtype: Tuple[List[Any], Dict[str, Any]]
"""
args = []
kwargs = {}
func_args, func_kwargs = get_allowed_args(fn)
for arg in func_args:
if arg in provided_kwargs:
args.append(provided_kwargs[arg])
for arg in func_kwargs:
if arg not in provided_kwargs:
continue
kwargs[arg] = provided_kwargs[arg]
return args, kwargs
| {
"content_hash": "19d25798f47c791a117ef6daca1e02d0",
"timestamp": "",
"source": "github",
"line_count": 464,
"max_line_length": 92,
"avg_line_length": 32.16594827586207,
"alnum_prop": 0.6308877721943048,
"repo_name": "kennethreitz/pipenv",
"id": "162b4a20e0a85d597eaf555dd9fbec9e2eb7c8a5",
"size": "14948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipenv/vendor/pip_shims/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "202"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2588085"
},
{
"name": "Roff",
"bytes": "40754"
}
],
"symlink_target": ""
} |
"""Setup script for the Google Ads Python Client Library."""
import os
import re
import sys
from setuptools import setup
PACKAGES = ['googleads']
DEPENDENCIES = ['google-auth>=2.0.0,<3.0.0',
'google-auth-oauthlib>=0.0.1,<1.0.0', 'pytz>=2015.7',
'PyYAML>=6.0, <7.0', 'requests>=2.0.0,<3.0.0',
'xmltodict>=0.9.2,<1.0.0', 'zeep>=2.5.0']
# Note: Breaking change introduced in pyfakefs 3.3.
TEST_DEPENDENCIES = ['mock>=2.0.0,<3.0.0', 'pyfakefs>=3.2,<3.3']
CLASSIFIERS = [
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.7'
]
def GetVersion():
"""Gets the version from googleads/common.py.
We can't import this directly because new users would get ImportErrors on our
third party dependencies.
Returns:
The version of the library.
"""
with open(os.path.join('googleads', 'common.py')) as versions_file:
source = versions_file.read()
return re.search('\\nVERSION = \'(.*?)\'', source).group(1)
long_description = """
===========================================
The googleads Python Client Libraries
===========================================
The googleads Python Client Libraries support the following products:
* AdWords API
* Google Ad Manager API
You can find more information about the Google Ads Python Client Libraries
`here <https://github.com/googleads/googleads-python-lib>`_.
Supported Python Versions
=========================
This library is supported for Python 3.7+.
Installation
============
You have two options for installing the Ads Python Client Libraries:
* Install with a tool such as pip::
$ sudo pip install googleads
* Install manually after downloading and extracting the tarball::
$ sudo python setup.py install
Examples
========
If you would like to obtain example code for any of the included
client libraries, you can find it on our
`downloads page <https://github.com/googleads/googleads-python-lib/releases>`_.
Contact Us
==========
Do you have an issue using the googleads Client Libraries? Or perhaps some
feedback for how we can improve them? Feel free to let us know on our
`issue tracker <https://github.com/googleads/googleads-python-lib/issues>`_.
"""
setup(name='googleads',
version=GetVersion(),
description='Google Ads Python Client Library',
author='Mark Saniscalchi',
author_email='api.msaniscalchi@gmail.com',
url='https://github.com/googleads/googleads-python-lib',
license='Apache License 2.0',
long_description=long_description,
packages=PACKAGES,
platforms='any',
keywords='adwords dfp admanager google',
classifiers=CLASSIFIERS,
install_requires=DEPENDENCIES,
tests_require=TEST_DEPENDENCIES,
test_suite='tests')
| {
"content_hash": "42c54efa2b880f1fb2d8071c6d1b4a85",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 79,
"avg_line_length": 28.32,
"alnum_prop": 0.6627824858757062,
"repo_name": "googleads/googleads-python-lib",
"id": "b37fe53d3ba886b9d81d6aea89225f3b299a5527",
"size": "3454",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "403821"
}
],
"symlink_target": ""
} |
from ip_proxy.connection.redis_connection import redisDb1
from ip_proxy.connection.mysql_connection import mysqlAsyn
from ip_proxy.config import QUEUE_NUM, QUEUE_KEY
import traceback
import json
from ip_proxy.utils.log import Log
import time
class IpQueue(object):
def __init__(self):
self.redis = redisDb1.conn
self.dbpool = mysqlAsyn.dbpool
self.logger = Log().getLogger('development')
pass
def getQueue(self, level):
key = QUEUE_KEY + str(level)
return key
def handle_error(self, failure):
self.logger.info(str(failure))
pass
def start(self):
timeArray = time.localtime(time.time())
date_time = time.strftime("%Y--%m--%d %H:%M:%S", timeArray)
self.logger.info('ip_queue start at:{}'.format(date_time))
for i in range(QUEUE_NUM):
try:
length = self.redis.llen(self.getQueue(i))
if length < 10000:
index = i
res = self.dbpool.runInteraction(self.do_select, index)
res.addErrback(self.handle_error)
pass
except Exception as e:
self.logger.info(traceback.format_exc())
pass
pass
def do_select(self, cursor, i):
try:
length = self.redis.llen(self.getQueue(i))
if length < 10000:
start = 0
limit = 2500
sql = """select id, ip, port, scheme, level, flag, times from `ip` where level = %s and `times` = 0 order by update_time asc limit %s,%s """
while True:
params = (i, start * limit, limit)
cursor.execute(sql, params)
res = cursor.fetchall()
if not len(res):
break
for value in res:
data = {'key': value['id'], 'ip': value['ip'], 'port': value['port'], 'scheme': value['scheme'], 'level': value['level'], 'flag': value['flag'], 'times': value['times']}
                        # redis list entries must be str/bytes/numbers, so store the dict as JSON
                        if data['level'] is not None:
                            self.redis.rpush(self.getQueue(data['level']), json.dumps(data))
                        else:
                            self.redis.rpush(self.getQueue(0), json.dumps(data))
start = start + 1
except Exception as e:
self.logger.error(traceback.format_exc())
pass
# if __name__ == '__main__':
# ip_queue = IpQueue()
# ip_queue.start()
# try:
# reactor.run()
# pass
# except Exception as e:
# print('stop')
# pass | {
"content_hash": "7507697bf04010d76777429cdb39c5c8",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 193,
"avg_line_length": 36.273972602739725,
"alnum_prop": 0.5083081570996979,
"repo_name": "owlsn/h_crawl",
"id": "dff31b0031a1a50b2ff736ce8546832ca114ff32",
"size": "2834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spiders/ip_proxy/ip_proxy/scheduler/ip_queue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "111"
},
{
"name": "HTML",
"bytes": "275"
},
{
"name": "JavaScript",
"bytes": "3257"
},
{
"name": "Python",
"bytes": "6605"
}
],
"symlink_target": ""
} |
import gym
from TabularQLearner import *
#
#
if __name__ == "__main__":
# ----------------------------------------
# Define parameters for e-Greedy policy
epsilon = 1.0 # exploration
epsilon_floor = 0.1
exploration_decay = 0.995
# Define parameters for Q-learning
alpha = 0.2
gamma = 0.97
epoch = 10
max_steps = 500
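    # For reference (training happens elsewhere; this script only evaluates a
    # pre-trained table with a greedy policy), alpha and gamma parameterise
    # the standard tabular Q-learning update:
    #   Q(s, a) <- Q(s, a) + alpha * (r + gamma * max_a' Q(s', a') - Q(s, a))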
# ----------------------------------------
# Actions
# Type: Discrete(3)
    # Num | Action
# 0 | push_left
# 1 | no_push
# 2 | push_right
N_action = 3
actions = [0, 1, 2]
# ----------------------------------------
# Observation
# Type: Box(2)
# Num | Observation | Min | Max
# 0 | position | -1.2 | 0.6
# 1 | velocity | -0.07 | 0.07
N_input = 2
observation = []
# ----------------------------------------
# Define environment/game
env = gym.make('MountainCar-v0')
# ----------------------------------------
# Initialize QLearn object
AI = TabularQLearner(actions,epsilon=epsilon)
# Load pre-trained model
AI.importQ('Q_table_27_27_3_epoch_1000')
AI.plotQ()
AI.plotQaction()
# ----------------------------------------
# Test
for e in range(epoch):
# Clear trajectory
AI.clearTrajectory()
plt.pause(0.001)
# Get initial input
observation = env.reset()
# Training for single episode
step = 0
game_over = False
while (not game_over):
observation_capture = observation
env.render()
# Greedy policy
action = AI.greedy(observation)
# Apply action, get rewards and new state
observation, reward, game_over, info = env.step(action)
# Plot trajectory
AI.plotTrajectory(observation_capture,action)
step += 1
#
if observation[0] > 0.5:
print("#TEST Episode:{} finished after {} timesteps. Reached GOAL!.".format(e, step))
else:
print("#TEST Episode:{} finished after {} timesteps. Timeout!.".format(e, step))
#
# Plot
plt.pause(1.5)
# ----------------------------------------
print("Done!.")
# Close environment
    env.close()
# Some delay
raw_input('Press enter to terminate:')
| {
"content_hash": "0193b3f65c984c4dceb066e9379fd7e6",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 97,
"avg_line_length": 28.5609756097561,
"alnum_prop": 0.4743808710503843,
"repo_name": "dganbold/reinforcement_learning",
"id": "8a93a098dddc193fcf6eb5e239d2dee87fcedbae",
"size": "2346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "QLearning/mountain-car_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57223"
}
],
"symlink_target": ""
} |
import functools
from oslo_log import log as logging
from oslo_utils import excutils
from neutron.agent.linux import ip_lib
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qrouter-'
INTERNAL_DEV_PREFIX = 'qr-'
EXTERNAL_DEV_PREFIX = 'qg-'
# TODO(Carl) It is odd that this file needs this. It is a dvr detail.
ROUTER_2_FIP_DEV_PREFIX = 'rfp-'
def build_ns_name(prefix, identifier):
"""Builds a namespace name from the given prefix and identifier
:param prefix: The prefix which must end with '-' for legacy reasons
:param identifier: The id associated with the namespace
"""
return prefix + identifier
def get_prefix_from_ns_name(ns_name):
"""Parses prefix from prefix-identifier
:param ns_name: The name of a namespace
:returns: The prefix ending with a '-' or None if there is no '-'
"""
dash_index = ns_name.find('-')
if 0 <= dash_index:
return ns_name[:dash_index + 1]
def get_id_from_ns_name(ns_name):
"""Parses identifier from prefix-identifier
:param ns_name: The name of a namespace
:returns: Identifier or None if there is no - to end the prefix
"""
dash_index = ns_name.find('-')
if 0 <= dash_index:
return ns_name[dash_index + 1:]
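# Illustrative example (identifier is made up): for the namespace name
# 'qrouter-0b1c2d3e-4f56-7890-abcd-ef0123456789',
# get_prefix_from_ns_name() returns 'qrouter-' and get_id_from_ns_name()
# returns '0b1c2d3e-4f56-7890-abcd-ef0123456789'.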
def check_ns_existence(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if not self.exists():
LOG.warning('Namespace %(name)s does not exist. Skipping '
'%(func)s',
{'name': self.name, 'func': f.__name__})
return
try:
return f(self, *args, **kwargs)
except RuntimeError:
with excutils.save_and_reraise_exception() as ctx:
if not self.exists():
LOG.debug('Namespace %(name)s was concurrently deleted',
self.name)
ctx.reraise = False
return wrapped
class Namespace(object):
def __init__(self, name, agent_conf, driver, use_ipv6):
self.name = name
self.ip_wrapper_root = ip_lib.IPWrapper()
self.agent_conf = agent_conf
self.driver = driver
self.use_ipv6 = use_ipv6
def create(self, ipv6_forwarding=True):
# See networking (netdev) tree, file
# Documentation/networking/ip-sysctl.txt for an explanation of
# these sysctl values.
ip_wrapper = self.ip_wrapper_root.ensure_namespace(self.name)
cmd = ['sysctl', '-w', 'net.ipv4.ip_forward=1']
ip_wrapper.netns.execute(cmd)
# 1. Reply only if the target IP address is local address configured
# on the incoming interface; and
# 2. Always use the best local address
cmd = ['sysctl', '-w', 'net.ipv4.conf.all.arp_ignore=1']
ip_wrapper.netns.execute(cmd)
cmd = ['sysctl', '-w', 'net.ipv4.conf.all.arp_announce=2']
ip_wrapper.netns.execute(cmd)
if self.use_ipv6:
cmd = ['sysctl', '-w',
'net.ipv6.conf.all.forwarding=%d' % int(ipv6_forwarding)]
ip_wrapper.netns.execute(cmd)
def delete(self):
try:
self.ip_wrapper_root.netns.delete(self.name)
except RuntimeError:
msg = 'Failed trying to delete namespace: %s'
LOG.exception(msg, self.name)
def exists(self):
return self.ip_wrapper_root.netns.exists(self.name)
class RouterNamespace(Namespace):
def __init__(self, router_id, agent_conf, driver, use_ipv6):
self.router_id = router_id
name = self._get_ns_name(router_id)
super(RouterNamespace, self).__init__(
name, agent_conf, driver, use_ipv6)
@classmethod
def _get_ns_name(cls, router_id):
return build_ns_name(NS_PREFIX, router_id)
@check_ns_existence
def delete(self):
ns_ip = ip_lib.IPWrapper(namespace=self.name)
for d in ns_ip.get_devices():
if d.name.startswith(INTERNAL_DEV_PREFIX):
# device is on default bridge
self.driver.unplug(d.name, namespace=self.name,
prefix=INTERNAL_DEV_PREFIX)
elif d.name.startswith(ROUTER_2_FIP_DEV_PREFIX):
ns_ip.del_veth(d.name)
elif d.name.startswith(EXTERNAL_DEV_PREFIX):
self.driver.unplug(
d.name,
bridge=self.agent_conf.external_network_bridge,
namespace=self.name,
prefix=EXTERNAL_DEV_PREFIX)
super(RouterNamespace, self).delete()
| {
"content_hash": "cefa463c42db220d78b7c13745891467",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 76,
"avg_line_length": 33.84444444444444,
"alnum_prop": 0.5898446049463778,
"repo_name": "noironetworks/neutron",
"id": "58b0deb60750e3bbd2c2cc5c78c83b0a464d6efb",
"size": "5205",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/agent/l3/namespaces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "11420614"
},
{
"name": "Shell",
"bytes": "38791"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, absolute_import
from django.conf.urls import patterns, include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.views.generic import RedirectView
from mezzanine.core.views import direct_to_template
from mezzanine.conf import settings
import autocomplete_light
autocomplete_light.autodiscover()
admin.autodiscover()
# Add the urlpatterns for any custom Django applications here.
# You can also change the ``home`` view to add your own functionality
# to the project's homepage.
js_info_dict = {
'packages': ('django.conf',),
}
urlpatterns = i18n_patterns("",
# Change the admin prefix here to use an alternate URL for the
# admin interface, which would be marginally more secure.
url(r'autocomplete/', include('autocomplete_light.urls')),
(r'^inplaceeditform/', include('inplaceeditform.urls')),
(r'^jsi18n$', 'django.views.i18n.javascript_catalog', js_info_dict),
("^admin/", include(admin.site.urls)),
# TERRAPYN URLS
("^terrapyn/", include("terrapyn.geocms.urls")),
("^docker/", include("django_docker_processes.urls")),
("^favicon.ico", RedirectView.as_view(url='/static/favicon.ico')),
)
# Filebrowser admin media library.
if getattr(settings, "PACKAGE_NAME_FILEBROWSER") in settings.INSTALLED_APPS:
urlpatterns += i18n_patterns("",
("^admin/media-library/", include("%s.urls" %
settings.PACKAGE_NAME_FILEBROWSER)),
)
urlpatterns += patterns('',
# We don't want to presume how your homepage works, so here are a
# few patterns you can use to set it up.
# HOMEPAGE AS STATIC TEMPLATE
# ---------------------------
# This pattern simply loads the index.html template. It isn't
# commented out like the others, so it's the default. You only need
# one homepage pattern, so if you use a different one, comment this
# one out.
# url("^$", th_admin.index, name="home"),
url("^$", direct_to_template, {"template": "index.html"}, name="home"),
# HOMEPAGE AS AN EDITABLE PAGE IN THE PAGE TREE
# ---------------------------------------------
# This pattern gives us a normal ``Page`` object, so that your
# homepage can be managed via the page tree in the admin. If you
# use this pattern, you'll need to create a page in the page tree,
# and specify its URL (in the Meta Data section) as "/", which
# is the value used below in the ``{"slug": "/"}`` part.
# Also note that the normal rule of adding a custom
# template per page with the template name using the page's slug
# doesn't apply here, since we can't have a template called
# "/.html" - so for this case, the template "pages/index.html"
# should be used if you want to customize the homepage's template.
# url("^$", "mezzanine.pages.views.page", {"slug": "/"}, name="home"),
# HOMEPAGE FOR A BLOG-ONLY SITE
# -----------------------------
# This pattern points the homepage to the blog post listing page,
# and is useful for sites that are primarily blogs. If you use this
# pattern, you'll also need to set BLOG_SLUG = "" in your
# ``settings.py`` module, and delete the blog page object from the
# page tree in the admin if it was installed.
# url("^$", "mezzanine.blog.views.blog_post_list", name="home"),
# MEZZANINE'S URLS
# ----------------
# ADD YOUR OWN URLPATTERNS *ABOVE* THE LINE BELOW.
# ``mezzanine.urls`` INCLUDES A *CATCH ALL* PATTERN
# FOR PAGES, SO URLPATTERNS ADDED BELOW ``mezzanine.urls``
# WILL NEVER BE MATCHED!
# If you'd like more granular control over the patterns in
# ``mezzanine.urls``, go right ahead and take the parts you want
# from it, and use them directly below instead of using
# ``mezzanine.urls``.
("^", include("mezzanine.urls")),
# MOUNTING MEZZANINE UNDER A PREFIX
# ---------------------------------
# You can also mount all of Mezzanine's urlpatterns under a
# URL prefix if desired. When doing this, you need to define the
# ``SITE_PREFIX`` setting, which will contain the prefix. Eg:
# SITE_PREFIX = "my/site/prefix"
# For convenience, and to avoid repeating the prefix, use the
# commented out pattern below (commenting out the one above of course)
# which will make use of the ``SITE_PREFIX`` setting. Make sure to
# add the import ``from django.conf import settings`` to the top
# of this file as well.
# Note that for any of the various homepage patterns above, you'll
# need to use the ``SITE_PREFIX`` setting as well.
# ("^%s/" % settings.SITE_PREFIX, include("mezzanine.urls"))
)
# Adds ``STATIC_URL`` to the context of error pages, so that error
# pages can use JS, CSS and images.
handler404 = "mezzanine.core.views.page_not_found"
handler500 = "mezzanine.core.views.server_error"
| {
"content_hash": "a8a24b5e5016df8a961c825dc9f644fd",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 76,
"avg_line_length": 40.760330578512395,
"alnum_prop": 0.6559205190592052,
"repo_name": "JeffHeard/terrapyn_project",
"id": "57d53176e3e6177de008af9e0687a6f5de65c742",
"size": "4932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "terrapyn_project/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Nginx",
"bytes": "1184"
},
{
"name": "Python",
"bytes": "25981"
},
{
"name": "Shell",
"bytes": "86"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf.urls import patterns, url, include
from djspikeval import views
__author__ = "pmeier82"
patterns_algorithm = patterns(
"",
url(r"^$", views.AlgorithmList.as_view(), name="list"),
url(r"^create/$", views.AlgorithmCreate.as_view(), name="create"),
url(r"^(?P<pk>\d+)/$", views.AlgorithmDetail.as_view(), name="detail"),
url(r"^(?P<pk>\d+)/update/$", views.AlgorithmUpdate.as_view(), name="update"),
url(r"^(?P<pk>\d+)/delete/$", views.AlgorithmDelete.as_view(), name="delete"),
)
patterns_analysis = patterns(
"",
url(r"^$", views.AnalysisList.as_view(), name="list"),
url(r"^list/(?P<pk>\d+)$", views.AnalysisList.as_view(), name="list-filter"),
url(r"^create/(?P<pk>\d+)$", views.AnalysisCreate.as_view(), name="create"),
url(r"^(?P<pk>\d+)/$", views.AnalysisDetail.as_view(), name="detail"),
url(r"^(?P<pk>\d+)/update/$", views.AnalysisUpdate.as_view(), name="update"),
url(r"^(?P<pk>\d+)/delete/$", views.AnalysisDelete.as_view(), name="delete"),
url(r"^(?P<pk>\d+)/toggle/$", views.SubmissionToggle.as_view(), name="toggle"),
url(r"^(?P<pk>\d+)/download/$", views.AnalysisDownload.as_view(), name="download"),
url(r"^(?P<pk>\d+)/start/$", views.AnalysisStart.as_view(), name="start"),
url(r"^(?P<pk>\d+)/start-all/$", views.SubmissionStart.as_view(), name="start-all"),
)
patterns_datafile = patterns(
"",
url(r"create/(?P<pk>\d+)$", views.DatafileCreate.as_view(), name="create"),
url(r'^(?P<pk>\d+)/$', views.DatafileDetail.as_view(), name="detail"),
url(r'^(?P<pk>\d+)/update/$', views.DatafileUpdate.as_view(), name="update"),
url(r'^(?P<pk>\d+)/delete/$', views.DatafileDelete.as_view(), name="delete"),
url(r'^(?P<pk>\d+)/validate/$', views.DatafileValidate.as_view(), name="validate"),
)
patterns_dataset = patterns(
"",
url(r"^$", views.DatasetList.as_view(), name="list"),
url(r"^create/$", views.DatasetCreate.as_view(), name="create"),
url(r"^(?P<pk>\d+)/$", views.DatasetDetail.as_view(), name="detail"),
url(r'^(?P<pk>\d+)/update/$', views.DatasetUpdate.as_view(), name="update"),
url(r'^(?P<pk>\d+)/delete/$', views.DatasetDelete.as_view(), name="delete"),
url(r"^(?P<pk>\d+)/toggle/$", views.DatasetToggle.as_view(), name="toggle"),
url(r"^(?P<pk>\d+)/download/$", views.DatasetDownload.as_view(), name="download"),
)
urlpatterns = patterns(
"",
url(r"^algorithm/", include(patterns_algorithm, namespace="algorithm")),
url(r"^analysis/", include(patterns_analysis, namespace="analysis")),
url(r"^datafile/", include(patterns_datafile, namespace="datafile")),
url(r"^dataset/", include(patterns_dataset, namespace="dataset")),
)
if __name__ == "__main__":
pass
| {
"content_hash": "156e81385a4745446ea896495ea26f06",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 88,
"avg_line_length": 47.28813559322034,
"alnum_prop": 0.6157706093189964,
"repo_name": "pmeier82/django-spikeval",
"id": "a79a0e270f2acc20dfadaa54c1f9b72e1efc6ed6",
"size": "2815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djspikeval/urls.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "259"
},
{
"name": "HTML",
"bytes": "44877"
},
{
"name": "Python",
"bytes": "81993"
}
],
"symlink_target": ""
} |
import boto.sqs
import boto.sqs.queue
from boto.sqs.message import Message
from boto.sqs.connection import SQSConnection
from boto.exception import SQSError
import sys
import urllib2
p1='username'
p2='userpass'
url='http://ec2-52-30-7-5.eu-west-1.compute.amazonaws.com:81/key'
# Get the keys from a specific url and then use them to connect to AWS Service
access_key_id = ""
secret_access_key = ""
req = urllib2.Request(url)
res = urllib2.urlopen(req)
"""req.status_code"""
str1 = res.read()
access_key_id,secret_access_key = str1.split(':')
print access_key_id,'\n',secret_access_key
# Set up a connection to the AWS service.
conn = boto.sqs.connect_to_region("eu-west-1", aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)
# Get the queue by name and print its approximate message count
str2=raw_input("input the queue name:")
queue1 = conn.get_queue(str2)
a=queue1.count()
print a
#print(response.get('MessageId'))
#print(response.get('MD5OfMessageBody'))
| {
"content_hash": "079c676c62e0b7b44a5474a27ec0ae9f",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 120,
"avg_line_length": 29.444444444444443,
"alnum_prop": 0.7415094339622641,
"repo_name": "krnan9525/Lab11",
"id": "3e301e1e4549608019b3520a8747eaf12bd0764e",
"size": "1127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythoncount-aws-msgs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8128"
}
],
"symlink_target": ""
} |
"""Tests for Ranking."""
import itertools
import math
from typing import List, Dict
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_recommenders as tfrs
def _get_tpu_embedding_feature_config(
vocab_sizes: List[int],
embedding_dim: int,
table_name_prefix: str = "embedding_table"
) -> Dict[str, tf.tpu.experimental.embedding.FeatureConfig]:
"""Returns TPU embedding feature config.
Args:
vocab_sizes: List of sizes of categories/id's in the table.
embedding_dim: Embedding dimension.
table_name_prefix: A prefix for embedding tables.
Returns:
A dictionary of feature_name, FeatureConfig pairs.
"""
feature_config = {}
for i, vocab_size in enumerate(vocab_sizes):
table_config = tf.tpu.experimental.embedding.TableConfig(
vocabulary_size=vocab_size,
dim=embedding_dim,
combiner="mean",
initializer=tf.initializers.TruncatedNormal(
mean=0.0, stddev=1 / math.sqrt(embedding_dim)
),
name=f"{table_name_prefix}_{i}"
)
feature_config[str(i)] = tf.tpu.experimental.embedding.FeatureConfig(
table=table_config)
return feature_config
def _generate_synthetic_data(num_dense: int,
vocab_sizes: List[int],
dataset_size: int,
batch_size: int,
generate_weights: bool = False) -> tf.data.Dataset:
dense_tensor = tf.random.uniform(
shape=(dataset_size, num_dense), maxval=1.0, dtype=tf.float32)
# The mean is in [0, 1] interval.
dense_tensor_mean = tf.math.reduce_mean(dense_tensor, axis=1)
sparse_tensors = []
for size in vocab_sizes:
sparse_tensors.append(
tf.random.uniform(
shape=(dataset_size,), maxval=int(size), dtype=tf.int32))
sparse_tensor_elements = {
str(i): sparse_tensors[i] for i in range(len(sparse_tensors))
}
sparse_tensors = tf.stack(sparse_tensors, axis=-1)
sparse_tensors_mean = tf.math.reduce_sum(sparse_tensors, axis=1)
# The mean is in [0, 1] interval.
sparse_tensors_mean = tf.cast(sparse_tensors_mean, dtype=tf.float32)
sparse_tensors_mean /= sum(vocab_sizes)
# The label is in [0, 1] interval.
label_tensor = (dense_tensor_mean + sparse_tensors_mean) / 2.0
# Use the threshold 0.5 to convert to 0/1 labels.
label_tensor = tf.cast(label_tensor + 0.5, tf.int32)
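  # Worked example (numbers are illustrative): a dense mean of 0.8 and a
  # sparse mean of 0.3 give a label of (0.8 + 0.3) / 2 = 0.55, and casting
  # 0.55 + 0.5 = 1.05 to int32 yields 1; a combined mean below 0.5 yields 0.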
if generate_weights:
weights = tf.random.uniform(shape=(dataset_size, 1))
input_elem = (
{"dense_features": dense_tensor,
"sparse_features": sparse_tensor_elements},
label_tensor,
weights
)
else:
input_elem = (
{"dense_features": dense_tensor,
"sparse_features": sparse_tensor_elements},
label_tensor,
)
dataset = tf.data.Dataset.from_tensor_slices(input_elem)
return dataset.batch(batch_size, drop_remainder=True)
class RankingTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
itertools.product(
# Feature interaction layers.
(
tfrs.layers.feature_interaction.DotInteraction,
lambda: tf.keras.Sequential([
tf.keras.layers.Concatenate(),
tfrs.layers.feature_interaction.Cross()
]),
),
# Bottom stack.
(lambda: None, lambda: tfrs.layers.blocks.MLP(units=[40, 16])),
# Top stack.
(lambda: None, lambda: tfrs.layers.blocks.MLP(
units=[40, 20, 1], final_activation="sigmoid")),
# Use weights.
(True, False),
# Size threshold.
(None, -1, 20)))
def test_ranking_model(self,
feature_interaction_layer,
bottom_stack,
top_stack,
use_weights=False,
size_threshold=10):
"""Tests a ranking model."""
vocabulary_sizes = [30, 3, 26]
embedding_feature_config = _get_tpu_embedding_feature_config(
vocab_sizes=vocabulary_sizes, embedding_dim=16)
optimizer = tf.keras.optimizers.legacy.Adam()
model = tfrs.experimental.models.Ranking(
embedding_layer=tfrs.experimental.layers.embedding.PartialTPUEmbedding(
feature_config=embedding_feature_config,
optimizer=optimizer,
size_threshold=size_threshold),
bottom_stack=bottom_stack(),
feature_interaction=feature_interaction_layer(),
top_stack=top_stack())
model.compile(optimizer=optimizer, steps_per_execution=5)
dataset = _generate_synthetic_data(
num_dense=8,
vocab_sizes=vocabulary_sizes,
dataset_size=64,
batch_size=16,
generate_weights=use_weights)
model.fit(
dataset.repeat(), validation_data=dataset, epochs=1, steps_per_epoch=5)
metrics_ = model.evaluate(dataset, return_dict=True)
self.assertIn("loss", metrics_)
self.assertIn("accuracy", metrics_)
if __name__ == "__main__":
tf.test.main()
| {
"content_hash": "f34a220afaf8cfaff01cf224167a68d8",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 80,
"avg_line_length": 31.9874213836478,
"alnum_prop": 0.6189539913488006,
"repo_name": "tensorflow/recommenders",
"id": "64be9c74ab742cb46f5d678e4d6bea2e0da9f533",
"size": "5719",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_recommenders/experimental/models/ranking_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "232265"
},
{
"name": "Shell",
"bytes": "2138"
}
],
"symlink_target": ""
} |
import pandas as pd
import numpy as np
# If you'd like to try this lab with PCA instead of Isomap,
# as the dimensionality reduction technique:
Test_PCA = True
def plotDecisionBoundary(model, X, y):
print("Plotting...")
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot') # Look Pretty
fig = plt.figure()
ax = fig.add_subplot(111)
padding = 0.1
resolution = 0.1
#(2 for benign, 4 for malignant)
colors = {2:'royalblue',4:'lightsalmon'}
  # Calculate the boundaries
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
x_range = x_max - x_min
y_range = y_max - y_min
x_min -= x_range * padding
y_min -= y_range * padding
x_max += x_range * padding
y_max += y_range * padding
# Create a 2D Grid Matrix. The values stored in the matrix
  # are the predictions of the class at said location
import numpy as np
xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),
np.arange(y_min, y_max, resolution))
# What class does the classifier say?
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour map
plt.contourf(xx, yy, Z, cmap=plt.cm.seismic)
plt.axis('tight')
# Plot your testing points as well...
for label in np.unique(y):
indices = np.where(y == label)
plt.scatter(X[indices, 0], X[indices, 1], c=colors[label], alpha=0.8)
p = model.get_params()
plt.title('K = ' + str(p['n_neighbors']))
plt.show()
#
# TODO: Load in the dataset, identify nans, and set proper headers.
# Be sure to verify the rows line up by looking at the file in a text editor.
#
X = pd.read_csv('Datasets//breast-cancer-wisconsin.data', names = ['sample', 'thickness', 'size', 'shape', 'adhesion', 'epithelial', 'nuclei', 'chromatin', 'nucleoli', 'mitoses', 'status'])
#
# TODO: Copy out the status column into a slice, then drop it from the main
# dataframe. Always verify you properly executed the drop by double checking
# (printing out the resulting operating)! Many people forget to set the right
# axis here.
#
# If you goofed up on loading the dataset and notice you have a `sample` column,
# this would be a good place to drop that too if you haven't already.
#
y = X['status'].copy()
X.drop(['sample', 'status'], inplace=True, axis=1)
#
# TODO: With the labels safely extracted from the dataset, replace any nan values
# with the mean feature / column value
#
X.replace('?', np.NaN, inplace=True)
X = X.fillna(X.mean())
#
# TODO: Do train_test_split. Use the same variable names as on the EdX platform in
# the reading material, but set the random_state=7 for reproduceability, and keep
# the test_size at 0.5 (50%).
#
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=7)
#
# TODO: Experiment with the basic SKLearn preprocessing scalers. We know that
# the features consist of different units mixed in together, so it might be
# reasonable to assume feature scaling is necessary. Print out a description
# of the dataset, post transformation. Recall: when you do pre-processing,
# which portion of the dataset is your model trained upon? Also which portion(s)
# of your dataset actually get transformed?
#
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(X_train)
X_test = scaler.transform(X_test)
X_train = scaler.transform(X_train)
#
# PCA and Isomap are your new best friends
model = None
if Test_PCA:
print("Computing 2D Principle Components")
#
# TODO: Implement PCA here. Save your model into the variable 'model'.
# You should reduce down to two dimensions.
#
from sklearn.decomposition import PCA
model = PCA(n_components=2, svd_solver='randomized', random_state=1)
model.fit(X_train)
X_train = model.transform(X_train)
X_test = model.transform(X_test)
else:
print("Computing 2D Isomap Manifold")
#
# TODO: Implement Isomap here. Save your model into the variable 'model'
# Experiment with K values from 5-10.
# You should reduce down to two dimensions.
#
from sklearn import manifold
model = manifold.Isomap(n_neighbors = 5, n_components = 2)
model.fit(X_train)
X_train = model.transform(X_train)
X_test = model.transform(X_test)
#
# TODO: Train your model against data_train, then transform both
# data_train and data_test using your model. You can save the results right
# back into the variables themselves.
#
# .. your code here ..
#
# TODO: Implement and train KNeighborsClassifier on your projected 2D
# training data here. You can use any K value from 1 - 15, so play around
# with it and see what results you can come up. Your goal is to find a
# good balance where you aren't too specific (low-K), nor are you too
# general (high-K). You should also experiment with how changing the weights
# parameter affects the results.
#
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
model = KNeighborsClassifier(n_neighbors=9)
model.fit(X_train, y_train.values.ravel())
#
# INFO: Be sure to always keep the domain of the problem in mind! It's
# WAY more important to errantly classify a benign tumor as malignant,
# and have it removed, than to incorrectly leave a malignant tumor, believing
# it to be benign, and then having the patient progress in cancer. Since the UDF
# weights don't give you any class information, the only way to introduce this
# data into SKLearn's KNN Classifier is by "baking" it into your data. For
# example, randomly reducing the ratio of benign samples compared to malignant
# samples from the training set.
#
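#
# Illustrative sketch only (not called anywhere below; the helper name and
# keep fraction are assumptions): one way to "bake" that preference into the
# data is to randomly drop a fraction of the benign (label 2) training
# samples before fitting, so borderline cases lean towards malignant.
def downsample_benign(features, labels, keep_fraction=0.7, benign_label=2):
    # Keep every malignant sample and a random subset of the benign ones.
    benign_idx = np.where(labels == benign_label)[0]
    kept_benign = np.random.choice(
        benign_idx, int(len(benign_idx) * keep_fraction), replace=False)
    other_idx = np.where(labels != benign_label)[0]
    selected = np.concatenate([kept_benign, other_idx])
    return features[selected], labels.iloc[selected]
#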
# TODO: Calculate + Print the accuracy of the testing set
#
print(model.score(X_test, y_test))
plotDecisionBoundary(model, X_test, y_test)
| {
"content_hash": "25982a10491513c49ea0b2334649c8a4",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 189,
"avg_line_length": 31.053475935828878,
"alnum_prop": 0.7094885483037713,
"repo_name": "sly-ninja/python_for_ml",
"id": "b1e70432353ee2496b9f46650daedd5a32a8da64",
"size": "5807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Module5/assignment7.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "113070"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('MASTER', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Dusun',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nama_dusun', models.CharField(max_length=100)),
('keterangan', models.CharField(max_length=500)),
('createtime', models.DateTimeField(auto_now_add=True)),
('updatetime', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Provinsi',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nama_provinsi', models.CharField(max_length=100)),
('keterangan', models.CharField(max_length=500)),
('createtime', models.DateTimeField(auto_now_add=True)),
('updatetime', models.DateTimeField(auto_now=True)),
],
),
migrations.RenameField(
model_name='kecamatan',
old_name='nama_kabupaten',
new_name='id_kabupaten',
),
migrations.RenameField(
model_name='kelurahan',
old_name='nama_kecamatan',
new_name='id_kecamatan',
),
migrations.AddField(
model_name='dusun',
name='id_kelurahan',
field=models.ForeignKey(to='MASTER.Kelurahan'),
),
migrations.AddField(
model_name='kabupaten',
name='id_provinsi',
field=models.ForeignKey(default=1, to='MASTER.Provinsi'),
preserve_default=False,
),
]
| {
"content_hash": "5b969937d9cc821710f0d2be8ee6a66b",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 114,
"avg_line_length": 35.111111111111114,
"alnum_prop": 0.5416666666666666,
"repo_name": "ThinkBuntu/rembugdesa",
"id": "8f5223532b6e6c09835c59b95361ae43bb412ab0",
"size": "1920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MASTER/migrations/0002_auto_20150827_1250.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22910"
}
],
"symlink_target": ""
} |
import pytest
import uuid
from flask import json, url_for
from app.models import Article, DRAFT, READY
from tests.conftest import create_authorization_header
from tests.db import create_article
sample_articles = [
{
"id": "1",
"title": "Forty Years Fighting Racism and Intolerance",
"author": "John Gilbert",
"content": """<h2>A century with no solidarity</h2>\r\n One of the worst plagues that the twentieth century
has had to \r\n bear is racial discrimination.""",
"entrydate": "2015-11-01"
},
{
"id": "2",
"title": "Modern Mythology",
"author": "Sabine Leitner",
"content": """Despite their universal existence in all civilizations and all \r\ntimes of history,
myths have often been scoffed at and regarded as old wives\u2019 \r\ntales.""",
"entrydate": "2016-01-30"
},
]
class WhenGettingArticles:
def it_returns_all_articles(self, client, sample_article, db_session):
response = client.get(
url_for('articles.get_articles'),
headers=[create_authorization_header()]
)
assert response.status_code == 200
data = json.loads(response.get_data(as_text=True))
assert len(data) == 1
assert data[0]['id'] == str(sample_article.id)
def it_returns_all_articles_summary(self, client, sample_article, db_session):
response = client.get(
url_for('articles.get_articles_summary'),
headers=[create_authorization_header()]
)
assert response.status_code == 200
data = json.loads(response.get_data(as_text=True))
assert len(data) == 1
assert data[0]['id'] == str(sample_article.id)
    def it_returns_up_to_5_articles_summary(self, client, sample_article, db_session):
create_article(title='test 1')
create_article(title='test 2')
create_article(title='test 3')
create_article(title='test 4')
create_article(title='test 5')
response = client.get(
url_for('articles.get_articles_summary'),
headers=[create_authorization_header()]
)
assert response.status_code == 200
data = json.loads(response.get_data(as_text=True))
assert len(data) == 5
def it_returns_selected_article_summary(self, client, sample_article, db_session):
article_1 = create_article(title='test 1')
create_article(title='test 2')
article_ids = "{},{}".format(sample_article.id, article_1.id)
response = client.get(
url_for('articles.get_articles_summary', ids=article_ids),
headers=[create_authorization_header()]
)
assert response.status_code == 200
data = json.loads(response.get_data(as_text=True))
assert len(data) == 2
        assert {a['id'] for a in data} == set(article_ids.split(','))
class WhenGettingArticleByID:
def it_returns_correct_article(self, client, sample_article, db_session):
response = client.get(
url_for('article.get_article_by_id', article_id=str(sample_article.id)),
headers=[create_authorization_header()]
)
assert response.status_code == 200
json_resp = json.loads(response.get_data(as_text=True))
assert json_resp['id'] == str(sample_article.id)
class WhenPostingImportArticles(object):
def it_creates_articles_for_imported_articles(self, client, db_session):
response = client.post(
url_for('articles.import_articles'),
data=json.dumps(sample_articles),
headers=[('Content-Type', 'application/json'), create_authorization_header()]
)
assert response.status_code == 201
json_articles = json.loads(response.get_data(as_text=True))['articles']
assert len(json_articles) == len(sample_articles)
for i in range(0, len(sample_articles) - 1):
assert json_articles[i]["old_id"] == int(sample_articles[i]["id"])
assert json_articles[i]["title"] == sample_articles[i]["title"]
assert json_articles[i]["author"] == sample_articles[i]["author"]
assert json_articles[i]["content"] == sample_articles[i]["content"]
assert json_articles[i]["created_at"] == sample_articles[i]["entrydate"]
def it_does_not_create_article_for_imported_articles_with_duplicates(self, client, db_session):
duplicate_article = {
"id": "1",
"title": "Forty Years Fighting Racism and Intolerance",
"author": "John Gilbert",
"content": """<h2>A century with no solidarity</h2>\r\n One of the worst plagues that the twentieth century
has had to \r\n bear is racial discrimination.""",
"entrydate": "2015-11-01"
},
sample_articles.extend(duplicate_article)
response = client.post(
url_for('articles.import_articles'),
data=json.dumps(sample_articles),
headers=[('Content-Type', 'application/json'), create_authorization_header()]
)
assert response.status_code == 201
json_articles = json.loads(response.get_data(as_text=True))['articles']
assert len(json_articles) == len(sample_articles) - 1 # don't add in duplicate article
for i in range(0, len(sample_articles) - 1):
assert json_articles[i]["old_id"] == int(sample_articles[i]["id"])
assert json_articles[i]["title"] == sample_articles[i]["title"]
assert json_articles[i]["author"] == sample_articles[i]["author"]
assert json_articles[i]["content"] == sample_articles[i]["content"]
assert json_articles[i]["created_at"] == sample_articles[i]["entrydate"]
class WhenPostingUpdateArticleByOldId:
def it_updates_an_article(self, client, db_session, sample_article):
data = {
'title': 'Updated',
'image_filename': 'new_filename.jpg'
}
response = client.post(
url_for('article.update_article_by_old_id', old_id=sample_article.old_id),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), create_authorization_header()]
)
assert response.status_code == 200
assert response.json['image_filename'] == data['image_filename']
class WhenPostingAddArticle:
def it_adds_an_article(self, client, db_session, sample_magazine):
data = {
'title': 'New',
'author': 'Somone',
'content': 'Something interesting',
'image_filename': 'new_filename.jpg',
'magazine_id': str(sample_magazine.id),
'tags': 'Some tag'
}
response = client.post(
url_for('article.add_article'),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), create_authorization_header()]
)
assert response.status_code == 201
assert response.json['image_filename'] == data['image_filename']
assert response.json['magazine_id'] == data['magazine_id']
articles = Article.query.all()
assert len(articles) == 1
assert articles[0].title == data['title']
assert articles[0].article_state == DRAFT
assert articles[0].tags == data['tags']
assert articles[0].magazine_id == sample_magazine.id
class WhenPostingUpdateArticle:
def it_updates_an_article(self, client, db_session, sample_article):
data = {
'title': 'Updated',
'author': 'Updated Somone',
'content': 'Something updated',
'image_filename': 'updated_filename.jpg',
'tags': 'Updated tag',
'article_state': READY
}
response = client.post(
url_for('article.update_article_by_id', article_id=sample_article.id),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), create_authorization_header()]
)
assert response.status_code == 200
assert response.json['image_filename'] == data['image_filename']
articles = Article.query.all()
assert len(articles) == 1
assert articles[0].title == data['title']
assert articles[0].article_state == READY
assert articles[0].tags == data['tags']
| {
"content_hash": "a68c322a7ebf0d238e24e7dcab3040b9",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 119,
"avg_line_length": 38.14027149321267,
"alnum_prop": 0.6024439435282952,
"repo_name": "NewAcropolis/api",
"id": "1531ace8a0377772537206fbe7291d2d289844f0",
"size": "8429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/app/routes/articles/test_rest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10421"
},
{
"name": "Makefile",
"bytes": "1369"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "791740"
},
{
"name": "Shell",
"bytes": "66108"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TokenValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="token", parent_name="funnelarea.stream", **kwargs):
super(TokenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "info"),
strict=kwargs.pop("strict", True),
**kwargs
)
| {
"content_hash": "470e0da081e58dbbc1fc58981222a1b0",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 87,
"avg_line_length": 38.785714285714285,
"alnum_prop": 0.5948434622467772,
"repo_name": "plotly/python-api",
"id": "8fafb636323f2cff6c86694012e5806fd894ff57",
"size": "543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/funnelarea/stream/_token.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
__author__ = 'jt'
import random
def randomID():
return ''.join(random.choice('0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz') for i in range(15))
| {
"content_hash": "41da58a154f367dbdee491fc67472bac",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 118,
"avg_line_length": 33.6,
"alnum_prop": 0.7619047619047619,
"repo_name": "jtmuyl/mass-cropper-for-cv",
"id": "06994ff82b255e0c0729b4c2bcca083bf2f332c8",
"size": "233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7751"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.core import exceptions
from django.conf import settings
import logging
import warnings
from .models import ReloadRulesRequest
from .restrictor import IPRestrictor
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
class MiddlewareMixin(object):
def __init__(self, *args, **kwargs):
pass
logger = logging.getLogger(__name__)
class IPRestrictMiddleware(MiddlewareMixin):
restrictor = None
trusted_proxies = None
allow_proxies = None
reload_rules = None
def __init__(self, *args, **kwargs):
super(IPRestrictMiddleware, self).__init__(*args, **kwargs)
self.restrictor = IPRestrictor()
self.trusted_proxies = tuple(get_setting('IPRESTRICT_TRUSTED_PROXIES', 'TRUSTED_PROXIES', []))
self.reload_rules = get_reload_rules_setting()
self.ignore_proxy_header = bool(get_setting('IPRESTRICT_IGNORE_PROXY_HEADER', 'IGNORE_PROXY_HEADER', False))
self.trust_all_proxies = bool(get_setting('IPRESTRICT_TRUST_ALL_PROXIES', 'TRUST_ALL_PROXIES', False))
def process_request(self, request):
if self.reload_rules:
self.reload_rules_if_needed()
url = request.path_info
client_ip = self.extract_client_ip(request)
if self.restrictor.is_restricted(url, client_ip):
logger.warn("Denying access of %s to %s" % (url, client_ip))
raise exceptions.PermissionDenied
def extract_client_ip(self, request):
client_ip = request.META['REMOTE_ADDR']
if not self.ignore_proxy_header:
forwarded_for = self.get_forwarded_for(request)
if forwarded_for:
closest_proxy = client_ip
client_ip = forwarded_for.pop(0)
if self.trust_all_proxies:
return client_ip
proxies = [closest_proxy] + forwarded_for
for proxy in proxies:
if proxy not in self.trusted_proxies:
logger.warn("Client IP %s forwarded by untrusted proxy %s" % (client_ip, proxy))
raise exceptions.PermissionDenied
return client_ip
def get_forwarded_for(self, request):
hdr = request.META.get('HTTP_X_FORWARDED_FOR')
if hdr is not None:
return [ip.strip() for ip in hdr.split(',')]
else:
return []
def reload_rules_if_needed(self):
last_reload_request = ReloadRulesRequest.last_request()
if last_reload_request is not None:
if self.restrictor.last_reload < last_reload_request:
self.restrictor.reload_rules()
def get_setting(new_name, old_name, default=None):
setting_name = new_name
if hasattr(settings, old_name):
setting_name = old_name
warn_about_changed_setting(old_name, new_name)
return getattr(settings, setting_name, default)
def get_reload_rules_setting():
if hasattr(settings, 'DONT_RELOAD_RULES'):
warn_about_changed_setting('DONT_RELOAD_RULES', 'IPRESTRICT_RELOAD_RULES')
return not bool(getattr(settings, 'DONT_RELOAD_RULES'))
return bool(getattr(settings, 'IPRESTRICT_RELOAD_RULES', True))
def warn_about_changed_setting(old_name, new_name):
    # DeprecationWarnings are ignored by default, so let's make sure that
# the warnings are shown by using the default UserWarning instead
warnings.warn("The setting name '%s' has been deprecated and it will be removed in a future version. "
"Please use '%s' instead." % (old_name, new_name))
| {
"content_hash": "0689e4ef9bb9da4e310640ba9f36583c",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 116,
"avg_line_length": 38.62765957446808,
"alnum_prop": 0.6438997521343982,
"repo_name": "muccg/django-iprestrict",
"id": "4882c918efb1f155fca528e2c2cb37b75369399f",
"size": "3655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iprestrict/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1022"
},
{
"name": "HTML",
"bytes": "2602"
},
{
"name": "Python",
"bytes": "95892"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from olympia import amo
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import ModelBase
@python_2_unicode_compatible
class DiscoveryModule(ModelBase):
"""
Keeps the application, ordering, and locale metadata for a module.
The modules are defined statically in modules.py and linked to a database
row through the module's name.
"""
id = PositiveAutoField(primary_key=True)
app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='app_id')
module = models.CharField(max_length=255)
ordering = models.IntegerField(null=True, blank=True)
locales = models.CharField(max_length=255, blank=True, default='')
class Meta:
db_table = 'discovery_modules'
unique_together = ('app', 'module')
def __str__(self):
return u'%s (%s)' % (self.module, self.get_app_display())
| {
"content_hash": "54176a2d4fcfa4e457a5c928f5edb72a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 34.62068965517241,
"alnum_prop": 0.6872509960159362,
"repo_name": "aviarypl/mozilla-l10n-addons-server",
"id": "3c7698a33cadfdc4f4ce689e344e2055febc9ce7",
"size": "1004",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/olympia/legacy_discovery/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "809734"
},
{
"name": "Dockerfile",
"bytes": "2898"
},
{
"name": "HTML",
"bytes": "515798"
},
{
"name": "JavaScript",
"bytes": "1070508"
},
{
"name": "Makefile",
"bytes": "827"
},
{
"name": "PLSQL",
"bytes": "316"
},
{
"name": "PLpgSQL",
"bytes": "10596"
},
{
"name": "Python",
"bytes": "5462821"
},
{
"name": "SQLPL",
"bytes": "645"
},
{
"name": "Shell",
"bytes": "8821"
},
{
"name": "Smarty",
"bytes": "1388"
}
],
"symlink_target": ""
} |
from browserplus import BrowserPlus, __version__
| {
"content_hash": "560731409339b7911d232040639ccb75",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 48,
"avg_line_length": 49,
"alnum_prop": 0.7959183673469388,
"repo_name": "xujun10110/browserplus",
"id": "d092f9cddc4e9740519ad6905179aeb38c111ab8",
"size": "49",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "browserplus/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5054"
}
],
"symlink_target": ""
} |
"""Base class for MOMA initializers that return a boolean indicating success."""
import abc
from typing import Any
from dm_control import composer
class Initializer(composer.Initializer):
"""Composer initializer that returns whether it was successful."""
@abc.abstractmethod
def __call__(self, physics: Any, random_state: Any) -> bool:
raise NotImplementedError
def reset(self, physics: Any) -> bool:
"""Resets this initializer. Returns true if successful."""
return True
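# Minimal illustrative subclass (not part of the library; the scene set-up it
# would perform is only sketched in comments):
class ExampleInitializer(Initializer):
  """Toy initializer that always reports success."""

  def __call__(self, physics: Any, random_state: Any) -> bool:
    # A real initializer would use `physics` and `random_state` here, e.g. to
    # randomise object poses, and return False if the attempt failed so the
    # environment can retry.
    del physics, random_state  # Unused in this sketch.
    return True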
| {
"content_hash": "02e7c1e00ad76de9e02afd23f25f813e",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 80,
"avg_line_length": 29.235294117647058,
"alnum_prop": 0.7344064386317908,
"repo_name": "deepmind/dm_robotics",
"id": "7545a37bf38f5d125b4c626d580b387147611285",
"size": "1093",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "py/moma/initializer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "479450"
},
{
"name": "CMake",
"bytes": "34173"
},
{
"name": "Jupyter Notebook",
"bytes": "106284"
},
{
"name": "Python",
"bytes": "1413203"
},
{
"name": "Shell",
"bytes": "3244"
}
],
"symlink_target": ""
} |
"""Presubmit script for //build/skia_gold_common/.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
def CommonChecks(input_api, output_api):
output = []
build_path = input_api.os_path.join(input_api.PresubmitLocalPath(), '..')
skia_gold_env = dict(input_api.environ)
skia_gold_env.update({
'PYTHONPATH': build_path,
'PYTHONDONTWRITEBYTECODE': '1',
})
output.extend(
input_api.canned_checks.RunUnitTestsInDirectory(
input_api,
output_api,
input_api.PresubmitLocalPath(), [r'^.+_unittest\.py$'],
env=skia_gold_env))
output.extend(input_api.canned_checks.RunPylint(input_api, output_api))
return output
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api)
| {
"content_hash": "dfdc892636acab40f7ef162c9735752c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 75,
"avg_line_length": 30.967741935483872,
"alnum_prop": 0.7,
"repo_name": "youtube/cobalt_sandbox",
"id": "41e1bb2f7dee69fa912b16468ca6e2b73b5151d2",
"size": "1122",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "build/skia_gold_common/PRESUBMIT.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
def postgresql_service(postgresql_daemon_name=None, action='start'):
status_cmd = format('service {postgresql_daemon_name} status | grep running')
cmd = format('service {postgresql_daemon_name} {action}')
if action == 'status':
Execute(status_cmd)
elif action == 'stop':
Execute(cmd,
logoutput = True,
only_if = status_cmd
)
elif action == 'start':
Execute(cmd,
logoutput = True,
not_if = status_cmd
)
else:
Execute(cmd, logoutput = True)
| {
"content_hash": "2b72f6cc0471938e9cf6c53450f917df",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 32.85,
"alnum_prop": 0.7245053272450532,
"repo_name": "zouzhberk/ambaridemo",
"id": "cc7b4cc14eb0a01ca7c33bb904b588199e268907",
"size": "1336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5982"
},
{
"name": "Groff",
"bytes": "13935"
},
{
"name": "HTML",
"bytes": "52"
},
{
"name": "Java",
"bytes": "8681846"
},
{
"name": "PLSQL",
"bytes": "2160"
},
{
"name": "PLpgSQL",
"bytes": "105599"
},
{
"name": "PowerShell",
"bytes": "43170"
},
{
"name": "Python",
"bytes": "2751909"
},
{
"name": "Ruby",
"bytes": "9652"
},
{
"name": "SQLPL",
"bytes": "2117"
},
{
"name": "Shell",
"bytes": "247846"
}
],
"symlink_target": ""
} |
import json
import os
import requests
from requests_toolbelt import MultipartEncoder
from .errors import *
from .converter import convert_html_to_telegraph_format, convert_json_to_html, OutputFormat
base_url = 'http://telegra.ph'
save_url = 'https://edit.telegra.ph/save'
api_url = 'https://api.telegra.ph'
default_user_agent = 'Python_telegraph_poster/0.1'
def _upload(title, author, text,
author_url='', tph_uuid=None, page_id=None, user_agent=default_user_agent, convert_html=True,
clean_html=True, telegraph_base_url=base_url):
if not title:
raise TitleRequiredError('Title is required')
if not text:
raise TextRequiredError('Text is required')
content = convert_html_to_telegraph_format(text, clean_html) if convert_html else text
cookies = dict(tph_uuid=tph_uuid) if tph_uuid and page_id else None
fields = {
'Data': ('content.html', content, 'plain/text'),
'title': title,
'author': author,
'author_url': author_url,
'page_id': page_id or '0',
'save_hash': ''
}
m = MultipartEncoder(fields, boundary='TelegraPhBoundary21')
headers = {
'Content-Type': m.content_type,
'Accept': 'application/json, text/javascript, */*; q=0.01',
'User-Agent': user_agent,
'Origin': telegraph_base_url
}
with requests.Session() as r:
r.mount('https://', requests.adapters.HTTPAdapter(max_retries=3))
response = r.post(save_url, timeout=4, headers=headers, cookies=cookies, data=m.to_string())
result = json.loads(response.text)
if 'path' in result:
result['tph_uuid'] = response.cookies.get('tph_uuid') or tph_uuid
result['url'] = telegraph_base_url + '/' + result['path']
return result
else:
error_msg = result['error'] if 'error' in result else ''
raise TelegraphError(error_msg)
def _prepare_page_upload_params(params):
# significantly reduce size of request body
return json.dumps(params, ensure_ascii=False, separators=(',', ':')).encode('utf-8')
def _upload_via_api(title, author, text, author_url='', access_token=None, user_agent=default_user_agent,
convert_html=True, clean_html=True, path=None, telegraph_api_url=api_url):
if not title:
raise TitleRequiredError('Title is required')
if not text:
raise TextRequiredError('Text is required')
if not access_token:
raise APITokenRequiredError('API token is required')
if not author:
author = '' # author is optional
if not author_url:
author_url = '' # author_url is optional
content = convert_html_to_telegraph_format(text, clean_html, output_format=OutputFormat.PYTHON_LIST) if convert_html else text
method = '/createPage' if not path else '/editPage'
params = {
'access_token': access_token,
'title': title[:256],
'author_name': author[:128],
'author_url': author_url[:512],
'content': content,
}
request_headers = {
'User-Agent': user_agent,
'Content-Type': 'application/json'
}
if path:
params.update({'path': path})
resp = requests.post(telegraph_api_url + method, data=_prepare_page_upload_params(params), headers=request_headers).json()
if resp['ok'] is True:
return resp.get('result')
else:
error_msg = resp['error'] if 'error' in resp else ''
raise TelegraphError(error_msg)
def create_api_token(short_name, author_name=None, author_url=None, user_agent=default_user_agent):
params = {
'short_name': short_name,
}
if author_name:
params.update({'author_name': author_name})
if author_url:
params.update({'author_url': author_url})
resp = requests.get(api_url+'/createAccount', params, headers={'User-Agent': user_agent})
json_data = resp.json()
return json_data['result']
def upload_to_telegraph(title, author, text, author_url='', tph_uuid=None, page_id=None, user_agent=default_user_agent):
return _upload(title, author, text, author_url, tph_uuid, page_id, user_agent)
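# Example usage (illustrative; account details and content are placeholders):
#
#   token = create_api_token('Sandbox', 'Anonymous')['access_token']
#   poster = TelegraphPoster(use_api=True, access_token=token)
#   page = poster.post('Sample Page', 'Anonymous', '<p>Hello, world!</p>')
#   print(page['url'])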
class TelegraphPoster(object):
def __init__(self, tph_uuid=None, page_id=None, user_agent=default_user_agent, clean_html=True, convert_html=True,
use_api=False, access_token=None, telegraph_api_url=api_url, telegraph_base_url=base_url):
self.title = None
self.author = None
self.author_url = None
self.text = None
self.path = None
self.tph_uuid = tph_uuid
self.page_id = page_id
self.user_agent = user_agent
self.clean_html = clean_html
self.convert_html = convert_html
self.access_token = access_token or os.getenv('TELEGRAPH_ACCESS_TOKEN', None)
self.account = None
self.use_api = use_api
self.telegraph_api_url = telegraph_api_url
self.telegraph_base_url = telegraph_base_url
if self.access_token:
# use api anyway
self.use_api = True
def _api_request(self, method, params=None):
params = params or {}
if self.access_token:
params['access_token'] = self.access_token
resp = requests.get(self.telegraph_api_url + '/' + method, params, headers={'User-Agent': self.user_agent})
return resp.json()
def post(self, title, author, text, author_url=''):
self.path = None
self.title = title
self.author = author
self.author_url = author_url
self.text = text
result = self.edit()
if not self.use_api:
self.tph_uuid = result['tph_uuid']
self.page_id = result['page_id']
return result
def edit(self, title=None, author=None, text=None, author_url='', path=None):
params = {
'title': title or self.title,
'author': author or self.author,
'text': text or self.text,
'author_url': author_url or self.author_url,
'user_agent': self.user_agent,
'clean_html': self.clean_html,
'convert_html': self.convert_html
}
if self.use_api:
params['telegraph_api_url'] = self.telegraph_api_url
result = _upload_via_api(access_token=self.access_token, path=path or self.path, **params)
self.path = result['path']
return result
else:
return _upload(
tph_uuid=self.tph_uuid,
page_id=self.page_id,
**params
)
def get_account_info(self, fields=None):
"""
Use this method to get information about a Telegraph account.
:param fields: (Array of String, default = ['short_name','author_name','author_url'])
List of account fields to return. Available fields: short_name, author_name, author_url, auth_url, page_count.
:return: Returns an Account object on success.
"""
if not self.access_token:
raise Exception('Access token is required')
return self._api_request('getAccountInfo', {
'fields': json.dumps(fields) if fields else ''
}).get('result')
def edit_account_info(self, short_name, author_name='', author_url=''):
"""
Use this method to update information about a Telegraph account.
Pass only the parameters that you want to edit.
:param short_name: (String, 1-32 characters) New account name.
:param author_name: (String, 0-128 characters) New default author name used when creating new articles.
:param author_url: (String, 0-512 characters) New default profile link, opened when users click on the
author's name below the title.
Can be any link, not necessarily to a Telegram profile or channel.
:return: Account object with the default fields.
"""
if not self.access_token:
raise Exception('Access token is required')
params = {
'short_name': short_name
}
if author_name:
params['author_name'] = author_name
if author_url:
params['author_url'] = author_url
return self._api_request('editAccountInfo', params).get('result')
def get_page(self, path, return_content=False):
"""
Use this method to get a Telegraph page. Returns a Page object on success.
:param path: (String) Required. Path to the Telegraph page (in the format Title-12-31, i.e.
everything that comes after http://telegra.ph/).
:param return_content: (Boolean, default = false) If true, content field will be returned in Page object.
:return: Returns a Page object on success
"""
json_response = self._api_request('getPage', {
'path': path,
'return_content': return_content
})
if return_content:
json_response['result']['html'] = convert_json_to_html(json_response['result']['content'], self.telegraph_base_url)
return json_response.get('result')
def get_page_list(self, offset=0, limit=50):
"""
Use this method to get a list of pages belonging to a Telegraph account.
:param offset: Sequential number of the first page to be returned.
:param limit: Limits the number of pages to be retrieved.
:return: PageList object, sorted by most recently created pages first.
"""
json_response = self._api_request('getPageList', {
'offset': offset,
'limit': limit
})
return json_response.get('result')
def get_views(self, path, year=None, month=None, day=None, hour=None):
"""
Use this method to get the number of views for a Telegraph article.
:param path: Required. Path to the Telegraph page (in the format Title-12-31, where 12 is the month and 31 the
day the article was first published).
:param year: Required if month is passed. If passed, the number of page views for the requested year will be
returned.
:param month: Required if day is passed. If passed, the number of page views for the requested month will be
returned.
:param day: Required if hour is passed. If passed, the number of page views for the requested day will be
returned.
:param hour: If passed, the number of page views for the requested hour will be returned.
:return: Returns a PageViews object on success. By default, the total number of page views will be returned.
"""
return self._api_request('getViews', {
'path': path,
'year': year,
'month': month,
'day': day,
'hour': hour
}).get('result')
def create_api_token(self, short_name, author_name=None, author_url=None):
"""
Use this method to create a new Telegraph account.
Most users only need one account, but this can be useful for channel administrators who would like to keep
individual author names and profile links for each of their channels.
:param short_name: Account name, helps users with several accounts remember which they are currently using.
Displayed to the user above the "Edit/Publish" button on Telegra.ph, other users don't see this name.
:param author_name: Default author name used when creating new articles.
:param author_url: Default profile link, opened when users click on the author's name below the title.
Can be any link, not necessarily to a Telegram profile or channel.
:return: Account object with the regular fields and an additional access_token field.
"""
token_data = create_api_token(short_name, author_name, author_url, self.user_agent)
self.use_api = True
self.account = token_data
self.access_token = token_data['access_token']
return token_data
def revoke_access_token(self):
"""
Use this method to revoke access_token and generate a new one, for example, if the user would like to reset
all connected sessions, or if you have reason to believe the token was compromised.
:return: Account object with new access_token and auth_url fields.
"""
if not self.access_token:
raise Exception('Access token is required')
json_response = self._api_request('revokeAccessToken')
if json_response['ok'] is True:
self.access_token = json_response['result']['access_token']
return json_response['result']
def create_page(self, *args, **kwargs):
"""
Shortcut method for post()
"""
return self.post(*args, **kwargs)
def edit_page(self, *args, **kwargs):
"""
Shortcut method for edit()
"""
return self.edit(*args, **kwargs)
def create_account(self, *args, **kwargs):
"""
Shortcut method for create_api_token()
"""
return self.create_api_token(*args, **kwargs)
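# A minimal, hypothetical usage sketch of the classes above (example values only,
# not an official example from the library): create an API token, publish a page,
# edit it in place, then query its view count.
def _example_telegraph_usage():
    poster = TelegraphPoster(use_api=True)
    poster.create_api_token('Example', author_name='Example Author')  # stores the new access_token on the poster
    page = poster.post(title='Hello, Telegraph', author='Example Author', text='<p>First paragraph.</p>')
    poster.edit(text='<p>Updated paragraph.</p>')  # reuses the stored path, so the same page is updated
    views = poster.get_views(page['path'])  # total page views for the new page
    return page['url'], views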
| {
"content_hash": "b8e07b11281b5161deca9d55a6215ddf",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 130,
"avg_line_length": 41.55345911949686,
"alnum_prop": 0.6157862872710761,
"repo_name": "mercuree/html-telegraph-poster",
"id": "04e9369a1738c684c428209958127b91d3a50e73",
"size": "13230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "html_telegraph_poster/html_to_telegraph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "81211"
}
],
"symlink_target": ""
} |
import ctypes
import numpy
from ..utils.c import load_library
double1d = ctypes.POINTER(ctypes.c_double)
class RecurrentNeuralNetwork(ctypes.Structure):
_fields_ = [
("dim", ctypes.c_int),
("steps", ctypes.c_int),
("m", double1d),
("x", double1d),
]
class Simulator:
_lib = load_library('libcomparatist_rnn')
_run = _lib.RecurrentNeuralNetwork_run
_run.argtypes = [ctypes.POINTER(RecurrentNeuralNetwork)]
def __init__(self, x0=0.1, dim=1000, steps=10):
self.m = numpy.ones((dim, dim), dtype=float) / dim * 0.5
self.x = numpy.ones((steps, dim), dtype=float)
self.x[0] = x0
self.struct = RecurrentNeuralNetwork()
self.struct.steps = steps
self.struct.dim = dim
self.struct.x = self.x.ctypes.data_as(double1d)
self.struct.m = self.m.ctypes.data_as(double1d)
def run(self):
self._run(ctypes.pointer(self.struct))
def prepare(name):
from ._helper import init, params
kwds = init(**params[name])
steps, dim = kwds["x"].shape
sim = Simulator(x0=kwds["x"][0], dim=dim, steps=steps)
sim.m[:] = kwds["m"]
kwds["x"] = sim.x
def run():
sim.run()
return kwds
return run
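# A minimal, hypothetical usage sketch (assumes the libcomparatist_rnn shared library
# has been built and is discoverable by load_library): run the C simulation and read
# back the trajectory the C code wrote into the numpy buffers shared via ctypes.
def _example_run(dim=100, steps=10):
    sim = Simulator(x0=0.1, dim=dim, steps=steps)
    sim.run()  # calls RecurrentNeuralNetwork_run, filling sim.x in place
    return sim.x[-1]  # state vector after the final step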
| {
"content_hash": "3366b739f23317e7da602013a03424d2",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 64,
"avg_line_length": 24.607843137254903,
"alnum_prop": 0.5936254980079682,
"repo_name": "tkf/comparatist",
"id": "68b6edf46eb5eafe00afc78e8bb4b420766af51f",
"size": "1255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/comparatist/rnn/clib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2940"
},
{
"name": "Julia",
"bytes": "4898"
},
{
"name": "Makefile",
"bytes": "271"
},
{
"name": "Python",
"bytes": "22452"
},
{
"name": "Shell",
"bytes": "184"
}
],
"symlink_target": ""
} |
import PythonQt
from PythonQt import QtCore, QtGui
import director.objectmodel as om
import director.visualization as vis
from director import affordanceitems
from director import lcmUtils
from director import callbacks
from director import cameracontrol
from director import splinewidget
from director import transformUtils
from director import teleoppanel
from director import footstepsdriverpanel
from director import applogic as app
from director import vtkAll as vtk
from director import filterUtils
from director.shallowCopy import shallowCopy
from director import segmentationpanel
from director import segmentation
from director import segmentationroutines
from director.vieweventfilter import ViewEventFilter
from director import viewbehaviors
import numpy as np
import ioUtils
import os
import re
import random
import colorsys
# todo: refactor these global variables
# several functions in this module depend on these global variables
# which are set by calling ViewBehaviors.addRobotBehaviors().
# These could be refactored to be members of a new behaviors class.
robotSystem = None
robotModel = None
handFactory = None
neckDriver = None
footstepsDriver = None
robotLinkSelector = None
lastRandomColor = 0.0
def resetCameraToRobot(view):
t = robotModel.getLinkFrame('pelvis')
if t is None:
t = vtk.vtkTransform()
focalPoint = [0.0, 0.0, 0.25]
position = [-4.0, -2.0, 2.25]
t.TransformPoint(focalPoint, focalPoint)
t.TransformPoint(position, position)
flyer = cameracontrol.Flyer(view)
flyer.zoomTo(focalPoint, position)
def resetCameraToHeadView(view):
head = robotModel.getLinkFrame('head')
pelvis = robotModel.getLinkFrame('pelvis')
viewDirection = np.array([1.0, 0.0, 0.0])
pelvis.TransformVector(viewDirection, viewDirection)
cameraPosition = np.array(head.GetPosition()) + 0.10 * viewDirection
camera = view.camera()
focalOffset = np.array(camera.GetFocalPoint()) - np.array(camera.GetPosition())
focalOffset /= np.linalg.norm(focalOffset)
camera.SetPosition(cameraPosition)
camera.SetFocalPoint(cameraPosition + focalOffset*0.03)
camera.SetViewUp([0, 0, 1])
camera.SetViewAngle(90)
view.render()
def getChildFrame(obj):
if hasattr(obj, 'getChildFrame'):
return obj.getChildFrame()
def placeHandModel(displayPoint, view, side='left'):
obj, _ = vis.findPickedObject(displayPoint, view)
if isinstance(obj, vis.FrameItem):
_, handFrame = handFactory.placeHandModelWithTransform(obj.transform, view, side=side, parent=obj.parent())
handFrame.frameSync = vis.FrameSync()
handFrame.frameSync.addFrame(obj)
handFrame.frameSync.addFrame(handFrame, ignoreIncoming=True)
return
pickedPoint, prop, _, normal = vis.pickPoint(displayPoint, view, pickType='cells', tolerance=0.0, returnNormal=True)
obj = vis.getObjectByProp(prop)
if not obj:
return
yaxis = -normal
zaxis = [0,0,1]
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
zaxis = np.cross(xaxis, yaxis)
zaxis /= np.linalg.norm(zaxis)
t = transformUtils.getTransformFromAxes(-zaxis, yaxis, xaxis)
t.PostMultiply()
t.Translate(pickedPoint)
if side == 'right':
t.PreMultiply()
t.RotateY(180)
handObj, handFrame = handFactory.placeHandModelWithTransform(t, view, side=side, parent=obj)
syncFrame = getChildFrame(obj)
if syncFrame:
handFrame.frameSync = vis.FrameSync()
handFrame.frameSync.addFrame(handFrame, ignoreIncoming=True)
handFrame.frameSync.addFrame(syncFrame)
class RobotLinkSelector(object):
def __init__(self):
self.selectedLink = None
self.setupMenuAction()
def setupMenuAction(self):
self.action = app.addMenuAction('Tools', 'Robot Link Selector')
self.action.setCheckable(True)
self.action.checked = False
def enabled(self):
return self.action.checked == True
def selectLink(self, displayPoint, view):
if not self.enabled():
return False
robotModel, _ = vis.findPickedObject(displayPoint, view)
try:
robotModel.model.getLinkNameForMesh
except AttributeError:
return False
model = robotModel.model
pickedPoint, _, polyData = vis.pickProp(displayPoint, view)
linkName = model.getLinkNameForMesh(polyData)
if not linkName:
return False
fadeValue = 1.0 if linkName == self.selectedLink else 0.05
for name in model.getLinkNames():
linkColor = model.getLinkColor(name)
linkColor.setAlphaF(fadeValue)
model.setLinkColor(name, linkColor)
if linkName == self.selectedLink:
self.selectedLink = None
vis.hideCaptionWidget()
om.removeFromObjectModel(om.findObjectByName('selected link frame'))
else:
self.selectedLink = linkName
linkColor = model.getLinkColor(self.selectedLink)
linkColor.setAlphaF(1.0)
model.setLinkColor(self.selectedLink, linkColor)
vis.showCaptionWidget(robotModel.getLinkFrame(self.selectedLink).GetPosition(), self.selectedLink, view=view)
vis.updateFrame(robotModel.getLinkFrame(self.selectedLink), 'selected link frame', scale=0.2, parent=robotModel)
return True
def newWalkingGoal(displayPoint, view):
footFrame = footstepsDriver.getFeetMidPoint(robotModel)
worldPt1, worldPt2 = vis.getRayFromDisplayPoint(view, displayPoint)
groundOrigin = footFrame.GetPosition()
groundNormal = [0.0, 0.0, 1.0]
selectedGroundPoint = [0.0, 0.0, 0.0]
t = vtk.mutable(0.0)
vtk.vtkPlane.IntersectWithLine(worldPt1, worldPt2, groundNormal, groundOrigin, t, selectedGroundPoint)
walkingTarget = transformUtils.frameFromPositionAndRPY(selectedGroundPoint, np.array(footFrame.GetOrientation()))
footstepsdriverpanel.panel.onNewWalkingGoal(walkingTarget)
def toggleFootstepWidget(displayPoint, view, useHorizontalWidget=False):
obj, _ = vis.findPickedObject(displayPoint, view)
if not obj:
return False
name = obj.getProperty('Name')
if name in ('footstep widget', 'footstep widget frame'):
om.removeFromObjectModel(om.findObjectByName('footstep widget'))
return True
match = re.match(r'^step (\d+)$', name)
if not match:
return False
stepIndex = int(match.group(1))
existingWidget = om.findObjectByName('footstep widget')
if existingWidget:
previousStep = existingWidget.stepIndex
om.removeFromObjectModel(existingWidget)
if previousStep == stepIndex:
return True
footMesh = shallowCopy(obj.polyData)
footFrame = transformUtils.copyFrame(obj.getChildFrame().transform)
if useHorizontalWidget:
rpy = [0.0, 0.0, transformUtils.rollPitchYawFromTransform(footFrame)[2]]
footFrame = transformUtils.frameFromPositionAndRPY(footFrame.GetPosition(), np.degrees(rpy))
footObj = vis.showPolyData(footMesh, 'footstep widget', parent='planning', alpha=0.2)
footObj.stepIndex = stepIndex
frameObj = vis.showFrame(footFrame, 'footstep widget frame', parent=footObj, scale=0.2)
footObj.actor.SetUserTransform(frameObj.transform)
footObj.setProperty('Color', obj.getProperty('Color'))
frameObj.setProperty('Edit', True)
rep = frameObj.widget.GetRepresentation()
rep.SetTranslateAxisEnabled(2, False)
rep.SetRotateAxisEnabled(0, False)
rep.SetRotateAxisEnabled(1, False)
frameObj.widget.HandleRotationEnabledOff()
walkGoal = om.findObjectByName('walking goal')
if walkGoal:
walkGoal.setProperty('Edit', False)
def onFootWidgetChanged(frame):
footstepsDriver.onStepModified(stepIndex - 1, frame)
frameObj.connectFrameModified(onFootWidgetChanged)
return True
def reachToFrame(frameObj, side, collisionObj):
goalFrame = teleoppanel.panel.endEffectorTeleop.newReachTeleop(frameObj.transform, side, collisionObj)
goalFrame.frameSync = vis.FrameSync()
goalFrame.frameSync.addFrame(goalFrame, ignoreIncoming=True)
goalFrame.frameSync.addFrame(frameObj)
def getAsFrame(obj):
if isinstance(obj, vis.FrameItem):
return obj
elif hasattr(obj, 'getChildFrame'):
return obj.getChildFrame()
def isGraspSeed(obj):
return hasattr(obj, 'side')
def getCollisionParent(obj):
'''
If obj is an affordance, return obj.
If obj is a frame or a grasp seed, return its first parent.
'''
if isinstance(obj, vis.FrameItem):
return obj.parent()
if isGraspSeed(obj):
return obj.parent()
else:
return obj
# The most recently cached PickedPoint - available as input to any other algorithm
lastCachedPickedPoint = np.array([0,0,0])
def getObjectAsPointCloud(obj):
try:
obj = obj.model.polyDataObj
except AttributeError:
pass
try:
obj.polyData
except AttributeError:
return None
if obj and obj.polyData.GetNumberOfPoints():  # and (obj.polyData.GetNumberOfCells() == obj.polyData.GetNumberOfVerts()):
return obj
def getRobotActions(view, pickedObj, pickedPoint):
reachFrame = getAsFrame(pickedObj)
collisionParent = getCollisionParent(pickedObj)
pointCloudObj = getObjectAsPointCloud(pickedObj)
affordanceObj = pickedObj if isinstance(pickedObj, affordanceitems.AffordanceItem) else None
def onReachLeft():
reachToFrame(reachFrame, 'left', collisionParent)
def onReachRight():
reachToFrame(reachFrame, 'right', collisionParent)
def flipHandSide():
for obj in [pickedObj] + pickedObj.children():
if not isGraspSeed(obj):
continue
side = 'right' if obj.side == 'left' else 'left'
obj.side = side
color = [1.0, 1.0, 0.0]
if side == 'right':
color = [0.33, 1.0, 0.0]
obj.setProperty('Color', color)
polyData = handFactory.getNewHandPolyData(side)
obj.setPolyData(polyData)
handFrame = obj.children()[0]
t = transformUtils.copyFrame(handFrame.transform)
t.PreMultiply()
t.RotateY(180)
handFrame.copyFrame(t)
objName = obj.getProperty('Name')
frameName = handFrame.getProperty('Name')
if side == 'left':
obj.setProperty('Name', objName.replace("right", "left"))
handFrame.setProperty('Name', frameName.replace("right", "left"))
else:
obj.setProperty('Name', objName.replace("left", "right"))
handFrame.setProperty('Name', frameName.replace("left", "right"))
obj._renderAllViews()
def flipHandThumb():
handFrame = pickedObj.children()[0]
t = transformUtils.copyFrame(handFrame.transform)
t.PreMultiply()
t.RotateY(180)
handFrame.copyFrame(t)
pickedObj._renderAllViews()
def onSplineLeft():
splinewidget.planner.newSpline(pickedObj, 'left')
def onSplineRight():
splinewidget.planner.newSpline(pickedObj, 'right')
def onSegmentGround():
groundPoints, scenePoints = segmentation.removeGround(pointCloudObj.polyData)
vis.showPolyData(groundPoints, 'ground points', color=[0,1,0], parent='segmentation')
vis.showPolyData(scenePoints, 'scene points', color=[1,0,1], parent='segmentation')
pickedObj.setProperty('Visible', False)
def onCopyPointCloud():
global lastRandomColor
polyData = vtk.vtkPolyData()
polyData.DeepCopy(pointCloudObj.polyData)
if pointCloudObj.getChildFrame():
polyData = segmentation.transformPolyData(polyData, pointCloudObj.getChildFrame().transform)
polyData = segmentation.addCoordArraysToPolyData(polyData)
# generate random color, and average with a common color to make them generally similar
lastRandomColor = lastRandomColor + 0.1 + 0.1*random.random()
rgb = colorsys.hls_to_rgb(lastRandomColor, 0.7, 1.0)
obj = vis.showPolyData(polyData, pointCloudObj.getProperty('Name') + ' copy', color=rgb, parent='point clouds')
#t = vtk.vtkTransform()
#t.PostMultiply()
#t.Translate(filterUtils.computeCentroid(polyData))
#segmentation.makeMovable(obj, t)
om.setActiveObject(obj)
pickedObj.setProperty('Visible', False)
def onMergeIntoPointCloud():
allPointClouds = om.findObjectByName('point clouds')
if allPointClouds:
allPointClouds = [i.getProperty('Name') for i in allPointClouds.children()]
sel = QtGui.QInputDialog.getItem(None, "Point Cloud Merging", "Pick point cloud to merge into:", allPointClouds, current=0, editable=False)
sel = om.findObjectByName(sel)
# Make a copy of each in same frame
polyDataInto = vtk.vtkPolyData()
polyDataInto.ShallowCopy(sel.polyData)
if sel.getChildFrame():
polyDataInto = segmentation.transformPolyData(polyDataInto, sel.getChildFrame().transform)
polyDataFrom = vtk.vtkPolyData()
polyDataFrom.DeepCopy(pointCloudObj.polyData)
if pointCloudObj.getChildFrame():
polyDataFrom = segmentation.transformPolyData(polyDataFrom, pointCloudObj.getChildFrame().transform)
# Actual merge
append = filterUtils.appendPolyData([polyDataFrom, polyDataInto])
if sel.getChildFrame():
polyDataInto = segmentation.transformPolyData(polyDataInto, sel.getChildFrame().transform.GetInverse())
# resample
append = segmentationroutines.applyVoxelGrid(append, 0.01)
append = segmentation.addCoordArraysToPolyData(append)
# Recenter the frame
sel.setPolyData(append)
t = vtk.vtkTransform()
t.PostMultiply()
t.Translate(filterUtils.computeCentroid(append))
segmentation.makeMovable(sel, t)
# Hide the old one
if pointCloudObj.getProperty('Name') in allPointClouds:
pointCloudObj.setProperty('Visible', False)
def onSegmentTableScene():
data = segmentation.segmentTableScene(pointCloudObj.polyData, pickedPoint)
vis.showClusterObjects(data.clusters, parent='segmentation')
segmentation.showTable(data.table, parent='segmentation')
def onSegmentDrillAlignedWithTable():
segmentation.segmentDrillAlignedWithTable(pickedPoint, pointCloudObj.polyData)
def onCachePickedPoint():
''' Cache the Picked Point for general purpose use'''
global lastCachedPickedPoint
lastCachedPickedPoint = pickedPoint
#data = segmentation.segmentTableScene(pointCloudObj.polyData, pickedPoint)
#vis.showClusterObjects(data.clusters + [data.table], parent='segmentation')
def onLocalPlaneFit():
planePoints, normal = segmentation.applyLocalPlaneFit(pointCloudObj.polyData, pickedPoint, searchRadius=0.1, searchRadiusEnd=0.2)
obj = vis.showPolyData(planePoints, 'local plane fit', color=[0,1,0])
obj.setProperty('Point Size', 7)
fields = segmentation.makePolyDataFields(obj.polyData)
pose = transformUtils.poseFromTransform(fields.frame)
desc = dict(classname='BoxAffordanceItem', Name='local plane', Dimensions=list(fields.dims), pose=pose)
box = segmentation.affordanceManager.newAffordanceFromDescription(desc)
def onOrientToMajorPlane():
polyData, planeFrame = segmentation.orientToMajorPlane(pointCloudObj.polyData, pickedPoint=pickedPoint)
pointCloudObj.setPolyData(polyData)
def onDiskGlyph():
result = segmentation.applyDiskGlyphs(pointCloudObj.polyData)
obj = vis.showPolyData(result, 'disks', color=[0.8,0.8,0.8])
om.setActiveObject(obj)
pickedObj.setProperty('Visible', False)
def onArrowGlyph():
result = segmentation.applyArrowGlyphs(pointCloudObj.polyData)
obj = vis.showPolyData(result, 'arrows')
def onSegmentationEditor():
segmentationpanel.activateSegmentationMode(pointCloudObj.polyData)
def addNewFrame():
t = transformUtils.copyFrame(affordanceObj.getChildFrame().transform)
t.PostMultiply()
t.Translate(np.array(pickedPoint) - np.array(t.GetPosition()))
newFrame = vis.showFrame(t, '%s frame %d' % (affordanceObj.getProperty('Name'), len(affordanceObj.children())), scale=0.2, parent=affordanceObj)
affordanceObj.getChildFrame().getFrameSync().addFrame(newFrame, ignoreIncoming=True)
def copyAffordance():
desc = dict(affordanceObj.getDescription())
del desc['uuid']
desc['Name'] = desc['Name'] + ' copy'
aff = robotSystem.affordanceManager.newAffordanceFromDescription(desc)
aff.getChildFrame().setProperty('Edit', True)
def onPromoteToAffordance():
affObj = affordanceitems.MeshAffordanceItem.promotePolyDataItem(pickedObj)
robotSystem.affordanceManager.registerAffordance(affObj)
actions = []
if affordanceObj:
actions.extend([
('Copy affordance', copyAffordance),
('Add new frame', addNewFrame),
])
elif type(pickedObj) == vis.PolyDataItem:
actions.extend([
('Promote to Affordance', onPromoteToAffordance),
])
if isGraspSeed(pickedObj):
actions.extend([
(None, None),
('Flip Side', flipHandSide),
('Flip Thumb', flipHandThumb),
])
if reachFrame is not None:
actions.extend([
(None, None),
('Reach Left', onReachLeft),
('Reach Right', onReachRight),
#('Spline Left', onSplineLeft),
#('Spline Right', onSplineRight),
])
if pointCloudObj:
actions.extend([
(None, None),
('Copy Pointcloud', onCopyPointCloud),
('Merge Pointcloud Into', onMergeIntoPointCloud),
('Segment Ground', onSegmentGround),
('Segment Table', onSegmentTableScene),
('Segment Drill Aligned', onSegmentDrillAlignedWithTable),
('Local Plane Fit', onLocalPlaneFit),
('Orient with Horizontal', onOrientToMajorPlane),
('Arrow Glyph', onArrowGlyph),
('Disk Glyph', onDiskGlyph),
('Cache Pick Point', onCachePickedPoint),
(None, None),
('Open Segmentation Editor', onSegmentationEditor)
])
return actions
viewbehaviors.registerContextMenuActions(getRobotActions)
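# A hypothetical sketch of the same extension point: any callable with the
# (view, pickedObj, pickedPoint) -> [(label, callback), ...] contract used by
# getRobotActions above can be registered as an additional provider
# ((None, None) entries appear to act as menu separators).
def exampleExtraActions(view, pickedObj, pickedPoint):
    if pickedObj is None:
        return []
    def onPrintName():
        print(pickedObj.getProperty('Name'))
    return [('Print Picked Name', onPrintName)]
# viewbehaviors.registerContextMenuActions(exampleExtraActions)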
class RobotViewEventFilter(ViewEventFilter):
def onMouseWheel(self, event):
if neckDriver:
neckDriver.onWheelDelta(event.delta())
def onMouseMove(self, event):
for picker in segmentation.viewPickers:
if not picker.enabled:
continue
picker.onMouseMove(vis.mapMousePosition(self.view, event), event.modifiers())
self.consumeEvent()
def onLeftMousePress(self, event):
if event.modifiers() == QtCore.Qt.ControlModifier:
displayPoint = vis.mapMousePosition(self.view, event)
if footstepsDriver:
newWalkingGoal(displayPoint, self.view)
self.consumeEvent()
for picker in segmentation.viewPickers:
if not picker.enabled:
continue
picker.onMousePress(vis.mapMousePosition(self.view, event), event.modifiers())
self.consumeEvent()
def onLeftDoubleClick(self, event):
displayPoint = vis.mapMousePosition(self.view, event)
useHorizontalWidget = (event.modifiers() == QtCore.Qt.ShiftModifier)
if toggleFootstepWidget(displayPoint, self.view, useHorizontalWidget):
self.consumeEvent()
return
if robotLinkSelector and robotLinkSelector.selectLink(displayPoint, self.view):
self.consumeEvent()
return
def onKeyPress(self, event):
consumed = False
key = str(event.text()).lower()
if key == 'r':
consumed = True
if robotModel is not None:
resetCameraToRobot(self.view)
elif key == 's':
if handFactory is not None:
side = 'left' if event.modifiers() != QtCore.Qt.ShiftModifier else 'right'
placeHandModel(self.getCursorDisplayPosition(), self.view, side)
elif key == 'n':
if neckDriver:
neckDriver.activateNeckControl()
elif key in ['0', '1', '2', '3']:
if neckDriver:
neckDriver.applyNeckPitchPreset(int(key))
if consumed:
self.consumeEvent()
def onKeyRelease(self, event):
if str(event.text()).lower() == 'n':
if neckDriver:
neckDriver.deactivateNeckControl()
class KeyPressLogCommander(ViewEventFilter):
def __init__(self, view):
ViewEventFilter.__init__(self, view)
self.commander = lcmUtils.LogPlayerCommander()
def onKeyPressRepeat(self, event):
key = str(event.text()).lower()
consumed = True
if key == 'p':
self.commander.togglePlay()
elif key == 'n':
self.commander.step()
elif key in ('+', '='):
self.commander.faster()
elif key in ('-', '_'):
self.commander.slower()
elif key == '[':
self.commander.back()
elif key == ']':
self.commander.forward()
else:
consumed = False
if consumed:
self.consumeEvent()
class RobotViewBehaviors(object):
def __init__(self, view, _robotSystem):
self.view = view
self.viewBehaviors = viewbehaviors.ViewBehaviors(view)
self.logCommander = KeyPressLogCommander(view)
self.robotViewBehaviors = RobotViewEventFilter(view)
global robotSystem, robotModel, handFactory, footstepsDriver, neckDriver, robotLinkSelector
robotSystem = _robotSystem
robotModel = robotSystem.robotStateModel
handFactory = robotSystem.handFactory
footstepsDriver = robotSystem.footstepsDriver
neckDriver = robotSystem.neckDriver
if app.getMainWindow() is not None:
robotLinkSelector = RobotLinkSelector()
| {
"content_hash": "2352c850732f7f8b8693f6303668b0dc",
"timestamp": "",
"source": "github",
"line_count": 671,
"max_line_length": 152,
"avg_line_length": 33.58718330849479,
"alnum_prop": 0.6664152282912544,
"repo_name": "RobotLocomotion/director",
"id": "565635e1a04a92399568374c9d3cd156b6a07028",
"size": "22537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/director/robotviewbehaviors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "119759"
},
{
"name": "C++",
"bytes": "500237"
},
{
"name": "CMake",
"bytes": "52624"
},
{
"name": "GLSL",
"bytes": "15443"
},
{
"name": "Makefile",
"bytes": "5014"
},
{
"name": "Matlab",
"bytes": "161948"
},
{
"name": "Python",
"bytes": "2128090"
},
{
"name": "Shell",
"bytes": "6481"
}
],
"symlink_target": ""
} |
import argparse
import re
import struct
import subprocess
def get_output_name(hotfix, output):
if output:
return output
elif hotfix.endswith('.so'):
return hotfix[:-3] + '.hfx'
else:
return hotfix + '.hfx'
def get_raw_symbols(file_name):
process = subprocess.Popen(['nm', file_name], stdout=subprocess.PIPE)
(output, error) = process.communicate()
process.wait()
return output.decode('ascii').split('\n')
def get_build_id(file_name):
process = subprocess.Popen(['readelf', '-n', file_name], stdout=subprocess.PIPE)
(output, error) = process.communicate()
process.wait()
r = re.compile(r'\W*Build ID:\W+([0-9a-f]+)\W*')
for line in output.decode('ascii').split('\n'):
m = re.match(r, line)
if m:
return bytes.fromhex(m.groups()[0])
raise Exception('Could not find build id in "%s"' % file_name)
cmdline_parser = argparse.ArgumentParser()
cmdline_parser.add_argument('binary', help='binary file to be patched')
cmdline_parser.add_argument('hotfix', help='shared library with hotfix')
cmdline_parser.add_argument('-o', '--output', help='output file name')
args = cmdline_parser.parse_args()
binary = args.binary
hotfix = args.hotfix
output = get_output_name(hotfix, args.output)
needed_symbols = {}
found_symbols = {}
for line in get_raw_symbols(hotfix):
array = line.split(' ')
array = [x for x in array if x != '']
if len(array) != 2:
continue
needed_symbols[array[1]] = None
for line in get_raw_symbols(binary):
array = line.split(' ')
array = [x for x in array if x != '']
if len(array) != 3:
continue
if not array[2] in needed_symbols:
continue
found_symbols[array[2]] = int(array[0], base=16)
del needed_symbols[array[2]]
build_id = get_build_id(binary)
symbols = struct.pack('=I', len(build_id)) + build_id
for name, address in found_symbols.items():
bname = name.encode('ascii')
symbols += struct.pack('=QI%ds' % (len(bname)), address, len(bname), bname)
f = open(hotfix, 'rb')
hfx = f.read()
f.close()
HFX_MAGIC = 0x70626cff
HFX_VERSION = 0
HFX_TYPE = 0
out = struct.pack('=I2H2I', HFX_MAGIC, HFX_VERSION, HFX_TYPE, len(symbols),
len(hfx))
out += symbols
out += hfx
f = open(output, 'wb')
f.write(out)
f.close()
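# A hypothetical reader for the .hfx layout assembled above (the on-disk format is
# defined only by the pack() calls in this script): a 16-byte header, then the symbol
# table (build-id length, build-id bytes, address/name records), then the raw payload.
def read_hfx_header(path):
    with open(path, 'rb') as f:
        data = f.read()
    magic, version, hfx_type, symbols_len, payload_len = struct.unpack_from('=I2H2I', data, 0)
    assert magic == HFX_MAGIC
    build_id_len, = struct.unpack_from('=I', data, 16)  # the symbol table starts right after the header
    build_id = data[20:20 + build_id_len]
    return version, hfx_type, symbols_len, payload_len, build_id.hex()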
| {
"content_hash": "7ec82714b2d9a6379feec83aa3f9e10a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 84,
"avg_line_length": 26.79310344827586,
"alnum_prop": 0.6327756327756328,
"repo_name": "pdziepak/lbp",
"id": "182abe8552fb9df7985822cf90e4540e64b5d2d0",
"size": "3452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/prepare_hotfix.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "80044"
},
{
"name": "CMake",
"bytes": "3380"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management import *
from yarn_resourcemanager import resourcemanager
class Resourcemanager(Script):
def install(self, env):
import params
self.install_packages(env, params.exclude_packages)
env.set_params(params)
#resourcemanager(action="configure")
def start(self, env):
import params
env.set_params(params)
resourcemanager(action="configure")
# FOR SECURITY
resourcemanager(action="start")
def stop(self, env):
import params
env.set_params(params)
resourcemanager(action="stop")
def status(self, env):
import status_params
env.set_params(status_params)
resourcemanager(action="status")
def refreshqueues(self, env):
import params
self.configure(env)
env.set_params(params)
resourcemanager(action="refreshqueues")
def decommission(self, env):
import params
env.set_params(params)
rm_kinit_cmd = params.rm_kinit_cmd
yarn_user = params.yarn_user
conf_dir = params.hadoop_conf_dir
user_group = params.user_group
yarn_refresh_cmd = format("{rm_kinit_cmd} yarn --config {conf_dir} rmadmin -refreshNodes")
File(params.exclude_file_path,
content=Template("exclude_hosts_list.j2"),
owner=yarn_user,
group=user_group
)
if params.update_exclude_file_only == False:
Execute(yarn_refresh_cmd,
environment= {'PATH' : params.execute_path },
user=yarn_user)
pass
pass
if __name__ == "__main__":
Resourcemanager().execute()
| {
"content_hash": "8c58ccb4f6dc53eb23715a1acd86c365",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 94,
"avg_line_length": 26.655172413793103,
"alnum_prop": 0.709357481673135,
"repo_name": "keedio/keedio-stacks",
"id": "e6ee5bf0fc37f48396ca16124b1868e9452e5b57",
"size": "2319",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "KEEDIO/1.1/services/YARN/package/scripts/resourcemanager.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "386"
},
{
"name": "Python",
"bytes": "1080418"
},
{
"name": "Shell",
"bytes": "50473"
}
],
"symlink_target": ""
} |
import datetime
import http.client
import mock
from typing import Collection
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.model.utils import from_client_participant_id
from rdr_service.clock import FakeClock
from rdr_service.code_constants import CONSENT_PERMISSION_YES_CODE, CONSENT_PERMISSION_NO_CODE, PPI_SYSTEM,\
RACE_WHITE_CODE, PMI_SKIP_CODE
from rdr_service.concepts import Concept
from rdr_service.dao.hpo_dao import HPODao
from rdr_service.dao.biobank_order_dao import BiobankOrderDao
from rdr_service.model.code import Code
from rdr_service.model.consent_file import ConsentFile, ConsentSyncStatus, ConsentType
from rdr_service.model.hpo import HPO
from rdr_service.model.participant import Participant
from rdr_service.model.questionnaire import QuestionnaireQuestion
from rdr_service.model.questionnaire_response import QuestionnaireResponseAnswer
from rdr_service.model.site import Site
from rdr_service.participant_enums import (
OrganizationType,
SuspensionStatus,
TEST_HPO_ID,
TEST_HPO_NAME,
WithdrawalStatus,
)
from tests.helpers.unittest_base import BaseTestCase, PDRGeneratorTestMixin, QUESTIONNAIRE_NONE_ANSWER
from tests.test_data import load_biobank_order_json
TIME_1 = datetime.datetime(2018, 1, 1)
TIME_2 = datetime.datetime(2018, 1, 3)
class ParticipantApiTest(BaseTestCase, PDRGeneratorTestMixin):
def setUp(self):
super(ParticipantApiTest, self).setUp()
provider_link = {"primary": False, "organization": {"reference": "columbia"}}
self.participant = {"providerLink": [provider_link]}
self.participant_2 = {"externalId": 12345}
self.provider_link_2 = {"primary": True, "organization": {"reference": "Organization/PITT"}}
self.summary_dao = ParticipantSummaryDao()
self.data_generator.create_database_hpo(name='VA') # VA hpo needed for consent validation
# Needed by test_switch_to_test_account
self.hpo_dao = HPODao()
self.hpo_dao.insert(
HPO(hpoId=TEST_HPO_ID, name=TEST_HPO_NAME, displayName="Test", organizationType=OrganizationType.UNSET)
)
self.order = BiobankOrderDao()
self._ehr_questionnaire_id = None
build_validator_patch = mock.patch(
'rdr_service.services.consent.validation.ConsentValidationController._build_validator'
)
self.mock_build_validator = build_validator_patch.start()
self.addCleanup(build_validator_patch.stop)
def test_participant_id_out_of_range(self):
self.send_get("Participant/P12345678", expected_status=404)
self.send_get("Participant/P1234567890", expected_status=404)
def test_insert(self):
response = self.send_post("Participant", self.participant)
participant_id = response["participantId"]
get_response = self.send_get("Participant/%s" % participant_id)
self.assertEqual(response, get_response)
biobank_id = response["biobankId"]
self.assertTrue(biobank_id.startswith("Z"))
self.assertEqual(str(WithdrawalStatus.NOT_WITHDRAWN), response["withdrawalStatus"])
self.assertEqual(str(SuspensionStatus.NOT_SUSPENDED), response["suspensionStatus"])
for auto_generated in (
"participantId",
"externalId",
"site",
"enrollmentSite",
"organization",
"awardee",
"hpoId",
"biobankId",
"signUpTime",
"lastModified",
"withdrawalStatus",
"withdrawalReason",
"withdrawalAuthored",
"withdrawalReasonJustification",
"suspensionStatus",
):
del response[auto_generated]
self.assertJsonResponseMatches(self.participant, response)
def test_insert_with_same_external_id_returns_existing_participant(self):
response = self.send_post("Participant", self.participant_2)
participant_id = response["participantId"]
get_response = self.send_get("Participant/%s" % participant_id)
self.assertEqual(get_response["externalId"], self.participant_2["externalId"])
self.assertEqual(response, get_response)
response_2 = self.send_post("Participant", self.participant_2)
self.assertEqual(response, response_2)
def test_update_no_ifmatch_specified(self):
response = self.send_post("Participant", self.participant)
# Change the provider link for the participant
participant_id = response["participantId"]
response["providerLink"] = [self.provider_link_2]
path = "Participant/%s" % participant_id
self.send_put(path, response, expected_status=http.client.BAD_REQUEST)
def test_update_wrong_origin_fails(self):
response = self.send_post("Participant", self.participant)
# Change the provider link for the participant
participant_id = response["participantId"]
response["providerLink"] = [self.provider_link_2]
path = "Participant/%s" % participant_id
BaseTestCase.switch_auth_user('example@spellman.com', 'vibrent')
self.send_put(path, response, headers={"If-Match": 'W/"1"'}, expected_status=http.client.BAD_REQUEST)
BaseTestCase.switch_auth_user('example@example.com', 'example')
def test_update_hpro_can_edit(self):
response = self.send_post("Participant", self.participant)
# Change the provider link for the participant
participant_id = response["participantId"]
response["providerLink"] = [self.provider_link_2]
path = "Participant/%s" % participant_id
BaseTestCase.switch_auth_user('example@spellman.com', 'hpro')
self.send_put(path, response, headers={"If-Match": 'W/"1"'})
def test_update_bad_ifmatch_specified(self):
response = self.send_post("Participant", self.participant)
# Change the provider link for the participant
participant_id = response["participantId"]
response["providerLink"] = [self.provider_link_2]
path = "Participant/%s" % participant_id
self.send_put(path, response, headers={"If-Match": "Blah"}, expected_status=http.client.BAD_REQUEST)
def test_update_wrong_ifmatch_specified(self):
response = self.send_post("Participant", self.participant)
# Change the provider link for the participant
participant_id = response["participantId"]
response["providerLink"] = [self.provider_link_2]
path = "Participant/%s" % participant_id
self.send_put(path, response, headers={"If-Match": 'W/"123"'}, expected_status=http.client.PRECONDITION_FAILED)
def test_update_right_ifmatch_specified(self):
response = self.send_post("Participant", self.participant)
self.assertEqual('W/"1"', response["meta"]["versionId"])
# Change the provider link for the participant
participant_id = response["participantId"]
response["providerLink"] = [self.provider_link_2]
response["withdrawalStatus"] = "NO_USE"
response["suspensionStatus"] = "NO_CONTACT"
response["site"] = "UNSET"
response["organization"] = "UNSET"
response["awardee"] = "PITT"
response["hpoId"] = "PITT"
path = "Participant/%s" % participant_id
update_response = self.send_put(path, response, headers={"If-Match": 'W/"1"'})
response["meta"]["versionId"] = 'W/"2"'
response["withdrawalTime"] = update_response["lastModified"]
response["suspensionTime"] = update_response["lastModified"]
self.assertJsonResponseMatches(response, update_response)
def test_update_right_suspension_status(self):
response = self.send_post("Participant", self.participant)
self.assertEqual('W/"1"', response["meta"]["versionId"])
participant_id = response["participantId"]
response["providerLink"] = [self.provider_link_2]
response["suspensionStatus"] = "NO_CONTACT"
response["site"] = "UNSET"
response["organization"] = "UNSET"
response["awardee"] = "PITT"
response["hpoId"] = "PITT"
path = "Participant/%s" % participant_id
self.send_put(path, response, headers={"If-Match": 'W/"1"'})
response["suspensionStatus"] = "NOT_SUSPENDED"
response["meta"]["versionId"] = 'W/"3"'
response["withdrawalTime"] = None
response["withdrawalStatus"] = 'NOT_WITHDRAWN'
with FakeClock(TIME_1):
update_response = self.send_put(path, response, headers={"If-Match": 'W/"2"'})
self.assertEqual(update_response['suspensionStatus'], 'NOT_SUSPENDED')
self.assertEqual(update_response['withdrawalStatus'], 'NOT_WITHDRAWN')
self.assertNotIn('suspensionTime', update_response)
def test_change_pairing_awardee_and_site(self):
participant = self.send_post("Participant", self.participant)
participant["providerLink"] = [self.provider_link_2]
participant_id = participant["participantId"]
participant["awardee"] = "PITT"
participant["site"] = "hpo-site-monroeville"
path = "Participant/%s" % participant_id
update_awardee = self.send_put(path, participant, headers={"If-Match": 'W/"1"'})
self.assertEqual(participant["awardee"], update_awardee["awardee"])
def test_pairing_is_case_insensitive(self):
# Set the participant up
participant = self.send_post('Participant', self.participant)
# Change the site pairing information
participant_site_code_sent = 'hpo-site-Monroeville'
del participant['providerLink']
participant['site'] = participant_site_code_sent
# Re-pair using API
participant_id = from_client_participant_id(participant["participantId"])
self.send_put(f'Participant/P{participant_id}', participant, headers={"If-Match": 'W/"1"'})
# Verify that the participant is paired correctly
participant_site: Site = self.session.query(Site).join(
Participant,
Participant.siteId == Site.siteId
).filter(
Participant.participantId == participant_id
).one_or_none()
self.assertEqual(participant_site_code_sent.lower(), participant_site.googleGroup,
"Expecting the participant to be paired to Monroeville")
def test_change_pairing_for_org_then_site(self):
participant = self.send_post("Participant", self.participant)
participant["providerLink"] = [self.provider_link_2]
participant_id = participant["participantId"]
path = "Participant/%s" % participant_id
update_1 = self.send_put(path, participant, headers={"If-Match": 'W/"1"'})
participant["site"] = "hpo-site-bannerphoenix"
update_2 = self.send_put(path, participant, headers={"If-Match": 'W/"2"'})
self.assertEqual(update_1["site"], "UNSET")
self.assertEqual(update_1["organization"], "UNSET")
self.assertEqual(update_2["site"], "hpo-site-bannerphoenix")
self.assertEqual(update_2["organization"], "PITT_BANNER_HEALTH")
participant["organization"] = "AZ_TUCSON_BANNER_HEALTH"
update_3 = self.send_put(path, participant, headers={"If-Match": 'W/"3"'})
self.assertEqual(update_2["hpoId"], update_3["hpoId"])
self.assertEqual(update_2["organization"], update_3["organization"])
self.assertEqual(update_3["site"], "hpo-site-bannerphoenix")
participant["site"] = "hpo-site-clinic-phoenix"
update_4 = self.send_put(path, participant, headers={"If-Match": 'W/"4"'})
self.assertEqual(update_4["site"], "hpo-site-clinic-phoenix")
self.assertEqual(update_4["organization"], "AZ_TUCSON_BANNER_HEALTH")
self.assertEqual(update_4["awardee"], "AZ_TUCSON")
def test_enrollment_site(self):
participant = self.send_post("Participant", self.participant)
participant["providerLink"] = [self.provider_link_2]
participant_id = participant["participantId"]
path = "Participant/%s" % participant_id
update_1 = self.send_put(path, participant, headers={"If-Match": 'W/"1"'})
participant["site"] = "hpo-site-bannerphoenix"
update_2 = self.send_put(path, participant, headers={"If-Match": 'W/"2"'})
self.assertEqual(update_1["site"], "UNSET")
self.assertEqual(update_1["enrollmentSite"], "UNSET")
self.assertEqual(update_2["site"], "hpo-site-bannerphoenix")
self.assertEqual(update_2["enrollmentSite"], "hpo-site-bannerphoenix")
self.send_consent(participant_id)
ps = self.send_get("Participant/%s/Summary" % participant_id)
self.assertEqual(ps['enrollmentSite'], "hpo-site-bannerphoenix")
participant["site"] = "hpo-site-clinic-phoenix"
update_3 = self.send_put(path, participant, headers={"If-Match": 'W/"3"'})
self.assertEqual(update_3["site"], "hpo-site-clinic-phoenix")
# enrollmentSite will not change
self.assertEqual(update_2["enrollmentSite"], "hpo-site-bannerphoenix")
ps = self.send_get("Participant/%s/Summary" % participant_id)
self.assertEqual(ps['enrollmentSite'], "hpo-site-bannerphoenix")
def test_repairing_after_biobank_order(self):
participant = self.send_post("Participant", self.participant)
participant["providerLink"] = [self.provider_link_2]
participant_id = participant["participantId"]
participant_path = "Participant/%s" % participant_id
update_1 = self.send_put(participant_path, participant, headers={"If-Match": 'W/"1"'})
self.assertEqual(update_1["site"], "UNSET")
self.assertEqual(update_1["organization"], "UNSET")
self.assertEqual(update_1["hpoId"], "PITT")
participant["site"] = "hpo-site-bannerphoenix"
update_2 = self.send_put(participant_path, participant, headers={"If-Match": 'W/"2"'})
self.assertEqual(update_2["site"], "hpo-site-bannerphoenix")
self.assertEqual(update_2["organization"], "PITT_BANNER_HEALTH")
self.assertEqual(update_2["hpoId"], "PITT")
self.send_consent(participant_id)
bio_path = "Participant/%s/BiobankOrder" % participant_id
order_json = load_biobank_order_json(from_client_participant_id(participant_id),
filename="biobank_order_2.json")
self.send_post(bio_path, order_json)
participant["site"] = None
participant["awardee"] = "AZ_TUCSON"
participant["organization"] = None
update_3 = self.send_put(participant_path, participant, headers={"If-Match": 'W/"4"'})
self.assertEqual(update_3["site"], "UNSET")
self.assertEqual(update_3["organization"], "UNSET")
self.assertEqual(update_3["hpoId"], "AZ_TUCSON")
def test_administrative_withdrawal(self):
with FakeClock(TIME_1):
response = self.send_post("Participant", self.participant)
participant_id = response["participantId"]
response["providerLink"] = [self.provider_link_2]
response["withdrawalStatus"] = "NO_USE"
response["suspensionStatus"] = "NO_CONTACT"
response["withdrawalReason"] = "TEST"
response["withdrawalReasonJustification"] = "This was a test account."
path = "Participant/%s" % participant_id
update_response = self.send_put(path, response, headers={"If-Match": 'W/"1"'})
with FakeClock(TIME_2):
response["meta"]["versionId"] = 'W/"2"'
response["withdrawalTime"] = update_response["lastModified"]
response["suspensionTime"] = update_response["lastModified"]
response["awardee"] = "PITT"
response["hpoId"] = "PITT"
self.assertJsonResponseMatches(response, update_response)
participant = self.send_get(path)
self.assertEqual(participant["withdrawalStatus"], "NO_USE")
def test_early_out_withdrawal(self):
"""If a participant withdraws before consent/participant summary it is called early out."""
with FakeClock(TIME_1):
response = self.send_post("Participant", self.participant)
participant_id = response["participantId"]
response["providerLink"] = [self.provider_link_2]
response["withdrawalStatus"] = "EARLY_OUT"
response["withdrawalTimeStamp"] = 1563907344169
response["suspensionStatus"] = "NOT_SUSPENDED"
response["withdrawalReason"] = "TEST"
response["withdrawalReasonJustification"] = "This was a test account."
path = "Participant/%s" % participant_id
self.send_put(path, response, headers={"If-Match": 'W/"1"'})
participant = self.send_get(path)
self.assertEqual(participant["withdrawalStatus"], "EARLY_OUT")
self.assertEqual(participant["withdrawalTime"], '2018-01-01T00:00:00')
self.assertEqual(participant["withdrawalAuthored"], '2019-07-23T18:42:24')
def test_administrative_withdrawal_with_authored_time(self):
with FakeClock(TIME_1):
response = self.send_post("Participant", self.participant)
participant_id = response["participantId"]
self.send_consent(participant_id)
response["providerLink"] = [self.provider_link_2]
response["withdrawalStatus"] = "NO_USE"
response["suspensionStatus"] = "NO_CONTACT"
response["withdrawalReason"] = "TEST"
response["withdrawalTimeStamp"] = 1563907344000
response["withdrawalReasonJustification"] = "This was a test account."
path = "Participant/%s" % participant_id
update_response = self.send_put(path, response, headers={"If-Match": 'W/"1"'})
with FakeClock(TIME_2):
del response["withdrawalTimeStamp"]
response["meta"]["versionId"] = 'W/"2"'
response["withdrawalTime"] = update_response["lastModified"]
response["suspensionTime"] = update_response["lastModified"]
response["withdrawalAuthored"] = update_response["withdrawalAuthored"]
response["awardee"] = "PITT"
response["hpoId"] = "PITT"
self.assertJsonResponseMatches(response, update_response)
p_response = self.send_get("Participant/%s" % participant_id)
self.assertEqual(p_response["withdrawalAuthored"], update_response["withdrawalAuthored"])
ps_response = self.send_get("Participant/%s/Summary" % participant_id)
self.assertEqual(ps_response["withdrawalAuthored"], update_response["withdrawalAuthored"])
def submit_questionnaire_response(
self,
participant_id,
questionnaire_id,
race_code,
gender_code,
first_name,
middle_name,
last_name,
zip_code,
state_code,
street_address,
street_address2,
city,
sex_code,
login_phone_number,
sexual_orientation_code,
phone_number,
recontact_method_code,
language_code,
education_code,
income_code,
date_of_birth,
cabor_signature_uri,
time=TIME_1,
):
code_answers = []
_add_code_answer(code_answers, "race", race_code)
_add_code_answer(code_answers, "genderIdentity", gender_code)
_add_code_answer(code_answers, "state", state_code)
_add_code_answer(code_answers, "sex", sex_code)
_add_code_answer(code_answers, "sexualOrientation", sexual_orientation_code)
_add_code_answer(code_answers, "recontactMethod", recontact_method_code)
_add_code_answer(code_answers, "language", language_code)
_add_code_answer(code_answers, "education", education_code)
_add_code_answer(code_answers, "income", income_code)
string_answers = [
("firstName", first_name),
("middleName", middle_name),
("lastName", last_name),
("city", city),
("phoneNumber", phone_number),
("loginPhoneNumber", login_phone_number),
("zipCode", zip_code),
]
if street_address is not None:
string_answers.append(("streetAddress", street_address))
if street_address2 is not None:
if street_address2 == PMI_SKIP_CODE:
_add_code_answer(code_answers, "streetAddress2", street_address2)
else:
string_answers.append(("streetAddress2", street_address2))
qr = self.make_questionnaire_response_json(
participant_id,
questionnaire_id,
code_answers=code_answers,
string_answers=string_answers,
date_answers=[("dateOfBirth", date_of_birth)],
uri_answers=[("CABoRSignature", cabor_signature_uri)],
)
with FakeClock(time):
self.send_post("Participant/%s/QuestionnaireResponse" % participant_id, qr)
def _setup_initial_participant_data(self):
with FakeClock(TIME_1):
participant = self.send_post("Participant", {"providerLink": [self.provider_link_2]})
questionnaire_id = self.create_questionnaire("questionnaire3.json")
participant_id = participant["participantId"]
self.send_consent(participant_id)
self.submit_questionnaire_response(
participant_id,
questionnaire_id,
RACE_WHITE_CODE,
"male",
"Bob",
"Q",
"Jones",
"78751",
"PIIState_VA",
"1234 Main Street",
"APT C",
"Austin",
"male_sex",
"215-222-2222",
"straight",
"512-555-5555",
"email_code",
"en",
"highschool",
"lotsofmoney",
datetime.date(1978, 10, 9),
"signature.pdf",
)
return participant_id, questionnaire_id
def test_switch_to_test_account(self):
participant_id, questionnaire_id = self._setup_initial_participant_data()
ps_1 = self.send_get("Participant/%s/Summary" % participant_id)
self.assertEqual("215-222-2222", ps_1["loginPhoneNumber"])
self.assertEqual("PITT", ps_1["hpoId"])
p_1 = self.send_get("Participant/%s" % participant_id)
self.assertEqual("PITT", p_1["hpoId"])
self.assertEqual(TIME_1.strftime("%Y-%m-%dT%X"), p_1["lastModified"])
self.assertEqual('W/"1"', p_1["meta"]["versionId"])
# Test all the PDR / resource generator results while participant not considered a test participant
ps_bqs_data = self.make_bq_participant_summary(participant_id)
ps_rsc = self.make_participant_resource(participant_id)
self.assertEqual(ps_bqs_data.get('test_participant'), 0)
self.assertEqual(ps_rsc.get('test_participant'), 0)
# change login phone number to 444-222-2222
self.submit_questionnaire_response(
participant_id,
questionnaire_id,
RACE_WHITE_CODE,
"male",
"Bob",
"Q",
"Jones",
"78751",
"PIIState_VA",
"1234 Main Street",
"APT C",
"Austin",
"male_sex",
"444-222-2222",
"straight",
"512-555-5555",
"email_code",
"en",
"highschool",
"lotsofmoney",
datetime.date(1978, 10, 9),
"signature.pdf",
TIME_2,
)
ps_1_with_test_login_phone_number = self.send_get("Participant/%s/Summary" % participant_id)
self.assertEqual("444-222-2222", ps_1_with_test_login_phone_number["loginPhoneNumber"])
self.assertEqual("TEST", ps_1_with_test_login_phone_number["hpoId"])
self.assertEqual("1234 Main Street", ps_1_with_test_login_phone_number["streetAddress"])
self.assertEqual("APT C", ps_1_with_test_login_phone_number["streetAddress2"])
p_1 = self.send_get("Participant/%s" % participant_id)
self.assertEqual("TEST", p_1["hpoId"])
self.assertEqual(TIME_2.strftime("%Y-%m-%dT%X"), p_1["lastModified"])
self.assertEqual('W/"2"', p_1["meta"]["versionId"])
# Retest all the PDR / resource generator results after participant is updated with test participant data
ps_bqs_data = self.make_bq_participant_summary(participant_id)
ps_rsc = self.make_participant_resource(participant_id)
self.assertEqual(ps_bqs_data.get('test_participant'), 1)
self.assertEqual(ps_rsc.get('test_participant'), 1)
def test_street_address_two_clears_on_address_update(self):
participant_id, questionnaire_id = self._setup_initial_participant_data()
# Change street address to only have one line
self.submit_questionnaire_response(
participant_id,
questionnaire_id,
RACE_WHITE_CODE,
"male",
"Bob",
"Q",
"Jones",
"78751",
"PIIState_VA",
"44 Hickory Lane",
"",
"Austin",
"male_sex",
"444-222-2222",
"straight",
"512-555-5555",
"email_code",
"en",
"highschool",
"lotsofmoney",
datetime.date(1978, 10, 9),
"signature.pdf",
TIME_2,
)
participant_summary = self.send_get("Participant/%s/Summary" % participant_id)
self.assertEqual("", participant_summary["streetAddress2"])
def test_street_address_two_clears_on_no_answer(self):
participant_id, questionnaire_id = self._setup_initial_participant_data()
# We could see a submission to the street address line 2 without an answer included with it
self.submit_questionnaire_response(
participant_id,
questionnaire_id,
RACE_WHITE_CODE,
"male",
"Bob",
"Q",
"Jones",
"78751",
"PIIState_VA",
"44 Hickory Lane",
QUESTIONNAIRE_NONE_ANSWER,
"Austin",
"male_sex",
"444-222-2222",
"straight",
"512-555-5555",
"email_code",
"en",
"highschool",
"lotsofmoney",
datetime.date(1978, 10, 9),
"signature.pdf",
TIME_2,
)
participant_summary = self.send_get("Participant/%s/Summary" % participant_id)
self.assertNotIn("streetAddress2", participant_summary)
# Make sure the street address 2 answer is set inactive too
street_address_2_active_answer = self.session.query(QuestionnaireResponseAnswer)\
.join(QuestionnaireQuestion)\
.join(Code, Code.codeId == QuestionnaireQuestion.codeId)\
.filter(Code.value == 'PIIAddress_StreetAddress2',
QuestionnaireResponseAnswer.endTime.is_(None))\
.one_or_none()
self.assertIsNone(street_address_2_active_answer)
def test_street_address_two_clears_on_skip(self):
participant_id, questionnaire_id = self._setup_initial_participant_data()
# We could see a submission where street address line 2 is answered with a skip code
self.submit_questionnaire_response(
participant_id,
questionnaire_id,
RACE_WHITE_CODE,
"male",
"Bob",
"Q",
"Jones",
"78751",
"PIIState_VA",
"44 Hickory Lane",
PMI_SKIP_CODE,
"Austin",
"male_sex",
"444-222-2222",
"straight",
"512-555-5555",
"email_code",
"en",
"highschool",
"lotsofmoney",
datetime.date(1978, 10, 9),
"signature.pdf",
TIME_2,
)
participant_summary = self.send_get("Participant/%s/Summary" % participant_id)
self.assertNotIn("streetAddress2", participant_summary)
def test_first_study_consent_time_set(self):
with FakeClock(TIME_1):
participant = self.send_post("Participant", {"providerLink": [self.provider_link_2]})
participant_id = participant["participantId"]
with FakeClock(datetime.datetime(2020, 6, 1)):
self.send_consent(participant_id)
participant_summary = self.send_get("Participant/%s/Summary" % participant_id)
self.assertEqual('2020-06-01T00:00:00', participant_summary['consentForStudyEnrollmentFirstYesAuthored'])
def test_first_study_consent_not_modified(self):
with FakeClock(TIME_1):
participant = self.send_post("Participant", {"providerLink": [self.provider_link_2]})
participant_id = participant["participantId"]
with FakeClock(datetime.datetime(2020, 6, 1)):
self.send_consent(participant_id)
with FakeClock(datetime.datetime(2020, 8, 1)):
self.send_consent(participant_id)
participant_summary = self.send_get("Participant/%s/Summary" % participant_id)
self.assertEqual('2020-06-01T00:00:00', participant_summary['consentForStudyEnrollmentFirstYesAuthored'])
def submit_ehr_questionnaire(self, participant_id, ehr_response_code):
if not self._ehr_questionnaire_id:
self._ehr_questionnaire_id = self.create_questionnaire("ehr_consent_questionnaire.json")
code_answers = []
_add_code_answer(code_answers, 'ehrConsent', ehr_response_code)
qr_json = self.make_questionnaire_response_json(
participant_id,
self._ehr_questionnaire_id,
code_answers=code_answers,
)
self.send_post(self.questionnaire_response_url(participant_id), qr_json)
def test_first_ehr_consent_time_set(self):
participant_id, _ = self._setup_initial_participant_data()
with FakeClock(datetime.datetime(2020, 3, 12)):
self.submit_ehr_questionnaire(participant_id, CONSENT_PERMISSION_YES_CODE)
participant_summary = self.send_get("Participant/%s/Summary" % participant_id)
self.assertEqual('2020-03-12T00:00:00',
participant_summary['consentForElectronicHealthRecordsFirstYesAuthored'])
def test_first_ehr_consent_not_modified(self):
participant_id, _ = self._setup_initial_participant_data()
with FakeClock(datetime.datetime(2020, 3, 12)):
self.submit_ehr_questionnaire(participant_id, CONSENT_PERMISSION_YES_CODE)
with FakeClock(datetime.datetime(2020, 9, 12)):
self.submit_ehr_questionnaire(participant_id, CONSENT_PERMISSION_YES_CODE)
participant_summary = self.send_get("Participant/%s/Summary" % participant_id)
self.assertEqual('2020-03-12T00:00:00',
participant_summary['consentForElectronicHealthRecordsFirstYesAuthored'])
def test_first_ehr_consent_not_set_on_no(self):
participant_id, _ = self._setup_initial_participant_data()
with FakeClock(datetime.datetime(2020, 3, 12)):
self.submit_ehr_questionnaire(participant_id, CONSENT_PERMISSION_NO_CODE)
participant_summary = self.send_get("Participant/%s/Summary" % participant_id)
self.assertNotIn('consentForElectronicHealthRecordsFirstYesAuthored', participant_summary)
def test_pid_rid_mapping_api(self):
self.send_post("Participant", {"providerLink": [self.provider_link_2]})
result = self.send_get("ParticipantId/ResearchId/Mapping?signUpAfter=2020-01-01&sort=lastModified")
self.assertEqual(len(result['data']), 1)
self.assertTrue(isinstance(result['data'][0]['research_id'], int))
self.assertEqual(len(str(result['data'][0]['research_id'])), 7)
self.assertEqual(result['sort_by'], 'lastModified')
def test_new_participant_with_test_participant_flag(self):
org = self.data_generator.create_database_organization(externalId='test_org')
site = self.data_generator.create_database_site(googleGroup='test_site')
response = self.send_post("Participant", {
'testParticipant': True,
'organization': org.externalId,
'site': site.googleGroup
})
participant_id = from_client_participant_id(response['participantId'])
participant: Participant = self.session.query(Participant).filter(
Participant.participantId == participant_id
).one()
self.assertTrue(participant.isTestParticipant)
# make sure the participant is paired correctly (i.e. the organization and site that were sent are ignored)
self.assertEqual(TEST_HPO_ID, participant.hpoId)
self.assertIsNone(participant.organizationId)
self.assertIsNone(participant.siteId)
def test_update_existing_participant_as_test_participant_flag(self):
hpo = self.data_generator.create_database_hpo()
org = self.data_generator.create_database_organization(externalId='test_org')
site = self.data_generator.create_database_site(googleGroup='test_site')
participant = self.data_generator.create_database_participant(
siteId=site.siteId,
organizationId=org.organizationId,
hpoId=hpo.hpoId
)
# When updating as a test participant, only the testParticipant field should need to be sent
self.send_put(f"Participant/P{participant.participantId}", {
'testParticipant': True
}, headers={"If-Match": 'W/"1"'})
self.session.expire_all()
updated_participant: Participant = self.session.query(Participant).filter(
Participant.participantId == participant.participantId
).one()
self.assertTrue(updated_participant.isTestParticipant)
# make sure the participant is paired correctly (that the org and site are cleared, and the TEST hpo is used)
self.assertEqual(TEST_HPO_ID, updated_participant.hpoId)
self.assertIsNone(updated_participant.organizationId)
self.assertIsNone(updated_participant.siteId)
def _send_pairing_request(self, participant_id, org_name):
self.send_put(f'Participant/P{participant_id}', {
'participantId': f'P{participant_id}',
'organization': org_name,
'withdrawalStatus': 'NOT_WITHDRAWN',
'suspensionStatus': 'NOT_SUSPENDED',
'meta': {'versionId': 'W/"1"'}
}, headers={"If-Match": 'W/"1"'})
def test_org_change_preps_files(self):
"""
When a participant is paired to an organization for the first time, or changes organizations, any
consent files that have been synced will need to be synced again to the new organization
"""
# Create an unpaired participant and some consent files that have been synced
summary = self.data_generator.create_database_participant_summary()
self.data_generator.create_database_consent_file(
participant_id=summary.participantId,
type=ConsentType.PRIMARY,
sync_status=ConsentSyncStatus.SYNC_COMPLETE
)
self.data_generator.create_database_consent_file(
participant_id=summary.participantId,
type=ConsentType.EHR,
sync_status=ConsentSyncStatus.SYNC_COMPLETE
)
# Pair the participant to an organization through the API
test_org_external_id = 'test_org'
self.data_generator.create_database_organization(externalId=test_org_external_id)
self._send_pairing_request(participant_id=summary.participantId, org_name=test_org_external_id)
# Make sure all the participant's consent files were set to READY_FOR_SYNC
participant_consent_files: Collection[ConsentFile] = self.session.query(ConsentFile).filter(
ConsentFile.participant_id == summary.participantId
).all()
self.assertEqual(2, len(participant_consent_files)) # making sure we got the consent files
self.assertTrue(
all([file.sync_status == ConsentSyncStatus.READY_FOR_SYNC for file in participant_consent_files])
)
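# Module-level helper used by submit_ehr_questionnaire above: for example,
# _add_code_answer(code_answers, 'ehrConsent', CONSENT_PERMISSION_YES_CODE) appends
# ('ehrConsent', Concept(PPI_SYSTEM, CONSENT_PERMISSION_YES_CODE)), while a falsy code is skipped.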
def _add_code_answer(code_answers, link_id, code):
if code:
code_answers.append((link_id, Concept(PPI_SYSTEM, code)))
| {
"content_hash": "91176fadd130c5b914b97360d0f584d8",
"timestamp": "",
"source": "github",
"line_count": 813,
"max_line_length": 119,
"avg_line_length": 45.31488314883149,
"alnum_prop": 0.6261773567492739,
"repo_name": "all-of-us/raw-data-repository",
"id": "b71eea372365e8c3df9f9dec0572dc84b9a22587",
"size": "36841",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "tests/api_tests/test_participant_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
import logging
from bookie import app_generator
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
app = app_generator.create_app() # pylint: disable=invalid-name
if __name__ == '__main__':
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.debug = False
app.run(host="0.0.0.0", port=8000)
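# When deployed, the module-level `app` above is served by gunicorn rather than app.run();
# a typical entrypoint (hypothetical here, since app.yaml is not shown) would be
# something like "gunicorn -b :$PORT main:app".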
| {
"content_hash": "b22937205033c5ccec0753ebb1ce02f6",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 68,
"avg_line_length": 25.6875,
"alnum_prop": 0.6934306569343066,
"repo_name": "JasonMWhite/bookie",
"id": "1a8ae3fa951fc18a4b87210d1f38a29064e4fd79",
"size": "411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/bookie/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1020"
},
{
"name": "Python",
"bytes": "8869"
}
],
"symlink_target": ""
} |
ANSIBLE_METADATA = {'status': 'preview',
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ipinfoio_facts
short_description: "Retrieve IP geolocation facts of a host's IP address"
description:
- "Gather IP geolocation facts of a host's IP address using ipinfo.io API"
version_added: "2.3"
author: "Aleksei Kostiuk (@akostyuk)"
options:
timeout:
description:
- HTTP connection timeout in seconds
required: false
default: 10
http_agent:
description:
- Set http user agent
required: false
default: "ansible-ipinfoio-module/0.0.1"
notes:
- "Check http://ipinfo.io/ for more information"
'''
EXAMPLES = '''
# Retrieve geolocation data of a host's IP address
- name: get IP geolocation data
ipinfoio_facts:
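# Illustrative follow-up (a sketch: the fact names follow the RETURN section above
# and become available to later tasks once the module has run)
- name: show public IP address and country
  debug:
    msg: "{{ ip }} ({{ country }})"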
'''
RETURN = '''
ansible_facts:
description: "Dictionary of ip geolocation facts for a host's IP address"
returned: changed
type: dictionary
contains:
ip:
description: "Public IP address of a host"
type: string
sample: "8.8.8.8"
hostname:
description: Domain name
type: string
sample: "google-public-dns-a.google.com"
country:
description: ISO 3166-1 alpha-2 country code
type: string
sample: "US"
region:
description: State or province name
type: string
sample: "California"
city:
description: City name
type: string
sample: "Mountain View"
loc:
description: Latitude and Longitude of the location
type: string
sample: "37.3860,-122.0838"
org:
description: "organization's name"
type: string
sample: "AS3356 Level 3 Communications, Inc."
postal:
description: Postal code
type: string
sample: "94035"
'''
USER_AGENT = 'ansible-ipinfoio-module/0.0.1'
class IpinfoioFacts(object):
def __init__(self, module):
self.url = 'https://ipinfo.io/json'
self.timeout = module.params.get('timeout')
self.module = module
def get_geo_data(self):
response, info = fetch_url(self.module, self.url, force=True, # NOQA
timeout=self.timeout)
try:
assert info['status'] == 200
except AssertionError:
self.module.fail_json(msg='Could not get {} page, '
'check for connectivity!'.format(self.url))
else:
try:
content = response.read()
result = self.module.from_json(content.decode('utf8'))
except ValueError:
self.module.fail_json(
msg='Failed to parse the ipinfo.io response: '
'{0} {1}'.format(self.url, content))
else:
return result
def main():
module = AnsibleModule( # NOQA
argument_spec=dict(
http_agent=dict(default=USER_AGENT),
timeout=dict(type='int', default=10),
),
supports_check_mode=True,
)
ipinfoio = IpinfoioFacts(module)
ipinfoio_result = dict(
changed=False, ansible_facts=ipinfoio.get_geo_data())
module.exit_json(**ipinfoio_result)
from ansible.module_utils.basic import * # NOQA
from ansible.module_utils.urls import * # NOQA
if __name__ == '__main__':
main()
| {
"content_hash": "982a450b26d2a3e418588ba2b80a67ae",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 77,
"avg_line_length": 27.39344262295082,
"alnum_prop": 0.5951526032315978,
"repo_name": "nwiizo/workspace_2017",
"id": "748c49dcc9ae45317b6d8e464b462d19eaaa2a7e",
"size": "4065",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "ansible-modules-extras/network/ipinfoio_facts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "173"
},
{
"name": "C++",
"bytes": "7105"
},
{
"name": "CSS",
"bytes": "50021"
},
{
"name": "Go",
"bytes": "112005"
},
{
"name": "HTML",
"bytes": "66435"
},
{
"name": "JavaScript",
"bytes": "73266"
},
{
"name": "Makefile",
"bytes": "1227"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "PowerShell",
"bytes": "277598"
},
{
"name": "Python",
"bytes": "11925958"
},
{
"name": "Ruby",
"bytes": "3779"
},
{
"name": "Rust",
"bytes": "1484076"
},
{
"name": "Shell",
"bytes": "86558"
}
],
"symlink_target": ""
} |
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# tcplife Trace the lifespan of TCP sessions and summarize.
# For Linux, uses BCC, BPF. Embedded C.
#
# USAGE: tcplife [-h] [-T] [-t] [-w] [-s] [-p PID] [-L LOCALPORT] [-D REMOTEPORT]
#
# This uses dynamic tracing of kernel functions, and will need to be updated
# to match kernel changes.
#
# While throughput counters are emitted, they are fetched in a low-overhead
# manner: reading members of the tcp_info struct on TCP close; i.e., we do not
# trace send/receive.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# IDEA: Julia Evans
#
# 18-Oct-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
import argparse
from socket import inet_ntop, ntohs, AF_INET, AF_INET6
from struct import pack
import ctypes as ct
from time import strftime
# arguments
examples = """examples:
./tcplife # trace all TCP connect()s
./tcplife -T # include time column (HH:MM:SS)
./tcplife -w # wider columns (fit IPv6)
./tcplife -stT # csv output, with times & timestamps
./tcplife -p 181 # only trace PID 181
./tcplife -L 80 # only trace local port 80
./tcplife -L 80,81 # only trace local ports 80 and 81
./tcplife -D 80 # only trace remote port 80
"""
parser = argparse.ArgumentParser(
description="Trace the lifespan of TCP sessions and summarize",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-T", "--time", action="store_true",
help="include time column on output (HH:MM:SS)")
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output (seconds)")
parser.add_argument("-w", "--wide", action="store_true",
help="wide column output (fits IPv6 addresses)")
parser.add_argument("-s", "--csv", action="store_true",
help="comma seperated values output")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("-L", "--localport",
help="comma-separated list of local ports to trace.")
parser.add_argument("-D", "--remoteport",
help="comma-separated list of remote ports to trace.")
args = parser.parse_args()
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#define KBUILD_MODNAME "foo"
#include <linux/tcp.h>
#include <net/sock.h>
#include <bcc/proto.h>
BPF_HASH(birth, struct sock *, u64);
// separate data structs for ipv4 and ipv6
struct ipv4_data_t {
// XXX: switch some to u32's when supported
u64 ts_us;
u64 pid;
u64 saddr;
u64 daddr;
u64 ports;
u64 rx_b;
u64 tx_b;
u64 span_us;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv4_events);
struct ipv6_data_t {
u64 ts_us;
u64 pid;
unsigned __int128 saddr;
unsigned __int128 daddr;
u64 ports;
u64 rx_b;
u64 tx_b;
u64 span_us;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv6_events);
struct id_t {
u32 pid;
char task[TASK_COMM_LEN];
};
BPF_HASH(whoami, struct sock *, struct id_t);
int kprobe__tcp_set_state(struct pt_regs *ctx, struct sock *sk, int state)
{
u32 pid = bpf_get_current_pid_tgid() >> 32;
// lport is either used in a filter here, or later
u16 lport = sk->__sk_common.skc_num;
FILTER_LPORT
// dport is either used in a filter here, or later
u16 dport = sk->__sk_common.skc_dport;
FILTER_DPORT
/*
* This tool includes PID and comm context. It's best effort, and may
* be wrong in some situations. It currently works like this:
* - record timestamp on any state < TCP_FIN_WAIT1
* - cache task context on:
* TCP_SYN_SENT: tracing from client
* TCP_LAST_ACK: client-closed from server
* - do output on TCP_CLOSE:
* fetch task context if cached, or use current task
*/
// capture birth time
if (state < TCP_FIN_WAIT1) {
/*
* Matching just ESTABLISHED may be sufficient, provided no code-path
* sets ESTABLISHED without a tcp_set_state() call. Until we know
* that for sure, match all early states to increase chances a
* timestamp is set.
* Note that this needs to be set before the PID filter later on,
* since the PID isn't reliable for these early stages, so we must
* save all timestamps and do the PID filter later when we can.
*/
u64 ts = bpf_ktime_get_ns();
birth.update(&sk, &ts);
}
// record PID & comm on SYN_SENT
if (state == TCP_SYN_SENT || state == TCP_LAST_ACK) {
// now we can PID filter, both here and a little later on for CLOSE
FILTER_PID
struct id_t me = {.pid = pid};
bpf_get_current_comm(&me.task, sizeof(me.task));
whoami.update(&sk, &me);
}
if (state != TCP_CLOSE)
return 0;
// calculate lifespan
u64 *tsp, delta_us;
tsp = birth.lookup(&sk);
if (tsp == 0) {
whoami.delete(&sk); // may not exist
return 0; // missed create
}
delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
birth.delete(&sk);
// fetch possible cached data, and filter
struct id_t *mep;
mep = whoami.lookup(&sk);
if (mep != 0)
pid = mep->pid;
FILTER_PID
// get throughput stats. see tcp_get_info().
u64 rx_b = 0, tx_b = 0, sport = 0;
struct tcp_sock *tp = (struct tcp_sock *)sk;
rx_b = tp->bytes_received;
tx_b = tp->bytes_acked;
u16 family = sk->__sk_common.skc_family;
if (family == AF_INET) {
struct ipv4_data_t data4 = {.span_us = delta_us,
.rx_b = rx_b, .tx_b = tx_b};
data4.ts_us = bpf_ktime_get_ns() / 1000;
data4.saddr = sk->__sk_common.skc_rcv_saddr;
data4.daddr = sk->__sk_common.skc_daddr;
// a workaround until data4 compiles with separate lport/dport
data4.pid = pid;
data4.ports = ntohs(dport) + ((0ULL + lport) << 32);
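// ports packs both 16-bit ports into a single u64: lport goes in the upper 32 bits
// and the byte-order-converted dport in the lower 32 bits; userspace splits them
// back out with (ports >> 32) and (ports & 0xffffffff).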
if (mep == 0) {
bpf_get_current_comm(&data4.task, sizeof(data4.task));
} else {
bpf_probe_read(&data4.task, sizeof(data4.task), (void *)mep->task);
}
ipv4_events.perf_submit(ctx, &data4, sizeof(data4));
} else /* 6 */ {
struct ipv6_data_t data6 = {.span_us = delta_us,
.rx_b = rx_b, .tx_b = tx_b};
data6.ts_us = bpf_ktime_get_ns() / 1000;
bpf_probe_read(&data6.saddr, sizeof(data6.saddr),
sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
bpf_probe_read(&data6.daddr, sizeof(data6.daddr),
sk->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
// a workaround until data6 compiles with separate lport/dport
data6.ports = ntohs(dport) + ((0ULL + lport) << 32);
data6.pid = pid;
if (mep == 0) {
bpf_get_current_comm(&data6.task, sizeof(data6.task));
} else {
bpf_probe_read(&data6.task, sizeof(data6.task), (void *)mep->task);
}
ipv6_events.perf_submit(ctx, &data6, sizeof(data6));
}
if (mep != 0)
whoami.delete(&sk);
return 0;
}
"""
# code substitutions
if args.pid:
bpf_text = bpf_text.replace('FILTER_PID',
'if (pid != %s) { return 0; }' % args.pid)
if args.remoteport:
dports = [int(dport) for dport in args.remoteport.split(',')]
dports_if = ' && '.join(['dport != %d' % ntohs(dport) for dport in dports])
bpf_text = bpf_text.replace('FILTER_DPORT',
'if (%s) { birth.delete(&sk); return 0; }' % dports_if)
if args.localport:
lports = [int(lport) for lport in args.localport.split(',')]
lports_if = ' && '.join(['lport != %d' % lport for lport in lports])
bpf_text = bpf_text.replace('FILTER_LPORT',
'if (%s) { birth.delete(&sk); return 0; }' % lports_if)
bpf_text = bpf_text.replace('FILTER_PID', '')
bpf_text = bpf_text.replace('FILTER_DPORT', '')
bpf_text = bpf_text.replace('FILTER_LPORT', '')
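# For example, with "-p 181" the FILTER_PID placeholder becomes
# "if (pid != 181) { return 0; }"; the three replace() calls above blank out any
# placeholder whose corresponding flag was not supplied.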
if debug:
print(bpf_text)
# event data
TASK_COMM_LEN = 16 # linux/sched.h
class Data_ipv4(ct.Structure):
_fields_ = [
("ts_us", ct.c_ulonglong),
("pid", ct.c_ulonglong),
("saddr", ct.c_ulonglong),
("daddr", ct.c_ulonglong),
("ports", ct.c_ulonglong),
("rx_b", ct.c_ulonglong),
("tx_b", ct.c_ulonglong),
("span_us", ct.c_ulonglong),
("task", ct.c_char * TASK_COMM_LEN)
]
class Data_ipv6(ct.Structure):
_fields_ = [
("ts_us", ct.c_ulonglong),
("pid", ct.c_ulonglong),
("saddr", (ct.c_ulonglong * 2)),
("daddr", (ct.c_ulonglong * 2)),
("ports", ct.c_ulonglong),
("rx_b", ct.c_ulonglong),
("tx_b", ct.c_ulonglong),
("span_us", ct.c_ulonglong),
("task", ct.c_char * TASK_COMM_LEN)
]
#
# Setup output formats
#
# Don't change the default output (next 2 lines): this fits in 80 chars. I
# know it doesn't have NS or UIDs etc. I know. If you really, really, really
# need to add columns, columns that solve real actual problems, I'd start by
# adding an extended mode (-x) to include those columns.
#
header_string = "%-5s %-10.10s %s%-15s %-5s %-15s %-5s %5s %5s %s"
format_string = "%-5d %-10.10s %s%-15s %-5d %-15s %-5d %5d %5d %.2f"
if args.wide:
header_string = "%-5s %-16.16s %-2s %-26s %-5s %-26s %-5s %6s %6s %s"
format_string = "%-5d %-16.16s %-2s %-26s %-5s %-26s %-5d %6d %6d %.2f"
if args.csv:
header_string = "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s"
format_string = "%d,%s,%s,%s,%s,%s,%d,%d,%d,%.2f"
# process event
def print_ipv4_event(cpu, data, size):
event = ct.cast(data, ct.POINTER(Data_ipv4)).contents
global start_ts
if args.time:
if args.csv:
print("%s," % strftime("%H:%M:%S"), end="")
else:
print("%-8s " % strftime("%H:%M:%S"), end="")
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
delta_s = (float(event.ts_us) - start_ts) / 1000000
if args.csv:
print("%.6f," % delta_s, end="")
else:
print("%-9.6f " % delta_s, end="")
print(format_string % (event.pid, event.task.decode(),
"4" if args.wide or args.csv else "",
inet_ntop(AF_INET, pack("I", event.saddr)), event.ports >> 32,
inet_ntop(AF_INET, pack("I", event.daddr)), event.ports & 0xffffffff,
event.tx_b / 1024, event.rx_b / 1024, float(event.span_us) / 1000))
def print_ipv6_event(cpu, data, size):
event = ct.cast(data, ct.POINTER(Data_ipv6)).contents
global start_ts
if args.time:
if args.csv:
print("%s," % strftime("%H:%M:%S"), end="")
else:
print("%-8s " % strftime("%H:%M:%S"), end="")
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
delta_s = (float(event.ts_us) - start_ts) / 1000000
if args.csv:
print("%.6f," % delta_s, end="")
else:
print("%-9.6f " % delta_s, end="")
print(format_string % (event.pid, event.task.decode(),
"6" if args.wide or args.csv else "",
inet_ntop(AF_INET6, event.saddr), event.ports >> 32,
inet_ntop(AF_INET6, event.daddr), event.ports & 0xffffffff,
event.tx_b / 1024, event.rx_b / 1024, float(event.span_us) / 1000))
# initialize BPF
b = BPF(text=bpf_text)
# header
if args.time:
if args.csv:
print("%s," % ("TIME"), end="")
else:
print("%-8s " % ("TIME"), end="")
if args.timestamp:
if args.csv:
print("%s," % ("TIME(s)"), end="")
else:
print("%-9s " % ("TIME(s)"), end="")
print(header_string % ("PID", "COMM",
"IP" if args.wide or args.csv else "", "LADDR",
"LPORT", "RADDR", "RPORT", "TX_KB", "RX_KB", "MS"))
start_ts = 0
# read events
b["ipv4_events"].open_perf_buffer(print_ipv4_event, page_cnt=64)
b["ipv6_events"].open_perf_buffer(print_ipv6_event, page_cnt=64)
while 1:
b.kprobe_poll()
| {
"content_hash": "ebdd0aedeb13c04bcc824c278bf38a00",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 79,
"avg_line_length": 33.31666666666667,
"alnum_prop": 0.583791895947974,
"repo_name": "shodoco/bcc",
"id": "490e56860e29466edb2640b294a8cdd8e111a625",
"size": "11994",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/tcplife.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "120942"
},
{
"name": "C++",
"bytes": "777779"
},
{
"name": "CMake",
"bytes": "28402"
},
{
"name": "HTML",
"bytes": "2979"
},
{
"name": "LLVM",
"bytes": "4379"
},
{
"name": "Limbo",
"bytes": "6069"
},
{
"name": "Lua",
"bytes": "234464"
},
{
"name": "Makefile",
"bytes": "1480"
},
{
"name": "Objective-C",
"bytes": "21567"
},
{
"name": "P4",
"bytes": "9242"
},
{
"name": "Python",
"bytes": "328836"
},
{
"name": "Shell",
"bytes": "9047"
},
{
"name": "Yacc",
"bytes": "19817"
}
],
"symlink_target": ""
} |
from raspledstrip.ledstrip import *
from raspledstrip.animation import *
from raspledstrip.color import Color
import os
import requests
import json
import time
import traceback
class Lumiere:
"""
Class to handle getting light information.
"""
def __init__(self):
"""
Constructor. Read settings if there.
"""
settings_file = 'settings.json'
self.settings = {}
if os.path.isfile(settings_file):
self.settings = json.loads(open(settings_file).read())
self.lights = self.settings['lights'] if 'lights' in self.settings else 160
self.api = self.settings['api'] if 'api' in self.settings else 'http://lumiere.lighting'
self.poll_interval = self.settings['poll_interval'] if 'poll_interval' in self.settings else 6
self.current_id = None
self.light_array = []
self.led = LEDStrip(self.lights)
self.led.all_off()
def listen(self):
"""
Handles the continual checking.
"""
while True:
try:
self.query_lumiere()
time.sleep(self.poll_interval)
except (KeyboardInterrupt, SystemExit):
raise
except:
print(traceback.format_exc())
def set_lights(self):
"""
Change the lights.
"""
self.fill_lights()
# Animate
anim = FireFlies(self.led, self.light_array, 1, 1, 0, self.led.lastIndex)
for i in range(50):
anim.step()
self.led.update()
# Final fill
for li, l in enumerate(self.light_array):
self.led.set(li, l)
self.led.update()
def fill_lights(self):
"""
Fill up LED count with all the colors.
"""
self.light_array = []
light_array = []
length = len(self.current['colors'])
for x in range(0, self.lights - 1):
light_array.append(self.hex_to_rgb(self.current['colors'][x % length]))
for li, l in enumerate(light_array):
self.light_array.append(Color(l[0], l[1], l[2]))
def query_lumiere(self):
"""
Make request to API.
"""
r = requests.get('%s/api/colors' % (self.api))
if r.status_code == requests.codes.ok:
self.current = r.json()
# Only update if new record
if self.current_id is None or self.current_id != self.current['_id']:
self.current_id = self.current['_id']
self.set_lights()
def hex_to_rgb(self, value):
"""
Turns hex value to RGB tuple.
"""
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
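# e.g. hex_to_rgb('#ff8800') -> (255, 136, 0); lv // 3 is the number of hex digits per channel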
if __name__ == '__main__':
lumiere = Lumiere()
lumiere.listen()
| {
"content_hash": "2a1b4961f173a611ca4b504e868189b0",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 98,
"avg_line_length": 23.934579439252335,
"alnum_prop": 0.6114798906677079,
"repo_name": "lumiere-lighting/lumiere-node-raspberry-pi",
"id": "04233262aab8e4fb07ce7da1b51952f0b89ab128",
"size": "2584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lumiere.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4806"
}
],
"symlink_target": ""
} |
"""
The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the
parameters of an estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# Raghav RV <rvraghav93@gmail.com>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from collections.abc import Mapping, Sequence, Iterable
from functools import partial, reduce
from itertools import product
import numbers
import operator
import time
import warnings
import numpy as np
from numpy.ma import MaskedArray
from scipy.stats import rankdata
from ..base import BaseEstimator, is_classifier, clone
from ..base import MetaEstimatorMixin
from ._split import check_cv
from ._validation import _fit_and_score
from ._validation import _aggregate_score_dicts
from ._validation import _insert_error_scores
from ._validation import _normalize_score_results
from ..exceptions import NotFittedError
from joblib import Parallel, delayed
from ..utils import check_random_state
from ..utils.random import sample_without_replacement
from ..utils.validation import indexable, check_is_fitted, _check_fit_params
from ..utils.validation import _deprecate_positional_args
from ..utils.metaestimators import if_delegate_has_method
from ..metrics._scorer import _check_multimetric_scoring
from ..metrics import check_scoring
from ..utils import deprecated
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid:
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
The order of the generated parameter combinations is deterministic.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of str to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.model_selection import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
Uses :class:`ParameterGrid` to perform a full parallelized parameter
search.
"""
def __init__(self, param_grid):
if not isinstance(param_grid, (Mapping, Iterable)):
raise TypeError('Parameter grid is not a dict or '
'a list ({!r})'.format(param_grid))
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
# check if all entries are dictionaries of lists
for grid in param_grid:
if not isinstance(grid, dict):
raise TypeError('Parameter grid is not a '
'dict ({!r})'.format(grid))
for key in grid:
if not isinstance(grid[key], Iterable):
raise TypeError('Parameter grid value is not iterable '
'(key={!r}, value={!r})'
.format(key, grid[key]))
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of str to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of str to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
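# Worked example: for {'a': [1, 2], 'b': [True, False]} the reverse-sorted keys are
# ('b', 'a') with sizes (2, 2); ind=2 decodes as divmod(2, 2) -> (1, 0) so 'b' = True,
# then divmod(1, 2) -> (0, 1) so 'a' = 2, matching list(grid)[2] == {'a': 2, 'b': True}.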
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler:
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary with parameters names (`str`) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
If a list of dicts is given, first a dict is sampled uniformly, and
then a parameter is sampled using that dict as above.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState instance, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Pass an int for reproducible output across multiple
function calls.
See :term:`Glossary <random_state>`.
Returns
-------
params : dict of str to any
**Yields** dictionaries mapping each estimator parameter to
a sampled value.
Examples
--------
>>> from sklearn.model_selection import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4,
... random_state=rng))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
@_deprecate_positional_args
def __init__(self, param_distributions, n_iter, *, random_state=None):
if not isinstance(param_distributions, (Mapping, Iterable)):
raise TypeError('Parameter distribution is not a dict or '
'a list ({!r})'.format(param_distributions))
if isinstance(param_distributions, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_distributions = [param_distributions]
for dist in param_distributions:
if not isinstance(dist, dict):
raise TypeError('Parameter distribution is not a '
'dict ({!r})'.format(dist))
for key in dist:
if (not isinstance(dist[key], Iterable)
and not hasattr(dist[key], 'rvs')):
raise TypeError('Parameter value is not iterable '
'or distribution (key={!r}, value={!r})'
.format(key, dist[key]))
self.n_iter = n_iter
self.random_state = random_state
self.param_distributions = param_distributions
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = all(
all(not hasattr(v, "rvs") for v in dist.values())
for dist in self.param_distributions)
rng = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
n_iter = self.n_iter
if grid_size < n_iter:
warnings.warn(
'The total space of parameters %d is smaller '
'than n_iter=%d. Running %d iterations. For exhaustive '
'searches, use GridSearchCV.'
% (grid_size, self.n_iter, grid_size), UserWarning)
n_iter = grid_size
for i in sample_without_replacement(grid_size, n_iter,
random_state=rng):
yield param_grid[i]
else:
for _ in range(self.n_iter):
dist = rng.choice(self.param_distributions)
# Always sort the keys of a dictionary, for reproducibility
items = sorted(dist.items())
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs(random_state=rng)
else:
params[k] = v[rng.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
# FIXME Remove fit_grid_point in 0.25
@deprecated(
"fit_grid_point is deprecated in version 0.23 "
"and will be removed in version 0.25"
)
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score=np.nan, **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None
The scorer callable object / function must have its signature as
``scorer(estimator, X, y)``.
If ``None`` the estimator's score method is used.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameters passed to the fit function of the estimator.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
# NOTE we are not using the return value as the scorer by itself should be
# validated before. We use check_scoring only to reject multimetric scorer
check_scoring(estimator, scorer)
results = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params=fit_params,
return_n_test_samples=True,
error_score=error_score)
return results["test_scores"], parameters, results["n_test_samples"]
def _check_param_grid(param_grid):
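# e.g. {'C': [1, 10], 'kernel': ['linear', 'rbf']} passes these checks, whereas
# {'C': 1} is rejected because single values must be wrapped in a one-element list.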
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
if (isinstance(v, str) or
not isinstance(v, (np.ndarray, Sequence))):
raise ValueError("Parameter grid for parameter ({0}) needs to"
" be a list or numpy array, but got ({1})."
" Single values need to be wrapped in a list"
" with one element.".format(name, type(v)))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class BaseSearchCV(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):
"""Abstract base class for hyper parameter search with cross-validation.
"""
@abstractmethod
@_deprecate_positional_args
def __init__(self, estimator, *, scoring=None, n_jobs=None,
refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score=np.nan,
return_train_score=True):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
self.return_train_score = return_train_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
@property
def _pairwise(self):
# allows cross-validation to see 'precomputed' metrics
return getattr(self.estimator, '_pairwise', False)
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples, n_output) \
or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
"""
self._check_is_fitted('score')
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if isinstance(self.scorer_, dict):
if self.multimetric_:
scorer = self.scorer_[self.refit]
else:
scorer = self.scorer_
return scorer(self.best_estimator_, X, y)
# callable
score = self.scorer_(self.best_estimator_, X, y)
if self.multimetric_:
score = score[self.refit]
return score
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def score_samples(self, X):
"""Call score_samples on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``score_samples``.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements
of the underlying estimator.
Returns
-------
y_score : ndarray, shape (n_samples,)
"""
self._check_is_fitted('score_samples')
return self.best_estimator_.score_samples(X)
def _check_is_fitted(self, method_name):
if not self.refit:
raise NotFittedError('This %s instance was initialized '
'with refit=False. %s is '
'available only after refitting on the best '
'parameters. You can refit an estimator '
'manually using the ``best_params_`` '
'attribute'
% (type(self).__name__, method_name))
else:
check_is_fitted(self)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict')
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_proba')
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_log_proba')
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('decision_function')
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('transform')
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found params.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('inverse_transform')
return self.best_estimator_.inverse_transform(Xt)
@property
def n_features_in_(self):
# For consistency with other estimators we raise an AttributeError so
# that hasattr() fails if the search estimator isn't fitted.
try:
check_is_fitted(self)
except NotFittedError as nfe:
raise AttributeError(
"{} object has no n_features_in_ attribute."
.format(self.__class__.__name__)
) from nfe
return self.best_estimator_.n_features_in_
@property
def classes_(self):
self._check_is_fitted("classes_")
return self.best_estimator_.classes_
def _run_search(self, evaluate_candidates):
"""Repeatedly calls `evaluate_candidates` to conduct a search.
This method, implemented in sub-classes, makes it possible to
customize the scheduling of evaluations: GridSearchCV and
RandomizedSearchCV schedule evaluations for their whole parameter
search space at once but other more sequential approaches are also
possible: for instance it is possible to iteratively schedule evaluations
for new regions of the parameter search space based on previously
collected evaluation results. This makes it possible to implement
Bayesian optimization or more generally sequential model-based
optimization by deriving from the BaseSearchCV abstract base class.
Parameters
----------
evaluate_candidates : callable
This callback accepts a list of candidates, where each candidate is
a dict of parameter settings. It returns a dict of all results so
far, formatted like ``cv_results_``.
Examples
--------
::
def _run_search(self, evaluate_candidates):
'Try C=0.1 only if C=1 is better than C=10'
all_results = evaluate_candidates([{'C': 1}, {'C': 10}])
score = all_results['mean_test_score']
if score[0] < score[1]:
evaluate_candidates([{'C': 0.1}])
"""
raise NotImplementedError("_run_search not implemented.")
def _check_refit_for_multimetric(self, scores):
"""Check `refit` is compatible with `scores` is valid"""
multimetric_refit_msg = (
"For multi-metric scoring, the parameter refit must be set to a "
"scorer key or a callable to refit an estimator with the best "
"parameter setting on the whole data and make the best_* "
"attributes available for that metric. If this is not needed, "
f"refit should be set to False explicitly. {self.refit!r} was "
"passed.")
valid_refit_dict = (isinstance(self.refit, str) and
self.refit in scores)
if (self.refit is not False and not valid_refit_dict
and not callable(self.refit)):
raise ValueError(multimetric_refit_msg)
@_deprecate_positional_args
def fit(self, X, y=None, *, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples, n_output) \
or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
**fit_params : dict of str -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
refit_metric = "score"
if callable(self.scoring):
scorers = self.scoring
elif self.scoring is None or isinstance(self.scoring, str):
scorers = check_scoring(self.estimator, self.scoring)
else:
scorers = _check_multimetric_scoring(self.estimator, self.scoring)
self._check_refit_for_multimetric(scorers)
refit_metric = self.refit
X, y, groups = indexable(X, y, groups)
fit_params = _check_fit_params(X, fit_params)
n_splits = cv.get_n_splits(X, y, groups)
base_estimator = clone(self.estimator)
parallel = Parallel(n_jobs=self.n_jobs,
pre_dispatch=self.pre_dispatch)
fit_and_score_kwargs = dict(scorer=scorers,
fit_params=fit_params,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True,
return_parameters=False,
error_score=self.error_score,
verbose=self.verbose)
results = {}
with parallel:
all_candidate_params = []
all_out = []
def evaluate_candidates(candidate_params):
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
if self.verbose > 0:
print("Fitting {0} folds for each of {1} candidates,"
" totalling {2} fits".format(
n_splits, n_candidates, n_candidates * n_splits))
out = parallel(delayed(_fit_and_score)(clone(base_estimator),
X, y,
train=train, test=test,
parameters=parameters,
split_progress=(
split_idx,
n_splits),
candidate_progress=(
cand_idx,
n_candidates),
**fit_and_score_kwargs)
for (cand_idx, parameters),
(split_idx, (train, test)) in product(
enumerate(candidate_params),
enumerate(cv.split(X, y, groups))))
if len(out) < 1:
raise ValueError('No fits were performed. '
'Was the CV iterator empty? '
'Were there no candidates?')
elif len(out) != n_candidates * n_splits:
raise ValueError('cv.split and cv.get_n_splits returned '
'inconsistent results. Expected {} '
'splits, got {}'
.format(n_splits,
len(out) // n_candidates))
# For callable self.scoring, the return type is only known after
# calling. If the return type is a dictionary, the error scores
# can now be inserted with the correct key. The type checking
# of out will be done in `_insert_error_scores`.
if callable(self.scoring):
_insert_error_scores(out, self.error_score)
all_candidate_params.extend(candidate_params)
all_out.extend(out)
nonlocal results
results = self._format_results(
all_candidate_params, n_splits, all_out)
return results
self._run_search(evaluate_candidates)
# multimetric is determined here because in the case of a callable
# self.scoring the return type is only known after calling
first_test_score = all_out[0]['test_scores']
self.multimetric_ = isinstance(first_test_score, dict)
# check refit_metric now for a callable scorer that is multimetric
if callable(self.scoring) and self.multimetric_:
self._check_refit_for_multimetric(first_test_score)
refit_metric = self.refit
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
# If callable, refit is expected to return the index of the best
# parameter set.
if callable(self.refit):
self.best_index_ = self.refit(results)
if not isinstance(self.best_index_, numbers.Integral):
raise TypeError('best_index_ returned is not an integer')
if (self.best_index_ < 0 or
self.best_index_ >= len(results["params"])):
raise IndexError('best_index_ index out of range')
else:
self.best_index_ = results["rank_test_%s"
% refit_metric].argmin()
self.best_score_ = results["mean_test_%s" % refit_metric][
self.best_index_]
self.best_params_ = results["params"][self.best_index_]
if self.refit:
# we clone again after setting params in case some
# of the params are estimators as well.
self.best_estimator_ = clone(clone(base_estimator).set_params(
**self.best_params_))
refit_start_time = time.time()
if y is not None:
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
refit_end_time = time.time()
self.refit_time_ = refit_end_time - refit_start_time
# Store the only scorer not as a dict for single metric evaluation
self.scorer_ = scorers
self.cv_results_ = results
self.n_splits_ = n_splits
return self
def _format_results(self, candidate_params, n_splits, out):
n_candidates = len(candidate_params)
out = _aggregate_score_dicts(out)
results = {}
def _store(key_name, array, weights=None, splits=False, rank=False):
"""A small helper to store the scores/times to the cv_results_"""
# When iterated first by splits, then by parameters
# We want `array` to have `n_candidates` rows and `n_splits` cols.
array = np.array(array, dtype=np.float64).reshape(n_candidates,
n_splits)
if splits:
for split_idx in range(n_splits):
# Uses closure to alter the results
results["split%d_%s"
% (split_idx, key_name)] = array[:, split_idx]
array_means = np.average(array, axis=1, weights=weights)
results['mean_%s' % key_name] = array_means
# Weighted std is not directly available in numpy
array_stds = np.sqrt(np.average((array -
array_means[:, np.newaxis]) ** 2,
axis=1, weights=weights))
results['std_%s' % key_name] = array_stds
if rank:
results["rank_%s" % key_name] = np.asarray(
rankdata(-array_means, method='min'), dtype=np.int32)
_store('fit_time', out["fit_time"])
_store('score_time', out["score_time"])
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(partial(MaskedArray,
np.empty(n_candidates,),
mask=True,
dtype=object))
for cand_idx, params in enumerate(candidate_params):
for name, value in params.items():
# An all masked empty array gets created for the key
# `"param_%s" % name` at the first occurrence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_idx] = value
results.update(param_results)
# Store a list of param dicts at the key 'params'
results['params'] = candidate_params
test_scores_dict = _normalize_score_results(out["test_scores"])
if self.return_train_score:
train_scores_dict = _normalize_score_results(out["train_scores"])
for scorer_name in test_scores_dict:
# Compute the (weighted) mean and std for test scores alone
_store('test_%s' % scorer_name, test_scores_dict[scorer_name],
splits=True, rank=True,
weights=None)
if self.return_train_score:
_store('train_%s' % scorer_name,
train_scores_dict[scorer_name],
splits=True)
return results
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "score_samples", "predict", "predict_proba",
"decision_function", "transform" and "inverse_transform" if they are
implemented in the estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameter names (`str`) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : str, callable, list/tuple or dict, default=None
A single str (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
If None, the estimator's score method is used.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None
pre_dispatch : int, or str, default=n_jobs
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A str, giving an expression as a function of n_jobs,
as in '2*n_jobs'
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
refit : bool, str, or callable, default=True
Refit an estimator using the best found parameters on the whole
dataset.
For multiple metric evaluation, this needs to be a `str` denoting the
scorer that would be used to find the best parameters for refitting
the estimator at the end.
Where there are considerations other than maximum score in
choosing a best estimator, ``refit`` can be set to a function which
returns the selected ``best_index_`` given ``cv_results_``. In that
case, the ``best_estimator_`` and ``best_params_`` will be set
according to the returned ``best_index_`` while the ``best_score_``
attribute will not be available.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``GridSearchCV`` instance.
Also for multiple metric evaluation, the attributes ``best_index_``,
``best_score_`` and ``best_params_`` will only be available if
``refit`` is set and all of them will be determined w.r.t this specific
scorer.
See ``scoring`` parameter to know more about multiple metric
evaluation.
.. versionchanged:: 0.20
Support for callable added.
verbose : integer
Controls the verbosity: the higher, the more messages.
- >1 : the computation time for each fold and parameter candidate is
displayed;
- >2 : the score is also displayed;
- >3 : the fold and candidate parameter indexes are also displayed
together with the starting time of the computation.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
return_train_score : bool, default=False
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
.. versionadded:: 0.19
.. versionchanged:: 0.21
Default value was changed from ``True`` to ``False``
Examples
--------
>>> from sklearn import svm, datasets
>>> from sklearn.model_selection import GridSearchCV
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svc = svm.SVC()
>>> clf = GridSearchCV(svc, parameters)
>>> clf.fit(iris.data, iris.target)
GridSearchCV(estimator=SVC(),
param_grid={'C': [1, 10], 'kernel': ('linear', 'rbf')})
>>> sorted(clf.cv_results_.keys())
['mean_fit_time', 'mean_score_time', 'mean_test_score',...
'param_C', 'param_kernel', 'params',...
'rank_test_score', 'split0_test_score',...
'split2_test_score', ...
'std_fit_time', 'std_score_time', 'std_test_score']
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance, the table below
+------------+-----------+------------+-----------------+---+---------+
|param_kernel|param_gamma|param_degree|split0_test_score|...|rank_t...|
+============+===========+============+=================+===+=========+
| 'poly' | -- | 2 | 0.80 |...| 2 |
+------------+-----------+------------+-----------------+---+---------+
| 'poly' | -- | 3 | 0.70 |...| 4 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.1 | -- | 0.80 |...| 3 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.2 | -- | 0.93 |...| 1 |
+------------+-----------+------------+-----------------+---+---------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
mask = [False False False False]...)
'param_gamma': masked_array(data = [-- -- 0.1 0.2],
mask = [ True True False False]...),
'param_degree': masked_array(data = [2.0 3.0 -- --],
mask = [False False True True]...),
'split0_test_score' : [0.80, 0.70, 0.80, 0.93],
'split1_test_score' : [0.82, 0.50, 0.70, 0.78],
'mean_test_score' : [0.81, 0.60, 0.75, 0.85],
'std_test_score' : [0.01, 0.10, 0.05, 0.08],
'rank_test_score' : [2, 4, 3, 1],
'split0_train_score' : [0.80, 0.92, 0.70, 0.93],
'split1_train_score' : [0.82, 0.55, 0.70, 0.87],
'mean_train_score' : [0.81, 0.74, 0.70, 0.90],
'std_train_score' : [0.01, 0.19, 0.00, 0.03],
'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],
'std_fit_time' : [0.01, 0.02, 0.01, 0.01],
'mean_score_time' : [0.01, 0.06, 0.04, 0.04],
'std_score_time' : [0.00, 0.00, 0.00, 0.01],
'params' : [{'kernel': 'poly', 'degree': 2}, ...],
}
NOTE
The key ``'params'`` is used to store a list of parameter
settings dicts for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
For multi-metric evaluation, the scores for all the scorers are
available in the ``cv_results_`` dict at the keys ending with that
scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
above. ('split0_test_precision', 'mean_train_precision' etc.)
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
See ``refit`` parameter for more information on allowed values.
best_score_ : float
Mean cross-validated score of the best_estimator
For multi-metric evaluation, this is present only if ``refit`` is
specified.
This attribute is not available if ``refit`` is a function.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
For multi-metric evaluation, this is present only if ``refit`` is
specified.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
For multi-metric evaluation, this is present only if ``refit`` is
specified.
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
For multi-metric evaluation, this attribute holds the validated
``scoring`` dict which maps the scorer key to the scorer callable.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
refit_time_ : float
Seconds used for refitting the best model on the whole dataset.
This is present only if ``refit`` is not False.
.. versionadded:: 0.20
Notes
-----
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.model_selection.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
_required_parameters = ["estimator", "param_grid"]
@_deprecate_positional_args
def __init__(self, estimator, param_grid, *, scoring=None,
n_jobs=None, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs',
error_score=np.nan, return_train_score=False):
super().__init__(
estimator=estimator, scoring=scoring,
n_jobs=n_jobs, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score,
return_train_score=return_train_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def _run_search(self, evaluate_candidates):
"""Search all candidates in param_grid"""
evaluate_candidates(ParameterGrid(self.param_grid))
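# --- Hedged example (editor's addition) ---
# The ``refit`` docstring above allows a callable that maps ``cv_results_`` to
# ``best_index_``. Below is a minimal sketch of such a strategy; the function
# name and the "within one standard deviation of the best" rule are
# illustrative assumptions, not part of this module.
def _example_refit_strategy(cv_results):
    """Pick the lowest-index candidate whose mean score is within one std of the best."""
    means = np.asarray(cv_results['mean_test_score'])
    stds = np.asarray(cv_results['std_test_score'])
    best = means.argmax()
    acceptable = np.flatnonzero(means >= means[best] - stds[best])
    return int(acceptable[0])
# Usage sketch: GridSearchCV(SVC(), {'C': [1, 10]}, refit=_example_refit_strategy)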
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "score_samples", "predict", "predict_proba",
"decision_function", "transform" and "inverse_transform" if they are
implemented in the estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
.. versionadded:: 0.14
Parameters
----------
estimator : estimator object.
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict or list of dicts
Dictionary with parameter names (`str`) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
If a list of dicts is given, first a dict is sampled uniformly, and
then a parameter is sampled using that dict as above.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : str, callable, list/tuple or dict, default=None
A single str (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
If None, the estimator's score method is used.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None
pre_dispatch : int, or str, default=None
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A str, giving an expression as a function of n_jobs,
as in '2*n_jobs'
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
refit : bool, str, or callable, default=True
Refit an estimator using the best found parameters on the whole
dataset.
For multiple metric evaluation, this needs to be a `str` denoting the
scorer that would be used to find the best parameters for refitting
the estimator at the end.
Where there are considerations other than maximum score in
choosing a best estimator, ``refit`` can be set to a function which
returns the selected ``best_index_`` given ``cv_results_``. In that
case, the ``best_estimator_`` and ``best_params_`` will be set
according to the returned ``best_index_`` while the ``best_score_``
attribute will not be available.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``RandomizedSearchCV`` instance.
Also for multiple metric evaluation, the attributes ``best_index_``,
``best_score_`` and ``best_params_`` will only be available if
``refit`` is set and all of them will be determined w.r.t this specific
scorer.
See ``scoring`` parameter to know more about multiple metric
evaluation.
.. versionchanged:: 0.20
Support for callable added.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState instance, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Pass an int for reproducible output across multiple
function calls.
See :term:`Glossary <random_state>`.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
return_train_score : bool, default=False
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
.. versionadded:: 0.19
.. versionchanged:: 0.21
Default value was changed from ``True`` to ``False``
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance, the table below
+--------------+-------------+-------------------+---+---------------+
| param_kernel | param_gamma | split0_test_score |...|rank_test_score|
+==============+=============+===================+===+===============+
| 'rbf' | 0.1 | 0.80 |...| 1 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.2 | 0.84 |...| 3 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.3 | 0.70 |...| 2 |
+--------------+-------------+-------------------+---+---------------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],
mask = False),
'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False),
'split0_test_score' : [0.80, 0.84, 0.70],
'split1_test_score' : [0.82, 0.50, 0.70],
'mean_test_score' : [0.81, 0.67, 0.70],
'std_test_score' : [0.01, 0.24, 0.00],
'rank_test_score' : [1, 3, 2],
'split0_train_score' : [0.80, 0.92, 0.70],
'split1_train_score' : [0.82, 0.55, 0.70],
'mean_train_score' : [0.81, 0.74, 0.70],
'std_train_score' : [0.01, 0.19, 0.00],
'mean_fit_time' : [0.73, 0.63, 0.43],
'std_fit_time' : [0.01, 0.02, 0.01],
'mean_score_time' : [0.01, 0.06, 0.04],
'std_score_time' : [0.00, 0.00, 0.00],
'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...],
}
NOTE
The key ``'params'`` is used to store a list of parameter
settings dicts for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
For multi-metric evaluation, the scores for all the scorers are
available in the ``cv_results_`` dict at the keys ending with that
scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
above. ('split0_test_precision', 'mean_train_precision' etc.)
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
For multi-metric evaluation, this attribute is present only if
``refit`` is specified.
See ``refit`` parameter for more information on allowed values.
best_score_ : float
Mean cross-validated score of the best_estimator.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
This attribute is not available if ``refit`` is a function.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
For multi-metric evaluation, this attribute holds the validated
``scoring`` dict which maps the scorer key to the scorer callable.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
refit_time_ : float
Seconds used for refitting the best model on the whole dataset.
This is present only if ``refit`` is not False.
.. versionadded:: 0.20
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.model_selection import RandomizedSearchCV
>>> from scipy.stats import uniform
>>> iris = load_iris()
>>> logistic = LogisticRegression(solver='saga', tol=1e-2, max_iter=200,
... random_state=0)
>>> distributions = dict(C=uniform(loc=0, scale=4),
... penalty=['l2', 'l1'])
>>> clf = RandomizedSearchCV(logistic, distributions, random_state=0)
>>> search = clf.fit(iris.data, iris.target)
>>> search.best_params_
{'C': 2..., 'penalty': 'l1'}
"""
_required_parameters = ["estimator", "param_distributions"]
@_deprecate_positional_args
def __init__(self, estimator, param_distributions, *, n_iter=10,
scoring=None, n_jobs=None, refit=True,
cv=None, verbose=0, pre_dispatch='2*n_jobs',
random_state=None, error_score=np.nan,
return_train_score=False):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super().__init__(
estimator=estimator, scoring=scoring,
n_jobs=n_jobs, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score,
return_train_score=return_train_score)
def _run_search(self, evaluate_candidates):
"""Search n_iter candidates from param_distributions"""
evaluate_candidates(ParameterSampler(
self.param_distributions, self.n_iter,
random_state=self.random_state))
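# --- Hedged example (editor's addition) ---
# As the class docstring notes, list-valued entries are sampled uniformly while
# distribution objects are sampled through their ``rvs`` method. A small sketch
# using ParameterSampler directly; the parameter names mirror the docstring
# example and the scipy import is an assumption.
def _example_parameter_sampler(n_iter=5, random_state=0):
    """Return n_iter candidate settings drawn from a mixed list/distribution space."""
    from scipy.stats import uniform
    param_distributions = {
        'penalty': ['l2', 'l1'],       # a list: sampled uniformly
        'C': uniform(loc=0, scale=4),  # a distribution: sampled via rvs()
    }
    return list(ParameterSampler(param_distributions, n_iter=n_iter,
                                 random_state=random_state))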
| {
"content_hash": "ee28aae436821026e2d85c7d3e412607",
"timestamp": "",
"source": "github",
"line_count": 1558,
"max_line_length": 82,
"avg_line_length": 41.353658536585364,
"alnum_prop": 0.577364230393146,
"repo_name": "bnaul/scikit-learn",
"id": "efc230ac080f28163cb0a699180d136e851e4e3b",
"size": "64429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/model_selection/_search.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451996"
},
{
"name": "C++",
"bytes": "140322"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7229182"
},
{
"name": "Shell",
"bytes": "19938"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas
from echometrics import *
from fixtures import data, make_simple_data
from nose import with_setup
def test_calc_metrics():
file = 'data/Sept17_DVM_int_cope38.csv'
echo = read_flat(file, ['Lat_M', 'Lon_M'], 'Layer', 'Sv_mean')
metrics_list = [depth_integral, sv_avg, center_of_mass, proportion_occupied,
aggregation_index, equivalent_area]
metrics = calc_metrics(echo, metrics_list)
assert type(metrics) == pandas.DataFrame
assert all(metrics.index == echo.index)
assert metrics.shape[0] == len(echo.index)
assert len(metrics.columns) == len(metrics_list)
| {
"content_hash": "194c8ae97f35dd2ae386196ffed35ef9",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 80,
"avg_line_length": 41.733333333333334,
"alnum_prop": 0.6853035143769968,
"repo_name": "ElOceanografo/EchoMetrics",
"id": "8681a9a75756ceb77e7caf66edcd01c7843cb8c5",
"size": "626",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/lib/echometrics/tests/test_batch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "18345"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class RegisterForm(UserCreationForm):
class Meta:
model = User
fields = ('username', 'email', 'password1', 'password2')
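# --- Hedged usage sketch (editor's addition) ---
# One common way to wire this form into a registration view; the view name,
# template path and redirect target are assumptions, not part of this repo.
from django.shortcuts import redirect, render

def register(request):
    form = RegisterForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()  # UserCreationForm.save() creates the User with a hashed password
        return redirect('login')
    return render(request, 'registration/register.html', {'form': form})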
| {
"content_hash": "9f4a50d6b02a93f006ccdfaa6bc04d4c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 64,
"avg_line_length": 30.125,
"alnum_prop": 0.7178423236514523,
"repo_name": "Crypt1k/Blog",
"id": "2594350a6f06eb0b7b087004b2f89d8bee9b5440",
"size": "241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "account/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48813"
}
],
"symlink_target": ""
} |
""" basecontroller.py - Classes to handle CRUD form for a model.
$Id: basecontroller.py 186 2011-05-28 06:19:05Z ats $
"""
__author__ = 'Atsushi Shibata <shibata@webcore.co.jp>'
__docformat__ = 'plaintext'
__licence__ = 'BSD'
import os
import new
import re
import logging
from urlparse import urlsplit
from Cookie import SimpleCookie
from aha import Config
from google.appengine.api import memcache
from google.appengine.ext.webapp import template
from django.template import Context, Template
from aha.controller.decorator import cache
class BaseController(object):
"""
The BaseController is the base class of action controllers.
Action controller handles the requests from clients.
"""
_template_ext = '.html'
def __init__(self, hnd, params = {}):
"""
An initialization method. It sets some attributes for convenience.
:param hnd : a request object.
:param params : parameters given via the dispatcher.
"""
self.hnd = hnd # handler itself
self.controller = self # controller object
self.response = hnd.response # response object
self.request = hnd.request # request object
self.params = params # parameters
# update parameters when some GET/POST parameters are given.
for k in self.request.arguments():
self.params[k] = self.request.get_all(k)
if len(self.params[k]) == 1:
self.params[k] = self.params[k][0]
self._controller = params.get('controller', '') # controller as a string
self._action = params.get('action', 'index') # action as a string
self.has_rendered = False # reset the rendering flag
self.has_redirected = False # reset the redirect flag
self.__config = Config() # config object
self.__tpldir = os.path.join(
self.__config.template_dir,
self._controller
) # default template directory
self._template_values = {}
# implement parameter nesting as in rails
self.params = self.__nested_params(self.params)
# alias the cookies
self.cookies = self.request.cookies
# cookie for the response
self.post_cookie = SimpleCookie()
# create the session
try:
store = self.__config.session_store
exec('from aha.session.%s import %sSession' %
(store, store.capitalize()))
self.session = eval('%sSession' % store.capitalize())(
hnd, '%s_session' % self.__config.app_name)
except:
raise Exception('Initialize Session Error!')
# add request method (get, post, head, put, ....)
env = self.request.environ
self._request_method = env.get('REQUEST_METHOD').lower()
# tell if an ajax call (X-Request)
self._is_xhr = env.has_key('HTTP_X_REQUESTED_WITH') and \
env.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
# add helpers
import helper
self.helpers = helper.get_helpers()
try:
import application.util.helper
except ImportError, e:
pass
def before_action(self):
"""
A method called right before render() method.
You can do pre-render jobs in this method, such as caching, etc.
"""
pass
def after_action(self):
"""
A method called right after render() method.
"""
pass
def from_json(self, json):
"""
Convert a JSON string to python object.
"""
from django.utils import simplejson
return simplejson.loads(json)
def to_json(self, obj):
"""
Convert a dict/list to JSON. Use simplejson.
"""
from django.utils import simplejson
return simplejson.dumps(obj)
def parse_opt(self, encode = 'utf-8', **opt):
"""
A method to parse the 'opt' argument and get a template.
It gets options as a keyword argument and parse them.
:param template : path to the template file.
:param html : raw html for the output.
:param text : raw text for the output.
:param json : raw json for the output.
:param xml : raw xml for the output.
:param script : raw java script for the output.
:param encode : encode for the output.
:param expires : expire date as a string.
"""
content = ''
template_path = ''
content_type = 'text/html; charset=%s' % encode
if opt.has_key('expires'):
hdrs['Expires'] = opt.get('expires')
if opt.has_key('html'):
content = opt.get('html').decode('utf-8')
elif opt.has_key('text'):
content_type = 'text/plain; charset=%s' % encode
content = opt.get('text')
elif opt.has_key('json'):
#content_type = 'text/plain; charset=%s' % encode
content_type = 'text/javascript; charset=%s' % encode
content = self.to_json(opt.get('json'))
elif opt.has_key('xml'):
content_type = 'text/xml; charset=%s' % encode
content = opt.get('xml')
elif opt.has_key('script'):
content_type = 'text/javascript; charset=%s' % encode
content = opt.get('script')
elif opt.has_key('template'):
tpname = opt.get('template')+self._template_ext
template_path = os.path.join(self._controller, tpname)
return content, template_path, content_type
def render(self, *html, **opt):
"""
A method to render output.
In BaseController, it uses App Engine's default Django template.
You may override this method when you make your own controller class
that uses other template engine.
It receives template string as non keyword argument, and
following arguments.
:param template : path to the template file.
:param html : raw html for the output.
:param text : raw text for the output.
:param json : raw json for the output.
:param xml : raw xml for the output.
:param script : raw java script for the output.
:param encode : encode for the output.
:param expires : expire date as a string.
:param context : the context dictionaly passed to template.
In case this argument doesn't exist, controller object will be used
as the context.
"""
hdrs = {}
content_type = 'text/html; charset=utf-8'
if html:
content = u''.join(html)
content_path = ''
template_path = ''
elif opt:
content, template_path, content_type = self.parse_opt(**opt)
context = opt.get('context', self.__dict__)
if isinstance(opt.get('values'), dict):
context.update(opt.get('values'))
# render content as a template
if template_path:
t = Template(template_path)
c = Context(context)
result = t.render(c)
elif content:
result = content
else:
raise Exception('Render type error')
hdrs['Content-Type'] = content_type
hdrs.update(opt.get('hdr', {}))
# pass the output to the response object
r = self.response
if hdrs:
for k, v in hdrs.items():
r.headers[k] = v
r.out.write(result)
self.has_rendered = True
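# --- Hedged usage sketch (editor's addition) ---
# From an action method, render() is called with one of the keyword options
# handled by parse_opt() above; the controller and values below are
# illustrative assumptions:
#
#     class EntryController(BaseController):
#         def show(self):
#             self.render(template='show', values={'title': 'Hello'})
#         def as_json(self):
#             self.render(json={'status': 'ok'})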
def put_cookies(self):
"""
A method to put cookies to the response,
called in dispatch function,
or after render(), redirect() or called etc.
"""
if self.post_cookie.keys():
c = self.post_cookie
cs = c.output().replace('Set-Cookie: ', '')
self.response.headers.add_header('Set-Cookie', cs)
def redirect(self, url, perm = False):
"""
A method to redirect response.
:param url: the URL to redirect.
"""
self.has_redirected = True
self.has_rendered = True
# dirty hack, make aha don't find the template
self.hnd.redirect(url, perm)
def respond_to(self, **blk):
"""
Respond with the appropriate content according to self.params['format'].
"""
if self.params.has_key('format') and \
blk.has_key(self.params['format']):
logging.error(self.params['format'])
blk[self.params['format']]()
# Helper methods for parameter nesting as in rails
def __appender(self,dict,arr,val):
if len(arr) > 1:
try:
dict[arr[0]]
except KeyError:
dict[arr[0]] = {}
return {arr[0]: self.__appender(dict[arr[0]],arr[1:],val)}
else:
dict[arr[0]] = val
return
def __nested_params(self, prm):
prm2 = {}
for param in prm:
parray = param.replace(']', '').split('[')
if len(parray) == 1:
parray = parray[0].split('-')
self.__appender(prm2, parray, prm[param])
return prm2
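# --- Hedged illustration (editor's addition) ---
# __nested_params() turns Rails-style bracketed keys into nested dicts; with
# hypothetical sample parameters:
#
#     {'user[name]': 'ann', 'user[age]': '7', 'q': 'x'}
#         -> {'user': {'name': 'ann', 'age': '7'}, 'q': 'x'}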
def main(): pass;
| {
"content_hash": "6729ba978011e7933fea0aad07995e27",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 80,
"avg_line_length": 34.49640287769784,
"alnum_prop": 0.5481751824817518,
"repo_name": "Letractively/aha-gae",
"id": "c140fc522f89bb6c76925cc0fe56bdde12c9e284",
"size": "9948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aha/controller/basecontroller.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "51818"
},
{
"name": "HTML",
"bytes": "29371"
},
{
"name": "JavaScript",
"bytes": "39684"
},
{
"name": "Makefile",
"bytes": "50"
},
{
"name": "Python",
"bytes": "417917"
}
],
"symlink_target": ""
} |
import os
import sys
from setuptools import setup, find_packages, Extension
import numpy as np
import direfl
packages = ['direfl']
if len(sys.argv) == 1:
sys.argv.append('install')
# reflmodule extension
if sys.platform == "darwin":
# Python is not finding C++ headers on Mac unless the
# minimum OSX version is bumped from the default 10.6 up
# to 10.10. Don't know if this is because of the mac
# setup (older development libraries not installed) or
# because of the anaconda build (targetted to 10.6) or
# some combination. Override by setting the deployment
# target on the command line. Curiously, Xcode can
# target c++ code to 10.7 on the same machine.
#os.environ.setdefault('MACOSX_DEPLOYMENT_TARGET', '10.10')
os.environ.setdefault('MACOSX_DEPLOYMENT_TARGET', '10.13')
def reflmodule_config():
sources = [os.path.join('direfl','api','lib',f)
for f in ("reflmodule.cc","methods.cc",
"src/reflectivity.cc","src/magnetic.cc",
"src/reflrough.cc","src/resolution.c")]
module = Extension('direfl.api.reflmodule',
sources=sources,
include_dirs=[np.get_include()],
language="c++",
)
return module
short_desc = "DiRefl (Direct Inversion Reflectometry) GUI application"
long_desc = """\
The Direct Inversion Reflectometry GUI application generates a
scattering length density (SLD) profile of a thin film or free form
sample using two neutron scattering datasets without the need to
perform a fit of the data. DiRefl also has a simulation capability
for creating datasets from a simple model description of the sample."""
setup(name='direfl',
description=short_desc,
version = direfl.__version__,
long_description=long_desc,
author='University of Maryland, DANSE Reflectometry Group',
author_email='pkienzle@nist.gov',
url='http://reflectometry.org/danse',
license='BSD style license',
platforms='Windows, Linux, MacOSX',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: Public Domain',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
],
packages = find_packages(),
package_data = direfl.package_data(),
scripts = ['bin/direfl'],
ext_modules = [reflmodule_config()],
)
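# --- Hedged note (editor's addition) ---
# Building the C++ reflmodule extension in place for development typically uses
# the standard setuptools command ``python setup.py build_ext --inplace``; the
# exact workflow this project expects is an assumption.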
| {
"content_hash": "80ce6f9e5a5869a8beb22354f4ee280a",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 71,
"avg_line_length": 36.273972602739725,
"alnum_prop": 0.6333081570996979,
"repo_name": "reflectometry/direfl",
"id": "f8c54256665224bc8bdbf65ec25ce23e7b9dce30",
"size": "2671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1005"
},
{
"name": "C",
"bytes": "17684"
},
{
"name": "C++",
"bytes": "45544"
},
{
"name": "Inno Setup",
"bytes": "7323"
},
{
"name": "Python",
"bytes": "447382"
},
{
"name": "Shell",
"bytes": "1265"
}
],
"symlink_target": ""
} |
"""Site services for use with a Web Site Process Bus."""
import os
import re
import signal as _signal
import sys
import time
import threading
import _thread
from cherrypy._cpcompat import text_or_bytes
from cherrypy._cpcompat import ntob
# _module__file__base is used by Autoreload to make
# absolute any filenames retrieved from sys.modules which are not
# already absolute paths. This is to work around Python's quirk
# of importing the startup script and using a relative filename
# for it in sys.modules.
#
# Autoreload examines sys.modules afresh every time it runs. If an application
# changes the current directory by executing os.chdir(), then the next time
# Autoreload runs, it will not be able to find any filenames which are
# not absolute paths, because the current directory is not the same as when the
# module was first imported. Autoreload will then wrongly conclude the file
# has "changed", and initiate the shutdown/re-exec sequence.
# See ticket #917.
# For this workaround to have a decent probability of success, this module
# needs to be imported as early as possible, before the app has much chance
# to change the working directory.
_module__file__base = os.getcwd()
class SimplePlugin(object):
"""Plugin base class which auto-subscribes methods for known channels."""
bus = None
"""A :class:`Bus <cherrypy.process.wspbus.Bus>`, usually cherrypy.engine.
"""
def __init__(self, bus):
self.bus = bus
def subscribe(self):
"""Register this object as a (multi-channel) listener on the bus."""
for channel in self.bus.listeners:
# Subscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.subscribe(channel, method)
def unsubscribe(self):
"""Unregister this object as a listener on the bus."""
for channel in self.bus.listeners:
# Unsubscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.unsubscribe(channel, method)
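# --- Hedged example (editor's addition) ---
# SimplePlugin auto-subscribes any method whose name matches a bus channel
# ('start', 'stop', 'exit', ...). A minimal custom plugin sketch; the class
# name and log messages are assumptions for illustration.
class _ExampleHeartbeat(SimplePlugin):
    def start(self):
        self.bus.log('heartbeat plugin started')
    def stop(self):
        self.bus.log('heartbeat plugin stopped')
# Usage sketch: _ExampleHeartbeat(cherrypy.engine).subscribe()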
class SignalHandler(object):
"""Register bus channels (and listeners) for system signals.
You can modify what signals your application listens for, and what it does
when it receives signals, by modifying :attr:`SignalHandler.handlers`,
a dict of {signal name: callback} pairs. The default set is::
handlers = {'SIGTERM': self.bus.exit,
'SIGHUP': self.handle_SIGHUP,
'SIGUSR1': self.bus.graceful,
}
The :func:`SignalHandler.handle_SIGHUP` method calls
:func:`bus.restart()<cherrypy.process.wspbus.Bus.restart>`
if the process is daemonized, but
:func:`bus.exit()<cherrypy.process.wspbus.Bus.exit>`
if the process is attached to a TTY. This is because Unix window
managers tend to send SIGHUP to terminal windows when the user closes them.
Feel free to add signals which are not available on every platform.
The :class:`SignalHandler` will ignore errors raised from attempting
to register handlers for unknown signals.
"""
handlers = {}
"""A map from signal names (e.g. 'SIGTERM') to handlers (e.g. bus.exit)."""
signals = {}
"""A map from signal numbers to names."""
for k, v in vars(_signal).items():
if k.startswith('SIG') and not k.startswith('SIG_'):
signals[v] = k
del k, v
def __init__(self, bus):
self.bus = bus
# Set default handlers
self.handlers = {'SIGTERM': self.bus.exit,
'SIGHUP': self.handle_SIGHUP,
'SIGUSR1': self.bus.graceful,
}
if sys.platform[:4] == 'java':
del self.handlers['SIGUSR1']
self.handlers['SIGUSR2'] = self.bus.graceful
self.bus.log('SIGUSR1 cannot be set on the JVM platform. '
'Using SIGUSR2 instead.')
self.handlers['SIGINT'] = self._jython_SIGINT_handler
self._previous_handlers = {}
# used to determine if the process is a daemon in `self._is_daemonized`
self._original_pid = os.getpid()
def _jython_SIGINT_handler(self, signum=None, frame=None):
# See http://bugs.jython.org/issue1313
self.bus.log('Keyboard Interrupt: shutting down bus')
self.bus.exit()
def _is_daemonized(self):
"""Return boolean indicating if the current process is
running as a daemon.
The criteria to determine the `daemon` condition is to verify
if the current pid is not the same as the one that got used on
the initial construction of the plugin *and* the stdin is not
connected to a terminal.
Checking the tty alone is not enough when the plugin
is executing inside another process, as in a CI tool
(Buildbot, Jenkins).
"""
return (
self._original_pid != os.getpid() and
not os.isatty(sys.stdin.fileno())
)
def subscribe(self):
"""Subscribe self.handlers to signals."""
for sig, func in self.handlers.items():
try:
self.set_handler(sig, func)
except ValueError:
pass
def unsubscribe(self):
"""Unsubscribe self.handlers from signals."""
for signum, handler in self._previous_handlers.items():
signame = self.signals[signum]
if handler is None:
self.bus.log('Restoring %s handler to SIG_DFL.' % signame)
handler = _signal.SIG_DFL
else:
self.bus.log('Restoring %s handler %r.' % (signame, handler))
try:
our_handler = _signal.signal(signum, handler)
if our_handler is None:
self.bus.log('Restored old %s handler %r, but our '
'handler was not registered.' %
(signame, handler), level=30)
except ValueError:
self.bus.log('Unable to restore %s handler %r.' %
(signame, handler), level=40, traceback=True)
def set_handler(self, signal, listener=None):
"""Subscribe a handler for the given signal (number or name).
If the optional 'listener' argument is provided, it will be
subscribed as a listener for the given signal's channel.
If the given signal name or number is not available on the current
platform, ValueError is raised.
"""
if isinstance(signal, text_or_bytes):
signum = getattr(_signal, signal, None)
if signum is None:
raise ValueError('No such signal: %r' % signal)
signame = signal
else:
try:
signame = self.signals[signal]
except KeyError:
raise ValueError('No such signal: %r' % signal)
signum = signal
prev = _signal.signal(signum, self._handle_signal)
self._previous_handlers[signum] = prev
if listener is not None:
self.bus.log('Listening for %s.' % signame)
self.bus.subscribe(signame, listener)
def _handle_signal(self, signum=None, frame=None):
"""Python signal handler (self.set_handler subscribes it for you)."""
signame = self.signals[signum]
self.bus.log('Caught signal %s.' % signame)
self.bus.publish(signame)
def handle_SIGHUP(self):
"""Restart if daemonized, else exit."""
if self._is_daemonized():
self.bus.log('SIGHUP caught while daemonized. Restarting.')
self.bus.restart()
else:
# not daemonized (may be foreground or background)
self.bus.log('SIGHUP caught but not daemonized. Exiting.')
self.bus.exit()
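# --- Hedged usage sketch (editor's addition) ---
# The class docstring above describes overriding entries in ``handlers`` before
# subscribing; the bus argument and the SIGUSR1 remapping are assumptions for
# illustration.
def _example_signal_setup(bus):
    """Attach a SignalHandler to ``bus`` and remap SIGUSR1 to a full restart."""
    handler = SignalHandler(bus)
    handler.handlers['SIGUSR1'] = bus.restart  # default is bus.graceful
    handler.subscribe()
    return handler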
try:
import pwd
import grp
except ImportError:
pwd, grp = None, None
class DropPrivileges(SimplePlugin):
"""Drop privileges. uid/gid arguments not available on Windows.
Special thanks to `Gavin Baker
<http://antonym.org/2005/12/dropping-privileges-in-python.html>`_
"""
def __init__(self, bus, umask=None, uid=None, gid=None):
SimplePlugin.__init__(self, bus)
self.finalized = False
self.uid = uid
self.gid = gid
self.umask = umask
@property
def uid(self):
"""The uid under which to run. Availability: Unix."""
return self._uid
@uid.setter
def uid(self, val):
if val is not None:
if pwd is None:
self.bus.log('pwd module not available; ignoring uid.',
level=30)
val = None
elif isinstance(val, text_or_bytes):
val = pwd.getpwnam(val)[2]
self._uid = val
@property
def gid(self):
"""The gid under which to run. Availability: Unix."""
return self._gid
@gid.setter
def gid(self, val):
if val is not None:
if grp is None:
self.bus.log('grp module not available; ignoring gid.',
level=30)
val = None
elif isinstance(val, text_or_bytes):
val = grp.getgrnam(val)[2]
self._gid = val
@property
def umask(self):
"""The default permission mode for newly created files and directories.
Usually expressed in octal format, for example, ``0644``.
Availability: Unix, Windows.
"""
return self._umask
@umask.setter
def umask(self, val):
if val is not None:
try:
os.umask
except AttributeError:
self.bus.log('umask function not available; ignoring umask.',
level=30)
val = None
self._umask = val
def start(self):
# uid/gid
def current_ids():
"""Return the current (uid, gid) if available."""
name, group = None, None
if pwd:
name = pwd.getpwuid(os.getuid())[0]
if grp:
group = grp.getgrgid(os.getgid())[0]
return name, group
if self.finalized:
if not (self.uid is None and self.gid is None):
self.bus.log('Already running as uid: %r gid: %r' %
current_ids())
else:
if self.uid is None and self.gid is None:
if pwd or grp:
self.bus.log('uid/gid not set', level=30)
else:
self.bus.log('Started as uid: %r gid: %r' % current_ids())
if self.gid is not None:
os.setgid(self.gid)
os.setgroups([])
if self.uid is not None:
os.setuid(self.uid)
self.bus.log('Running as uid: %r gid: %r' % current_ids())
# umask
if self.finalized:
if self.umask is not None:
self.bus.log('umask already set to: %03o' % self.umask)
else:
if self.umask is None:
self.bus.log('umask not set', level=30)
else:
old_umask = os.umask(self.umask)
self.bus.log('umask old: %03o, new: %03o' %
(old_umask, self.umask))
self.finalized = True
# This is slightly higher than the priority for server.start
# in order to facilitate the most common use: starting on a low
# port (which requires root) and then dropping to another user.
start.priority = 77
class Daemonizer(SimplePlugin):
"""Daemonize the running script.
Use this with a Web Site Process Bus via::
Daemonizer(bus).subscribe()
When this component finishes, the process is completely decoupled from
the parent environment. Please note that when this component is used,
the return code from the parent process will still be 0 if a startup
error occurs in the forked children. Errors in the initial daemonizing
process still return proper exit codes. Therefore, if you use this
plugin to daemonize, don't use the return code as an accurate indicator
of whether the process fully started. In fact, that return code only
indicates if the process successfully finished the first fork.
"""
def __init__(self, bus, stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null'):
SimplePlugin.__init__(self, bus)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.finalized = False
def start(self):
if self.finalized:
self.bus.log('Already daemonized.')
# forking has issues with threads:
# http://www.opengroup.org/onlinepubs/000095399/functions/fork.html
# "The general problem with making fork() work in a multi-threaded
# world is what to do with all of the threads..."
# So we check for active threads:
if threading.active_count() != 1:
self.bus.log('There are %r active threads. '
'Daemonizing now may cause strange failures.' %
threading.enumerate(), level=30)
self.daemonize(self.stdin, self.stdout, self.stderr, self.bus.log)
self.finalized = True
start.priority = 65
@staticmethod
def daemonize(
stdin='/dev/null', stdout='/dev/null', stderr='/dev/null',
logger=lambda msg: None):
# See http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
# (or http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7)
# and http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
# Finish up with the current stdout/stderr
sys.stdout.flush()
sys.stderr.flush()
error_tmpl = (
'{sys.argv[0]}: fork #{n} failed: ({exc.errno}) {exc.strerror}\n'
)
for fork in range(2):
msg = ['Forking once.', 'Forking twice.'][fork]
try:
pid = os.fork()
if pid > 0:
# This is the parent; exit.
logger(msg)
os._exit(0)
except OSError as exc:
# Python raises OSError rather than returning negative numbers.
sys.exit(error_tmpl.format(sys=sys, exc=exc, n=fork + 1))
if fork == 0:
os.setsid()
os.umask(0)
si = open(stdin, 'r')
so = open(stdout, 'a+')
se = open(stderr, 'a+')
# os.dup2(fd, fd2) will close fd2 if necessary,
# so we don't explicitly close stdin/out/err.
# See http://docs.python.org/lib/os-fd-ops.html
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
logger('Daemonized to PID: %s' % os.getpid())
class PIDFile(SimplePlugin):
"""Maintain a PID file via a WSPBus."""
def __init__(self, bus, pidfile):
SimplePlugin.__init__(self, bus)
self.pidfile = pidfile
self.finalized = False
def start(self):
pid = os.getpid()
if self.finalized:
self.bus.log('PID %r already written to %r.' % (pid, self.pidfile))
else:
with open(self.pidfile, 'wb') as f:
f.write(ntob('%s\n' % pid, 'utf8'))
self.bus.log('PID %r written to %r.' % (pid, self.pidfile))
self.finalized = True
start.priority = 70
def exit(self):
try:
os.remove(self.pidfile)
self.bus.log('PID file removed: %r.' % self.pidfile)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pass
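# --- Hedged usage sketch (editor's addition) ---
# The engine reference and pidfile path are assumptions for illustration:
#     PIDFile(cherrypy.engine, '/var/run/myapp.pid').subscribe()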
class PerpetualTimer(threading.Timer):
"""A responsive subclass of threading.Timer whose run() method repeats.
Use this timer only when you really need a very interruptible timer;
this checks its 'finished' condition up to 20 times a second, which can
result in pretty high CPU usage.
"""
def __init__(self, *args, **kwargs):
"Override parent constructor to allow 'bus' to be provided."
self.bus = kwargs.pop('bus', None)
super(PerpetualTimer, self).__init__(*args, **kwargs)
def run(self):
while True:
self.finished.wait(self.interval)
if self.finished.isSet():
return
try:
self.function(*self.args, **self.kwargs)
except Exception:
if self.bus:
self.bus.log(
'Error in perpetual timer thread function %r.' %
self.function, level=40, traceback=True)
# Quit on first error to avoid massive logs.
raise
class BackgroundTask(threading.Thread):
"""A subclass of threading.Thread whose run() method repeats.
Use this class for most repeating tasks. It uses time.sleep() to wait
for each interval, which isn't very responsive; that is, even if you call
self.cancel(), you'll have to wait until the sleep() call finishes before
the thread stops. To compensate, it defaults to being daemonic, which means
it won't delay stopping the whole process.
"""
def __init__(self, interval, function, args=[], kwargs={}, bus=None):
super(BackgroundTask, self).__init__()
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.running = False
self.bus = bus
# default to daemonic
self.daemon = True
def cancel(self):
self.running = False
def run(self):
self.running = True
while self.running:
time.sleep(self.interval)
if not self.running:
return
try:
self.function(*self.args, **self.kwargs)
except Exception:
if self.bus:
self.bus.log('Error in background task thread function %r.'
% self.function, level=40, traceback=True)
# Quit on first error to avoid massive logs.
raise
class Monitor(SimplePlugin):
"""WSPBus listener to periodically run a callback in its own thread."""
callback = None
"""The function to call at intervals."""
frequency = 60
"""The time in seconds between callback runs."""
thread = None
"""A :class:`BackgroundTask<cherrypy.process.plugins.BackgroundTask>`
thread.
"""
def __init__(self, bus, callback, frequency=60, name=None):
SimplePlugin.__init__(self, bus)
self.callback = callback
self.frequency = frequency
self.thread = None
self.name = name
def start(self):
"""Start our callback in its own background thread."""
if self.frequency > 0:
threadname = self.name or self.__class__.__name__
if self.thread is None:
self.thread = BackgroundTask(self.frequency, self.callback,
bus=self.bus)
self.thread.name = threadname
self.thread.start()
self.bus.log('Started monitor thread %r.' % threadname)
else:
self.bus.log('Monitor thread %r already started.' % threadname)
start.priority = 70
def stop(self):
"""Stop our callback's background task thread."""
if self.thread is None:
self.bus.log('No thread running for %s.' %
(self.name or self.__class__.__name__))
else:
if self.thread is not threading.current_thread():
name = self.thread.name
self.thread.cancel()
if not self.thread.daemon:
self.bus.log('Joining %r' % name)
self.thread.join()
self.bus.log('Stopped thread %r.' % name)
self.thread = None
def graceful(self):
"""Stop the callback's background task thread and restart it."""
self.stop()
self.start()
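# --- Hedged usage sketch (editor's addition) ---
# Monitor runs a callback on a BackgroundTask at a fixed frequency; the bus
# argument, interval and callback body below are assumptions for illustration.
def _example_monitor(bus, frequency=30):
    """Subscribe a Monitor that logs a message every ``frequency`` seconds."""
    def tick():
        bus.log('periodic maintenance tick')
    plugin = Monitor(bus, tick, frequency=frequency, name='ExampleMonitor')
    plugin.subscribe()
    return plugin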
class Autoreloader(Monitor):
"""Monitor which re-executes the process when files change.
This :ref:`plugin<plugins>` restarts the process (via :func:`os.execv`)
if any of the files it monitors change (or is deleted). By default, the
autoreloader monitors all imported modules; you can add to the
set by adding to ``autoreload.files``::
cherrypy.engine.autoreload.files.add(myFile)
If there are imported files you do *not* wish to monitor, you can
adjust the ``match`` attribute, a regular expression. For example,
to stop monitoring cherrypy itself::
cherrypy.engine.autoreload.match = r'^(?!cherrypy).+'
Like all :class:`Monitor<cherrypy.process.plugins.Monitor>` plugins,
the autoreload plugin takes a ``frequency`` argument. The default is
1 second; that is, the autoreloader will examine files once each second.
"""
files = None
"""The set of files to poll for modifications."""
frequency = 1
"""The interval in seconds at which to poll for modified files."""
match = '.*'
"""A regular expression by which to match filenames."""
def __init__(self, bus, frequency=1, match='.*'):
self.mtimes = {}
self.files = set()
self.match = match
Monitor.__init__(self, bus, self.run, frequency)
def start(self):
"""Start our own background task thread for self.run."""
if self.thread is None:
self.mtimes = {}
Monitor.start(self)
start.priority = 70
def sysfiles(self):
"""Return a Set of sys.modules filenames to monitor."""
search_mod_names = filter(
re.compile(self.match).match,
list(sys.modules.keys()),
)
mods = map(sys.modules.get, search_mod_names)
return set(filter(None, map(self._file_for_module, mods)))
@classmethod
def _file_for_module(cls, module):
"""Return the relevant file for the module."""
return (
cls._archive_for_zip_module(module)
or cls._file_for_file_module(module)
)
@staticmethod
def _archive_for_zip_module(module):
"""Return the archive filename for the module if relevant."""
try:
return module.__loader__.archive
except AttributeError:
pass
@classmethod
def _file_for_file_module(cls, module):
"""Return the file for the module."""
try:
return module.__file__ and cls._make_absolute(module.__file__)
except AttributeError:
pass
@staticmethod
def _make_absolute(filename):
"""Ensure filename is absolute to avoid effect of os.chdir."""
return filename if os.path.isabs(filename) else (
os.path.normpath(os.path.join(_module__file__base, filename))
)
def run(self):
"""Reload the process if registered files have been modified."""
for filename in self.sysfiles() | self.files:
if filename:
if filename.endswith('.pyc'):
filename = filename[:-1]
oldtime = self.mtimes.get(filename, 0)
if oldtime is None:
# Module with no .py file. Skip it.
continue
try:
mtime = os.stat(filename).st_mtime
except OSError:
# Either a module with no .py file, or it's been deleted.
mtime = None
if filename not in self.mtimes:
# If a module has no .py file, this will be None.
self.mtimes[filename] = mtime
else:
if mtime is None or mtime > oldtime:
# The file has been deleted or modified.
self.bus.log('Restarting because %s changed.' %
filename)
self.thread.cancel()
self.bus.log('Stopped thread %r.' %
self.thread.name)
self.bus.restart()
return
class ThreadManager(SimplePlugin):
"""Manager for HTTP request threads.
If you have control over thread creation and destruction, publish to
the 'acquire_thread' and 'release_thread' channels (for each thread).
This will register/unregister the current thread and publish to
'start_thread' and 'stop_thread' listeners in the bus as needed.
If threads are created and destroyed by code you do not control
(e.g., Apache), then, at the beginning of every HTTP request,
publish to 'acquire_thread' only. You should not publish to
'release_thread' in this case, since you do not know whether
the thread will be re-used or not. The bus will call
'stop_thread' listeners for you when it stops.
"""
threads = None
"""A map of {thread ident: index number} pairs."""
def __init__(self, bus):
self.threads = {}
SimplePlugin.__init__(self, bus)
self.bus.listeners.setdefault('acquire_thread', set())
self.bus.listeners.setdefault('start_thread', set())
self.bus.listeners.setdefault('release_thread', set())
self.bus.listeners.setdefault('stop_thread', set())
def acquire_thread(self):
"""Run 'start_thread' listeners for the current thread.
If the current thread has already been seen, any 'start_thread'
listeners will not be run again.
"""
thread_ident = _thread.get_ident()
if thread_ident not in self.threads:
# We can't just use get_ident as the thread ID
# because some platforms reuse thread ID's.
i = len(self.threads) + 1
self.threads[thread_ident] = i
self.bus.publish('start_thread', i)
def release_thread(self):
"""Release the current thread and run 'stop_thread' listeners."""
thread_ident = _thread.get_ident()
i = self.threads.pop(thread_ident, None)
if i is not None:
self.bus.publish('stop_thread', i)
def stop(self):
"""Release all threads and run all 'stop_thread' listeners."""
for thread_ident, i in self.threads.items():
self.bus.publish('stop_thread', i)
self.threads.clear()
graceful = stop
| {
"content_hash": "268bd28adadb6637ab2a6a8ec237a1de",
"timestamp": "",
"source": "github",
"line_count": 755,
"max_line_length": 79,
"avg_line_length": 35.557615894039735,
"alnum_prop": 0.5768084630857483,
"repo_name": "cherrypy/cherrypy",
"id": "e96fb1ce2a7f732b1f0c3d1431c521c33fc2d675",
"size": "26846",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "cherrypy/process/plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17"
},
{
"name": "HTML",
"bytes": "510"
},
{
"name": "Python",
"bytes": "984166"
}
],
"symlink_target": ""
} |
"""Test Hierarchical Deterministic wallet function."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
)
import shutil
import os
class WalletHDTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ['-keypool=0']]
def run_test (self):
tmpdir = self.options.tmpdir
# Make sure can't switch off usehd after wallet creation
self.stop_node(1)
self.assert_start_raises_init_error(1, ['-usehd=0'], 'already existing HD wallet')
self.start_node(1)
connect_nodes_bi(self.nodes, 0, 1)
# Make sure we use hd, keep masterkeyid
masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
assert_equal(len(masterkeyid), 40)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'")  # first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
#self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
# Derive some HD addresses and remember the last
# Also send funds to each address
self.nodes[0].generate(101)
hd_add = None
num_hd_adds = 300
for i in range(num_hd_adds):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].validateaddress(hd_add)
assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i)+"'")
assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'")  # second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
self.log.info("Restore backup ...")
self.stop_node(1)
# we need to delete the complete regtest directory
# otherwise node1 would auto-recover all funds and flag the keypool keys as used
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks"))
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate"))
shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallets/wallet.dat"))
self.start_node(1)
# Assert that derivation is deterministic
hd_add_2 = None
for _ in range(num_hd_adds):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_)+"'")
assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
assert_equal(hd_add, hd_add_2)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
# Needs rescan
self.stop_node(1)
self.start_node(1, extra_args=self.extra_args[1] + ['-rescan'])
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
# Try an RPC-based rescan
self.stop_node(1)
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks"))
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate"))
shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallet.dat"))
self.start_node(1, extra_args=self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
out = self.nodes[1].rescanblockchain(0, 1)
assert_equal(out['start_height'], 0)
assert_equal(out['stop_height'], 1)
out = self.nodes[1].rescanblockchain()
assert_equal(out['start_height'], 0)
assert_equal(out['stop_height'], self.nodes[1].getblockcount())
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
# send a tx and make sure it's using the internal chain for the change output
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:7], "m/0'/1'")
if __name__ == '__main__':
WalletHDTest().main()
| {
"content_hash": "e4f6827d75c8af4b8f863a1b55f85480",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 113,
"avg_line_length": 42.26271186440678,
"alnum_prop": 0.6174052536595147,
"repo_name": "ppcoin/ppcoin",
"id": "9f0e9acb47768ac58601a17d677f30a7d376edaa",
"size": "5201",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "test/functional/wallet_hd.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7948"
},
{
"name": "C++",
"bytes": "1432773"
},
{
"name": "Groff",
"bytes": "12841"
},
{
"name": "Makefile",
"bytes": "4292"
},
{
"name": "NSIS",
"bytes": "6134"
},
{
"name": "Objective-C++",
"bytes": "2463"
},
{
"name": "Python",
"bytes": "50532"
},
{
"name": "QMake",
"bytes": "10567"
},
{
"name": "Shell",
"bytes": "1849"
}
],
"symlink_target": ""
} |
import os
import shutil
import zipfile
import tarfile
import natsort
import tempfile
from codecs import open
from . import util
DEFAULT_PROJECT_DESCRIPTION = 'No project description'
def sort_by_version(x):
# See http://natsort.readthedocs.io/en/stable/examples.html
return x['version'].replace('.', '~') + 'z'
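# A small illustration (hypothetical version strings): the trailing 'z' makes
# pre-release suffixes such as 'rc1' sort before the final release, and natsort
# orders the numeric parts naturally ('1.2' before '1.10').
#
#     import natsort
#     vs = [{'version': v} for v in ('1.10', '1.0', '1.2', '1.0rc1')]
#     [d['version'] for d in natsort.natsorted(vs, key=sort_by_version)]
#     # -> ['1.0rc1', '1.0', '1.2', '1.10']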
def _is_valid_doc_version(folder):
"""
Test if a version folder contains valid documentation.
A version folder contains documentation if:
- it is a directory
- it contains an `index.html` file
"""
if not os.path.isdir(folder):
return False
if not os.path.exists(os.path.join(folder, 'index.html')):
return False
return True
def _get_proj_dict(docfiles_dir, proj_dir, link_root):
"""
Look up the configuration of a project.
The project configuration is a :class:`dict` with the following data:
- "name": the name of the project
- "description": the description of the project
- "versions": the list of the versions of the documentation. For each
version, there is a :class:`dict` with:
- "version": name of the version
- "link": the relative url of the version
If no valid versions have been found, returns ``None``.
"""
def join_with_default_path(*a):
return os.path.join(docfiles_dir, proj_dir, *a)
allpaths = os.listdir(join_with_default_path())
versions = [
dict(version=p, link='%s/%s/%s/index.html' % (link_root, proj_dir, p))
for p in allpaths if _is_valid_doc_version(join_with_default_path(p))
]
if len(versions) == 0:
return None
versions = natsort.natsorted(versions, key=sort_by_version)
descr = DEFAULT_PROJECT_DESCRIPTION
if 'description.txt' in allpaths:
dpath = join_with_default_path('description.txt')
with open(dpath, 'r', encoding='utf-8') as f:
descr = f.read().strip()
return {'name': proj_dir, 'versions': versions, 'description': descr}
def parse_docfiles(docfiles_dir, link_root):
"""
Create the list of the projects.
The list of projects is computed by walking the `docfiles_dir` and
searching for project paths (<project-name>/<version>/index.html)
"""
if not os.path.exists(docfiles_dir):
return {}
projects = list()
for folder in natsort.natsorted(os.listdir(docfiles_dir), key=str.lower):
if not os.path.isdir(os.path.join(docfiles_dir, folder)):
continue
project = _get_proj_dict(docfiles_dir, folder, link_root)
if project is not None:
projects.append(project)
return projects
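# Illustrative only (paths are hypothetical): given the tree below,
# parse_docfiles('/srv/docfiles', '/static') would return a list with one project
# dict for "myproj" holding versions 1.0 and 1.1, each linking to
# /static/myproj/<version>/index.html, with the description read from description.txt.
#
#     /srv/docfiles/myproj/1.0/index.html
#     /srv/docfiles/myproj/1.1/index.html
#     /srv/docfiles/myproj/description.txt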
def find_root_dir(compressed_file, file_ext=".html"):
"""
Determines the documentation root directory by searching the top-level index file.
"""
if isinstance(compressed_file, zipfile.ZipFile):
index_files = [member.filename for member in compressed_file.infolist()
if not member.is_dir() and os.path.basename(member.filename) == f"index{file_ext}"]
elif isinstance(compressed_file, tarfile.TarFile):
index_files = [member.name for member in compressed_file.getmembers()
if member.isfile() and os.path.basename(member.name) == f"index{file_ext}"]
else:
raise TypeError(f"Invalid archive file type: {type(compressed_file)}")
if not index_files:
raise FileNotFoundError("Failed to find root index file!")
root_index_file = sorted(index_files, key=lambda filename: len(filename.split(os.sep)))[0]
return os.path.dirname(root_index_file)
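# Sketch of the behaviour (hypothetical archive contents): for a zip containing
# "docs/index.html" and "docs/api/index.html", the shallowest index file wins, so
# find_root_dir(zipfile.ZipFile('docs.zip')) would return "docs".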
def unpack_project(uploaded_file, proj_metadata, docfiles_dir):
projdir = os.path.join(docfiles_dir, proj_metadata['name'])
verdir = os.path.join(projdir, proj_metadata['version'])
if not os.path.isdir(verdir):
os.makedirs(verdir)
# Overwrite the project description only if a (non-empty) new one has been
# provided
descr = proj_metadata.get('description', '')
if len(descr) > 0:
descrpath = os.path.join(projdir, 'description.txt')
with open(descrpath, 'w', encoding='utf-8') as f:
f.write(descr)
# This is insecure, but we only accept uploads from trusted sources.
with util.FileExpander(uploaded_file) as compressed_file:
# Determine documentation root dir by finding top-level index file
root_dir = find_root_dir(compressed_file)
# Extract full archive to temporary directory
temp_dir = tempfile.mkdtemp()
compressed_file.extractall(temp_dir)
# Then, only move root directory to target dir
shutil.rmtree(verdir) # clear possibly existing target dir
shutil.move(os.path.join(temp_dir, root_dir), verdir) # only move documentation root dir
if os.path.isdir(temp_dir): # cleanup temporary directory (if it still exists)
shutil.rmtree(temp_dir)
def valid_name(s):
"""See readme for what's valid.
:type s: str
"""
for c in s:
if not (c.isalnum() or c in ' -_'):
return False
return True
def valid_version(s):
"""See readme for what's valid.
:type s: str
"""
for c in s:
if not (c.isalnum() or c == '.'):
return False
return True
def delete_files(name, version, docfiles_dir, entire_project=False):
remove = os.path.join(docfiles_dir, name)
if not entire_project:
remove = os.path.join(remove, version)
if os.path.exists(remove):
shutil.rmtree(remove)
def _has_latest(versions):
return any(v['version'] == 'latest' for v in versions)
def insert_link_to_latest(projects, template):
"""For each project in ``projects``,
will append a "latest" version that links to a certain location
(should not be to static files).
Will not add a "latest" version if it already exists.
:param projects: Project dicts to mutate.
:param template: String to turn into a link.
Should have a ``%(project)s`` that will be replaced with the project name.
"""
for p in projects:
if _has_latest(p['versions']):
continue
link = template % dict(project=p['name'])
p['versions'].append(dict(version='latest', link=link))
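# A minimal usage sketch (the template URL is made up):
#
#     projects = parse_docfiles('/srv/docfiles', '/static')
#     insert_link_to_latest(projects, '/latest/%(project)s/')
#     # each project dict now contains a {'version': 'latest', 'link': '/latest/<name>/'}
#     # entry, unless it already provided a 'latest' version of its own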
| {
"content_hash": "028e08e9f850d68cecf0627a3dc6b312",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 106,
"avg_line_length": 32.55958549222798,
"alnum_prop": 0.6441756842775302,
"repo_name": "rgalanakis/hostthedocs",
"id": "8105baf46f7b68c4276c358ec7b3657936273d43",
"size": "6284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hostthedocs/filekeeper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "301"
},
{
"name": "HTML",
"bytes": "4849"
},
{
"name": "Python",
"bytes": "33847"
}
],
"symlink_target": ""
} |
import sys
import os
import registry_xml
template_gl_enums_header = """// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {data_source_name}.
//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// gl_enum_utils_autogen.h:
// mapping of GLenum value to string.
# ifndef LIBANGLE_GL_ENUM_UTILS_AUTOGEN_H_
# define LIBANGLE_GL_ENUM_UTILS_AUTOGEN_H_
namespace gl
{{
enum class GLenumGroup
{{
{gl_enum_groups}
}};
}} // namespace gl
# endif // LIBANGLE_GL_ENUM_UTILS_AUTOGEN_H_
"""
template_gl_enums_source = """// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {data_source_name}.
//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// gl_enum_utils_autogen.cpp:
// mapping of GLenum value to string.
#include "libANGLE/capture/gl_enum_utils_autogen.h"
#include "libANGLE/capture/gl_enum_utils.h"
namespace gl
{{
namespace
{{
const char *UnknownGLenumToString(unsigned int value)
{{
constexpr size_t kBufferSize = 64;
static thread_local char sBuffer[kBufferSize];
snprintf(sBuffer, kBufferSize, "0x%04X", value);
return sBuffer;
}}
}} // anonymous namespace
const char *GLenumToString(GLenumGroup enumGroup, unsigned int value)
{{
switch (enumGroup)
{{
{gl_enums_value_to_string_table}
default:
return UnknownGLenumToString(value);
}}
}}
}} // namespace gl
"""
template_enum_group_case = """case GLenumGroup::{group_name}: {{
switch (value) {{
{inner_group_cases}
default:
return UnknownGLenumToString(value);
}}
}}
"""
template_enum_value_to_string_case = """case {value}: return {name};"""
exclude_gl_enums = {
'GL_NO_ERROR', 'GL_TIMEOUT_IGNORED', 'GL_INVALID_INDEX', 'GL_VERSION_ES_CL_1_0',
'GL_VERSION_ES_CM_1_1', 'GL_VERSION_ES_CL_1_1'
}
exclude_gl_enum_groups = {'SpecialNumbers'}
def dump_value_to_string_mapping(gl_enum_in_groups, exporting_enums):
exporting_groups = list()
for group_name, inner_mapping in gl_enum_in_groups.items():
string_value_pairs = list(filter(lambda x: x[0] in exporting_enums, inner_mapping.items()))
if not string_value_pairs:
continue
# sort by value (then name)
string_value_pairs.sort(key=lambda x: (x[1], x[0]))
# remove all duplicate values from the pairs list
# some value may have more than one GLenum mapped to them, such as:
# GL_DRAW_FRAMEBUFFER_BINDING and GL_FRAMEBUFFER_BINDING
# GL_BLEND_EQUATION_RGB and GL_BLEND_EQUATION
# it is safe to output either one of them; for simplicity just keep
# the one that comes first in the sorted list
exporting_string_value_pairs = list()
for index, pair in enumerate(string_value_pairs):
if index == 0 or pair[1] != string_value_pairs[index - 1][1]:
exporting_string_value_pairs.append(pair)
inner_code_block = "\n".join([
template_enum_value_to_string_case.format(
value='0x%X' % value,
name='"%s"' % name,
) for name, value in exporting_string_value_pairs
])
exporting_groups.append((group_name, inner_code_block))
return "\n".join([
template_enum_group_case.format(
group_name=group_name,
inner_group_cases=inner_code_block,
) for group_name, inner_code_block in sorted(exporting_groups, key=lambda x: x[0])
])
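# Illustrative shape of the generated C++ (the group name below is hypothetical):
# for the duplicate pair GL_BLEND_EQUATION / GL_BLEND_EQUATION_RGB (both 0x8009),
# only the first name in the sorted pairs is emitted, so each case label stays unique.
#
#     case GLenumGroup::BlendEquationMode: {
#         switch (value) {
#             case 0x8009: return "GL_BLEND_EQUATION";
#             ...
#         }
#     }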
def main(header_output_path, source_output_path):
xml = registry_xml.RegistryXML('gl.xml', 'gl_angle_ext.xml')
# build a map from GLenum name to its value
all_gl_enums = dict()
for enums_node in xml.root.findall('enums'):
for enum in enums_node.findall('enum'):
name = enum.attrib['name']
value = int(enum.attrib['value'], base=16)
all_gl_enums[name] = value
# Parse groups of GLenums to build a {group, name} -> value mapping.
gl_enum_in_groups = dict()
enums_has_group = set()
for enums_group_node in xml.root.findall('groups/group'):
group_name = enums_group_node.attrib['name']
if group_name in exclude_gl_enum_groups:
continue
if group_name not in gl_enum_in_groups:
gl_enum_in_groups[group_name] = dict()
for enum_node in enums_group_node.findall('enum'):
enum_name = enum_node.attrib['name']
enums_has_group.add(enum_name)
gl_enum_in_groups[group_name][enum_name] = all_gl_enums[enum_name]
# Find relevant GLenums according to enabled APIs and extensions.
exporting_enums = set()
# export all the apis
xpath = "./feature[@api='gles2']/require/enum"
for enum_tag in xml.root.findall(xpath):
enum_name = enum_tag.attrib['name']
if enum_name not in exclude_gl_enums:
exporting_enums.add(enum_name)
for extension in registry_xml.supported_extensions:
xpath = "./extensions/extension[@name='%s']/require/enum" % extension
for enum_tag in xml.root.findall(xpath):
enum_name = enum_tag.attrib['name']
if enum_name not in exclude_gl_enums:
exporting_enums.add(enum_name)
# For enums that do not have a group, add them to a default group
default_group_name = registry_xml.default_enum_group_name
gl_enum_in_groups[default_group_name] = dict()
default_group = gl_enum_in_groups[default_group_name]
for enum_name in exporting_enums:
if enum_name not in enums_has_group:
default_group[enum_name] = all_gl_enums[enum_name]
# Write GLenum groups into the header file.
header_content = template_gl_enums_header.format(
script_name=os.path.basename(sys.argv[0]),
data_source_name="gl.xml and gl_angle_ext.xml",
gl_enum_groups=',\n'.join(sorted(gl_enum_in_groups.keys())))
header_output_path = registry_xml.script_relative(header_output_path)
with open(header_output_path, 'w') as f:
f.write(header_content)
# Write mapping to source file
gl_enums_value_to_string_table = dump_value_to_string_mapping(gl_enum_in_groups,
exporting_enums)
source_content = template_gl_enums_source.format(
script_name=os.path.basename(sys.argv[0]),
data_source_name="gl.xml and gl_angle_ext.xml",
gl_enums_value_to_string_table=gl_enums_value_to_string_table,
)
source_output_path = registry_xml.script_relative(source_output_path)
with open(source_output_path, 'w') as f:
f.write(source_content)
return 0
if __name__ == '__main__':
inputs = [
'gl.xml',
'gl_angle_ext.xml',
'registry_xml.py',
]
gl_enum_utils_autogen_base_path = '../src/libANGLE/capture/gl_enum_utils_autogen'
outputs = [
gl_enum_utils_autogen_base_path + '.h',
gl_enum_utils_autogen_base_path + '.cpp',
]
if len(sys.argv) > 1:
if sys.argv[1] == 'inputs':
print(','.join(inputs))
elif sys.argv[1] == 'outputs':
print(','.join(outputs))
else:
sys.exit(
main(
registry_xml.script_relative(outputs[0]),
registry_xml.script_relative(outputs[1])))
| {
"content_hash": "332cfb6f04242d18966445f70f0039b3",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 99,
"avg_line_length": 33.66222222222222,
"alnum_prop": 0.6279376815421178,
"repo_name": "ppy/angle",
"id": "f849a7252b4d7242c80a76b8b412b794ae4226f2",
"size": "7922",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/gen_gl_enum_utils.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "17281"
},
{
"name": "C",
"bytes": "562758"
},
{
"name": "C++",
"bytes": "7776807"
},
{
"name": "Lex",
"bytes": "26383"
},
{
"name": "Objective-C",
"bytes": "18506"
},
{
"name": "Objective-C++",
"bytes": "25649"
},
{
"name": "PostScript",
"bytes": "989"
},
{
"name": "Python",
"bytes": "61989"
},
{
"name": "Shell",
"bytes": "1461"
},
{
"name": "Yacc",
"bytes": "61666"
}
],
"symlink_target": ""
} |
import base64
import collections
import json
from contextlib import nested
import mock
import os
import random
import string
import zipfile
import re
import unittest
from click.exceptions import ClickException
from lambda_packages import lambda_packages
from .utils import placebo_session, patch_open
from zappa.cli import ZappaCLI, shamelessly_promote
from zappa.ext.django_zappa import get_django_wsgi
from zappa.handler import LambdaHandler, lambda_handler
from zappa.letsencrypt import get_cert_and_update_domain, create_domain_key, create_domain_csr, create_chained_certificate, get_cert, cleanup, parse_account_key, parse_csr, sign_certificate, encode_certificate, register_account, verify_challenge
from zappa.util import (detect_django_settings, copytree, detect_flask_apps,
add_event_source, remove_event_source,
get_event_source_status, parse_s3_url)
from zappa.wsgi import create_wsgi_request, common_log
from zappa.zappa import Zappa, ASSUME_POLICY, ATTACH_POLICY
def random_string(length):
return ''.join(random.choice(string.printable) for _ in range(length))
class TestZappa(unittest.TestCase):
def setUp(self):
self.sleep_patch = mock.patch('time.sleep', return_value=None)
# Tests expect us-east-1.
# If the user has set a different region in env variables, we set it aside for now and use us-east-1
self.users_current_region_name = os.environ.get('AWS_DEFAULT_REGION', None)
os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'
if not os.environ.get('PLACEBO_MODE') == 'record':
self.sleep_patch.start()
def tearDown(self):
if not os.environ.get('PLACEBO_MODE') == 'record':
self.sleep_patch.stop()
del os.environ['AWS_DEFAULT_REGION']
if self.users_current_region_name is not None:
# Give the user their AWS region back; we're done testing with us-east-1.
os.environ['AWS_DEFAULT_REGION'] = self.users_current_region_name
@placebo_session
def test_upload_remove_s3(self, session):
bucket_name = 'test_zappa_upload_s3'
z = Zappa(session)
zip_path = z.create_lambda_zip(minify=False)
res = z.upload_to_s3(zip_path, bucket_name)
os.remove(zip_path)
self.assertTrue(res)
s3 = session.resource('s3')
# will throw ClientError with 404 if bucket doesn't exist
s3.meta.client.head_bucket(Bucket=bucket_name)
# will throw ClientError with 404 if object doesn't exist
s3.meta.client.head_object(
Bucket=bucket_name,
Key=zip_path,
)
res = z.remove_from_s3(zip_path, bucket_name)
self.assertTrue(res)
fail = z.upload_to_s3('/tmp/this_isnt_real', bucket_name)
self.assertFalse(fail)
@placebo_session
def test_create_lambda_function(self, session):
bucket_name = 'lmbda'
zip_path = 'Spheres-dev-1454694878.zip'
z = Zappa(session)
z.aws_region = 'us-east-1'
z.load_credentials(session)
z.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution'
arn = z.create_lambda_function(
bucket=bucket_name,
s3_key=zip_path,
function_name='test_lmbda_function55',
handler='runme.lambda_handler'
)
arn = z.update_lambda_function(
bucket=bucket_name,
s3_key=zip_path,
function_name='test_lmbda_function55',
)
@placebo_session
def test_rollback_lambda_function_version(self, session):
z = Zappa(session)
z.credentials_arn = 'arn:aws:iam::724336686645:role/ZappaLambdaExecution'
function_name = 'django-helloworld-unicode'
too_many_versions = z.rollback_lambda_function_version(function_name, 99999)
self.assertFalse(too_many_versions)
function_arn = z.rollback_lambda_function_version(function_name, 1)
@placebo_session
def test_invoke_lambda_function(self, session):
z = Zappa(session)
z.credentials_arn = 'arn:aws:iam::724336686645:role/ZappaLambdaExecution'
function_name = 'django-helloworld-unicode'
payload = '{"event": "hello"}'
response = z.invoke_lambda_function(function_name, payload)
@placebo_session
def test_create_iam_roles(self, session):
z = Zappa(session)
arn, updated = z.create_iam_roles()
self.assertEqual(arn, "arn:aws:iam::123:role/{}".format(z.role_name))
@placebo_session
def test_get_api_url(self, session):
z = Zappa(session)
z.credentials_arn = 'arn:aws:iam::724336686645:role/ZappaLambdaExecution'
url = z.get_api_url('Spheres-demonstration', 'demonstration')
@placebo_session
def test_fetch_logs(self, session):
z = Zappa(session)
z.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution'
events = z.fetch_logs('Spheres-demonstration')
self.assertTrue(events is not None)
##
# Handler
##
@placebo_session
def test_handler(self, session):
# Init will test load_remote_settings
lh = LambdaHandler('test_settings', session=session)
# Annoyingly, this will fail during record, but
# the result will actually be okay to use in playback.
# See: https://github.com/garnaat/placebo/issues/48
self.assertEqual(os.environ['hello'], 'world')
event = {
"body": {},
"headers": {},
"params": {
"parameter_1": "asdf1",
"parameter_2": "asdf2",
},
"method": "GET",
"query": {}
}
lh.handler(event, None)
# Test scheduled event
event = {
u'account': u'72333333333',
u'region': u'us-east-1',
u'detail': {},
u'detail-type': u'Scheduled Event',
u'source': u'aws.events',
u'version': u'0',
u'time': u'2016-05-10T21:05:39Z',
u'id': u'0d6a6db0-d5e7-4755-93a0-750a8bf49d55',
u'resources': [u'arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me']
}
lh.handler(event, None)
# Test command event
event = {
u'account': u'72333333333',
u'region': u'us-east-1',
u'detail': {},
u'command': u'test_settings.command',
u'source': u'aws.events',
u'version': u'0',
u'time': u'2016-05-10T21:05:39Z',
u'id': u'0d6a6db0-d5e7-4755-93a0-750a8bf49d55',
u'resources': [u'arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me']
}
lh.handler(event, None)
# Test raw_command event
event = {
u'account': u'72333333333',
u'region': u'us-east-1',
u'detail': {},
u'raw_command': u'print("check one two")',
u'source': u'aws.events',
u'version': u'0',
u'time': u'2016-05-10T21:05:39Z',
u'id': u'0d6a6db0-d5e7-4755-93a0-750a8bf49d55',
u'resources': [u'arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me']
}
lh.handler(event, None)
# Test AWS S3 event
event = {
u'account': u'72333333333',
u'region': u'us-east-1',
u'detail': {},
u'Records': [{'s3': {'configurationId': 'test_settings.aws_s3_event'}}],
u'source': u'aws.events',
u'version': u'0',
u'time': u'2016-05-10T21:05:39Z',
u'id': u'0d6a6db0-d5e7-4755-93a0-750a8bf49d55',
u'resources': [u'arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me']
}
self.assertEqual("AWS S3 EVENT", lh.handler(event, None))
# Test AWS SNS event
event = {
u'account': u'72333333333',
u'region': u'us-east-1',
u'detail': {},
u'Records': [
{
u'EventVersion': u'1.0',
u'EventSource': u'aws:sns',
u'EventSubscriptionArn': u'arn:aws:sns:EXAMPLE',
u'Sns': {
u'SignatureVersion': u'1',
u'Timestamp': u'1970-01-01T00:00:00.000Z',
u'Signature': u'EXAMPLE',
u'SigningCertUrl': u'EXAMPLE',
u'MessageId': u'95df01b4-ee98-5cb9-9903-4c221d41eb5e',
u'Message': u'Hello from SNS!',
u'Subject': u'TestInvoke',
u'Type': u'Notification',
u'UnsubscribeUrl': u'EXAMPLE',
u'TopicArn': u'arn:aws:sns:1',
u'MessageAttributes': {
u'Test': {u'Type': u'String', u'Value': u'TestString'},
u'TestBinary': {u'Type': u'Binary', u'Value': u'TestBinary'}
}
}
}
]
}
self.assertEqual("AWS SNS EVENT", lh.handler(event, None))
# Test AWS DynamoDB event
event = {
u'Records': [
{
u'eventID': u'1',
u'eventVersion': u'1.0',
u'dynamodb': {
u'Keys': {u'Id': {u'N': u'101'}},
u'NewImage': {u'Message': {u'S': u'New item!'}, u'Id': {u'N': u'101'}},
u'StreamViewType': u'NEW_AND_OLD_IMAGES',
u'SequenceNumber': u'111', u'SizeBytes': 26
},
u'awsRegion': u'us-west-2',
u'eventName': u'INSERT',
u'eventSourceARN': u'arn:aws:dynamodb:1',
u'eventSource': u'aws:dynamodb'
}
]
}
self.assertEqual("AWS DYNAMODB EVENT", lh.handler(event, None))
# Test AWS kinesis event
event = {
u'Records': [
{
u'eventID': u'shardId-000000000000:49545115243490985018280067714973144582180062593244200961',
u'eventVersion': u'1.0',
u'kinesis': {
u'partitionKey': u'partitionKey-3',
u'data': u'SGVsbG8sIHRoaXMgaXMgYSB0ZXN0IDEyMy4=',
u'kinesisSchemaVersion': u'1.0',
u'sequenceNumber': u'49545115243490985018280067714973144582180062593244200961'
},
u'invokeIdentityArn': u'arn:aws:iam::EXAMPLE',
u'eventName': u'aws:kinesis:record',
u'eventSourceARN': u'arn:aws:kinesis:1',
u'eventSource': u'aws:kinesis',
u'awsRegion': u'us-east-1'
}
]
}
self.assertEqual("AWS KINESIS EVENT", lh.handler(event, None))
# Test Authorizer event
event = {u'authorizationToken': u'hubtoken1', u'methodArn': u'arn:aws:execute-api:us-west-2:1234:xxxxx/dev/GET/v1/endpoint/param', u'type': u'TOKEN'}
self.assertEqual("AUTHORIZER_EVENT", lh.handler(event, None))
# Ensure Zappa does return 401 if no function was defined.
lh.settings.AUTHORIZER_FUNCTION = None
with self.assertRaisesRegexp(Exception, 'Unauthorized'):
lh.handler(event, None)
# Unhandled event
event = {
u'Records': [
{
u'eventID': u'shardId-000000000000:49545115243490985018280067714973144582180062593244200961',
u'eventVersion': u'1.0',
u'kinesis': {
u'partitionKey': u'partitionKey-3',
u'data': u'SGVsbG8sIHRoaXMgaXMgYSB0ZXN0IDEyMy4=',
u'kinesisSchemaVersion': u'1.0',
u'sequenceNumber': u'49545115243490985018280067714973144582180062593244200961'
},
u'eventSourceARN': u'bad:arn:1',
}
]
}
self.assertIsNone(lh.handler(event, None))
##
# CLI
##
@placebo_session
def test_cli_aws(self, session):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
zappa_cli.api_key_required = True
zappa_cli.authorization_type = 'NONE'
zappa_cli.load_settings('test_settings.json', session)
zappa_cli.zappa.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution'
zappa_cli.deploy()
zappa_cli.update()
zappa_cli.rollback(1)
zappa_cli.tail(since=0, filter_pattern='', keep_open=False)
zappa_cli.schedule()
zappa_cli.unschedule()
zappa_cli.undeploy(noconfirm=True, remove_logs=True)
@placebo_session
def test_cli_aws_status(self, session):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
zappa_cli.load_settings('test_settings.json', session)
zappa_cli.api_stage = 'devor'
zappa_cli.lambda_name = 'baby-flask-devor'
zappa_cli.zappa.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution'
resp = zappa_cli.status()
##
# Let's Encrypt / ACME
##
##
# Django
##
##
# Util / Misc
##
@placebo_session
def test_add_event_source(self, session):
event_source = {'arn': 'blah:blah:blah:blah', 'events': [
"s3:ObjectCreated:*"
]}
# Sanity. This should fail.
try:
es = add_event_source(event_source, 'blah:blah:blah:blah', 'test_settings.callback', session)
self.fail("Success should have failed.")
except ValueError:
pass
event_source = {'arn': 's3:s3:s3:s3', 'events': [
"s3:ObjectCreated:*"
]}
add_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True)
remove_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True)
# get_event_source_status(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "4ddb2355bf93c9d8c171a5cc44f128ee",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 245,
"avg_line_length": 38.6266318537859,
"alnum_prop": 0.5431255914559957,
"repo_name": "parroyo/Zappa",
"id": "d83e3ed1ccbf99dc009c70cac0e5c99bbe87957e",
"size": "14817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tests_placebo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "323663"
},
{
"name": "Shell",
"bytes": "147"
}
],
"symlink_target": ""
} |
""" Tutorial for chi.model
----------------------
This is how we can use Python functions to define models
"""
import os
import chi
import tensorflow as tf
from tensorflow.contrib import layers # Keras-style layers
from tensorflow.contrib import learn
chi.set_loglevel('debug') # log whenever variables are created or shared
@chi.model
def my_digit_classifier(x: (None, 28 * 28)): # specify shape as (None, 28*28)
x = layers.fully_connected(x, 100)
z = layers.fully_connected(x, 10, None)
p = layers.softmax(z)
return z, p
@chi.function
def train(x, labels: tf.int32):
z, p = my_digit_classifier(x) # create model parameters (first usage of my_digit_classifier)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=z)
return my_digit_classifier.minimize(loss)
# now that the model has been used once and
# its internal variables have been created, we can list them
print('\n'.join([v.name for v in my_digit_classifier.trainable_variables()]))
@chi.function
def test(x, labels: tf.int64):
z, p = my_digit_classifier(x) # reuse model parameters
correct_prediction = tf.equal(tf.argmax(p, 1), labels)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
return accuracy
# now we can train that to classify handwritten digits
datapath = os.path.join(os.path.expanduser('~'), '.chi', 'datasets', 'mnist')
dataset = learn.datasets.mnist.read_data_sets(datapath)
for i in range(10000):
images, labels = dataset.train.next_batch(64)
loss = train(images, labels)
if i % 100 == 0:
accuracy = test(*dataset.test.next_batch(1024))
print('accuracy =', accuracy, 'after', i, 'minibatches')
| {
"content_hash": "36f81a66d266229e184dcc470a5ea62c",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 84,
"avg_line_length": 32.450980392156865,
"alnum_prop": 0.7045317220543806,
"repo_name": "rmst/chi",
"id": "05a90a6a158017a36ad9dbff022aec1592672641",
"size": "1655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "23770"
},
{
"name": "Python",
"bytes": "145895"
}
],
"symlink_target": ""
} |
import django
from django.conf import settings
settings.configure(
DEBUG=True,
DATABASES={"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:"
}}
)
settings.INSTALLED_APPS += ("label_tag_attr", )
django.setup()
| {
"content_hash": "affa4667c5c7929a8cfb34135b204602",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 47,
"avg_line_length": 20,
"alnum_prop": 0.6346153846153846,
"repo_name": "c-bata/django-label-tag-attr",
"id": "f63d83da11bced8f8171a148857c3071971f973f",
"size": "260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "label_tag_attr/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3184"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Weapon()
result.template = "object/weapon/melee/2h_sword/crafted_saber/shared_sword_lightsaber_two_handed_s10.iff"
result.attribute_template_id = 10
result.stfName("weapon_name","sword_lightsaber_2h")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "b94222fbae1eba496ecad8a43b2b32bb",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 106,
"avg_line_length": 26.615384615384617,
"alnum_prop": 0.7138728323699421,
"repo_name": "anhstudios/swganh",
"id": "b950edc92a4247cc04eab313092c2ae098cae41a",
"size": "491",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/weapon/melee/2h_sword/crafted_saber/shared_sword_lightsaber_two_handed_s10.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import json
import requests
from uritemplate import URITemplate
from mapbox.services.base import Service
# Constants
BASE_URI = 'https://api.mapbox.com/datasets/v1'
class Datasets(Service):
"""Access to the Datasets API."""
baseuri = 'https://api.mapbox.com/datasets/v1'
def _attribs(self, name=None, description=None):
"""Form an attributes dictionary from keyword args."""
a = {}
if name:
a['name'] = name
if description:
a['description'] = description
return a
def create(self, name=None, description=None):
"""Create a new dataset.
Returns a :class:`requests.Response` containing the attributes
of the new dataset as a JSON object.
:param name: the dataset name (optional).
:param description: the dataset description (optional).
"""
uri = URITemplate(self.baseuri + '/{owner}').expand(
owner=self.username)
return self.session.post(uri, json=self._attribs(name, description))
def list(self):
"""List datasets.
Returns a :class:`requests.Response` containing a list of
objects describing datasets.
"""
uri = URITemplate(self.baseuri + '/{owner}').expand(
owner=self.username)
return self.session.get(uri)
def read_dataset(self, dataset):
"""Read the attributes of a dataset.
Returns a :class:`requests.Response` containing the attributes
as a JSON object. The attributes: owner (a Mapbox account),
id (dataset id), created (Unix timestamp), modified
(timestamp), name (string), and description (string).
:param dataset: the dataset identifier string.
"""
uri = URITemplate(self.baseuri + '/{owner}/{id}').expand(
owner=self.username, id=dataset)
return self.session.get(uri)
def update_dataset(self, dataset, name=None, description=None):
"""Update the name and description of a dataset.
Returns a :class:`requests.Response` containing the updated
attributes as a JSON object.
:param dataset: the dataset identifier string.
:param name: the dataset name.
:param description: the dataset description.
"""
uri = URITemplate(self.baseuri + '/{owner}/{id}').expand(
owner=self.username, id=dataset)
return self.session.patch(uri, json=self._attribs(name, description))
def delete_dataset(self, dataset):
"""Delete a dataset.
:param dataset: the dataset identifier string.
"""
uri = URITemplate(self.baseuri + '/{owner}/{id}').expand(
owner=self.username, id=dataset)
return self.session.delete(uri)
def list_features(self, dataset, reverse=False, start=None, limit=None):
"""Get features of a dataset.
Returns a :class:`requests.Response` containing the features of
the dataset as a GeoJSON feature collection.
:param dataset: the dataset identifier string.
"""
uri = URITemplate(
self.baseuri + '/{owner}/{id}/features').expand(
owner=self.username, id=dataset)
params = {}
if reverse:
params['reverse'] = 'true'
if start:
params['start'] = start
if limit:
params['limit'] = int(limit)
return self.session.get(uri, params=params)
def batch_update_features(self, dataset, put=None, delete=None):
"""Update features of a dataset.
Up to 100 features may be deleted or modified in one request.
:param dataset: the dataset identifier string.
:param put: an array of GeoJSON features to be created or
modified with the semantics of HTTP PUT.
:param delete: an array of feature ids to be deleted with
the semantics of HTTP DELETE.
"""
uri = URITemplate(self.baseuri + '/{owner}/{id}/features').expand(
owner=self.username, id=dataset)
updates = {}
if put:
updates['put'] = put
if delete:
updates['delete'] = delete
return self.session.post(uri, json=updates)
def read_feature(self, dataset, fid):
"""Read a dataset feature.
Returns a :class:`requests.Response` containing a GeoJSON
representation of the feature.
:param dataset: the dataset identifier string.
:param fid: the feature identifier string.
"""
uri = URITemplate(
self.baseuri + '/{owner}/{did}/features/{fid}').expand(
owner=self.username, did=dataset, fid=fid)
return self.session.get(uri)
def update_feature(self, dataset, fid, feature):
"""Create or update a dataset feature.
The semantics of HTTP PUT apply: if the dataset has no feature
with the given `fid` a new feature will be created. Returns a
:class:`requests.Response` containing a GeoJSON representation
of the new or updated feature.
:param dataset: the dataset identifier string.
:param fid: the feature identifier string.
:param feature: a GeoJSON feature object.
"""
uri = URITemplate(
self.baseuri + '/{owner}/{did}/features/{fid}').expand(
owner=self.username, did=dataset, fid=fid)
return self.session.put(uri, json=feature)
def delete_feature(self, dataset, fid):
"""Delete a dataset feature.
:param dataset: the dataset identifier string.
:param fid: the feature identifier string.
"""
uri = URITemplate(
self.baseuri + '/{owner}/{did}/features/{fid}').expand(
owner=self.username, did=dataset, fid=fid)
return self.session.delete(uri)
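# A minimal usage sketch (not part of this module; the token, ids, and feature
# below are made up):
#
#     datasets = Datasets(access_token='pk.example')
#     created = datasets.create(name='parks', description='City parks').json()
#     feature = {'type': 'Feature', 'id': '1',
#                'geometry': {'type': 'Point', 'coordinates': [0, 0]},
#                'properties': {'name': 'Null Island'}}
#     datasets.update_feature(created['id'], '1', feature)
#     collection = datasets.list_features(created['id']).json()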
| {
"content_hash": "270f84f0a1ab2ce447b08e2251f0eb7a",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 77,
"avg_line_length": 35.041916167664674,
"alnum_prop": 0.6119275461380724,
"repo_name": "perrygeo/mapbox-sdk-py",
"id": "0c0e6e5d85dbc37f8b11c20c76cd8baa2a2c2ffd",
"size": "5870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mapbox/services/datasets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87409"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from nacl._lib import lib
def randombytes(size):
"""
Returns ``size`` number of random bytes from a cryptographically secure
random source.
:param size: int
:rtype: bytes
"""
buf = lib.ffi.new("unsigned char[]", size)
lib.randombytes(buf, size)
return lib.ffi.buffer(buf, size)[:]
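# A quick usage sketch (illustrative only):
#
#     nonce = randombytes(24)   # 24 cryptographically secure random bytes
#     assert isinstance(nonce, bytes) and len(nonce) == 24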
| {
"content_hash": "26b58adb05cd4c19a3fd8166dd412a50",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 75,
"avg_line_length": 24.125,
"alnum_prop": 0.6606217616580311,
"repo_name": "scholarly/pynacl",
"id": "a28cd88130d69e10bee1dff0d914da17046b31da",
"size": "988",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/nacl/c/randombytes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "17958"
},
{
"name": "C",
"bytes": "558716"
},
{
"name": "C++",
"bytes": "15818"
},
{
"name": "Python",
"bytes": "95599"
},
{
"name": "Shell",
"bytes": "341585"
},
{
"name": "Visual Basic",
"bytes": "294"
}
],
"symlink_target": ""
} |
import datetime
from django.contrib.admin import ModelAdmin
from django.contrib.admin.templatetags.admin_list import date_hierarchy
from django.contrib.admin.templatetags.admin_modify import submit_row
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.test import RequestFactory, TestCase
from django.urls import reverse
from .admin import ArticleAdmin, site
from .models import Article, Question
from .tests import AdminViewBasicTestCase
class AdminTemplateTagsTest(AdminViewBasicTestCase):
request_factory = RequestFactory()
def test_submit_row(self):
"""
submit_row template tag should pass whole context.
"""
request = self.request_factory.get(reverse('admin:auth_user_change', args=[self.superuser.pk]))
request.user = self.superuser
admin = UserAdmin(User, site)
extra_context = {'extra': True}
response = admin.change_view(request, str(self.superuser.pk), extra_context=extra_context)
template_context = submit_row(response.context_data)
self.assertIs(template_context['extra'], True)
self.assertIs(template_context['show_save'], True)
def test_override_show_save_and_add_another(self):
request = self.request_factory.get(
reverse('admin:auth_user_change', args=[self.superuser.pk]),
)
request.user = self.superuser
admin = UserAdmin(User, site)
for extra_context, expected_flag in (
({}, True), # Default.
({'show_save_and_add_another': False}, False),
):
with self.subTest(show_save_and_add_another=expected_flag):
response = admin.change_view(
request,
str(self.superuser.pk),
extra_context=extra_context,
)
template_context = submit_row(response.context_data)
self.assertIs(template_context['show_save_and_add_another'], expected_flag)
def test_override_change_form_template_tags(self):
"""
admin_modify template tags follow the standard search pattern
admin/app_label/model/template.html.
"""
article = Article.objects.all()[0]
request = self.request_factory.get(reverse('admin:admin_views_article_change', args=[article.pk]))
request.user = self.superuser
admin = ArticleAdmin(Article, site)
extra_context = {'show_publish': True, 'extra': True}
response = admin.change_view(request, str(article.pk), extra_context=extra_context)
response.render()
self.assertIs(response.context_data['show_publish'], True)
self.assertIs(response.context_data['extra'], True)
self.assertContains(response, 'name="_save"')
self.assertContains(response, 'name="_publish"')
self.assertContains(response, 'override-change_form_object_tools')
self.assertContains(response, 'override-prepopulated_fields_js')
def test_override_change_list_template_tags(self):
"""
admin_list template tags follow the standard search pattern
admin/app_label/model/template.html.
"""
request = self.request_factory.get(reverse('admin:admin_views_article_changelist'))
request.user = self.superuser
admin = ArticleAdmin(Article, site)
admin.date_hierarchy = 'date'
admin.search_fields = ('title', 'content')
response = admin.changelist_view(request)
response.render()
self.assertContains(response, 'override-actions')
self.assertContains(response, 'override-change_list_object_tools')
self.assertContains(response, 'override-change_list_results')
self.assertContains(response, 'override-date_hierarchy')
self.assertContains(response, 'override-pagination')
self.assertContains(response, 'override-search_form')
class DateHierarchyTests(TestCase):
factory = RequestFactory()
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(username='super', password='secret', email='super@example.com')
def test_choice_links(self):
modeladmin = ModelAdmin(Question, site)
modeladmin.date_hierarchy = 'posted'
posted_dates = (
datetime.date(2017, 10, 1),
datetime.date(2017, 10, 1),
datetime.date(2017, 12, 15),
datetime.date(2017, 12, 15),
datetime.date(2017, 12, 31),
datetime.date(2018, 2, 1),
)
Question.objects.bulk_create(Question(question='q', posted=posted) for posted in posted_dates)
tests = (
({}, [['year=2017'], ['year=2018']]),
({'year': 2016}, []),
({'year': 2017}, [['month=10', 'year=2017'], ['month=12', 'year=2017']]),
({'year': 2017, 'month': 9}, []),
({'year': 2017, 'month': 12}, [['day=15', 'month=12', 'year=2017'], ['day=31', 'month=12', 'year=2017']]),
)
for query, expected_choices in tests:
with self.subTest(query=query):
query = {'posted__%s' % q: val for q, val in query.items()}
request = self.factory.get('/', query)
request.user = self.superuser
changelist = modeladmin.get_changelist_instance(request)
spec = date_hierarchy(changelist)
choices = [choice['link'] for choice in spec['choices']]
expected_choices = [
'&'.join('posted__%s' % c for c in choice) for choice in expected_choices
]
expected_choices = [('?' + choice) if choice else '' for choice in expected_choices]
self.assertEqual(choices, expected_choices)
| {
"content_hash": "e9d566b7f2be16a1c89e12fc9a3f204b",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 118,
"avg_line_length": 44.784615384615385,
"alnum_prop": 0.6228100309172105,
"repo_name": "georgemarshall/django",
"id": "f717b8e1b7a02ea373928b5f35b6948527803150",
"size": "5822",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tests/admin_views/test_templatetags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53023"
},
{
"name": "HTML",
"bytes": "172977"
},
{
"name": "JavaScript",
"bytes": "448123"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12112373"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""Tests for vcfio_header_io module."""
import collections
import os
import unittest
from pysam import libcbcf
import apache_beam as beam
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
import apache_beam.io.source_test_utils as source_test_utils
from apache_beam.testing.test_pipeline import TestPipeline
from gcp_variant_transforms.beam_io.vcf_header_io import LAST_HEADER_LINE_PREFIX
from gcp_variant_transforms.beam_io.vcf_header_io import VcfHeaderSource
from gcp_variant_transforms.beam_io.vcf_header_io import ReadAllVcfHeaders
from gcp_variant_transforms.beam_io.vcf_header_io import ReadVcfHeaders
from gcp_variant_transforms.beam_io.vcf_header_io import VcfHeader
from gcp_variant_transforms.beam_io.vcf_header_io import WriteVcfHeaderFn
from gcp_variant_transforms.beam_io.vcf_header_io import WriteVcfHeaders
from gcp_variant_transforms.testing import asserts
from gcp_variant_transforms.testing import temp_dir
from gcp_variant_transforms.testing import testdata_util
def _get_header_from_reader(vcf_reader, file_path=None):
return VcfHeader(infos=vcf_reader.infos,
filters=vcf_reader.filters,
alts=vcf_reader.alts,
formats=vcf_reader.formats,
contigs=vcf_reader.contigs,
samples=vcf_reader.samples,
file_path=file_path)
def _get_vcf_header_from_lines(lines, file_name=None):
header = libcbcf.VariantHeader()
sample_line = LAST_HEADER_LINE_PREFIX
header.add_line('##fileformat=VCFv4.0')
for line in lines:
if line.startswith('#'):
if line.startswith(LAST_HEADER_LINE_PREFIX):
sample_line = line.strip()
break
header.add_line(line.strip())
else:
break
return VcfHeader(infos=header.info,
filters=header.filters,
alts=header.alts,
formats=header.formats,
contigs=header.contigs,
samples=sample_line,
file_path=file_name)
class VcfHeaderSourceTest(unittest.TestCase):
# TODO(msaul): Replace get_full_dir() with function from utils.
# Distribution should skip tests that need VCF files due to large size
VCF_FILE_DIR_MISSING = not os.path.exists(testdata_util.get_full_dir())
def setUp(self):
self.lines = testdata_util.get_sample_vcf_header_lines()
def _create_file_and_read_headers(self):
with temp_dir.TempDir() as tempdir:
filename = tempdir.create_temp_file(suffix='.vcf', lines=self.lines)
headers = source_test_utils.read_from_source(
VcfHeaderSource(filename))
return headers[0]
def test_vcf_header_eq(self):
header_1 = _get_vcf_header_from_lines(self.lines)
header_2 = _get_vcf_header_from_lines(self.lines)
self.assertEqual(header_1, header_2)
def test_read_file_headers(self):
headers = self.lines
self.lines = testdata_util.get_sample_vcf_file_lines()
header = self._create_file_and_read_headers()
self.assertEqual(header, _get_vcf_header_from_lines(headers))
def test_malformed_headers(self):
# TODO(tneymanov): Add more tests.
malformed_header_lines = [
# Malformed FILTER.
[
'##FILTER=<ID=PASS,Description="All filters passed">\n',
'##FILTER=<ID=LowQual,Descri\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSample\n',
'19\t123\trs12345\tT\tC\t50\tq10\tAF=0.2;NS=2\tGT:GQ\t1|0:48'
]
]
for content in malformed_header_lines:
self.lines = content
with self.assertRaises(ValueError):
self._create_file_and_read_headers()
def test_all_fields(self):
self.lines = [
'##contig=<ID=M,length=16,assembly=B37,md5=c6,species="Homosapiens">\n',
'##contig=<ID=P,length=16,assembly=B37,md5=c6,species="Homosapiens">\n',
'\n',
'##ALT=<ID=CGA_CNVWIN,Description="Copy number analysis window">\n',
'##ALT=<ID=INS:ME:MER,Description="Insertion of MER element">\n',
'##FILTER=<ID=MPCBT,Description="Mate pair count below 10">\n',
'##INFO=<ID=CGA_MIRB,Number=.,Type=String,Description="miRBaseId">\n',
'##FORMAT=<ID=FT,Number=1,Type=String,Description="Genotype filter">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT GS000016676-ASM\n',
]
header = self._create_file_and_read_headers()
self.assertCountEqual(list(header.contigs.keys()), ['M', 'P'])
self.assertCountEqual(
list(header.alts.keys()), ['CGA_CNVWIN', 'INS:ME:MER'])
self.assertCountEqual(list(header.filters.keys()), ['MPCBT'])
self.assertCountEqual(list(header.infos.keys()), ['CGA_MIRB'])
self.assertCountEqual(list(header.formats.keys()), ['FT'])
def test_empty_header_raises_error(self):
self.lines = testdata_util.get_sample_vcf_record_lines()
with self.assertRaises(ValueError):
self._create_file_and_read_headers()
def test_read_file_pattern(self):
with temp_dir.TempDir() as tempdir:
headers_1 = [self.lines[1], self.lines[-1]]
headers_2 = [self.lines[2], self.lines[3], self.lines[-1]]
headers_3 = [self.lines[4], self.lines[-1]]
file_name_1 = tempdir.create_temp_file(suffix='.vcf', lines=headers_1)
file_name_2 = tempdir.create_temp_file(suffix='.vcf', lines=headers_2)
file_name_3 = tempdir.create_temp_file(suffix='.vcf', lines=headers_3)
actual = source_test_utils.read_from_source(VcfHeaderSource(
os.path.join(tempdir.get_path(), '*.vcf')))
expected = [_get_vcf_header_from_lines(h, file_name=file_name)
for h, file_name in [(headers_1, file_name_1),
(headers_2, file_name_2),
(headers_3, file_name_3)]]
asserts.header_vars_equal(expected)(actual)
@unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
def test_read_single_file_large(self):
test_data_configs = [
{'file': 'valid-4.0.vcf', 'num_infos': 6, 'num_formats': 4},
{'file': 'valid-4.0.vcf.gz', 'num_infos': 6, 'num_formats': 4},
{'file': 'valid-4.0.vcf.bz2', 'num_infos': 6, 'num_formats': 4},
{'file': 'valid-4.1-large.vcf', 'num_infos': 21, 'num_formats': 33},
{'file': 'valid-4.2.vcf', 'num_infos': 8, 'num_formats': 5},
]
for config in test_data_configs:
read_data = source_test_utils.read_from_source(VcfHeaderSource(
testdata_util.get_full_file_path(config['file'])))
self.assertEqual(config['num_infos'], len(read_data[0].infos))
self.assertEqual(config['num_formats'], len(read_data[0].formats))
def test_pipeline_read_file_headers(self):
headers = self.lines
self.lines = testdata_util.get_sample_vcf_file_lines()
with temp_dir.TempDir() as tempdir:
filename = tempdir.create_temp_file(suffix='.vcf', lines=self.lines)
pipeline = TestPipeline()
pcoll = pipeline | 'ReadHeaders' >> ReadVcfHeaders(filename)
assert_that(pcoll, equal_to([_get_vcf_header_from_lines(headers)]))
pipeline.run()
def test_pipeline_read_all_file_headers(self):
headers = self.lines
self.lines = testdata_util.get_sample_vcf_file_lines()
with temp_dir.TempDir() as tempdir:
filename = tempdir.create_temp_file(suffix='.vcf', lines=self.lines)
pipeline = TestPipeline()
pcoll = (pipeline
| 'Create' >> beam.Create([filename])
| 'ReadHeaders' >> ReadAllVcfHeaders())
assert_that(pcoll, equal_to([_get_vcf_header_from_lines(headers)]))
pipeline.run()
def test_pipeline_read_file_pattern(self):
with temp_dir.TempDir() as tempdir:
headers_1 = [self.lines[1], self.lines[-1]]
headers_2 = [self.lines[2], self.lines[3], self.lines[-1]]
headers_3 = [self.lines[4], self.lines[-1]]
file_name_1 = tempdir.create_temp_file(suffix='.vcf', lines=headers_1)
file_name_2 = tempdir.create_temp_file(suffix='.vcf', lines=headers_2)
file_name_3 = tempdir.create_temp_file(suffix='.vcf', lines=headers_3)
pipeline = TestPipeline()
pcoll = pipeline | 'ReadHeaders' >> ReadVcfHeaders(
os.path.join(tempdir.get_path(), '*.vcf'))
expected = [_get_vcf_header_from_lines(h, file_name=file_name)
for h, file_name in [(headers_1, file_name_1),
(headers_2, file_name_2),
(headers_3, file_name_3)]]
assert_that(pcoll, asserts.header_vars_equal(expected))
pipeline.run()
def test_pipeline_read_all_file_pattern(self):
with temp_dir.TempDir() as tempdir:
headers_1 = [self.lines[1], self.lines[-1]]
headers_2 = [self.lines[2], self.lines[3], self.lines[-1]]
headers_3 = [self.lines[4], self.lines[-1]]
file_name_1 = tempdir.create_temp_file(suffix='.vcf', lines=headers_1)
file_name_2 = tempdir.create_temp_file(suffix='.vcf', lines=headers_2)
file_name_3 = tempdir.create_temp_file(suffix='.vcf', lines=headers_3)
pipeline = TestPipeline()
pcoll = (pipeline
| 'Create' >> beam.Create(
[os.path.join(tempdir.get_path(), '*.vcf')])
| 'ReadHeaders' >> ReadAllVcfHeaders())
expected = [_get_vcf_header_from_lines(h, file_name=file_name)
for h, file_name in [(headers_1, file_name_1),
(headers_2, file_name_2),
(headers_3, file_name_3)]]
assert_that(pcoll, asserts.header_vars_equal(expected))
pipeline.run()
class WriteVcfHeadersTest(unittest.TestCase):
def setUp(self):
self.lines = testdata_util.get_sample_vcf_header_lines()
def test_to_vcf_header_line(self):
header_fn = WriteVcfHeaderFn('')
header = collections.OrderedDict([
('id', 'NS'),
('num', 1),
('type', 'Integer'),
('desc', 'Number samples'),
])
expected = ('##INFO=<ID=NS,Number=1,Type=Integer,'
'Description="Number samples">\n')
self.assertEqual(header_fn._to_vcf_header_line('INFO', header),
expected)
def test_raises_error_for_invalid_key(self):
header_fn = WriteVcfHeaderFn('')
header = collections.OrderedDict([('number', 0)])
with self.assertRaises(ValueError):
header_fn._format_header_key_value('number', header['number'])
def test_raises_error_for_invalid_num(self):
header_fn = WriteVcfHeaderFn('')
header = collections.OrderedDict([('num', -4)])
with self.assertRaises(ValueError):
header_fn._format_header_key_value('num', header['num'])
def test_info_source_and_version(self):
self.lines = [
'##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth",'
'Source="source",Version="version">\n',
self.lines[-1]
]
header = _get_vcf_header_from_lines(self.lines)
header_fn = WriteVcfHeaderFn('')
actual = header_fn._to_vcf_header_line(
'INFO', list(header.infos.values())[0])
expected = self.lines[0]
self.assertEqual(actual, expected)
def test_write_contig(self):
self.lines = [
'##contig=<ID=M,length=16,assembly=B37,md5=c6,species="Homosapiens">\n',
self.lines[-1],
]
header = _get_vcf_header_from_lines(self.lines)
header_fn = WriteVcfHeaderFn('')
actual = header_fn._to_vcf_header_line(
'contig', list(header.contigs.values())[0])
expected = '##contig=<ID=M,length=16>\n'
self.assertEqual(actual, expected)
def test_write_info_number_types(self):
self.lines = [
'##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
'##INFO=<ID=AF,Number=A,Type=Float,Description="Allele Frequency">\n',
'##INFO=<ID=HG,Number=G,Type=Integer,Description="IntInfo_G">\n',
'##INFO=<ID=HR,Number=R,Type=String,Description="ChrInfo_R">\n',
self.lines[-1],
]
header = _get_vcf_header_from_lines(self.lines)
header_fn = WriteVcfHeaderFn('')
actual = []
for info in list(header.infos.values()):
actual.append(header_fn._to_vcf_header_line('INFO', info))
expected = self.lines[:-1]
self.assertCountEqual(actual, expected)
def test_write_headers(self):
header = _get_vcf_header_from_lines(self.lines)
with temp_dir.TempDir() as tempdir:
tempfile = tempdir.create_temp_file(suffix='.vcf')
header_fn = WriteVcfHeaderFn(tempfile)
header_fn.process(header)
self._assert_file_contents_equal(tempfile, self.lines)
def test_write_headers_with_vcf_version_line(self):
header = _get_vcf_header_from_lines(self.lines)
vcf_version_line = '##fileformat=VCFv4.3\n'
expected_results = [
vcf_version_line.encode('utf-8'),
b'##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
b'##INFO=<ID=AF,Number=A,Type=Float,Description="Allele Frequency">\n',
b'##INFO=<ID=HG,Number=G,Type=Integer,Description="IntInfo_G">\n',
b'##INFO=<ID=HR,Number=R,Type=String,Description="ChrInfo_R">\n',
b'##FILTER=<ID=MPCBT,Description="Mate pair count below 10">\n',
b'##ALT=<ID=INS:ME:MER,Description="Insertion of MER element">\n',
b'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n',
b'##FORMAT=<ID=GQ,Number=1,Type=Integer,Description="GQ">\n',
b'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT\n'
]
with temp_dir.TempDir() as tempdir:
tempfile = tempdir.create_temp_file(suffix='.vcf')
header_fn = WriteVcfHeaderFn(tempfile)
header_fn.process(header, vcf_version_line)
with open(tempfile, 'rb') as f:
actual = f.readlines()
self.assertCountEqual(actual, expected_results)
def _remove_sample_names(self, line):
# Return line with all columns except sample names.
return b'\t'.join(line.split(b'\t')[:9])
def _assert_file_contents_equal(self, file_name, lines):
with open(file_name, 'rb') as f:
actual = f.read().splitlines()
expected = [s.strip().encode('utf-8') for s in lines[1:]]
expected[-1] = self._remove_sample_names(expected[-1])
self.assertCountEqual(actual, expected)
def test_write_dataflow(self):
header = _get_vcf_header_from_lines(self.lines)
with temp_dir.TempDir() as tempdir:
tempfile = tempdir.create_temp_file(suffix='.vcf')
pipeline = TestPipeline()
pcoll = pipeline | beam.Create([header])
_ = pcoll | 'Write' >> WriteVcfHeaders(tempfile)
pipeline.run()
self._assert_file_contents_equal(tempfile, self.lines)
| {
"content_hash": "22fdb2a33314535af3fe8da4ad18fc19",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 80,
"avg_line_length": 41.15406162464986,
"alnum_prop": 0.6355839912877757,
"repo_name": "googlegenomics/gcp-variant-transforms",
"id": "05e8f91cf6f15c865752d253b6310e0b200b0eca",
"size": "15291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gcp_variant_transforms/beam_io/vcf_header_io_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3534"
},
{
"name": "Python",
"bytes": "1101324"
},
{
"name": "Shell",
"bytes": "17097"
}
],
"symlink_target": ""
} |
import agate
from csvkit.cli import CSVKitUtility
class CSVPy(CSVKitUtility):
description = 'Load a CSV file into a CSV reader and then drop into a Python shell.'
def add_arguments(self):
self.argparser.add_argument('--dict', dest='as_dict', action='store_true',
help='Load the CSV file into a DictReader.')
self.argparser.add_argument('--agate', dest='as_agate', action='store_true',
help='Load the CSV file into an agate table.')
def main(self):
# Attempt to read the filename; this makes the lazy loader access the file and raise an error if it does not exist
filename = self.input_file.name
if self.args.as_dict:
klass = agate.csv.DictReader
class_name = 'agate.csv.DictReader'
variable_name = 'reader'
elif self.args.as_agate:
klass = agate.Table.from_csv
class_name = 'agate.Table'
variable_name = 'table'
else:
klass = agate.csv.reader
class_name = 'agate.csv.reader'
variable_name = 'reader'
variable = klass(self.input_file, **self.reader_kwargs)
welcome_message = 'Welcome! "%s" has been loaded in an %s object named "%s".' % (filename, class_name, variable_name)
try:
from IPython.frontend.terminal.embed import InteractiveShellEmbed
exec('%s = variable' % variable_name)
ipy = InteractiveShellEmbed(banner1=welcome_message)
ipy()
except ImportError:
import code
code.interact(welcome_message, local={variable_name: variable})
def launch_new_instance():
utility = CSVPy()
utility.run()
if __name__ == '__main__':
launch_new_instance()
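# Illustrative usage sketch (not part of the original module). Assuming csvkit is
# installed on PATH and a file named "data.csv" exists, the invocations below drop
# into an interactive session with the loaded object:
#
#   csvpy data.csv            # agate.csv.reader object bound to "reader"
#   csvpy --dict data.csv     # agate.csv.DictReader object bound to "reader"
#   csvpy --agate data.csv    # agate.Table object bound to "table"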
| {
"content_hash": "b14579651a05d5c2defe8f00c9c9b292",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 125,
"avg_line_length": 34.82692307692308,
"alnum_prop": 0.5919381557150746,
"repo_name": "doganmeh/csvkit",
"id": "2d7802929be3aa776195f4f817f5eb0a6820777e",
"size": "1834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csvkit/utilities/csvpy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "177601"
}
],
"symlink_target": ""
} |
"""Matplotlib energyplot."""
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import rankdata
from ...plot_utils import _scale_fig_size
from . import backend_kwarg_defaults, backend_show, create_axes_grid, matplotlib_kwarg_dealiaser
def plot_ess(
ax,
plotters,
xdata,
ess_tail_dataset,
mean_ess,
sd_ess,
idata,
data,
kind,
extra_methods,
textsize,
rows,
cols,
figsize,
kwargs,
extra_kwargs,
text_kwargs,
n_samples,
relative,
min_ess,
labeller,
ylabel,
rug,
rug_kind,
rug_kwargs,
hline_kwargs,
backend_kwargs,
show,
):
"""Matplotlib ess plot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(),
**backend_kwargs,
}
(figsize, ax_labelsize, titlesize, xt_labelsize, _linewidth, _markersize) = _scale_fig_size(
figsize, textsize, rows, cols
)
backend_kwargs.setdefault("figsize", figsize)
backend_kwargs["squeeze"] = True
kwargs = matplotlib_kwarg_dealiaser(kwargs, "plot")
_linestyle = "-" if kind == "evolution" else "none"
kwargs.setdefault("linestyle", _linestyle)
kwargs.setdefault("linewidth", _linewidth)
kwargs.setdefault("markersize", _markersize)
kwargs.setdefault("marker", "o")
kwargs.setdefault("zorder", 3)
extra_kwargs = matplotlib_kwarg_dealiaser(extra_kwargs, "plot")
if kind == "evolution":
extra_kwargs = {
**extra_kwargs,
**{key: item for key, item in kwargs.items() if key not in extra_kwargs},
}
kwargs.setdefault("label", "bulk")
extra_kwargs.setdefault("label", "tail")
else:
extra_kwargs.setdefault("linewidth", _linewidth / 2)
extra_kwargs.setdefault("color", "k")
extra_kwargs.setdefault("alpha", 0.5)
kwargs.setdefault("label", kind)
hline_kwargs = matplotlib_kwarg_dealiaser(hline_kwargs, "plot")
hline_kwargs.setdefault("linewidth", _linewidth)
hline_kwargs.setdefault("linestyle", "--")
hline_kwargs.setdefault("color", "gray")
hline_kwargs.setdefault("alpha", 0.7)
if extra_methods:
text_kwargs = matplotlib_kwarg_dealiaser(text_kwargs, "text")
text_x = text_kwargs.pop("x", 1)
text_kwargs.setdefault("fontsize", xt_labelsize * 0.7)
text_kwargs.setdefault("alpha", extra_kwargs["alpha"])
text_kwargs.setdefault("color", extra_kwargs["color"])
text_kwargs.setdefault("horizontalalignment", "right")
text_va = text_kwargs.pop("verticalalignment", None)
if ax is None:
_, ax = create_axes_grid(
len(plotters),
rows,
cols,
backend_kwargs=backend_kwargs,
)
for (var_name, selection, isel, x), ax_ in zip(plotters, np.ravel(ax)):
ax_.plot(xdata, x, **kwargs)
if kind == "evolution":
ess_tail = ess_tail_dataset[var_name].sel(**selection)
ax_.plot(xdata, ess_tail, **extra_kwargs)
elif rug:
rug_kwargs = matplotlib_kwarg_dealiaser(rug_kwargs, "plot")
if not hasattr(idata, "sample_stats"):
raise ValueError("InferenceData object must contain sample_stats for rug plot")
if not hasattr(idata.sample_stats, rug_kind):
raise ValueError(f"InferenceData does not contain {rug_kind} data")
rug_kwargs.setdefault("marker", "|")
rug_kwargs.setdefault("linestyle", rug_kwargs.pop("ls", "None"))
rug_kwargs.setdefault("color", rug_kwargs.pop("c", kwargs.get("color", "C0")))
rug_kwargs.setdefault("space", 0.1)
rug_kwargs.setdefault("markersize", rug_kwargs.pop("ms", 2 * _markersize))
values = data[var_name].sel(**selection).values.flatten()
mask = idata.sample_stats[rug_kind].values.flatten()
values = rankdata(values, method="average")[mask]
rug_space = np.max(x) * rug_kwargs.pop("space")
rug_x, rug_y = values / (len(mask) - 1), np.zeros_like(values) - rug_space
ax_.plot(rug_x, rug_y, **rug_kwargs)
ax_.axhline(0, color="k", linewidth=_linewidth, alpha=0.7)
if extra_methods:
mean_ess_i = mean_ess[var_name].sel(**selection).values.item()
sd_ess_i = sd_ess[var_name].sel(**selection).values.item()
ax_.axhline(mean_ess_i, **extra_kwargs)
ax_.annotate(
"mean",
(text_x, mean_ess_i),
va=text_va
if text_va is not None
else "bottom"
if mean_ess_i >= sd_ess_i
else "top",
**text_kwargs,
)
ax_.axhline(sd_ess_i, **extra_kwargs)
ax_.annotate(
"sd",
(text_x, sd_ess_i),
va=text_va if text_va is not None else "bottom" if sd_ess_i > mean_ess_i else "top",
**text_kwargs,
)
if relative and kind == "evolution":
thin_xdata = np.linspace(xdata.min(), xdata.max(), 100)
ax_.plot(thin_xdata, min_ess / thin_xdata, **hline_kwargs)
else:
hline = min_ess / n_samples if relative else min_ess
ax_.axhline(hline, **hline_kwargs)
ax_.set_title(
labeller.make_label_vert(var_name, selection, isel), fontsize=titlesize, wrap=True
)
ax_.tick_params(labelsize=xt_labelsize)
ax_.set_xlabel(
"Total number of draws" if kind == "evolution" else "Quantile", fontsize=ax_labelsize
)
ax_.set_ylabel(
ylabel.format("Relative ESS" if relative else "ESS"), fontsize=ax_labelsize, wrap=True
)
if kind == "evolution":
ax_.legend(title="Method", fontsize=xt_labelsize, title_fontsize=xt_labelsize)
else:
ax_.set_xlim(0, 1)
if rug:
ax_.yaxis.get_major_locator().set_params(nbins="auto", steps=[1, 2, 5, 10])
_, ymax = ax_.get_ylim()
yticks = ax_.get_yticks().astype(np.int64)
yticks = yticks[(yticks >= 0) & (yticks < ymax)]
ax_.set_yticks(yticks)
ax_.set_yticklabels(yticks)
else:
ax_.set_ylim(bottom=0)
if backend_show(show):
plt.show()
return ax
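# Illustrative note (not part of the original module): this matplotlib backend is
# normally reached through the public ArviZ API rather than called directly. A
# minimal sketch, assuming an InferenceData object is available:
#
#   import arviz as az
#   idata = az.load_arviz_data("centered_eight")
#   az.plot_ess(idata, kind="evolution", backend="matplotlib")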
| {
"content_hash": "a0c84a41e6e780fb6954f22566bf66ea",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 100,
"avg_line_length": 35.927374301675975,
"alnum_prop": 0.5703623075726948,
"repo_name": "arviz-devs/arviz",
"id": "3decb0eae974e91d3031fcaae13635bfbe973510",
"size": "6431",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "arviz/plots/backends/matplotlib/essplot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5900"
},
{
"name": "Dockerfile",
"bytes": "1771"
},
{
"name": "HTML",
"bytes": "1343"
},
{
"name": "Jupyter Notebook",
"bytes": "641262"
},
{
"name": "Makefile",
"bytes": "688"
},
{
"name": "PowerShell",
"bytes": "2668"
},
{
"name": "Python",
"bytes": "1634423"
},
{
"name": "R",
"bytes": "248"
},
{
"name": "Shell",
"bytes": "7276"
},
{
"name": "TeX",
"bytes": "24620"
}
],
"symlink_target": ""
} |
from sklearn.cross_validation import KFold
from sklearn.linear_model import LinearRegression, ElasticNet
import numpy as np
from sklearn.datasets import load_boston
boston = load_boston()
x = np.array([np.concatenate((v, [1])) for v in boston.data])
y = boston.target
FIT_EN = False
if FIT_EN:
model = ElasticNet(fit_intercept=True, alpha=0.5)
else:
model = LinearRegression(fit_intercept=True)
model.fit(x, y)
p = np.array([model.predict(xi) for xi in x])
e = p - y
total_error = np.dot(e, e)
rmse_train = np.sqrt(total_error / len(p))
kf = KFold(len(x), n_folds=10)
err = 0
for train, test in kf:
model.fit(x[train], y[train])
p = np.array([model.predict(xi) for xi in x[test]])
e = p - y[test]
err += np.dot(e, e)
rmse_10cv = np.sqrt(err / len(x))
print('RMSE on training: {}'.format(rmse_train))
print('RMSE on 10-fold CV: {}'.format(rmse_10cv))
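# Illustrative sketch (not part of the original script): sklearn.cross_validation was
# removed in scikit-learn 0.20. A roughly equivalent 10-fold CV RMSE with the modern
# API (assuming x, y and model as built above) would be:
#
#   from sklearn.model_selection import KFold, cross_val_score
#   scores = cross_val_score(model, x, y, scoring='neg_mean_squared_error',
#                            cv=KFold(n_splits=10))
#   rmse_10cv = np.sqrt(-scores.mean())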
| {
"content_hash": "bedeeff63850ebdefc319e9e9f98d602",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 61,
"avg_line_length": 29.233333333333334,
"alnum_prop": 0.6750285062713797,
"repo_name": "Nelca/buildMLSystem",
"id": "2ae52e02b94570d7acab79a5ec04e0d8d1424a97",
"size": "1094",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ch07/cv10_lr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "69135856"
},
{
"name": "Makefile",
"bytes": "4816"
},
{
"name": "Python",
"bytes": "210634"
},
{
"name": "Shell",
"bytes": "635"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from rango import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^about/$', views.about, name='about'),
url(r'^category/(?P<category_name_url>\w+)/$', views.category, name='category'),
url(r'^add_category/$', views.add_category, name='add_category'),
url(r'^category/(?P<category_name_url>\w+)/add_page/$', views.add_page, name='add_page'),
url(r'^register/$', views.register, name='register'),
url(r'^login/$', views.user_login, name='login'),
url(r'^restricted/$', views.restricted, name='restricted'),
url(r'^logout/$', views.user_logout, name='logout'),
#url(r'^search/$', views.search, name='search'),
url(r'^profile/$', views.profile, name='profile'),
url(r'^goto/$', views.track_url, name='track_url'),
url(r'^like_category/$', views.like_category, name='like_category'),
url(r'^suggest_category/$', views.suggest_category, name='suggest_category'),
url(r'^auto_add_page/$', views.auto_add_page, name='auto_add_page'),
)
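# Illustrative note (not part of the original module): these URL patterns are
# typically mounted under a prefix in the project-level urls.py, e.g. (prefix assumed):
#
#   url(r'^rango/', include('rango.urls')),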
| {
"content_hash": "c2364db6672e738e5db8291a861a6f75",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 90,
"avg_line_length": 49.61904761904762,
"alnum_prop": 0.654510556621881,
"repo_name": "sleonr0792/twd",
"id": "6b2589329a621f81a7faddf20ff5af48a971c466",
"size": "1042",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tango_with_django_project/rango/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "38571"
},
{
"name": "JavaScript",
"bytes": "941"
},
{
"name": "Python",
"bytes": "61478"
}
],
"symlink_target": ""
} |
import requests
from .. import __version__
class _Service(object):
def __init__(self):
self._BASE_URI = "http://gtr.rcuk.ac.uk/gtr/api/"
def get_session(self, token=None, env=None):
session = requests.Session()
session.headers.update(
{'User-Agent': ' '.join(
[self.product_token, requests.utils.default_user_agent()]),
'Accept': 'application/vnd.rcuk.gtr.json-v3'}) # Returns json only
return session
@property
def product_token(self):
"""A product token for use in User-Agent headers."""
return 'gtr/{0}'.format(__version__)
def handle_http_error(self, response, custom_messages=None,
raise_for_status=False):
if not custom_messages:
custom_messages = {}
if response.status_code in custom_messages.keys():
raise requests.exceptions.HTTPError(
custom_messages[response.status_code])
if raise_for_status:
response.raise_for_status()
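# Illustrative sketch (not part of the original module), showing how _Service is
# meant to be used by concrete service classes. The "projects" endpoint and the
# query parameter below are assumptions for demonstration only:
#
#   service = _Service()
#   session = service.get_session()
#   response = session.get(service._BASE_URI + 'projects', params={'q': 'graphene'})
#   service.handle_http_error(response, custom_messages={404: 'Resource not found'},
#                             raise_for_status=True)
#   data = response.json()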
| {
"content_hash": "a531d41e8dc69bcddae64eaea2dffcd0",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 83,
"avg_line_length": 32,
"alnum_prop": 0.5823863636363636,
"repo_name": "nestauk/gtr",
"id": "933d99e900db925e3162b73b33ad9ea906a08ea4",
"size": "1056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gtr/services/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20368"
}
],
"symlink_target": ""
} |
"""
CertificatesEnddate - command ``/usr/bin/openssl x509 -noout -enddate -in path/to/cert/file``
=============================================================================================
This command gets the enddates of certificate files.
Typical output of this command is::
/usr/bin/find: '/etc/origin/node': No such file or directory
/usr/bin/find: '/etc/origin/master': No such file or directory
notAfter=May 25 16:39:40 2019 GMT
FileName= /etc/origin/node/cert.pem
unable to load certificate
139881193203616:error:0906D066:PEM routines:PEM_read_bio:bad end line:pem_lib.c:802:
unable to load certificate
140695459370912:error:0906D06C:PEM routines:PEM_read_bio:no start line:pem_lib.c:703:Expecting: TRUSTED CERTIFICATE
notAfter=May 25 16:39:40 2019 GMT
FileName= /etc/pki/ca-trust/extracted/pem/email-ca-bundle.pem
notAfter=Dec 9 10:55:38 2017 GMT
FileName= /etc/pki/consumer/cert.pem
notAfter=Jan 1 04:59:59 2022 GMT
FileName= /etc/pki/entitlement/3343502840335059594.pem
notAfter=Aug 31 02:19:59 2017 GMT
FileName= /etc/pki/consumer/cert.pem
notAfter=Jan 1 04:59:59 2022 GMT
FileName= /etc/pki/entitlement/2387590574974617178.pem
Examples:
>>> cert_enddate = shared[CertificatesEnddate]
>>> paths = cert_enddate.certificates_path
>>> paths[0]
'/etc/origin/node/cert.pem'
>>> cert_enddate.expiration_date(paths[0]).datetime
datetime.datetime(2019, 5, 25, 16, 39, 40)
>>> cert_enddate.expiration_date(paths[0]).str
'May 25 16:39:40 2019'
"""
from datetime import datetime
from collections import namedtuple
from .. import Parser, parser, LegacyItemAccess
from insights.specs import certificates_enddate
@parser(certificates_enddate)
class CertificatesEnddate(LegacyItemAccess, Parser):
"""Class to parse the expiration dates."""
ExpirationDate = namedtuple('ExpirationDate', ['str', 'datetime'])
"""namedtuple: contains the expiration date in string and datetime format."""
def parse_content(self, content):
"""Parse the content of crt files."""
self.data = {}
datestamp = None
for l in content:
if datestamp and l.startswith("FileName="):
self.data[l.split("=")[-1].strip()] = datestamp
datestamp = None
elif l.startswith("notAfter="):
datestamp = l.split("=")[-1].rsplit(" ", 1)[0]
else:
datestamp = None
@property
def certificates_path(self):
"""list: Return filepaths in list or []."""
return self.data.keys() if self.data else []
def expiration_date(self, path):
"""This will return a namedtuple(['str', 'datetime']) contains the
expiration date in string and datetime format. If the expiration date
is unparsable, the ExpirationDate.datetime should be None.
Args:
path(str): The certificate file path.
Returns:
A ExpirationDate for available path. None otherwise.
"""
path_date = self.data.get(path)
if path_date:
try:
path_datetime = datetime.strptime(path_date, '%b %d %H:%M:%S %Y')
return self.ExpirationDate(path_date, path_datetime)
except:
return self.ExpirationDate(path_date, None)
| {
"content_hash": "fcf49353b6a51c736ebc33a249dcd037",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 119,
"avg_line_length": 38.60919540229885,
"alnum_prop": 0.6341172968145281,
"repo_name": "wcmitchell/insights-core",
"id": "1e29a58c6d8854562e2d956bd58e93de1fcb0c9c",
"size": "3359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insights/parsers/certificates_enddate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "19339"
},
{
"name": "Jupyter Notebook",
"bytes": "91793"
},
{
"name": "Python",
"bytes": "3414025"
},
{
"name": "Shell",
"bytes": "2274"
}
],
"symlink_target": ""
} |
import uuid
from six.moves import http_client
from keystone.tests.unit import test_v3
class BaseTestCase(test_v3.RestfulTestCase):
EXTENSION_TO_ADD = 'simple_cert_extension'
CA_PATH = '/v3/OS-SIMPLE-CERT/ca'
CERT_PATH = '/v3/OS-SIMPLE-CERT/certificates'
class TestSimpleCert(BaseTestCase):
def request_cert(self, path):
content_type = 'application/x-pem-file'
response = self.request(app=self.public_app,
method='GET',
path=path,
headers={'Accept': content_type},
expected_status=http_client.OK)
self.assertEqual(content_type, response.content_type.lower())
self.assertIn('---BEGIN', response.body)
return response
def test_ca_cert(self):
self.request_cert(self.CA_PATH)
def test_signing_cert(self):
self.request_cert(self.CERT_PATH)
def test_missing_file(self):
# these files do not exist
self.config_fixture.config(group='signing',
ca_certs=uuid.uuid4().hex,
certfile=uuid.uuid4().hex)
for path in [self.CA_PATH, self.CERT_PATH]:
self.request(app=self.public_app,
method='GET',
path=path,
expected_status=http_client.INTERNAL_SERVER_ERROR)
| {
"content_hash": "30da59d0f994362052a2f5c7b67d8bca",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 75,
"avg_line_length": 30.851063829787233,
"alnum_prop": 0.5524137931034483,
"repo_name": "ajayaa/keystone",
"id": "b241b41baa46a6c3ff69332a1ba1fcf85bc0e717",
"size": "1996",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "keystone/tests/unit/test_contrib_simple_cert.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "4000934"
}
],
"symlink_target": ""
} |
"""
Tools for Lucene.
All Lucene features should be accessed in nordlys through this class.
- Lucene class for ensuring that the same version, analyzer, etc.
are used across nordlys modules. Handles IndexReader, IndexWriter, etc.
- Command line tools for checking indexed document content
@author: Faegheh Hasibi (faegheh.hasibi@idi.ntnu.no)
@author: Krisztian Balog (krisztian.balog@uis.no)
"""
import argparse
import lucene
from nordlys.retrieval.results import RetrievalResults
from java.io import File
from java.util import HashMap, TreeSet
from java.io import StringReader
from java.lang import StringBuilder
from org.apache.lucene.analysis.tokenattributes import CharTermAttribute
from org.apache.lucene.analysis.core import StopFilter
from org.apache.lucene.analysis.core import StopAnalyzer
from org.apache.lucene.analysis.standard import StandardTokenizer
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.analysis.shingle import ShingleAnalyzerWrapper
from org.apache.lucene.document import Document
from org.apache.lucene.document import Field
from org.apache.lucene.document import FieldType
from org.apache.lucene.index import MultiFields
from org.apache.lucene.index import IndexWriter
from org.apache.lucene.index import IndexWriterConfig
from org.apache.lucene.index import DirectoryReader
from org.apache.lucene.index import Term
from org.apache.lucene.index import TermContext
from org.apache.lucene.queryparser.classic import QueryParser
from org.apache.lucene.search import IndexSearcher
from org.apache.lucene.search import BooleanClause
from org.apache.lucene.search import TermQuery
from org.apache.lucene.search import BooleanQuery
from org.apache.lucene.search import PhraseQuery
from org.apache.lucene.search.spans import SpanNearQuery
from org.apache.lucene.search.spans import SpanTermQuery
from org.apache.lucene.search import FieldValueFilter
from org.apache.lucene.search.similarities import LMJelinekMercerSimilarity
from org.apache.lucene.search.similarities import LMDirichletSimilarity
from org.apache.lucene.store import SimpleFSDirectory
from org.apache.lucene.util import BytesRefIterator
from org.apache.lucene.util import Version
from org.apache.lucene.index import SlowCompositeReaderWrapper
# has java VM for Lucene been initialized
lucene_vm_init = False
class Lucene(object):
# default fieldnames for id and contents
FIELDNAME_ID = "id"
FIELDNAME_CONTENTS = "contents"
# internal fieldtypes
# used as Enum, the actual values don't matter
FIELDTYPE_ID = "id"
FIELDTYPE_ID_TV = "id_tv"
FIELDTYPE_TEXT = "text"
FIELDTYPE_TEXT_TV = "text_tv"
FIELDTYPE_TEXT_TVP = "text_tvp"
FIELDTYPE_TEXT_NTV = "text_ntv"
FIELDTYPE_TEXT_NTVP = "text_ntvp"
def __init__(self, index_dir, max_shingle_size=None):
global lucene_vm_init
if not lucene_vm_init:
lucene.initVM(vmargs=['-Djava.awt.headless=true'])
lucene_vm_init = True
self.dir = SimpleFSDirectory(File(index_dir))
self.max_shingle_size = max_shingle_size
self.analyzer = None
self.reader = None
self.searcher = None
self.writer = None
self.ldf = None
@staticmethod
def get_version():
"""Get Lucene version."""
return Version.LUCENE_48
@staticmethod
def preprocess(text):
"""Tokenize and stop the input text."""
ts = StandardTokenizer(Lucene.get_version(), StringReader(text.lower()))
ts = StopFilter(Lucene.get_version(), ts, StopAnalyzer.ENGLISH_STOP_WORDS_SET)
string_builder = StringBuilder()
ts.reset()
char_term_attr = ts.addAttribute(CharTermAttribute.class_)
while ts.incrementToken():
if string_builder.length() > 0:
string_builder.append(" ")
string_builder.append(char_term_attr.toString())
return string_builder.toString()
def get_analyzer(self):
"""Get analyzer."""
if self.analyzer is None:
std_analyzer = StandardAnalyzer(Lucene.get_version())
if self.max_shingle_size is None:
self.analyzer = std_analyzer
else:
self.analyzer = ShingleAnalyzerWrapper(std_analyzer, self.max_shingle_size)
return self.analyzer
def open_reader(self):
"""Open IndexReader."""
if self.reader is None:
self.reader = DirectoryReader.open(self.dir)
def get_reader(self):
return self.reader
def close_reader(self):
"""Close IndexReader."""
if self.reader is not None:
self.reader.close()
self.reader = None
else:
raise Exception("There is no open IndexReader to close")
def open_searcher(self):
"""
Open IndexSearcher. Automatically opens an IndexReader too,
if it is not already open. There is no close method for the
searcher.
"""
if self.searcher is None:
self.open_reader()
self.searcher = IndexSearcher(self.reader)
def get_searcher(self):
"""Returns index searcher (opens it if needed)."""
self.open_searcher()
return self.searcher
def set_lm_similarity_jm(self, method="jm", smoothing_param=0.1):
"""
Set searcher to use LM similarity.
:param method: LM similarity ("jm" or "dirichlet")
:param smoothing_param: smoothing parameter (lambda or mu)
"""
if method == "jm":
similarity = LMJelinekMercerSimilarity(smoothing_param)
elif method == "dirichlet":
similarity = LMDirichletSimilarity(smoothing_param)
else:
raise Exception("Unknown method")
if self.searcher is None:
raise Exception("Searcher has not been created")
self.searcher.setSimilarity(similarity)
def open_writer(self):
"""Open IndexWriter."""
if self.writer is None:
config = IndexWriterConfig(Lucene.get_version(), self.get_analyzer())
config.setOpenMode(IndexWriterConfig.OpenMode.CREATE)
self.writer = IndexWriter(self.dir, config)
else:
raise Exception("IndexWriter is already open")
def close_writer(self):
"""Close IndexWriter."""
if self.writer is not None:
self.writer.close()
self.writer = None
else:
raise Exception("There is no open IndexWriter to close")
def add_document(self, contents):
"""
Adds a Lucene document with the specified contents to the index.
See LuceneDocument.create_document() for the explanation of contents.
"""
if self.ldf is None: # create a single LuceneDocument object that will be reused
self.ldf = LuceneDocument()
self.writer.addDocument(self.ldf.create_document(contents))
def get_lucene_document_id(self, doc_id):
"""Loads a document from a Lucene index based on its id."""
self.open_searcher()
query = TermQuery(Term(self.FIELDNAME_ID, doc_id))
tophit = self.searcher.search(query, 1).scoreDocs
if len(tophit) == 1:
return tophit[0].doc
else:
return None
def get_document_id(self, lucene_doc_id):
"""Gets lucene document id and returns the document id."""
self.open_reader()
return self.reader.document(lucene_doc_id).get(self.FIELDNAME_ID)
def print_document(self, lucene_doc_id, term_vect=False):
"""Prints document contents."""
if lucene_doc_id is None:
print "Document is not found in the index."
else:
doc = self.reader.document(lucene_doc_id)
print "Document ID (field '" + self.FIELDNAME_ID + "'): " + doc.get(self.FIELDNAME_ID)
# first collect (unique) field names
fields = []
for f in doc.getFields():
if f.name() != self.FIELDNAME_ID and f.name() not in fields:
fields.append(f.name())
for fname in fields:
print fname
for fv in doc.getValues(fname): # printing (possibly multiple) field values
print "\t" + fv
# term vector
if term_vect:
print "-----"
termfreqs = self.get_doc_termfreqs(lucene_doc_id, fname)
for term in termfreqs:
print term + " : " + str(termfreqs[term])
print "-----"
def get_lucene_query(self, query, field=FIELDNAME_CONTENTS):
"""Creates Lucene query from keyword query."""
query = query.replace("(", "").replace(")", "").replace("!", "")
return QueryParser(Lucene.get_version(), field,
self.get_analyzer()).parse(query)
def analyze_query(self, query, field=FIELDNAME_CONTENTS):
"""
Analyses the query and returns query terms.
:param query: query
:param field: field name
:return: list of query terms
"""
qterms = [] # holds a list of analyzed query terms
ts = self.get_analyzer().tokenStream(field, query)
term = ts.addAttribute(CharTermAttribute.class_)
ts.reset()
while ts.incrementToken():
qterms.append(term.toString())
ts.end()
ts.close()
return qterms
def get_id_lookup_query(self, id, field=None):
"""Creates Lucene query for searching by (external) document id."""
if field is None:
field = self.FIELDNAME_ID
return TermQuery(Term(field, id))
def get_and_query(self, queries):
"""Creates an AND Boolean query from multiple Lucene queries."""
# empty boolean query with Similarity.coord() disabled
bq = BooleanQuery(False)
for q in queries:
bq.add(q, BooleanClause.Occur.MUST)
return bq
def get_or_query(self, queries):
"""Creates an OR Boolean query from multiple Lucene queries."""
# empty boolean query with Similarity.coord() disabled
bq = BooleanQuery(False)
for q in queries:
bq.add(q, BooleanClause.Occur.SHOULD)
return bq
def get_phrase_query(self, query, field):
"""Creates phrase query for searching exact phrase."""
phq = PhraseQuery()
for t in query.split():
phq.add(Term(field, t))
return phq
def get_span_query(self, terms, field, slop, ordered=True):
"""
Creates near span query
:param terms: list of terms
:param field: field name
:param slop: number of terms between the query terms
:param ordered: If true, ordered search; otherwise unordered search
:return: lucene span near query
"""
span_queries = []
for term in terms:
span_queries.append(SpanTermQuery(Term(field, term)))
span_near_query = SpanNearQuery(span_queries, slop, ordered)
return span_near_query
def get_doc_phrase_freq(self, phrase, field, slop, ordered):
"""
Returns collection frequency for a given phrase and field.
:param phrase: str
:param field: field name
:param slop: number of terms in between
:param ordered: If true, term occurrences should be ordered
:return: dictionary {doc: freq, ...}
"""
# creates span near query
span_near_query = self.get_span_query(phrase.split(" "), field, slop=slop, ordered=ordered)
# extracts document frequency
self.open_searcher()
index_reader_context = self.searcher.getTopReaderContext()
term_contexts = HashMap()
terms = TreeSet()
span_near_query.extractTerms(terms)
for term in terms:
term_contexts.put(term, TermContext.build(index_reader_context, term))
leaves = index_reader_context.leaves()
doc_phrase_freq = {}
# iterates over all atomic readers
for atomic_reader_context in leaves:
bits = atomic_reader_context.reader().getLiveDocs()
spans = span_near_query.getSpans(atomic_reader_context, bits, term_contexts)
while spans.next():
lucene_doc_id = spans.doc()
doc_id = atomic_reader_context.reader().document(lucene_doc_id).get(self.FIELDNAME_ID)
if doc_id not in doc_phrase_freq:
doc_phrase_freq[doc_id] = 1
else:
doc_phrase_freq[doc_id] += 1
return doc_phrase_freq
def get_id_filter(self):
return FieldValueFilter(self.FIELDNAME_ID)
def __to_retrieval_results(self, scoredocs, field_id=FIELDNAME_ID):
"""Converts Lucene scoreDocs results to RetrievalResults format."""
rr = RetrievalResults()
if scoredocs is not None:
for i in xrange(len(scoredocs)):
score = scoredocs[i].score
lucene_doc_id = scoredocs[i].doc # internal doc_id
doc_id = self.reader.document(lucene_doc_id).get(field_id)
rr.append(doc_id, score, lucene_doc_id)
return rr
def score_query(self, query, field_content=FIELDNAME_CONTENTS, field_id=FIELDNAME_ID, num_docs=100):
"""Scores a given query and return results as a RetrievalScores object."""
lucene_query = self.get_lucene_query(query, field_content)
scoredocs = self.searcher.search(lucene_query, num_docs).scoreDocs
return self.__to_retrieval_results(scoredocs, field_id)
def num_docs(self):
"""Returns number of documents in the index."""
self.open_reader()
return self.reader.numDocs()
def num_fields(self):
"""Returns number of fields in the index."""
self.open_reader()
atomic_reader = SlowCompositeReaderWrapper.wrap(self.reader)
return atomic_reader.getFieldInfos().size()
def get_fields(self):
"""Returns name of fields in the index."""
fields = []
self.open_reader()
atomic_reader = SlowCompositeReaderWrapper.wrap(self.reader)
for fieldInfo in atomic_reader.getFieldInfos().iterator():
fields.append(fieldInfo.name)
return fields
def get_doc_termvector(self, lucene_doc_id, field):
"""Outputs the document term vector as a generator."""
terms = self.reader.getTermVector(lucene_doc_id, field)
if terms:
termenum = terms.iterator(None)
for bytesref in BytesRefIterator.cast_(termenum):
yield bytesref.utf8ToString(), termenum
def get_doc_termfreqs(self, lucene_doc_id, field):
"""
Returns term frequencies for a given document field.
:param lucene_doc_id: Lucene document ID
:param field: document field
:return dict: with terms
"""
termfreqs = {}
for term, termenum in self.get_doc_termvector(lucene_doc_id, field):
termfreqs[term] = int(termenum.totalTermFreq())
return termfreqs
def get_doc_termfreqs_all_fields(self, lucene_doc_id):
"""
Returns term frequency for all fields in the given document.
:param lucene_doc_id: Lucene document ID
:return: dictionary {field: {term: freq, ...}, ...}
"""
doc_termfreqs = {}
vectors = self.reader.getTermVectors(lucene_doc_id)
if vectors:
for field in vectors.iterator():
doc_termfreqs[field] = {}
terms = vectors.terms(field)
if terms:
termenum = terms.iterator(None)
for bytesref in BytesRefIterator.cast_(termenum):
doc_termfreqs[field][bytesref.utf8ToString()] = int(termenum.totalTermFreq())
print doc_termfreqs[field]
return doc_termfreqs
def get_coll_termvector(self, field):
""" Returns collection term vector for the given field."""
self.open_reader()
fields = MultiFields.getFields(self.reader)
if fields is not None:
terms = fields.terms(field)
if terms:
termenum = terms.iterator(None)
for bytesref in BytesRefIterator.cast_(termenum):
yield bytesref.utf8ToString(), termenum
def get_coll_termfreq(self, term, field):
"""
Returns collection term frequency for the given field.
:param term: string
:param field: string, document field
:return: int
"""
self.open_reader()
return self.reader.totalTermFreq(Term(field, term))
def get_doc_freq(self, term, field):
"""
Returns document frequency for the given term and field.
:param term: string, term
:param field: string, document field
:return: int
"""
self.open_reader()
return self.reader.docFreq(Term(field, term))
def get_doc_count(self, field):
"""
Returns number of documents with at least one term for the given field.
:param field: string, field name
:return: int
"""
self.open_reader()
return self.reader.getDocCount(field)
def get_coll_length(self, field):
"""
Returns length of field in the collection.
:param field: string, field name
:return: int
"""
self.open_reader()
return self.reader.getSumTotalTermFreq(field)
def get_avg_len(self, field):
"""
Returns average length of a field in the collection.
:param field: string, field name
"""
self.open_reader()
n = self.reader.getDocCount(field) # number of documents with at least one term for this field
len_all = self.reader.getSumTotalTermFreq(field)
if n == 0:
return 0
else:
return len_all / float(n)
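# Illustrative sketch (not part of the original module): typical read-side usage of
# the Lucene wrapper above. The index path and query below are assumptions only:
#
#   lucene_api = Lucene("/path/to/index")
#   lucene_api.open_searcher()
#   lucene_api.set_lm_similarity_jm(method="dirichlet", smoothing_param=2000)
#   results = lucene_api.score_query("barack obama", num_docs=10)
#   lucene_api.close_reader()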
class LuceneDocument(object):
"""Internal representation of a Lucene document."""
def __init__(self):
self.ldf = LuceneDocumentField()
def create_document(self, contents):
"""Create a Lucene document from the specified contents.
Contents is a list of fields to be indexed, represented as a dictionary
with keys 'field_name', 'field_type', and 'field_value'."""
doc = Document()
for f in contents:
doc.add(Field(f['field_name'], f['field_value'],
self.ldf.get_field(f['field_type'])))
return doc
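# Illustrative sketch (not part of the original module): the 'contents' argument of
# LuceneDocument.create_document() (and of Lucene.add_document()) is a list of field
# dicts; the values below are assumptions for demonstration only:
#
#   contents = [
#       {"field_name": Lucene.FIELDNAME_ID, "field_type": Lucene.FIELDTYPE_ID,
#        "field_value": "doc_001"},
#       {"field_name": Lucene.FIELDNAME_CONTENTS, "field_type": Lucene.FIELDTYPE_TEXT_TVP,
#        "field_value": "some indexed text"},
#   ]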
class LuceneDocumentField(object):
"""Internal handler class for possible field types."""
def __init__(self):
"""Init possible field types."""
# FIELD_ID: stored, indexed, non-tokenized
self.field_id = FieldType()
self.field_id.setIndexed(True)
self.field_id.setStored(True)
self.field_id.setTokenized(False)
# FIELD_ID_TV: stored, indexed, not tokenized, with term vectors (without positions)
# for storing IDs with term vector info
self.field_id_tv = FieldType()
self.field_id_tv.setIndexed(True)
self.field_id_tv.setStored(True)
self.field_id_tv.setTokenized(False)
self.field_id_tv.setStoreTermVectors(True)
# FIELD_TEXT: stored, indexed, tokenized, with positions
self.field_text = FieldType()
self.field_text.setIndexed(True)
self.field_text.setStored(True)
self.field_text.setTokenized(True)
# FIELD_TEXT_TV: stored, indexed, tokenized, with term vectors (without positions)
self.field_text_tv = FieldType()
self.field_text_tv.setIndexed(True)
self.field_text_tv.setStored(True)
self.field_text_tv.setTokenized(True)
self.field_text_tv.setStoreTermVectors(True)
# FIELD_TEXT_TVP: stored, indexed, tokenized, with term vectors and positions
# (but no character offsets)
self.field_text_tvp = FieldType()
self.field_text_tvp.setIndexed(True)
self.field_text_tvp.setStored(True)
self.field_text_tvp.setTokenized(True)
self.field_text_tvp.setStoreTermVectors(True)
self.field_text_tvp.setStoreTermVectorPositions(True)
# FIELD_TEXT_NTV: not stored, indexed, tokenized, with term vectors (without positions)
self.field_text_ntv = FieldType()
self.field_text_ntv.setIndexed(True)
self.field_text_ntv.setStored(False)
self.field_text_ntv.setTokenized(True)
self.field_text_ntv.setStoreTermVectors(True)
# FIELD_TEXT_TVP: not stored, indexed, tokenized, with term vectors and positions
# (but no character offsets)
self.field_text_ntvp = FieldType()
self.field_text_ntvp.setIndexed(True)
self.field_text_ntvp.setStored(False)
self.field_text_ntvp.setTokenized(True)
self.field_text_ntvp.setStoreTermVectors(True)
self.field_text_ntvp.setStoreTermVectorPositions(True)
def get_field(self, type):
"""Gets Lucene FieldType object for the corresponding internal FIELDTYPE_ value."""
if type == Lucene.FIELDTYPE_ID:
return self.field_id
elif type == Lucene.FIELDTYPE_ID_TV:
return self.field_id_tv
elif type == Lucene.FIELDTYPE_TEXT:
return self.field_text
elif type == Lucene.FIELDTYPE_TEXT_TV:
return self.field_text_tv
elif type == Lucene.FIELDTYPE_TEXT_TVP:
return self.field_text_tvp
elif type == Lucene.FIELDTYPE_TEXT_NTV:
return self.field_text_ntv
elif type == Lucene.FIELDTYPE_TEXT_NTVP:
return self.field_text_ntvp
else:
raise Exception("Unknown field type") | {
"content_hash": "2f77d47301fc1051757c9cbcbc0480f5",
"timestamp": "",
"source": "github",
"line_count": 574,
"max_line_length": 104,
"avg_line_length": 38.217770034843205,
"alnum_prop": 0.627068423211925,
"repo_name": "hasibi/EntityLinkingRetrieval-ELR",
"id": "f10d7d9b299d3f88d6cc19daf4004482f7a10811",
"size": "21937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nordlys/retrieval/lucene_tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "83052"
}
],
"symlink_target": ""
} |
import pafy
import datetime
import os
class Stream_Generator():
def __init__(self, url, start_time, end_time, format_type,
chosen_format, top_dir):
self.url = url
self.start_time = start_time
self.end_time = end_time
self.start_time_set = False
self.end_time_set = False
self.title = None
self.duration = None
self.stream = None
self.vid = None
self.chosen_format = chosen_format
self.sub_format = None
self.format_type = format_type
self.file_path = None # final path to download stream to
self.temp_file_path = None # temporary file path for convertion/trimming
self.top_dir = top_dir
self.error_messages = None
self.disallowed_characters = ['~', '#', '%', '*', '{', '}', '\\',
':', '<', '>', '?', '/', '+', '|', '"']
def generate(self):
# build stream and set user configuration while handling errors
self.set_url()
if self.get_errors():
return
self.set_time()
if self.get_errors():
return
self.set_media_format()
if self.get_errors():
return
self.set_title()
self.set_file_path()
def set_url(self):
# create a new pafy object from url
try:
#logger.log_debug('Checking if URL exists \'%s\'' % url)
self.vid = pafy.new(self.url)
#logger.log_debug('URL found.')
except (IOError, ValueError): # Catches the exception if the URL wasn't found
self.error_messages = ('URL not found: %s' %self.url)
# handle any unexpected pafy exceptions
except Exception as e:
self.error_messages = ('An unexpected error occurred when searching for '
'URL \'%s\', please try again.' %self.url)
print(e)
#logger.log_debug('URL not found. Exception: %s' % e)
def set_title(self):
# parse the title of the stream so that it is allowed to be used as a filename
title = self.stream.title
title = title.replace('"', '\'')
title = title.replace('&', 'and')
for character in self.disallowed_characters:
title = title.replace(character, '')
self.title = title
def set_file_path(self):
# builds a file path for the stream to be downloaded to and builds a temporary
# path if a conversion is required or the stream needs to be trimmed. duplicate
# filenames are checked and handled here.
file_path = os.path.join(self.top_dir, self.title)
# add ' - Copy' to the filename until it's unique
while os.path.isfile(file_path + "." + self.chosen_format):
file_path += " - Copy"
if self.is_convert_required():
temp_file_path = file_path + '_TEMP'
# add '_TEMP' to the temporary filename until it's unique
while os.path.isfile(temp_file_path + '.' + self.sub_format):
temp_file_path += '_TEMP'
self.temp_file_path = temp_file_path + '.' + self.sub_format
elif self.is_trimmed():
temp_file_path = file_path + '_TEMP'
# add '_TEMP' to the temporary filename until it's unique
while os.path.isfile(temp_file_path + '.' + self.chosen_format):
temp_file_path += '_TEMP'
self.temp_file_path = temp_file_path + '.' + self.chosen_format
self.file_path = file_path + '.' + self.chosen_format
def set_time(self):
# carries out various checks and sets the start and end time
duration = datetime.datetime.strptime('%s' %self.get_duration(), '%H:%M:%S').time()
# check if a start time has been defined
if self.start_time:
self.start_time_set = True
try:
self.start_time = datetime.datetime.strptime('%s' %self.start_time,
'%H:%M:%S').time()
except ValueError:
# catch an exception when the time does not match the pattern 'HH:MM:SS'
self.error_messages = ("The \'start time\' does not match the format \'HH:MM:SS\' for URL: \'%s\'."
%self.url)
return
# check that the start time is less than the duration
if self.start_time > duration:
self.error_messages = ("The start time must be less than the duration for URL: \'%s\'"
%self.url)
else:
self.start_time = datetime.datetime.strptime('00:00:00',
'%H:%M:%S').time()
# check if a end time has been defined
if self.end_time:
self.end_time_set = True
try:
self.end_time = datetime.datetime.strptime('%s' %self.end_time, '%H:%M:%S').time()
except ValueError:
self.error_messages = ("The \'end time\' does not match the format \'HH:MM:SS\' for URL: \'%s\'."
%self.url)
return
if self.end_time > duration:
self.error_messages = ("The end time must be less than the duration for URL: \'%s\'"
%self.url)
else:
self.end_time = duration
if self.start_time > self.end_time:
self.error_messages = ("The start time must be less than the end time for URL: \'%s\'"
%self.url)
return
self.duration = duration
def set_media_format(self):
# cycle through all streams available to check if the user defined file format is supported
for s in self.get_allstreamlist():
if s.extension == self.chosen_format and s.mediatype == "normal":
self.stream = self.get_bestnormal(self.chosen_format)
return
if s.extension == self.chosen_format and s.mediatype == "audio":
self.stream = self.get_bestaudio(self.chosen_format)
return
# if the chosen file format is not in the stream list, get the best quality stream of
# the same format type as a substitution for now
if self.format_type == 'av':
self.stream = self.get_bestnormal()
elif self.format_type == 'a':
self.stream = self.get_bestaudio()
# get the format of the stream generated
self.sub_format = self.stream.extension
def update_properties(self):
# update stream attributes to account for internal/environment changes
self.set_title()
self.set_file_path()
def is_convert_required(self):
# check if the video needs to be converted to the chosen_format
if self.sub_format:
return True
else:
return False
def is_start_time_set(self):
# check if user has defined the start_time
return self.start_time_set
def is_end_time_set(self):
# check if user has defined the end_time
return self.end_time_set
def is_trimmed(self):
# checks if the user has trimmed the times of the video
if self.start_time_set or self.end_time_set:
return True
else:
return False
def get_errors(self):
return self.error_messages
def get_stream(self):
# return stream object
return self.stream
def get_title(self):
# return video title
return self.title
def get_url(self):
# return youtube URL
return self.url
def get_start_time(self):
# returns user specified start time
return self.start_time
def get_end_time(self):
# returns user specified end time
return self.end_time
def get_chosen_format(self):
# return user chosen format
return self.chosen_format
def get_file_path(self):
# returns the designated file path
return self.file_path
def get_temp_file_path(self):
# returns the temporary file path for use with convertion and trimming
return self.temp_file_path
def get_duration(self):
# return video duration in its standard form
return self.vid.duration
def get_published(self):
# return video publish date
return self.vid.published
def get_allstreamlist(self):
# return all streams available
return self.vid.allstreams
def get_bestnormal(self, stream_format= "any"):
# return the best audio and video stream
return self.vid.getbest(preftype=stream_format)
def get_bestaudio(self, stream_format = "any"):
# return the best audio stream only
return self.vid.getbestaudio(preftype=stream_format)
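# Illustrative sketch (not part of the original module): building a stream and reading
# back its resolved download path. The URL, times, format and directory below are
# assumptions for demonstration only:
#
#   gen = Stream_Generator(url="https://www.youtube.com/watch?v=XXXXXXXXXXX",
#                          start_time="00:00:10", end_time="00:01:00",
#                          format_type="av", chosen_format="mp4",
#                          top_dir="/tmp/downloads")
#   gen.generate()
#   if not gen.get_errors():
#       print(gen.get_file_path(), gen.get_stream())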
| {
"content_hash": "484d4e2210ca344cecfb363a905363a4",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 115,
"avg_line_length": 39.85840707964602,
"alnum_prop": 0.5595026642984015,
"repo_name": "sammypg/youtube_downloader",
"id": "2447d504112da659410a6f868067af34d5032153",
"size": "9008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "streams.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70580"
}
],
"symlink_target": ""
} |
"""Test the System Bridge config flow."""
from unittest.mock import patch
from aiohttp.client_exceptions import ClientConnectionError
from systembridge.exceptions import BridgeAuthenticationException
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.system_bridge.const import DOMAIN
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PORT
from tests.common import MockConfigEntry
FIXTURE_MAC_ADDRESS = "aa:bb:cc:dd:ee:ff"
FIXTURE_UUID = "e91bf575-56f3-4c83-8f42-70ac17adcd33"
FIXTURE_AUTH_INPUT = {CONF_API_KEY: "abc-123-def-456-ghi"}
FIXTURE_USER_INPUT = {
CONF_API_KEY: "abc-123-def-456-ghi",
CONF_HOST: "test-bridge",
CONF_PORT: "9170",
}
FIXTURE_ZEROCONF_INPUT = {
CONF_API_KEY: "abc-123-def-456-ghi",
CONF_HOST: "1.1.1.1",
CONF_PORT: "9170",
}
FIXTURE_ZEROCONF = {
CONF_HOST: "1.1.1.1",
CONF_PORT: 9170,
"hostname": "test-bridge.local.",
"type": "_system-bridge._udp.local.",
"name": "System Bridge - test-bridge._system-bridge._udp.local.",
"properties": {
"address": "http://test-bridge:9170",
"fqdn": "test-bridge",
"host": "test-bridge",
"ip": "1.1.1.1",
"mac": FIXTURE_MAC_ADDRESS,
"port": "9170",
"uuid": FIXTURE_UUID,
},
}
FIXTURE_ZEROCONF_BAD = {
CONF_HOST: "1.1.1.1",
CONF_PORT: 9170,
"hostname": "test-bridge.local.",
"type": "_system-bridge._udp.local.",
"name": "System Bridge - test-bridge._system-bridge._udp.local.",
"properties": {
"something": "bad",
},
}
FIXTURE_INFORMATION = {
"address": "http://test-bridge:9170",
"apiPort": 9170,
"fqdn": "test-bridge",
"host": "test-bridge",
"ip": "1.1.1.1",
"mac": FIXTURE_MAC_ADDRESS,
"updates": {
"available": False,
"newer": False,
"url": "https://github.com/timmo001/system-bridge/releases/tag/v2.3.2",
"version": {"current": "2.3.2", "new": "2.3.2"},
},
"uuid": FIXTURE_UUID,
"version": "2.3.2",
"websocketAddress": "ws://test-bridge:9172",
"websocketPort": 9172,
}
FIXTURE_BASE_URL = (
f"http://{FIXTURE_USER_INPUT[CONF_HOST]}:{FIXTURE_USER_INPUT[CONF_PORT]}"
)
FIXTURE_ZEROCONF_BASE_URL = (
f"http://{FIXTURE_ZEROCONF[CONF_HOST]}:{FIXTURE_ZEROCONF[CONF_PORT]}"
)
async def test_user_flow(
hass, aiohttp_client, aioclient_mock, current_request_with_host
) -> None:
"""Test full user flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] is None
aioclient_mock.get(
f"{FIXTURE_BASE_URL}/information",
headers={"Content-Type": "application/json"},
json=FIXTURE_INFORMATION,
)
with patch(
"homeassistant.components.system_bridge.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_USER_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == "test-bridge"
assert result2["data"] == FIXTURE_USER_INPUT
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(
hass, aiohttp_client, aioclient_mock, current_request_with_host
) -> None:
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] is None
aioclient_mock.get(
f"{FIXTURE_BASE_URL}/information", exc=BridgeAuthenticationException
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_USER_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(
hass, aiohttp_client, aioclient_mock, current_request_with_host
) -> None:
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] is None
aioclient_mock.get(f"{FIXTURE_BASE_URL}/information", exc=ClientConnectionError)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_USER_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_error(
hass, aiohttp_client, aioclient_mock, current_request_with_host
) -> None:
"""Test we handle unknown error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] is None
with patch(
"homeassistant.components.system_bridge.config_flow.Bridge.async_get_information",
side_effect=Exception("Boom"),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_USER_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "unknown"}
async def test_reauth_authorization_error(
hass, aiohttp_client, aioclient_mock, current_request_with_host
) -> None:
"""Test we show user form on authorization error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=FIXTURE_USER_INPUT
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "authenticate"
aioclient_mock.get(
f"{FIXTURE_BASE_URL}/information", exc=BridgeAuthenticationException
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_AUTH_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "authenticate"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_reauth_connection_error(
hass, aiohttp_client, aioclient_mock, current_request_with_host
) -> None:
"""Test we show user form on connection error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=FIXTURE_USER_INPUT
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "authenticate"
aioclient_mock.get(f"{FIXTURE_BASE_URL}/information", exc=ClientConnectionError)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_AUTH_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "authenticate"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_reauth_flow(
hass, aiohttp_client, aioclient_mock, current_request_with_host
) -> None:
"""Test reauth flow."""
mock_config = MockConfigEntry(
domain=DOMAIN, unique_id=FIXTURE_UUID, data=FIXTURE_USER_INPUT
)
mock_config.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=FIXTURE_USER_INPUT
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "authenticate"
aioclient_mock.get(
f"{FIXTURE_BASE_URL}/information",
headers={"Content-Type": "application/json"},
json=FIXTURE_INFORMATION,
)
with patch(
"homeassistant.components.system_bridge.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_AUTH_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result2["reason"] == "reauth_successful"
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_flow(
hass, aiohttp_client, aioclient_mock, current_request_with_host
) -> None:
"""Test zeroconf flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=FIXTURE_ZEROCONF,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert not result["errors"]
aioclient_mock.get(
f"{FIXTURE_ZEROCONF_BASE_URL}/information",
headers={"Content-Type": "application/json"},
json=FIXTURE_INFORMATION,
)
with patch(
"homeassistant.components.system_bridge.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_AUTH_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == "test-bridge"
assert result2["data"] == FIXTURE_ZEROCONF_INPUT
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_cannot_connect(
hass, aiohttp_client, aioclient_mock, current_request_with_host
) -> None:
"""Test zeroconf cannot connect flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=FIXTURE_ZEROCONF,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert not result["errors"]
aioclient_mock.get(
f"{FIXTURE_ZEROCONF_BASE_URL}/information", exc=ClientConnectionError
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_AUTH_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "authenticate"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_zeroconf_bad_zeroconf_info(
hass, aiohttp_client, aioclient_mock, current_request_with_host
) -> None:
"""Test zeroconf cannot connect flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=FIXTURE_ZEROCONF_BAD,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "unknown"
| {
"content_hash": "cd17b811020d16f9f59764030a01bea8",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 90,
"avg_line_length": 31.410764872521245,
"alnum_prop": 0.6516053391053391,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "1b46bb45a6d6d681acb5b52bd445b3d4b6879e69",
"size": "11088",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/system_bridge/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import os
import os.path
import sys
import itertools
import collections
sys.path.append(
os.path.join(
os.environ.get( "SPLUNK_HOME", "/opt/splunk/6.1.3" ),
"etc/apps/framework/contrib/splunk-sdk-python/1.3.0",
)
)
import execnet
from splunklib.searchcommands import StreamingCommand, ReportingCommand, Configuration
class SimpleRemoteCommand(object):
code = """
import sys, os
if __name__ == '__channelexec__':
items = []
for record in channel:
if not record:
break
items.append(record)
for record in items:
record['from_remote'] = 1
channel.send(record)
"""
# model_file = ""
# save_model = False # Override
def __init__(self, *args, **kwargs):
super(SimpleRemoteCommand, self).__init__(*args, **kwargs)
mdir, _ = os.path.split(__file__)
cdir = os.path.normpath(os.path.join(mdir, ".."))
self.gw = execnet.makegateway("popen//python=/usr/bin/python//chdir=%s" % cdir)
def remote_exec(self, records, code):
channel = self.gw.remote_exec(code)
retval = []
for record in records:
channel.send(record)
channel.send(None)
channel.setcallback(retval.append)
channel.waitclose()
return retval
def remote_stream(self, records):
if not type(self).code:
return
return self.remote_exec(records, type(self).code)
class SimpleRemoteStreamingCommand(StreamingCommand, SimpleRemoteCommand):
def __init__(self, *args, **kwargs):
StreamingCommand.__init__(self, *args, **kwargs)
SimpleRemoteCommand.__init__(self, *args, **kwargs)
def stream(self, records):
for record in self.remote_stream(records):
yield record
class SimpleRemoteReportingCommand(ReportingCommand, SimpleRemoteCommand):
reduce_code = """
import sys, os
if __name__ == '__channelexec__':
items = []
for record in channel:
if not record:
break
items.append(record)
channel.send({ 'total_remote': len(items) })
"""
def __init__(self, *args, **kwargs):
ReportingCommand.__init__(self, *args, **kwargs)
SimpleRemoteCommand.__init__(self, *args, **kwargs)
def remote_reduce(self, records):
if not type(self).reduce_code:
            return []
return self.remote_exec(records, type(self).reduce_code)
def map(self, records):
for record in self.remote_stream(records):
yield record
def reduce(self, records):
for record in self.remote_reduce(records):
if '_fields' in record:
fields = record.pop('_fields')
yield collections.OrderedDict(sorted(record.items(),key=lambda t: fields.index(t[0])))
else:
yield record
class MultiRemoteReportingCommand(SimpleRemoteReportingCommand):
default_args = ''
reduce_code = """
import sys, os
if __name__ == '__channelexec__':
args = None
items = []
for record in channel:
if not record:
break
        if isinstance(record, basestring):
            args = record
continue
items.append(record)
channel.send({ 'total_remote': len(items) })
"""
def reduce(self, records):
args = type(self).default_args
if self.fieldnames:
args = [s.lower() for s in self.fieldnames]
args = u" ".join(args)
for record in self.remote_exec(itertools.chain([args],records), type(self).reduce_code):
if 'error' in record:
self.messages.append('error_message', record['error'])
return
if '_fields' in record:
fields = record.pop('_fields')
yield collections.OrderedDict(sorted(record.items(),key=lambda t: fields.index(t[0])))
else:
yield record
class OptionCommandMixin(object):
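    # Collects the command's option values (and fieldnames) into a dict that
    # is sent to the remote code as the first channel message, ahead of records.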
default_args = {}
def getargs(self):
args = type(self).default_args or {}
for attr in dir(self):
item = getattr(self, attr, None)
if isinstance(item, file):
args[attr] = item.name
else:
args[attr] = item
if self.fieldnames:
args['fieldnames'] = self.fieldnames
return args
def clean_generator(self, records):
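        # If the upstream generator raises, emit a single empty record instead
        # of propagating the error, so the remote channel still terminates.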
try:
for record in records:
yield record
except:
yield {}
class OptionRemoteStreamingCommand(SimpleRemoteStreamingCommand, OptionCommandMixin):
default_args = {}
def __dir__(self):
raise "Please implement __dir__ to get the list of options"
def stream(self, records):
args = self.getargs()
for record in self.remote_exec(itertools.chain([args],self.clean_generator(records)), type(self).code):
if 'error' in record:
self.messages.append('error_message', record['error'])
return
yield record
class OptionRemoteReportingCommand(SimpleRemoteReportingCommand, OptionCommandMixin):
default_args = {}
reduce_code = """
import sys, os
if __name__ == '__channelexec__':
args = None
items = []
for record in channel:
if not record:
break
if not args:
            args = record
continue
items.append(record)
channel.send({ 'total_remote': len(items) })
"""
def __dir__(self):
raise "Please implement __dir__ to get the list of options"
def reduce(self, records):
args = self.getargs()
for record in self.remote_exec(itertools.chain([args],self.clean_generator(records)), type(self).reduce_code):
if 'error' in record:
self.messages.append('error_message', record['error'])
return
if '_fields' in record:
fields = record.pop('_fields')
yield collections.OrderedDict(sorted(record.items(),key=lambda t: fields.index(t[0])))
else:
yield record
| {
"content_hash": "a947746b722db9624828063b4c0420b7",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 112,
"avg_line_length": 24.096774193548388,
"alnum_prop": 0.6783323771275579,
"repo_name": "nlproc/splunkml",
"id": "b0c6908bba50da6737d324b80c386a05a29763a1",
"size": "5229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/remote_commands/simple.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24621"
}
],
"symlink_target": ""
} |
VERSION_TABLE = 'cisco_alembic_version'
| {
"content_hash": "881357d3cececfd1e4a55d80989e0acd",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 39,
"avg_line_length": 40,
"alnum_prop": 0.775,
"repo_name": "CiscoSystems/networking-cisco",
"id": "820ba1339e56632f6daa2748da793103c20ded15",
"size": "726",
"binary": false,
"copies": "4",
"ref": "refs/heads/asr1k_liberty_master_wip",
"path": "networking_cisco/db/migration/alembic_migrations/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "2082062"
},
{
"name": "Shell",
"bytes": "44368"
}
],
"symlink_target": ""
} |
class LinkedInException(Exception):
def __init__(self, cause=None, status_code=None, response=None):
self.cause = cause
self.status_code = status_code
self.response = response
| {
"content_hash": "5c77c41ec39de99693640b580395b6b2",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 68,
"avg_line_length": 40.8,
"alnum_prop": 0.6617647058823529,
"repo_name": "kylemcc/pylinkedin",
"id": "a1349e8162826af9f69cc2e087f94945b61ba667",
"size": "204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylinkedin/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9015"
}
],
"symlink_target": ""
} |
__author__ = 'Simone Campagna'
import numpy
| {
"content_hash": "2c668c2fadf487c64ac41eff11f0de4f",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 30,
"avg_line_length": 15,
"alnum_prop": 0.6888888888888889,
"repo_name": "simone-campagna/buffered-numpy-arrays",
"id": "2860de511405c4a0faf3684e2f35ff6739d39c2f",
"size": "647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "buffered_arrays/buffered_numpy_arrays/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25245"
}
],
"symlink_target": ""
} |
import json
import os
import subprocess
import tempfile
import time
import unittest
from unittest import mock
import psutil
import pytest
from airflow import settings
from airflow.cli import cli_parser
from airflow.cli.commands import webserver_command
from airflow.cli.commands.webserver_command import GunicornMonitor
from airflow.utils.cli import setup_locations
from tests.test_utils.config import conf_vars
class TestGunicornMonitor(unittest.TestCase):
def setUp(self) -> None:
self.monitor = GunicornMonitor(
gunicorn_master_pid=1,
num_workers_expected=4,
master_timeout=60,
worker_refresh_interval=60,
worker_refresh_batch_size=2,
reload_on_plugin_change=True,
)
mock.patch.object(self.monitor, '_generate_plugin_state', return_value={}).start()
mock.patch.object(self.monitor, '_get_num_ready_workers_running', return_value=4).start()
mock.patch.object(self.monitor, '_get_num_workers_running', return_value=4).start()
mock.patch.object(self.monitor, '_spawn_new_workers', return_value=None).start()
mock.patch.object(self.monitor, '_kill_old_workers', return_value=None).start()
mock.patch.object(self.monitor, '_reload_gunicorn', return_value=None).start()
@mock.patch('airflow.cli.commands.webserver_command.sleep')
def test_should_wait_for_workers_to_start(self, mock_sleep):
self.monitor._get_num_ready_workers_running.return_value = 0
self.monitor._get_num_workers_running.return_value = 4
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_not_called() # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
@mock.patch('airflow.cli.commands.webserver_command.sleep')
def test_should_kill_excess_workers(self, mock_sleep):
self.monitor._get_num_ready_workers_running.return_value = 10
self.monitor._get_num_workers_running.return_value = 10
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_not_called() # pylint: disable=no-member
self.monitor._kill_old_workers.assert_called_once_with(2) # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
@mock.patch('airflow.cli.commands.webserver_command.sleep')
def test_should_start_new_workers_when_missing(self, mock_sleep):
self.monitor._get_num_ready_workers_running.return_value = 2
self.monitor._get_num_workers_running.return_value = 2
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_called_once_with(2) # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
@mock.patch('airflow.cli.commands.webserver_command.sleep')
def test_should_start_new_workers_when_refresh_interval_has_passed(self, mock_sleep):
self.monitor._last_refresh_time -= 200
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_called_once_with(2) # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
self.assertAlmostEqual(self.monitor._last_refresh_time, time.monotonic(), delta=5)
@mock.patch('airflow.cli.commands.webserver_command.sleep')
def test_should_reload_when_plugin_has_been_changed(self, mock_sleep):
self.monitor._generate_plugin_state.return_value = {'AA': 12}
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_not_called() # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
self.monitor._generate_plugin_state.return_value = {'AA': 32}
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_not_called() # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
self.monitor._generate_plugin_state.return_value = {'AA': 32}
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_not_called() # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_called_once_with() # pylint: disable=no-member
self.assertAlmostEqual(self.monitor._last_refresh_time, time.monotonic(), delta=5)
class TestGunicornMonitorGeneratePluginState(unittest.TestCase):
@staticmethod
def _prepare_test_file(filepath: str, size: int):
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, "w") as file:
file.write("A" * size)
file.flush()
def test_should_detect_changes_in_directory(self):
with tempfile.TemporaryDirectory() as tempdir, mock.patch(
"airflow.cli.commands.webserver_command.settings.PLUGINS_FOLDER", tempdir
):
self._prepare_test_file(f"{tempdir}/file1.txt", 100)
self._prepare_test_file(f"{tempdir}/nested/nested/nested/nested/file2.txt", 200)
self._prepare_test_file(f"{tempdir}/file3.txt", 300)
monitor = GunicornMonitor(
gunicorn_master_pid=1,
num_workers_expected=4,
master_timeout=60,
worker_refresh_interval=60,
worker_refresh_batch_size=2,
reload_on_plugin_change=True,
)
# When the files have not changed, the result should be constant
state_a = monitor._generate_plugin_state()
state_b = monitor._generate_plugin_state()
self.assertEqual(state_a, state_b)
self.assertEqual(3, len(state_a))
# Should detect new file
self._prepare_test_file(f"{tempdir}/file4.txt", 400)
state_c = monitor._generate_plugin_state()
self.assertNotEqual(state_b, state_c)
self.assertEqual(4, len(state_c))
# Should detect changes in files
self._prepare_test_file(f"{tempdir}/file4.txt", 450)
state_d = monitor._generate_plugin_state()
self.assertNotEqual(state_c, state_d)
self.assertEqual(4, len(state_d))
# Should support large files
self._prepare_test_file(f"{tempdir}/file4.txt", 4000000)
state_d = monitor._generate_plugin_state()
self.assertNotEqual(state_c, state_d)
self.assertEqual(4, len(state_d))
class TestCLIGetNumReadyWorkersRunning(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
def setUp(self):
self.children = mock.MagicMock()
self.child = mock.MagicMock()
self.process = mock.MagicMock()
self.monitor = GunicornMonitor(
gunicorn_master_pid=1,
num_workers_expected=4,
master_timeout=60,
worker_refresh_interval=60,
worker_refresh_batch_size=2,
reload_on_plugin_change=True,
)
def test_ready_prefix_on_cmdline(self):
self.child.cmdline.return_value = [settings.GUNICORN_WORKER_READY_PREFIX]
self.process.children.return_value = [self.child]
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(self.monitor._get_num_ready_workers_running(), 1)
def test_ready_prefix_on_cmdline_no_children(self):
self.process.children.return_value = []
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(self.monitor._get_num_ready_workers_running(), 0)
def test_ready_prefix_on_cmdline_zombie(self):
self.child.cmdline.return_value = []
self.process.children.return_value = [self.child]
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(self.monitor._get_num_ready_workers_running(), 0)
def test_ready_prefix_on_cmdline_dead_process(self):
self.child.cmdline.side_effect = psutil.NoSuchProcess(11347)
self.process.children.return_value = [self.child]
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(self.monitor._get_num_ready_workers_running(), 0)
class TestCliWebServer(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
def setUp(self) -> None:
self._check_processes()
self._clean_pidfiles()
def _check_processes(self, ignore_running=False):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
exit_code_pgrep_webserver = subprocess.Popen(["pgrep", "-c", "-f", "airflow webserver"]).wait()
exit_code_pgrep_gunicorn = subprocess.Popen(["pgrep", "-c", "-f", "gunicorn"]).wait()
if exit_code_pgrep_webserver != 1 or exit_code_pgrep_gunicorn != 1:
subprocess.Popen(["ps", "-ax"]).wait()
if exit_code_pgrep_webserver != 1:
subprocess.Popen(["pkill", "-9", "-f", "airflow webserver"]).wait()
if exit_code_pgrep_gunicorn != 1:
subprocess.Popen(["pkill", "-9", "-f", "gunicorn"]).wait()
if not ignore_running:
raise AssertionError(
"Background processes are running that prevent the test from passing successfully."
)
def tearDown(self) -> None:
self._check_processes(ignore_running=True)
self._clean_pidfiles()
def _clean_pidfiles(self):
pidfile_webserver = setup_locations("webserver")[0]
pidfile_monitor = setup_locations("webserver-monitor")[0]
if os.path.exists(pidfile_webserver):
os.remove(pidfile_webserver)
if os.path.exists(pidfile_monitor):
os.remove(pidfile_monitor)
def _wait_pidfile(self, pidfile):
start_time = time.monotonic()
while True:
try:
with open(pidfile) as file:
return int(file.read())
except Exception: # pylint: disable=broad-except
                if time.monotonic() - start_time > 60:
raise
time.sleep(1)
def test_cli_webserver_foreground(self):
with mock.patch.dict(
"os.environ",
AIRFLOW__CORE__DAGS_FOLDER="/dev/null",
AIRFLOW__CORE__LOAD_EXAMPLES="False",
AIRFLOW__WEBSERVER__WORKERS="1",
):
# Run webserver in foreground and terminate it.
proc = subprocess.Popen(["airflow", "webserver"])
self.assertEqual(None, proc.poll())
# Wait for process
time.sleep(10)
# Terminate webserver
proc.terminate()
# -15 - the server was stopped before it started
# 0 - the server terminated correctly
self.assertIn(proc.wait(60), (-15, 0))
def test_cli_webserver_foreground_with_pid(self):
with tempfile.TemporaryDirectory(prefix='tmp-pid') as tmpdir:
pidfile = f"{tmpdir}/pidfile"
with mock.patch.dict(
"os.environ",
AIRFLOW__CORE__DAGS_FOLDER="/dev/null",
AIRFLOW__CORE__LOAD_EXAMPLES="False",
AIRFLOW__WEBSERVER__WORKERS="1",
):
proc = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
self.assertEqual(None, proc.poll())
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
proc.terminate()
self.assertEqual(0, proc.wait(60))
@pytest.mark.quarantined
def test_cli_webserver_background(self):
with tempfile.TemporaryDirectory(prefix="gunicorn") as tmpdir, mock.patch.dict(
"os.environ",
AIRFLOW__CORE__DAGS_FOLDER="/dev/null",
AIRFLOW__CORE__LOAD_EXAMPLES="False",
AIRFLOW__WEBSERVER__WORKERS="1",
):
pidfile_webserver = f"{tmpdir}/pidflow-webserver.pid"
pidfile_monitor = f"{tmpdir}/pidflow-webserver-monitor.pid"
stdout = f"{tmpdir}/airflow-webserver.out"
stderr = f"{tmpdir}/airflow-webserver.err"
logfile = f"{tmpdir}/airflow-webserver.log"
try:
# Run webserver as daemon in background. Note that the wait method is not called.
proc = subprocess.Popen(
[
"airflow",
"webserver",
"--daemon",
"--pid",
pidfile_webserver,
"--stdout",
stdout,
"--stderr",
stderr,
"--log-file",
logfile,
]
)
self.assertEqual(None, proc.poll())
pid_monitor = self._wait_pidfile(pidfile_monitor)
self._wait_pidfile(pidfile_webserver)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(
0, subprocess.Popen(["pgrep", "-f", "-c", "airflow webserver --daemon"]).wait()
)
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "-f", "gunicorn: master"]).wait())
# Terminate monitor process.
proc = psutil.Process(pid_monitor)
proc.terminate()
self.assertIn(proc.wait(120), (0, None))
self._check_processes()
except Exception:
# List all logs
subprocess.Popen(["ls", "-lah", tmpdir]).wait()
# Dump all logs
subprocess.Popen(["bash", "-c", f"ls {tmpdir}/* | xargs -n 1 -t cat"]).wait()
raise
# Patch for causing webserver timeout
@mock.patch(
"airflow.cli.commands.webserver_command.GunicornMonitor._get_num_workers_running", return_value=0
)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
# Shorten timeout so that this test doesn't take too long time
args = self.parser.parse_args(['webserver'])
with conf_vars({('webserver', 'web_server_master_timeout'): '10'}):
with self.assertRaises(SystemExit) as e:
webserver_command.webserver(args)
self.assertEqual(e.exception.code, 1)
def test_cli_webserver_debug(self):
env = os.environ.copy()
proc = psutil.Popen(["airflow", "webserver", "--debug"], env=env)
time.sleep(3) # wait for webserver to start
return_code = proc.poll()
self.assertEqual(
None, return_code, f"webserver terminated with return code {return_code} in debug mode"
)
proc.terminate()
self.assertEqual(-15, proc.wait(60))
def test_cli_webserver_access_log_format(self):
# json access log format
access_logformat = (
"{\"ts\":\"%(t)s\",\"remote_ip\":\"%(h)s\",\"request_id\":\"%({"
"X-Request-Id}i)s\",\"code\":\"%(s)s\",\"request_method\":\"%(m)s\","
"\"request_path\":\"%(U)s\",\"agent\":\"%(a)s\",\"response_time\":\"%(D)s\","
"\"response_length\":\"%(B)s\"} "
)
with tempfile.TemporaryDirectory() as tmpdir, mock.patch.dict(
"os.environ",
AIRFLOW__CORE__DAGS_FOLDER="/dev/null",
AIRFLOW__CORE__LOAD_EXAMPLES="False",
AIRFLOW__WEBSERVER__WORKERS="1",
):
access_logfile = f"{tmpdir}/access.log"
# Run webserver in foreground and terminate it.
proc = subprocess.Popen(
[
"airflow",
"webserver",
"--access-logfile",
access_logfile,
"--access-logformat",
access_logformat,
]
)
self.assertEqual(None, proc.poll())
# Wait for webserver process
time.sleep(10)
proc2 = subprocess.Popen(["curl", "http://localhost:8080"])
proc2.wait(10)
try:
file = open(access_logfile)
log = json.loads(file.read())
self.assertEqual('127.0.0.1', log.get('remote_ip'))
self.assertEqual(len(log), 9)
self.assertEqual('GET', log.get('request_method'))
except OSError:
print("access log file not found at " + access_logfile)
# Terminate webserver
proc.terminate()
# -15 - the server was stopped before it started
# 0 - the server terminated correctly
self.assertIn(proc.wait(60), (-15, 0))
self._check_processes()
| {
"content_hash": "8d5de73ee6cc8e3c1c02518622c25dff",
"timestamp": "",
"source": "github",
"line_count": 417,
"max_line_length": 105,
"avg_line_length": 42.16786570743405,
"alnum_prop": 0.5939490445859873,
"repo_name": "DinoCow/airflow",
"id": "1d7a6703df121b1cca62e3932b58d9ceaffb70af",
"size": "18370",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/cli/commands/test_webserver_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56963"
},
{
"name": "HTML",
"bytes": "140781"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1473771"
},
{
"name": "Shell",
"bytes": "18638"
}
],
"symlink_target": ""
} |
import unittest
import gc
import logging
import mock
from multiprocessing import Process, Queue
import os
import sys
from shellbot import Context, Engine, Shell
from shellbot.events import Message
from shellbot.updaters import FileUpdater
my_engine = Engine()
my_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'local')
my_file = os.path.join(my_path, 'file_updater.log')
class UpdaterTests(unittest.TestCase):
def tearDown(self):
collected = gc.collect()
logging.info("Garbage collector: collected %d objects." % (collected))
def test_init(self):
logging.info('***** init')
u = FileUpdater()
self.assertEqual(u.engine, None)
u = FileUpdater(engine=my_engine)
self.assertEqual(u.engine, my_engine)
def test_on_init(self):
logging.info('***** on_init')
u = FileUpdater()
self.assertEqual(u.path, None)
u = FileUpdater(path=None)
self.assertEqual(u.path, None)
u = FileUpdater(path='')
self.assertEqual(u.path, '')
u = FileUpdater(path='here.log')
self.assertEqual(u.path, 'here.log')
def test_get_path(self):
logging.info('***** get_path')
u = FileUpdater(engine=my_engine)
self.assertEqual(u.get_path(), '/var/log/shellbot.log')
u = FileUpdater(engine=my_engine, path=None)
self.assertEqual(u.get_path(), '/var/log/shellbot.log')
u = FileUpdater(engine=my_engine, path='')
self.assertEqual(u.get_path(), '/var/log/shellbot.log')
u = FileUpdater(engine=my_engine, path='here.log')
self.assertEqual(u.get_path(), 'here.log')
def test_on_bond(self):
logging.info('***** on_bond')
try:
os.rmdir(os.path.join(my_path, 'on_bond'))
except:
pass
u = FileUpdater(path=os.path.join(my_path,
'on_bond',
'file.log'))
u.on_bond(bot='*dummy')
self.assertTrue(os.path.isdir(os.path.join(my_path, 'on_bond')))
try:
os.rmdir(os.path.join(my_path, 'on_bond'))
except:
pass
def test_put(self):
logging.info('***** put')
try:
os.remove(my_file)
except:
pass
try:
os.rmdir(my_path)
except:
pass
u = FileUpdater(path=my_file)
u.on_bond(bot='*dummy')
message_1 = Message({
'person_label': 'alice@acme.com',
'text': 'a first message',
})
u.put(message_1)
message_2 = Message({
'person_label': 'bob@acme.com',
'text': 'a second message',
})
u.put(message_2)
expected = '{"person_label": "alice@acme.com", "text": "a first message", "type": "message"}\n{"person_label": "bob@acme.com", "text": "a second message", "type": "message"}\n'
with open(my_file, 'r') as handle:
self.assertEqual(handle.read(), expected)
try:
os.remove(my_file)
except:
pass
try:
os.rmdir(my_path)
except:
pass
if __name__ == '__main__':
Context.set_logger()
sys.exit(unittest.main())
| {
"content_hash": "37645a1f2455c8a23bf6fdfad83d1d0f",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 184,
"avg_line_length": 24.822222222222223,
"alnum_prop": 0.5413309459862727,
"repo_name": "bernard357/shellbot",
"id": "9d99af0e18f7c11c171887e76ee610823024db67",
"size": "3398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/updaters/test_file.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2233"
},
{
"name": "Python",
"bytes": "807558"
}
],
"symlink_target": ""
} |