text stringlengths 4 1.02M | meta dict |
|---|---|
'''
Created on Aug 5, 2015
@author: mlaptev
'''
import functools


def escape_unicode(fn):
    """Decorator factory: escape non-ASCII characters in fn's return value.

    Each character with a code point >= 128 is replaced by a backslash
    followed by its hex representation (e.g. ``\\0xf1``); ASCII characters
    pass through unchanged.
    """
    @functools.wraps(fn)
    def wrapped():
        # Call fn() exactly once; the original called it twice, which
        # duplicated any side effects of the wrapped function.
        text = fn()
        print("String to convert '{}'".format(text))
        # hex() already returns a str, so no str() wrapper is needed.
        return "".join(
            ch if ord(ch) < 128 else "\\" + hex(ord(ch))
            for ch in text
        )
    return wrapped


@escape_unicode
def some_non_latin_string():
    return "This is just a строка!"


if __name__ == '__main__':
    print(some_non_latin_string())
"content_hash": "c70f2eada898e996cbe3616452b936ec",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 86,
"avg_line_length": 21,
"alnum_prop": 0.5789473684210527,
"repo_name": "MikeLaptev/sandbox_python",
"id": "0f95ee380a480356d455daf3e8a42549a213b639",
"size": "420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mera/closures_and_decorators/decorator_for_unicode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Nginx",
"bytes": "591"
},
{
"name": "Python",
"bytes": "190991"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
} |
import httplib2
import Queue
import re
import socket
import sys
import threading
import time
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from spider.utils import SpiderThread, fetch_url
class SpiderProfile(models.Model):
    """Crawl configuration: the start URL plus resource limits for spiders."""
    title = models.CharField(max_length=255)
    url = models.CharField(max_length=255, help_text='Full URL, including http://')
    time_limit = models.IntegerField(default=60, help_text='Maximum time to run spiders')
    timeout = models.IntegerField(default=10, help_text='Socket and Queue timeout')
    depth = models.IntegerField(default=2, help_text='How many pages deep to follow links')
    threads = models.IntegerField(default=4, help_text='Number of concurrent spiders')

    class Meta:
        ordering = ('title',)

    def __unicode__(self):
        return '%s [%s]' % (self.title, self.url)

    def get_absolute_url(self):
        return reverse('profiles_profile_detail', args=[self.pk])

    def spider(self):
        """Create a new SpiderSession for this profile and run the crawl.

        Returns the dict of {url: response status} from SpiderSession.spider().
        """
        session = SpiderSession.objects.create(spider_profile=self)
        return session.spider()

    def check_health(self):
        """Run a single-URL health check and return the ProfileStatusCheck."""
        status_check = ProfileStatusCheck.objects.create(spider_profile=self)
        status_check.check_health()
        return status_check

    def latest_status(self):
        """Return the most recent ProfileStatusCheck, or None if none exist.

        status_checks is ordered newest-first (see ProfileStatusCheck.Meta),
        so index 0 is the latest one.
        """
        try:
            return self.status_checks.all()[0]
        except IndexError:
            # No health checks recorded yet; implicitly return None.
            pass
class SpiderSession(models.Model):
    """One crawl run of a SpiderProfile; owns the URLResult rows it produces."""
    spider_profile = models.ForeignKey(SpiderProfile, related_name='sessions')
    created_date = models.DateTimeField(auto_now_add=True)
    complete = models.BooleanField(default=False)

    class Meta:
        ordering = ('-created_date',)

    def __unicode__(self):
        return '%s [%s]' % (self.spider_profile.title, self.created_date)

    def get_absolute_url(self):
        return reverse('profiles_session_detail', args=[self.spider_profile.pk, self.pk])

    def spider(self):
        """Crawl the profile's URL with worker threads until time_limit.

        Returns a dict mapping each visited url to its HTTP response status.
        The only loop exit is the time-limit check in the ``finally`` block,
        so the crawl always runs for (at least) ``time_limit`` seconds.
        """
        # these were originally args, but thought i'd move them to the profile
        # model:
        timeout = self.spider_profile.timeout
        time_limit = self.spider_profile.time_limit
        depth = self.spider_profile.depth
        threads = self.spider_profile.threads
        # a queue to store a 3-tuple of (url to fetch, source url, depth)
        pending_urls = Queue.Queue()
        # store a 3-tuple of (unsaved URLResponse, urls found, depth remaining)
        processed_responses = Queue.Queue()
        # event triggered when threads should shut down
        finished = threading.Event()
        # store the initial time to track how long spider runs for
        start = time.time()
        # response statuses keyed by url
        visited = {}
        # track what urls are scheduled to ensure we don't hit things twice
        scheduled = set()
        # create a couple of threads to chew on urls
        # NOTE: `threads` is rebound here from the worker *count* above to
        # the list of SpiderThread worker objects.
        threads = [
            SpiderThread(pending_urls, processed_responses, finished, self) \
            for x in range(threads)
        ]
        # start with the source url
        pending_urls.put((self.spider_profile.url, '', depth))
        scheduled.add(self.spider_profile.url)
        # start our worker threads
        [t.start() for t in threads]
        while 1:
            try:
                # pull an item from the response queue
                result_dict, urls, depth = processed_responses.get(timeout=timeout)
            except Queue.Empty:
                # nothing arrived within `timeout` seconds; fall through to
                # the time-limit check in the finally block
                pass
            else:
                # save the result
                url_result = URLResult(**result_dict)
                url_result.session = self
                url_result.save()
                processed_url = url_result.url
                # remove from the list of scheduled items
                scheduled.remove(processed_url)
                # store response status in the visited dictionary
                visited[processed_url] = url_result.response_status
                # enqueue any urls that need to be checked
                if depth > 0:
                    for url in urls:
                        if url not in visited and url not in scheduled:
                            scheduled.add(url)
                            pending_urls.put((url, processed_url, depth - 1))
            finally:
                if time.time() - start >= time_limit:
                    # set the finished flag
                    finished.set()
                    # wait for all the threads to finish up
                    [t.join() for t in threads]
                    break
        self.complete = True
        self.save()
        return visited

    def results_with_status(self, status):
        """Queryset of this session's URLResults with the given HTTP status."""
        return self.results.filter(response_status=status)

    def results_404(self):
        return self.results_with_status(404)

    def results_500(self):
        return self.results_with_status(500)

    def results_200(self):
        return self.results_with_status(200)

    def new_this_session(self, status):
        """Results with `status` that were absent from the previous session.

        If this is the profile's first session, every current result with
        the given status is returned.
        """
        # results with the given status that were not present in the previous
        # session
        previous_qs = SpiderSession.objects.filter(
            spider_profile=self.spider_profile,
            created_date__lt=self.created_date
        ).order_by('-created_date')
        current_results = self.results_with_status(status)
        try:
            last_session = previous_qs[0]
        except IndexError:
            return current_results
        previous_results = last_session.results_with_status(status)
        return current_results.exclude(url__in=previous_results.values('url'))

    def new_404(self):
        return self.new_this_session(404)

    def new_500(self):
        return self.new_this_session(500)

    def new_200(self):
        return self.new_this_session(200)
class URLResult(models.Model):
    """A single URL fetched during a SpiderSession, with response metadata."""
    session = models.ForeignKey(SpiderSession, related_name='results')
    url = models.CharField(max_length=255)
    source_url = models.CharField(max_length=255)
    content = models.TextField()
    response_status = models.IntegerField()
    response_time = models.FloatField()
    content_length = models.IntegerField()
    created_date = models.DateTimeField(auto_now_add=True)
    # store any urls extracted from the content during spidering
    # (kept as a class-level default for backward compatibility; see
    # __init__ below for the per-instance list)
    urls = []

    def __init__(self, *args, **kwargs):
        super(URLResult, self).__init__(*args, **kwargs)
        # Bug fix: the original class-level `urls = []` was one shared list,
        # so links extracted for one result leaked into every other
        # URLResult instance. Give each instance its own list.
        self.urls = []

    class Meta:
        ordering = ('-session__pk', 'source_url', 'url',)

    def __unicode__(self):
        return '%s [%s]' % (self.url, self.response_status)

    def get_absolute_url(self):
        return reverse('profiles_url_result_detail', args=[
            self.session.spider_profile_id, self.session_id, self.pk
        ])

    def previous_results(self):
        """Earlier URLResults for the same URL, newest first."""
        return URLResult.objects.filter(
            url=self.url,
            created_date__lt=self.created_date
        ).order_by('-created_date')

    def previous_status(self):
        """Response status of the most recent earlier result, or None."""
        previous_qs = self.previous_results()
        try:
            most_recent = previous_qs[0]
        except IndexError:
            return None
        else:
            return most_recent.response_status

    def short_url(self):
        """Strip the scheme and host from self.url, leaving the path part."""
        # Raw string replaces the original's ambiguous '\/' escapes; the
        # pattern itself is unchanged ('\/' and '/' are equivalent in re).
        return re.sub(r'^([a-z]+://)?([^/]+)', '', self.url)
class ProfileStatusCheck(models.Model):
    """A single health-check fetch of a profile's URL."""
    spider_profile = models.ForeignKey(SpiderProfile, related_name='status_checks')
    error_fetching = models.BooleanField(default=False)
    response_status = models.IntegerField(blank=True, null=True)
    response_time = models.FloatField(blank=True, null=True)
    created_date = models.DateTimeField(auto_now_add=True)
    exception = models.TextField()

    class Meta:
        ordering = ('-created_date',)

    def __unicode__(self):
        return '%s [%s]' % (self.spider_profile.url, self.response_status)

    def check_health(self):
        """Fetch the profile URL once and record status/timing or the error.

        On fetch failure the exception repr is stored and error_fetching is
        set; on success the status and elapsed time are stored. The instance
        is saved in both cases.
        """
        try:
            # start the clock inside the try so a failed fetch never leaves
            # a bogus response_time
            start = time.time()
            headers, resp = fetch_url(
                self.spider_profile.url,
                self.spider_profile.timeout
            )
        except (socket.error, AttributeError, httplib2.ServerNotFoundError):
            self.error_fetching = True
            exception = sys.exc_info()[1]
            self.exception = repr(exception)
        else:
            self.response_status = int(headers['status'])
            self.response_time = time.time() - start
        self.save()

    def is_ok(self):
        """True when the recorded response status is HTTP 200."""
        return self.response_status == 200
| {
"content_hash": "17eb3b0caf98d75fda2c0381c2926f3f",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 91,
"avg_line_length": 33.13846153846154,
"alnum_prop": 0.5892525533890436,
"repo_name": "georgedorn/django-spider",
"id": "b13c287df75c72492eef889d0dccb995be36db0b",
"size": "8616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spider/models.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""add task fails journal table
Revision ID: 64de9cddf6c9
Revises: 211e584da130
Create Date: 2016-08-03 14:02:59.203021
"""
import sqlalchemy as sa
from alembic import op
from airflow.models.base import COLLATION_ARGS
# revision identifiers, used by Alembic.
revision = '64de9cddf6c9'
down_revision = '211e584da130'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``task_fail`` table used to journal task failures."""
    # String columns carry the configured collation so they join cleanly
    # against the other Airflow tables.
    string_kwargs = dict(length=250, **COLLATION_ARGS)
    columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('task_id', sa.String(**string_kwargs), nullable=False),
        sa.Column('dag_id', sa.String(**string_kwargs), nullable=False),
        sa.Column('execution_date', sa.DateTime(), nullable=False),
        sa.Column('start_date', sa.DateTime(), nullable=True),
        sa.Column('end_date', sa.DateTime(), nullable=True),
        sa.Column('duration', sa.Integer(), nullable=True),
    ]
    op.create_table('task_fail', *columns, sa.PrimaryKeyConstraint('id'))
def downgrade():
    """Drop the ``task_fail`` table (reverse of :func:`upgrade`)."""
    table_name = 'task_fail'
    op.drop_table(table_name)
| {
"content_hash": "171619505d5b4493728b32df4b145b54",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 86,
"avg_line_length": 28.37142857142857,
"alnum_prop": 0.6717019133937563,
"repo_name": "apache/incubator-airflow",
"id": "4243e3a3b40fd82e7df9483646304084fba00609",
"size": "1781",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/migrations/versions/64de9cddf6c9_add_task_fails_journal_table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69070"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "283783"
},
{
"name": "JavaScript",
"bytes": "1387552"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5482822"
},
{
"name": "Shell",
"bytes": "40957"
}
],
"symlink_target": ""
} |
from crowdsourcing.serializers.template import *
from rest_framework import viewsets
class TemplateViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for Template objects."""
    # NOTE(review): the model import lives inside the class body —
    # presumably to dodge a circular import between models and serializers;
    # confirm before hoisting to module level. It also binds `Template` as
    # a class attribute.
    from crowdsourcing.models import Template
    queryset = Template.objects.all()
    serializer_class = TemplateSerializer
class TemplateItemViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for TemplateItem objects."""
    # NOTE(review): in-class import — likely a circular-import workaround;
    # verify before moving it to the top of the file.
    from crowdsourcing.models import TemplateItem
    queryset = TemplateItem.objects.all()
    serializer_class = TemplateItemSerializer
class TemplateItemPropertiesViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for TemplateItemProperties objects."""
    # NOTE(review): in-class import — likely a circular-import workaround;
    # verify before moving it to the top of the file.
    from crowdsourcing.models import TemplateItemProperties
    queryset = TemplateItemProperties.objects.all()
    serializer_class = TemplateItemPropertiesSerializer
| {
"content_hash": "237a5ae37e175f6483fc609e1068e7d6",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 59,
"avg_line_length": 29.652173913043477,
"alnum_prop": 0.8108504398826979,
"repo_name": "rakshit-agrawal/crowdsource-platform",
"id": "d647db7f3f590e260b1ec829600ac37f6b471835",
"size": "682",
"binary": false,
"copies": "13",
"ref": "refs/heads/develop2",
"path": "crowdsourcing/viewsets/template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "287571"
},
{
"name": "HTML",
"bytes": "206335"
},
{
"name": "JavaScript",
"bytes": "151126"
},
{
"name": "Python",
"bytes": "258405"
},
{
"name": "Shell",
"bytes": "828"
}
],
"symlink_target": ""
} |
'''
This module was generated automatically. Do not edit directly.
'''
import collections
from SmartMeshSDK import ApiException
from IpMgrConnectorMuxInternal import IpMgrConnectorMuxInternal
##
# \addtogroup IpMgrConnectorMux
# \{
#
class IpMgrConnectorMux(IpMgrConnectorMuxInternal):
'''
\brief Public class for IP manager connector, over SerialMux.
'''
#======================== commands ========================================
##
# Named tuple returned by dn_mux_hello(): fields 'RC' and 'version'.
# RC is a 1-byte result code: 0=RC_OK, 1=RC_INVALID_COMMAND,
# 2=RC_INVALID_ARGUMENT, 11=RC_END_OF_LIST, 12=RC_NO_RESOURCES,
# 13=RC_IN_PROGRESS, 14=RC_NACK, 15=RC_WRITE_FAIL, 16=RC_VALIDATION_ERROR,
# 17=RC_INV_STATE, 18=RC_NOT_FOUND, 19=RC_UNSUPPORTED.
# version is a 1-byte int with no value restriction.
#
Tuple_dn_mux_hello = collections.namedtuple("Tuple_dn_mux_hello", ['RC', 'version'])

##
# Sent by the manager to initiate a new session with a client.
#
# \param version 1-byte int field.
# \param secret  8-byte hex field.
#
# \returns A #Tuple_dn_mux_hello named tuple.
#
def dn_mux_hello(self, version, secret):
    payload = {"version": version, "secret": secret}
    reply = IpMgrConnectorMuxInternal.send(self, ['mux_hello'], payload)
    return IpMgrConnectorMux.Tuple_dn_mux_hello(**reply)
##
# Client-side session initiation (raw 'hello' command).
#
# \param version  1-byte int field.
# \param cliSeqNo 1-byte int field.
# \param mode     1-byte int field; only defined value is 0 (legacy).
#
# \returns The raw response dictionary from the manager.
#
def dn_hello(self, version, cliSeqNo, mode):
    payload = {"version": version, "cliSeqNo": cliSeqNo, "mode": mode}
    return IpMgrConnectorMuxInternal.send(self, ['hello'], payload)
##
# Named tuple returned by dn_hello_response(): fields 'successCode',
# 'version', 'mgrSeqNo', 'cliSeqNo' and 'mode' (all 1-byte ints).
# successCode: 0=success, 1=unsupported_version, 2=invalid_mode.
# mode: only defined value is 0 (legacy).
#
Tuple_dn_hello_response = collections.namedtuple("Tuple_dn_hello_response", ['successCode', 'version', 'mgrSeqNo', 'cliSeqNo', 'mode'])

##
# Retrieve the manager's response to a hello exchange.
#
# \returns A #Tuple_dn_hello_response named tuple.
#
def dn_hello_response(self):
    reply = IpMgrConnectorMuxInternal.send(self, ['hello_response'], {})
    return IpMgrConnectorMux.Tuple_dn_hello_response(**reply)
##
# Named tuple returned by dn_reset(): fields 'RC' and 'macAddress'.
# RC is a 1-byte result code: 0=RC_OK, 1=RC_INVALID_COMMAND,
# 2=RC_INVALID_ARGUMENT, 11=RC_END_OF_LIST, 12=RC_NO_RESOURCES,
# 13=RC_IN_PROGRESS, 14=RC_NACK, 15=RC_WRITE_FAIL, 16=RC_VALIDATION_ERROR,
# 17=RC_INV_STATE, 18=RC_NOT_FOUND, 19=RC_UNSUPPORTED.
# macAddress is an 8-byte hex field.
#
Tuple_dn_reset = collections.namedtuple("Tuple_dn_reset", ['RC', 'macAddress'])

##
# Reset an object. The argument is the object type; the MAC address is
# only meaningful when resetting a mote and is ignored otherwise.
#
# \param type       1-byte int: 0=resetSystem, 2=resetMote.
# \param macAddress 8-byte hex field.
#
# \returns A #Tuple_dn_reset named tuple.
#
def dn_reset(self, type, macAddress):
    payload = {"type": type, "macAddress": macAddress}
    reply = IpMgrConnectorMuxInternal.send(self, ['reset'], payload)
    return IpMgrConnectorMux.Tuple_dn_reset(**reply)
##
# Named tuple returned by dn_subscribe(): single field 'RC'.
# RC is a 1-byte result code: 0=RC_OK, 1=RC_INVALID_COMMAND,
# 2=RC_INVALID_ARGUMENT, 11=RC_END_OF_LIST, 12=RC_NO_RESOURCES,
# 13=RC_IN_PROGRESS, 14=RC_NACK, 15=RC_WRITE_FAIL, 16=RC_VALIDATION_ERROR,
# 17=RC_INV_STATE, 18=RC_NOT_FOUND, 19=RC_UNSUPPORTED.
#
Tuple_dn_subscribe = collections.namedtuple("Tuple_dn_subscribe", ['RC'])

##
# Select which notifications the manager sends this client. `filter` is a
# bitmask of notification types to receive; `unackFilter` selects which of
# those are sent acknowledged (subsequent notification packets are queued
# while waiting for the response). Each request overwrites the previous
# subscription; a zero filter clears all subscriptions, and a new session
# starts with a zero filter. The bitmap uses the notification type
# enumeration values (some values are unused for backward compatibility
# with earlier APIs).
#
# \param filter      4-byte hex field.
# \param unackFilter 4-byte hex field.
#
# \returns A #Tuple_dn_subscribe named tuple.
#
def dn_subscribe(self, filter, unackFilter):
    payload = {"filter": filter, "unackFilter": unackFilter}
    reply = IpMgrConnectorMuxInternal.send(self, ['subscribe'], payload)
    return IpMgrConnectorMux.Tuple_dn_subscribe(**reply)
##
# Named tuple returned by dn_getTime(): fields 'RC', 'uptime' (4-byte int),
# 'utcSecs' (8-byte int), 'utcUsecs' (4-byte int), 'asn' (5-byte hex) and
# 'asnOffset' (2-byte int).
# RC is a 1-byte result code: 0=RC_OK, 1=RC_INVALID_COMMAND,
# 2=RC_INVALID_ARGUMENT, 11=RC_END_OF_LIST, 12=RC_NO_RESOURCES,
# 13=RC_IN_PROGRESS, 14=RC_NACK, 15=RC_WRITE_FAIL, 16=RC_VALIDATION_ERROR,
# 17=RC_INV_STATE, 18=RC_NOT_FOUND, 19=RC_UNSUPPORTED.
#
Tuple_dn_getTime = collections.namedtuple("Tuple_dn_getTime", ['RC', 'uptime', 'utcSecs', 'utcUsecs', 'asn', 'asnOffset'])

##
# Return the current manager UTC time and absolute slot number (ASN).
# Values are delayed by queuing and serial transfer time; for additional
# precision, trigger the networkTime notification via the Time Pin.
#
# \returns A #Tuple_dn_getTime named tuple.
#
def dn_getTime(self):
    reply = IpMgrConnectorMuxInternal.send(self, ['getTime'], {})
    return IpMgrConnectorMux.Tuple_dn_getTime(**reply)
##
# Named tuple returned by dn_setNetworkConfig(): single field 'RC'.
# RC is a 1-byte result code: 0=RC_OK, 1=RC_INVALID_COMMAND,
# 2=RC_INVALID_ARGUMENT, 11=RC_END_OF_LIST, 12=RC_NO_RESOURCES,
# 13=RC_IN_PROGRESS, 14=RC_NACK, 15=RC_WRITE_FAIL, 16=RC_VALIDATION_ERROR,
# 17=RC_INV_STATE, 18=RC_NOT_FOUND, 19=RC_UNSUPPORTED.
#
Tuple_dn_setNetworkConfig = collections.namedtuple("Tuple_dn_setNetworkConfig", ['RC'])

##
# Change network configuration parameters; the RC indicates whether the
# changes were applied. The change is persistent. Most settings take
# effect when the manager reboots, with two exceptions: a new maxMotes is
# used as soon as motes try to join (existing motes are not removed if it
# is lowered), and changing baseBandwidth does not reallocate bandwidth to
# Operational motes.
#
# \param networkId        2-byte int.
# \param apTxPower        1-byte signed int.
# \param frameProfile     1-byte int; only defined value 1 (Profile_01).
# \param maxMotes         2-byte int.
# \param baseBandwidth    2-byte int.
# \param downFrameMultVal 1-byte int.
# \param numParents       1-byte int.
# \param ccaMode          1-byte int: 0=off, 1=energy, 2=carrier, 3=both.
# \param channelList      2-byte int.
# \param autoStartNetwork 1-byte bool.
# \param locMode          1-byte int.
# \param bbMode           1-byte int: 0=off, 1=upstream, 2=bidirectional.
# \param bbSize           1-byte int.
# \param isRadioTest      1-byte int.
# \param bwMult           2-byte int.
# \param oneChannel       1-byte int.
#
# \returns A #Tuple_dn_setNetworkConfig named tuple.
#
def dn_setNetworkConfig(self, networkId, apTxPower, frameProfile, maxMotes, baseBandwidth, downFrameMultVal, numParents, ccaMode, channelList, autoStartNetwork, locMode, bbMode, bbSize, isRadioTest, bwMult, oneChannel):
    payload = {
        "networkId": networkId,
        "apTxPower": apTxPower,
        "frameProfile": frameProfile,
        "maxMotes": maxMotes,
        "baseBandwidth": baseBandwidth,
        "downFrameMultVal": downFrameMultVal,
        "numParents": numParents,
        "ccaMode": ccaMode,
        "channelList": channelList,
        "autoStartNetwork": autoStartNetwork,
        "locMode": locMode,
        "bbMode": bbMode,
        "bbSize": bbSize,
        "isRadioTest": isRadioTest,
        "bwMult": bwMult,
        "oneChannel": oneChannel,
    }
    reply = IpMgrConnectorMuxInternal.send(self, ['setNetworkConfig'], payload)
    return IpMgrConnectorMux.Tuple_dn_setNetworkConfig(**reply)
##
# Named tuple returned by dn_clearStatistics(): single field 'RC'.
# RC is a 1-byte result code: 0=RC_OK, 1=RC_INVALID_COMMAND,
# 2=RC_INVALID_ARGUMENT, 11=RC_END_OF_LIST, 12=RC_NO_RESOURCES,
# 13=RC_IN_PROGRESS, 14=RC_NACK, 15=RC_WRITE_FAIL, 16=RC_VALIDATION_ERROR,
# 17=RC_INV_STATE, 18=RC_NOT_FOUND, 19=RC_UNSUPPORTED.
#
Tuple_dn_clearStatistics = collections.namedtuple("Tuple_dn_clearStatistics", ['RC'])

##
# Clear the accumulated network statistics. Does not clear path quality
# or mote statistics.
#
# \returns A #Tuple_dn_clearStatistics named tuple.
#
def dn_clearStatistics(self):
    reply = IpMgrConnectorMuxInternal.send(self, ['clearStatistics'], {})
    return IpMgrConnectorMux.Tuple_dn_clearStatistics(**reply)
##
# Named tuple returned by dn_exchangeMoteJoinKey(): fields 'RC' and
# 'callbackId' (4-byte int).
# RC is a 1-byte result code: 0=RC_OK, 1=RC_INVALID_COMMAND,
# 2=RC_INVALID_ARGUMENT, 11=RC_END_OF_LIST, 12=RC_NO_RESOURCES,
# 13=RC_IN_PROGRESS, 14=RC_NACK, 15=RC_WRITE_FAIL, 16=RC_VALIDATION_ERROR,
# 17=RC_INV_STATE, 18=RC_NOT_FOUND, 19=RC_UNSUPPORTED.
#
Tuple_dn_exchangeMoteJoinKey = collections.namedtuple("Tuple_dn_exchangeMoteJoinKey", ['RC', 'callbackId'])

##
# Send a new join key to the specified mote and update the manager's ACL
# entry for it. A commandFinished notification carrying the returned
# callbackId is sent when the operation completes. This change is
# persistent.
#
# \param macAddress 8-byte hex field.
# \param key        16-byte hex field.
#
# \returns A #Tuple_dn_exchangeMoteJoinKey named tuple.
#
def dn_exchangeMoteJoinKey(self, macAddress, key):
    payload = {"macAddress": macAddress, "key": key}
    reply = IpMgrConnectorMuxInternal.send(self, ['exchangeMoteJoinKey'], payload)
    return IpMgrConnectorMux.Tuple_dn_exchangeMoteJoinKey(**reply)
##
# Named tuple returned by dn_exchangeNetworkId(): fields 'RC' and
# 'callbackId' (4-byte int).
# RC is a 1-byte result code: 0=RC_OK, 1=RC_INVALID_COMMAND,
# 2=RC_INVALID_ARGUMENT, 11=RC_END_OF_LIST, 12=RC_NO_RESOURCES,
# 13=RC_IN_PROGRESS, 14=RC_NACK, 15=RC_WRITE_FAIL, 16=RC_VALIDATION_ERROR,
# 17=RC_INV_STATE, 18=RC_NOT_FOUND, 19=RC_UNSUPPORTED.
#
Tuple_dn_exchangeNetworkId = collections.namedtuple("Tuple_dn_exchangeNetworkId", ['RC', 'callbackId'])

##
# Distribute a new network ID to all motes in the network. A
# commandFinished notification carrying the returned callbackId is sent
# when the operation completes. This change is persistent.
#
# \param id 2-byte int field.
#
# \returns A #Tuple_dn_exchangeNetworkId named tuple.
#
def dn_exchangeNetworkId(self, id):
    payload = {"id": id}
    reply = IpMgrConnectorMuxInternal.send(self, ['exchangeNetworkId'], payload)
    return IpMgrConnectorMux.Tuple_dn_exchangeNetworkId(**reply)
##
# Named tuple returned by dn_radiotestTx(): single field 'RC'.
# RC is a 1-byte result code: 0=RC_OK, 1=RC_INVALID_COMMAND,
# 2=RC_INVALID_ARGUMENT, 11=RC_END_OF_LIST, 12=RC_NO_RESOURCES,
# 13=RC_IN_PROGRESS, 14=RC_NACK, 15=RC_WRITE_FAIL, 16=RC_VALIDATION_ERROR,
# 17=RC_INV_STATE, 18=RC_NOT_FOUND, 19=RC_UNSUPPORTED.
#
Tuple_dn_radiotestTx = collections.namedtuple("Tuple_dn_radiotestTx", ['RC'])

##
# Initiate a radio transmission test. Only valid when the manager was
# booted in radiotest mode (see setNetworkConfig). Test types:
# 0=packet, 1=continuous modulation (cm), 2=continuous wave (cw),
# 3=packet-with-CCA (pkcca, available in Manager > 1.3.x).
#
# A packet test sends repeatCnt sequences of up to ten packets (seqSize
# selects how many, 0-10) with per-packet sizes pkLen_1..pkLen_10 and
# inter-packet delays delay_1..delay_10, hopping pseudo-randomly over the
# channels enabled in chanMask. Each packet is a PHY preamble (5 bytes),
# a PHY length byte, up to 125 payload bytes and a 2-byte 802.15.4 CRC;
# payload byte 0 carries the sender's stationId, bytes 1-2 a big-endian
# packet counter, and the remaining bytes an incrementing byte counter.
# The pkcca variant additionally performs a clear channel assessment
# before each packet and skips it if the channel is busy.
#
# CM generates a continuous pseudo-random modulated signal and CW an
# unmodulated tone on the selected channel; both run until the device is
# reset, and neither uses stationId.
#
# stationId lets a radiotestRx receiver distinguish this device's packets
# when several tests share the same radio space (Manager >= 1.3.0).
# Channels are numbered 0-15, i.e. IEEE 2.4 GHz channels 11-26.
#
# \param testType  1-byte int (see above). \param chanMask 2-byte hex.
# \param repeatCnt 2-byte int. \param txPower 1-byte signed int.
# \param seqSize   1-byte int, 0-10.
# \param pkLen_N   1-byte ints; \param delay_N 2-byte ints (N = 1..10).
# \param stationId 1-byte int.
#
# \returns A #Tuple_dn_radiotestTx named tuple.
#
def dn_radiotestTx(self, testType, chanMask, repeatCnt, txPower, seqSize, pkLen_1, delay_1, pkLen_2, delay_2, pkLen_3, delay_3, pkLen_4, delay_4, pkLen_5, delay_5, pkLen_6, delay_6, pkLen_7, delay_7, pkLen_8, delay_8, pkLen_9, delay_9, pkLen_10, delay_10, stationId):
    payload = {
        "testType": testType,
        "chanMask": chanMask,
        "repeatCnt": repeatCnt,
        "txPower": txPower,
        "seqSize": seqSize,
        "pkLen_1": pkLen_1, "delay_1": delay_1,
        "pkLen_2": pkLen_2, "delay_2": delay_2,
        "pkLen_3": pkLen_3, "delay_3": delay_3,
        "pkLen_4": pkLen_4, "delay_4": delay_4,
        "pkLen_5": pkLen_5, "delay_5": delay_5,
        "pkLen_6": pkLen_6, "delay_6": delay_6,
        "pkLen_7": pkLen_7, "delay_7": delay_7,
        "pkLen_8": pkLen_8, "delay_8": delay_8,
        "pkLen_9": pkLen_9, "delay_9": delay_9,
        "pkLen_10": pkLen_10, "delay_10": delay_10,
        "stationId": stationId,
    }
    reply = IpMgrConnectorMuxInternal.send(self, ['radiotestTx'], payload)
    return IpMgrConnectorMux.Tuple_dn_radiotestTx(**reply)
##
# The named tuple returned by the dn_radiotestRx() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
#
Tuple_dn_radiotestRx = collections.namedtuple("Tuple_dn_radiotestRx", ['RC'])
##
# The radiotestRx command clears all previously collected statistics and initiates radio reception on the specified channel. It may only be executed if the manager has been booted up in radiotest mode (see setNetworkConfig command). During the test, the device keeps statistics about the number of packets received (with and without error). The test results may be retrieved using the getRadiotestStatistics command.
#
# The station ID is a user selectable value. It must be set to match the station ID used by the transmitter. Station ID is used to isolate traffic if multiple tests are running in the same radio space.
#
#
#
# Channel numbering is 0-15, corresponding to IEEE 2.4 GHz channels 11-26.
#
# \param mask 2-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# \param duration 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# \param stationId 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_radiotestRx named tuple.
#
def dn_radiotestRx(self, mask, duration, stationId) :
res = IpMgrConnectorMuxInternal.send(self, ['radiotestRx'], {"mask" : mask, "duration" : duration, "stationId" : stationId})
return IpMgrConnectorMux.Tuple_dn_radiotestRx(**res)
##
# The named tuple returned by the dn_getRadiotestStatistics() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>rxOk</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>rxFail</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getRadiotestStatistics = collections.namedtuple("Tuple_dn_getRadiotestStatistics", ['RC', 'rxOk', 'rxFail'])
##
# This command retrieves statistics from a previously run radiotestRx command. It may only be executed if the manager has been booted up in radiotest mode (see setNetworkConfig command).
#
#
#
# \returns The response to the command, formatted as a #Tuple_dn_getRadiotestStatistics named tuple.
#
def dn_getRadiotestStatistics(self, ) :
res = IpMgrConnectorMuxInternal.send(self, ['getRadiotestStatistics'], {})
return IpMgrConnectorMux.Tuple_dn_getRadiotestStatistics(**res)
##
# The named tuple returned by the dn_setACLEntry() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
#
Tuple_dn_setACLEntry = collections.namedtuple("Tuple_dn_setACLEntry", ['RC'])
##
# The setACLEntry command adds a new entry or updates an existing entry in the Access Control List (ACL). This change is persistent. The maximum number of entries is 1,200.
#
# \param macAddress 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# \param joinKey 16-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_setACLEntry named tuple.
#
def dn_setACLEntry(self, macAddress, joinKey) :
res = IpMgrConnectorMuxInternal.send(self, ['setACLEntry'], {"macAddress" : macAddress, "joinKey" : joinKey})
return IpMgrConnectorMux.Tuple_dn_setACLEntry(**res)
##
# The named tuple returned by the dn_getNextACLEntry() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>macAddress</tt>: 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>joinKey</tt>: 16-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getNextACLEntry = collections.namedtuple("Tuple_dn_getNextACLEntry", ['RC', 'macAddress', 'joinKey'])
##
# The getNextACLEntry command returns information about next mote entry in the access control list (ACL). To begin a search (find the first mote in ACL), a zero MAC address (0000000000000000) should be sent. There is no mechanism for reading the ACL entry of a specific mote. This call is an iterator. If you call getNextACLEntry with mote A as the argument, your response is the ACL entry for mote B, where B is the next mote in the ACL.
#
# \param macAddress 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_getNextACLEntry named tuple.
#
def dn_getNextACLEntry(self, macAddress) :
res = IpMgrConnectorMuxInternal.send(self, ['getNextACLEntry'], {"macAddress" : macAddress})
return IpMgrConnectorMux.Tuple_dn_getNextACLEntry(**res)
##
# The named tuple returned by the dn_deleteACLEntry() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
#
Tuple_dn_deleteACLEntry = collections.namedtuple("Tuple_dn_deleteACLEntry", ['RC'])
##
# The deleteACLEntry command deletes the specified mote from the access control list (ACL). If the macAddress parameter is set to all 0xFFs or all 0x00s, the entire ACL is cleared. This change is persistent.
#
# \param macAddress 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_deleteACLEntry named tuple.
#
def dn_deleteACLEntry(self, macAddress) :
res = IpMgrConnectorMuxInternal.send(self, ['deleteACLEntry'], {"macAddress" : macAddress})
return IpMgrConnectorMux.Tuple_dn_deleteACLEntry(**res)
##
# The named tuple returned by the dn_pingMote() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>callbackId</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_pingMote = collections.namedtuple("Tuple_dn_pingMote", ['RC', 'callbackId'])
##
# The pingMote command sends a ping (echo request) to the mote specified by MAC address. A unique callbackId is generated and returned with the response. When the response is received from the mote, the manager generates a pingResponse notification with the measured round trip delay and several other parameters. The request is sent using unacknowledged transport, so the mote is not guaranteed to receive the request.
#
# \param macAddress 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_pingMote named tuple.
#
def dn_pingMote(self, macAddress) :
res = IpMgrConnectorMuxInternal.send(self, ['pingMote'], {"macAddress" : macAddress})
return IpMgrConnectorMux.Tuple_dn_pingMote(**res)
##
# The named tuple returned by the dn_getLog() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
#
Tuple_dn_getLog = collections.namedtuple("Tuple_dn_getLog", ['RC'])
##
# The getLog command retrieves diagnostic logs from the manager or a mote specified by MAC address.
#
# \param macAddress 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_getLog named tuple.
#
def dn_getLog(self, macAddress) :
res = IpMgrConnectorMuxInternal.send(self, ['getLog'], {"macAddress" : macAddress})
return IpMgrConnectorMux.Tuple_dn_getLog(**res)
##
# The named tuple returned by the dn_sendData() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>callbackId</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_sendData = collections.namedtuple("Tuple_dn_sendData", ['RC', 'callbackId'])
##
# The sendData command sends a packet to a mote in the network. The response contains a callbackId. When the manager injects the packet into the network, it will generate a packetSent notification. It is the responsibility of the customer's application layer at the mote to send a response. It is also the responsibility of the customer's application layer to timeout if no response is received at the manager if one is expected.
#
# The sendData command should be used by applications that communicate directly with the manager. If end-to-end (application to mote) IP connectivity is required, the application should use the sendIP command. For a more comprehensive discussion of the distinction, see the SmartMesh IP Network User Guide.
#
# \param macAddress 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# \param priority 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: Low
# - 1: Medium
# - 2: High
# \param srcPort 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# \param dstPort 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# \param options 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# \param data None-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_sendData named tuple.
#
def dn_sendData(self, macAddress, priority, srcPort, dstPort, options, data) :
res = IpMgrConnectorMuxInternal.send(self, ['sendData'], {"macAddress" : macAddress, "priority" : priority, "srcPort" : srcPort, "dstPort" : dstPort, "options" : options, "data" : data})
return IpMgrConnectorMux.Tuple_dn_sendData(**res)
##
# The named tuple returned by the dn_startNetwork() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
#
Tuple_dn_startNetwork = collections.namedtuple("Tuple_dn_startNetwork", ['RC'])
##
# The startNetwork command tells the manager to allow the network to start forming (begin accepting join requests from devices). The external application must issue the startNetwork command if the autoStartNetwork flag is not set (see setNetworkConfig).
#
# This command has been deprecated and should not be used in new designs.
#
#
#
# \returns The response to the command, formatted as a #Tuple_dn_startNetwork named tuple.
#
def dn_startNetwork(self, ) :
res = IpMgrConnectorMuxInternal.send(self, ['startNetwork'], {})
return IpMgrConnectorMux.Tuple_dn_startNetwork(**res)
##
# The named tuple returned by the dn_getSystemInfo() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>macAddress</tt>: 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>hwModel</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>hwRev</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>swMajor</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>swMinor</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>swPatch</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>swBuild</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getSystemInfo = collections.namedtuple("Tuple_dn_getSystemInfo", ['RC', 'macAddress', 'hwModel', 'hwRev', 'swMajor', 'swMinor', 'swPatch', 'swBuild'])
##
# The getSystemInfo command returns system-level information about the hardware and software versions.
#
#
#
# \returns The response to the command, formatted as a #Tuple_dn_getSystemInfo named tuple.
#
def dn_getSystemInfo(self, ) :
res = IpMgrConnectorMuxInternal.send(self, ['getSystemInfo'], {})
return IpMgrConnectorMux.Tuple_dn_getSystemInfo(**res)
##
# The named tuple returned by the dn_getMoteConfig() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>macAddress</tt>: 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>moteId</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>isAP</tt>: 1-byte field formatted as a bool.<br/>
# There is no restriction on the value of this field.
# - <tt>state</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: lost
# - 1: negotiating
# - 4: operational
# - <tt>reserved</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>isRouting</tt>: 1-byte field formatted as a bool.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getMoteConfig = collections.namedtuple("Tuple_dn_getMoteConfig", ['RC', 'macAddress', 'moteId', 'isAP', 'state', 'reserved', 'isRouting'])
##
# The getMoteConfig command returns a single mote description as the response. The command takes two arguments, a MAC Address and a flag indicating whether the MAC Address refers to the requested mote or to the next mote in managers memory. This command may be used to iterate through all motes known by the manager by starting with the macAddress parameter set to 0 and next set to true, and then using the MAC Address of that response as the input to the next call.
#
# The mote MAC address is used in all query commands, but space constraints require the neighbor health reports to use the Mote ID for identification. Therefore, both identifiers are present in the mote structure.
#
# \param macAddress 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# \param next 1-byte field formatted as a bool.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_getMoteConfig named tuple.
#
def dn_getMoteConfig(self, macAddress, next) :
res = IpMgrConnectorMuxInternal.send(self, ['getMoteConfig'], {"macAddress" : macAddress, "next" : next})
return IpMgrConnectorMux.Tuple_dn_getMoteConfig(**res)
##
# The named tuple returned by the dn_getPathInfo() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>source</tt>: 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>dest</tt>: 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>direction</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: none
# - 1: unused
# - 2: upstream
# - 3: downstream
# - <tt>numLinks</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>quality</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>rssiSrcDest</tt>: 1-byte field formatted as a ints.<br/>
# There is no restriction on the value of this field.
# - <tt>rssiDestSrc</tt>: 1-byte field formatted as a ints.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getPathInfo = collections.namedtuple("Tuple_dn_getPathInfo", ['RC', 'source', 'dest', 'direction', 'numLinks', 'quality', 'rssiSrcDest', 'rssiDestSrc'])
##
# The getPathInfo command returns parameters of requested path.
#
# \param source 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# \param dest 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_getPathInfo named tuple.
#
def dn_getPathInfo(self, source, dest) :
res = IpMgrConnectorMuxInternal.send(self, ['getPathInfo'], {"source" : source, "dest" : dest})
return IpMgrConnectorMux.Tuple_dn_getPathInfo(**res)
##
# The named tuple returned by the dn_getNextPathInfo() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>pathId</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>source</tt>: 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>dest</tt>: 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>direction</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: none
# - 1: unused
# - 2: upstream
# - 3: downstream
# - <tt>numLinks</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>quality</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>rssiSrcDest</tt>: 1-byte field formatted as a ints.<br/>
# There is no restriction on the value of this field.
# - <tt>rssiDestSrc</tt>: 1-byte field formatted as a ints.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getNextPathInfo = collections.namedtuple("Tuple_dn_getNextPathInfo", ['RC', 'pathId', 'source', 'dest', 'direction', 'numLinks', 'quality', 'rssiSrcDest', 'rssiDestSrc'])
##
# The getNextPathInfo command allows iteration across paths connected to a particular mote. The pathId parameter indicates the previous value in the iteration. Setting pathId to 0 returns the first path. A pathId can not be used as a unique identifier for a path. It is only valid when associated with a particular mote.
#
# \param macAddress 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# \param filter 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: all
# - 1: upstream
# \param pathId 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_getNextPathInfo named tuple.
#
def dn_getNextPathInfo(self, macAddress, filter, pathId) :
res = IpMgrConnectorMuxInternal.send(self, ['getNextPathInfo'], {"macAddress" : macAddress, "filter" : filter, "pathId" : pathId})
return IpMgrConnectorMux.Tuple_dn_getNextPathInfo(**res)
##
# The named tuple returned by the dn_setAdvertising() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>callbackId</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_setAdvertising = collections.namedtuple("Tuple_dn_setAdvertising", ['RC', 'callbackId'])
##
# The setAdvertising command tells the manager to activate, deactivate, or use slow advertising. The response is a callbackId. A commandFinished notification with the callbackId is generated when the command propagation is complete.
#
# With motes prior to version 1.4.1, it is only possible to turn advertising ON or OFF. If building networks consisting primarily of motes 1.4.1 or later, power can be saved by setting advertising to "slow". Set the INI parameter advtimeout to a value (in ms) and set this command to 0.
#
# For example, the default full advertising frequency is approximately once per 2 seconds. It is recommended to set advtimeout = 20000, which will result in an advertising every 20 seconds which will result in a 90% power savings in the cost of advertising.
#
# It is dangerous to turn off advertising in the network. When advertising is off, new motes can not join and existing motes can not rejoin the network after a reset. Turning off advertising is primarily used to save power, or may be useful in for specific use cases where it is desirable to prevent motes from joining the network. In most cases, it is best to allow advertising to remain under the control of the manager.
#
# \param activate 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: on
# - 1: off
#
# \returns The response to the command, formatted as a #Tuple_dn_setAdvertising named tuple.
#
def dn_setAdvertising(self, activate) :
res = IpMgrConnectorMuxInternal.send(self, ['setAdvertising'], {"activate" : activate})
return IpMgrConnectorMux.Tuple_dn_setAdvertising(**res)
##
# The named tuple returned by the dn_setDownstreamFrameMode() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>callbackId</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_setDownstreamFrameMode = collections.namedtuple("Tuple_dn_setDownstreamFrameMode", ['RC', 'callbackId'])
##
# The setDownstreamFrameMode command tells the manager to shorten or extend the downstream slotframe. The base slotframe length will be multiplied by the downFrameMultVal for "normal" speed. For "fast" speed the downstream slotframe is the base length. Once this command is executed, the manager switches to manual mode and no longer changes slotframe size automatically. The response is a callbackId. A commandFinished notification with the callbackId is generated when the command propagation is complete.
#
# \param frameMode 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: normal
# - 1: fast
#
# \returns The response to the command, formatted as a #Tuple_dn_setDownstreamFrameMode named tuple.
#
def dn_setDownstreamFrameMode(self, frameMode) :
res = IpMgrConnectorMuxInternal.send(self, ['setDownstreamFrameMode'], {"frameMode" : frameMode})
return IpMgrConnectorMux.Tuple_dn_setDownstreamFrameMode(**res)
##
# The named tuple returned by the dn_getManagerStatistics() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>serTxCnt</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>serRxCnt</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>serRxCRCErr</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>serRxOverruns</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>apiEstabConn</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>apiDroppedConn</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>apiTxOk</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>apiTxErr</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>apiTxFail</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>apiRxOk</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>apiRxProtErr</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getManagerStatistics = collections.namedtuple("Tuple_dn_getManagerStatistics", ['RC', 'serTxCnt', 'serRxCnt', 'serRxCRCErr', 'serRxOverruns', 'apiEstabConn', 'apiDroppedConn', 'apiTxOk', 'apiTxErr', 'apiTxFail', 'apiRxOk', 'apiRxProtErr'])
##
# The getManagerStatistics command returns dynamic information and statistics about the manager API. The statistics counts are cleared together with all current statistics using clearStatistics.
#
#
#
# \returns The response to the command, formatted as a #Tuple_dn_getManagerStatistics named tuple.
#
def dn_getManagerStatistics(self, ) :
res = IpMgrConnectorMuxInternal.send(self, ['getManagerStatistics'], {})
return IpMgrConnectorMux.Tuple_dn_getManagerStatistics(**res)
##
# The named tuple returned by the dn_setTime() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
#
Tuple_dn_setTime = collections.namedtuple("Tuple_dn_setTime", ['RC'])
##
# This command has been deprecated, and should not be used in new designs. When the Manager restarts, it will start counting from 20:00:00 UTC July 2, 2002.
#
# The setTime command sets the UTC time on the manager. This command may only be executed when the network is not running. If the trigger flag is false, the manager sets the specified time as soon as it receives the setTime command. When the manager receives a Time Pin trigger, it temporarily stores the current time. If a setTime request is received within a short period of time following the trigger, the manager calculates the delay since the trigger and adjust the time such that the trigger was received at the specified time value.
#
# \param trigger 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# \param utcSecs 8-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# \param utcUsecs 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_setTime named tuple.
#
def dn_setTime(self, trigger, utcSecs, utcUsecs) :
res = IpMgrConnectorMuxInternal.send(self, ['setTime'], {"trigger" : trigger, "utcSecs" : utcSecs, "utcUsecs" : utcUsecs})
return IpMgrConnectorMux.Tuple_dn_setTime(**res)
##
# The named tuple returned by the dn_getLicense() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>license</tt>: 13-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getLicense = collections.namedtuple("Tuple_dn_getLicense", ['RC', 'license'])
##
# The getLicense command has been deprecated in Manager >= 1.3.0.There is no need to use a license to enable > 32 mote networks.
#
# The getLicense command returns the current license key.
#
#
#
# \returns The response to the command, formatted as a #Tuple_dn_getLicense named tuple.
#
def dn_getLicense(self, ) :
res = IpMgrConnectorMuxInternal.send(self, ['getLicense'], {})
return IpMgrConnectorMux.Tuple_dn_getLicense(**res)
##
# The named tuple returned by the dn_setLicense() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
#
Tuple_dn_setLicense = collections.namedtuple("Tuple_dn_setLicense", ['RC'])
##
# The setLicense command has been deprecated in Manager >= 1.3.0. There is no longer a need to use a license to enable > 32 mote networks.
#
# The setLicense command validates and updates the software license key stored in flash. Features enabled or disabled by the license key change will take effect after the device is restarted. If the license parameter is set to all 0x0s, the manager restores the default license. This change is persistent.
#
# \param license 13-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_setLicense named tuple.
#
def dn_setLicense(self, license) :
res = IpMgrConnectorMuxInternal.send(self, ['setLicense'], {"license" : license})
return IpMgrConnectorMux.Tuple_dn_setLicense(**res)
##
# The named tuple returned by the dn_setCLIUser() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
#
Tuple_dn_setCLIUser = collections.namedtuple("Tuple_dn_setCLIUser", ['RC'])
##
# The setCLIUser command sets the password that must be used to log into the command line for a particular user role. The user roles are:
#
# - Viewer - read-only access to non-sensitive information
# - User - read-write access This change is persistent.
#
# \param role 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: viewer
# - 1: user
# \param password 16-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_setCLIUser named tuple.
#
def dn_setCLIUser(self, role, password) :
res = IpMgrConnectorMuxInternal.send(self, ['setCLIUser'], {"role" : role, "password" : password})
return IpMgrConnectorMux.Tuple_dn_setCLIUser(**res)
##
# The named tuple returned by the dn_sendIP() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>callbackId</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_sendIP = collections.namedtuple("Tuple_dn_sendIP", ['RC', 'callbackId'])
##
# The sendIP command sends a 6LoWPAN packet to a mote in the network. The response contains a callback Id. When the manager injects the packet into the network, it will generate a packetSent notification with the calllbackId. The application is responsible for constructing a valid 6LoWPAN packet. The packet is sent to the mote best-effort, so the application should deal with responses and timeouts, if any.
#
# The sendIP command should be used by applications that require end-to-end IP connectivity. For applications that do not require end-to-end IP connectivity, the sendData command provides a simpler interface without requiring the application to understand 6LoWPAN encapsulation. For a more comprehensive discussion of the distinction, see the SmartMesh IP Network User Guide.
#
# \param macAddress 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# \param priority 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: Low
# - 1: Medium
# - 2: High
# \param options 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# \param encryptedOffset 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# \param data None-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_sendIP named tuple.
#
def dn_sendIP(self, macAddress, priority, options, encryptedOffset, data) :
res = IpMgrConnectorMuxInternal.send(self, ['sendIP'], {"macAddress" : macAddress, "priority" : priority, "options" : options, "encryptedOffset" : encryptedOffset, "data" : data})
return IpMgrConnectorMux.Tuple_dn_sendIP(**res)
##
# The named tuple returned by the dn_restoreFactoryDefaults() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
#
Tuple_dn_restoreFactoryDefaults = collections.namedtuple("Tuple_dn_restoreFactoryDefaults", ['RC'])
##
# The restoreFactoryDefaults command restores the default configuration and clears the ACL. This change is persistent.
#
# For Manager versions <1.3.0 that required a license, the license used to enable optional features is preserved during a restore.
#
#
#
# \returns The response to the command, formatted as a #Tuple_dn_restoreFactoryDefaults named tuple.
#
def dn_restoreFactoryDefaults(self, ) :
res = IpMgrConnectorMuxInternal.send(self, ['restoreFactoryDefaults'], {})
return IpMgrConnectorMux.Tuple_dn_restoreFactoryDefaults(**res)
##
# The named tuple returned by the dn_getMoteInfo() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>macAddress</tt>: 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>state</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: lost
# - 1: negotiating
# - 4: operational
# - <tt>numNbrs</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>numGoodNbrs</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>requestedBw</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>totalNeededBw</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>assignedBw</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>packetsReceived</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>packetsLost</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>avgLatency</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>stateTime</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>numJoins</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>hopDepth</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getMoteInfo = collections.namedtuple("Tuple_dn_getMoteInfo", ['RC', 'macAddress', 'state', 'numNbrs', 'numGoodNbrs', 'requestedBw', 'totalNeededBw', 'assignedBw', 'packetsReceived', 'packetsLost', 'avgLatency', 'stateTime', 'numJoins', 'hopDepth'])
##
# The getMoteInfo command returns dynamic information for the specified mote.
#
# \param macAddress 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_getMoteInfo named tuple.
#
def dn_getMoteInfo(self, macAddress) :
res = IpMgrConnectorMuxInternal.send(self, ['getMoteInfo'], {"macAddress" : macAddress})
return IpMgrConnectorMux.Tuple_dn_getMoteInfo(**res)
##
# The named tuple returned by the dn_getNetworkConfig() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>networkId</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>apTxPower</tt>: 1-byte field formatted as a ints.<br/>
# There is no restriction on the value of this field.
# - <tt>frameProfile</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 1: Profile_01
# - <tt>maxMotes</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>baseBandwidth</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>downFrameMultVal</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>numParents</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>ccaMode</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: off
# - 1: energy
# - 2: carrier
# - 3: both
# - <tt>channelList</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>autoStartNetwork</tt>: 1-byte field formatted as a bool.<br/>
# There is no restriction on the value of this field.
# - <tt>locMode</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>bbMode</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: off
# - 1: upstream
# - 2: bidirectional
# - <tt>bbSize</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>isRadioTest</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>bwMult</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>oneChannel</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getNetworkConfig = collections.namedtuple("Tuple_dn_getNetworkConfig", ['RC', 'networkId', 'apTxPower', 'frameProfile', 'maxMotes', 'baseBandwidth', 'downFrameMultVal', 'numParents', 'ccaMode', 'channelList', 'autoStartNetwork', 'locMode', 'bbMode', 'bbSize', 'isRadioTest', 'bwMult', 'oneChannel'])
##
# The getNetworkConfig command returns general network configuration parameters, including the Network ID, bandwidth parameters and number of motes.
#
#
#
# \returns The response to the command, formatted as a #Tuple_dn_getNetworkConfig named tuple.
#
def dn_getNetworkConfig(self, ) :
res = IpMgrConnectorMuxInternal.send(self, ['getNetworkConfig'], {})
return IpMgrConnectorMux.Tuple_dn_getNetworkConfig(**res)
##
# The named tuple returned by the dn_getNetworkInfo() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>numMotes</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>asnSize</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>advertisementState</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: on
# - 1: off
# - <tt>downFrameState</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: normal
# - 1: fast
# - <tt>netReliability</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>netPathStability</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>netLatency</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>netState</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: operational
# - 1: radiotest
# - 2: notStarted
# - 3: errorStartup
# - 4: errorConfig
# - 5: errorLicense
# - <tt>ipv6Address</tt>: 16-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>numLostPackets</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>numArrivedPackets</tt>: 8-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>maxNumbHops</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getNetworkInfo = collections.namedtuple("Tuple_dn_getNetworkInfo", ['RC', 'numMotes', 'asnSize', 'advertisementState', 'downFrameState', 'netReliability', 'netPathStability', 'netLatency', 'netState', 'ipv6Address', 'numLostPackets', 'numArrivedPackets', 'maxNumbHops'])
##
# The getNetworkInfo command returns dynamic network information and statistics.
#
#
#
# \returns The response to the command, formatted as a #Tuple_dn_getNetworkInfo named tuple.
#
def dn_getNetworkInfo(self, ) :
res = IpMgrConnectorMuxInternal.send(self, ['getNetworkInfo'], {})
return IpMgrConnectorMux.Tuple_dn_getNetworkInfo(**res)
##
# The named tuple returned by the dn_getMoteConfigById() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>macAddress</tt>: 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>moteId</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>isAP</tt>: 1-byte field formatted as a bool.<br/>
# There is no restriction on the value of this field.
# - <tt>state</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: lost
# - 1: negotiating
# - 4: operational
# - <tt>reserved</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>isRouting</tt>: 1-byte field formatted as a bool.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getMoteConfigById = collections.namedtuple("Tuple_dn_getMoteConfigById", ['RC', 'macAddress', 'moteId', 'isAP', 'state', 'reserved', 'isRouting'])
##
# The getMoteConfigById command returns a single mote description as the response. The command takes one argument, the short address of a mote (Mote ID). The command returns the same response structure as the getMoteConfig command.
#
# \param moteId 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_getMoteConfigById named tuple.
#
def dn_getMoteConfigById(self, moteId) :
res = IpMgrConnectorMuxInternal.send(self, ['getMoteConfigById'], {"moteId" : moteId})
return IpMgrConnectorMux.Tuple_dn_getMoteConfigById(**res)
##
# The named tuple returned by the dn_setCommonJoinKey() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
#
Tuple_dn_setCommonJoinKey = collections.namedtuple("Tuple_dn_setCommonJoinKey", ['RC'])
##
# The setCommonJoinKey command will set a new value for the common join key. The common join key is used to decrypt join messages only if the ACL is empty.
#
# \param key 16-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_setCommonJoinKey named tuple.
#
def dn_setCommonJoinKey(self, key) :
res = IpMgrConnectorMuxInternal.send(self, ['setCommonJoinKey'], {"key" : key})
return IpMgrConnectorMux.Tuple_dn_setCommonJoinKey(**res)
##
# The named tuple returned by the dn_getIPConfig() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>ipv6Address</tt>: 16-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>mask</tt>: 16-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getIPConfig = collections.namedtuple("Tuple_dn_getIPConfig", ['RC', 'ipv6Address', 'mask'])
##
# The getIPConfig command returns the manager's IP configuration parameters, including the IPv6 address and mask.
#
#
#
# \returns The response to the command, formatted as a #Tuple_dn_getIPConfig named tuple.
#
def dn_getIPConfig(self, ) :
res = IpMgrConnectorMuxInternal.send(self, ['getIPConfig'], {})
return IpMgrConnectorMux.Tuple_dn_getIPConfig(**res)
##
# The named tuple returned by the dn_setIPConfig() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
#
Tuple_dn_setIPConfig = collections.namedtuple("Tuple_dn_setIPConfig", ['RC'])
##
# The setIPConfig command sets the IPv6 prefix of the mesh network. Only the upper 8 bytes of the IPv6 address are relevant: the lower 8 bytes of the IPv6 address are ignored, and lower 8 bytes of the mask field are reserved and should be set to 0. This change is persistent.
#
# \param ipv6Address 16-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# \param mask 16-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_setIPConfig named tuple.
#
def dn_setIPConfig(self, ipv6Address, mask) :
res = IpMgrConnectorMuxInternal.send(self, ['setIPConfig'], {"ipv6Address" : ipv6Address, "mask" : mask})
return IpMgrConnectorMux.Tuple_dn_setIPConfig(**res)
##
# The named tuple returned by the dn_deleteMote() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
#
Tuple_dn_deleteMote = collections.namedtuple("Tuple_dn_deleteMote", ['RC'])
##
# The deleteMote command deletes a mote from the manager's list. A mote can only be deleted if it is in the Lost state.
#
# \param macAddress 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_deleteMote named tuple.
#
def dn_deleteMote(self, macAddress) :
res = IpMgrConnectorMuxInternal.send(self, ['deleteMote'], {"macAddress" : macAddress})
return IpMgrConnectorMux.Tuple_dn_deleteMote(**res)
##
# The named tuple returned by the dn_getMoteLinks() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
# - <tt>idx</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>utilization</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>numLinks</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>frameId_1</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>slot_1</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>channelOffset_1</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>moteId_1</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>flags_1</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>frameId_2</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>slot_2</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>channelOffset_2</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>moteId_2</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>flags_2</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>frameId_3</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>slot_3</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>channelOffset_3</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>moteId_3</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>flags_3</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>frameId_4</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>slot_4</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>channelOffset_4</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>moteId_4</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>flags_4</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>frameId_5</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>slot_5</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>channelOffset_5</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>moteId_5</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>flags_5</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>frameId_6</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>slot_6</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>channelOffset_6</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>moteId_6</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>flags_6</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>frameId_7</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>slot_7</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>channelOffset_7</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>moteId_7</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>flags_7</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>frameId_8</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>slot_8</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>channelOffset_8</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>moteId_8</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>flags_8</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>frameId_9</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>slot_9</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>channelOffset_9</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>moteId_9</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>flags_9</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>frameId_10</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>slot_10</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>channelOffset_10</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>moteId_10</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>flags_10</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getMoteLinks = collections.namedtuple("Tuple_dn_getMoteLinks", ['RC', 'idx', 'utilization', 'numLinks', 'frameId_1', 'slot_1', 'channelOffset_1', 'moteId_1', 'flags_1', 'frameId_2', 'slot_2', 'channelOffset_2', 'moteId_2', 'flags_2', 'frameId_3', 'slot_3', 'channelOffset_3', 'moteId_3', 'flags_3', 'frameId_4', 'slot_4', 'channelOffset_4', 'moteId_4', 'flags_4', 'frameId_5', 'slot_5', 'channelOffset_5', 'moteId_5', 'flags_5', 'frameId_6', 'slot_6', 'channelOffset_6', 'moteId_6', 'flags_6', 'frameId_7', 'slot_7', 'channelOffset_7', 'moteId_7', 'flags_7', 'frameId_8', 'slot_8', 'channelOffset_8', 'moteId_8', 'flags_8', 'frameId_9', 'slot_9', 'channelOffset_9', 'moteId_9', 'flags_9', 'frameId_10', 'slot_10', 'channelOffset_10', 'moteId_10', 'flags_10'])
##
# The getMoteLinks command returns information about links assigned to the mote. The response contains a list of links starting with Nth link on the mote, where N is supplied as the idx parameter in the request. To retrieve all links on the device the user can call this command with idx that increments by number of links returned with prior response, until the command returns RC_END_OF_LIST response code. Note that links assigned to a mote may change between API calls.
#
# \param macAddress 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# \param idx 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_getMoteLinks named tuple.
#
def dn_getMoteLinks(self, macAddress, idx) :
res = IpMgrConnectorMuxInternal.send(self, ['getMoteLinks'], {"macAddress" : macAddress, "idx" : idx})
return IpMgrConnectorMux.Tuple_dn_getMoteLinks(**res)
##
# The named tuple returned by the dn_radiotestRxPER() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
#
Tuple_dn_radiotestRxPER = collections.namedtuple("Tuple_dn_radiotestRxPER", ['RC'])
##
# The radiotestRxPER command initiates the Packet Error Rate (PER) test in RX mode. This command may be issued only if the manager has been booted up in radiotest mode (see setNetworkConfig command).
#
# This command is available in the SmartMesh IP Manager version 1.4.2 or later.
#
#
#
# \returns The response to the command, formatted as a #Tuple_dn_radiotestRxPER named tuple.
#
def dn_radiotestRxPER(self, ) :
res = IpMgrConnectorMuxInternal.send(self, ['radiotestRxPER'], {})
return IpMgrConnectorMux.Tuple_dn_radiotestRxPER(**res)
##
# The named tuple returned by the dn_radiotestTxPER() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - 19: RC_UNSUPPORTED
#
Tuple_dn_radiotestTxPER = collections.namedtuple("Tuple_dn_radiotestTxPER", ['RC'])
##
# The radiotestTxPER command initiates the Packet Error Rate (PER) test in TX mode. This command may be issued only if the manager has been booted up in radiotest mode (see setNetworkConfig command).
# Channel numbering is 0-15, corresponding to IEEE 2.4 GHz channels 11-26.
#
# This command is available in the SmartMesh IP Manager version 1.4.2 or later.
#
# \param txPower 1-byte field formatted as a ints.<br/>
# There is no restriction on the value of this field.
# \param numPackets 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# \param chanMask 2-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# \param numRepeat 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_radiotestTxPER named tuple.
#
def dn_radiotestTxPER(self, txPower, numPackets, chanMask, numRepeat) :
res = IpMgrConnectorMuxInternal.send(self, ['radiotestTxPER'], {"txPower" : txPower, "numPackets" : numPackets, "chanMask" : chanMask, "numRepeat" : numRepeat})
return IpMgrConnectorMux.Tuple_dn_radiotestTxPER(**res)
#======================== notifications ===================================
##
# Dictionary of all notification tuples.
#
notifTupleTable = {}
##
# \brief MANAGER_HELLO notification.
#
# Sent by the manager to initiate a new session with a client.
#
# Formatted as a Tuple_manager_hello named tuple. It contains the following fields:
# - <tt>version</tt> 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>mode</tt> 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
MANAGER_HELLO = "manager_hello"
notifTupleTable[MANAGER_HELLO] = Tuple_manager_hello = collections.namedtuple("Tuple_manager_hello", ['version', 'mode'])
##
# \brief EVENTMOTERESET notification.
#
# This notification is sent when a user-initiated reset is executed by the manager.
#
# Formatted as a Tuple_eventMoteReset named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
EVENTMOTERESET = "eventMoteReset"
notifTupleTable[EVENTMOTERESET] = Tuple_eventMoteReset = collections.namedtuple("Tuple_eventMoteReset", ['eventId', 'macAddress'])
##
# \brief EVENTNETWORKRESET notification.
#
# This notification is sent when the manager starts the network. This event has no eventData fields.
#
# Formatted as a Tuple_eventNetworkReset named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
EVENTNETWORKRESET = "eventNetworkReset"
notifTupleTable[EVENTNETWORKRESET] = Tuple_eventNetworkReset = collections.namedtuple("Tuple_eventNetworkReset", ['eventId'])
##
# \brief EVENTCOMMANDFINISHED notification.
#
# The commandFinished notification is sent when a command associated with the provided callback id finishes executing.
#
# Formatted as a Tuple_eventCommandFinished named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>callbackId</tt> 4-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>rc</tt> 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: OK
# - 1: nack
# - 2: commandTimeout
#
EVENTCOMMANDFINISHED = "eventCommandFinished"
notifTupleTable[EVENTCOMMANDFINISHED] = Tuple_eventCommandFinished = collections.namedtuple("Tuple_eventCommandFinished", ['eventId', 'callbackId', 'rc'])
##
# \brief EVENTMOTEJOIN notification.
#
# This notification is sent when a mote joins the network.
#
# Formatted as a Tuple_eventMoteJoin named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
EVENTMOTEJOIN = "eventMoteJoin"
notifTupleTable[EVENTMOTEJOIN] = Tuple_eventMoteJoin = collections.namedtuple("Tuple_eventMoteJoin", ['eventId', 'macAddress'])
##
# \brief EVENTMOTEOPERATIONAL notification.
#
# This notification is sent when a mote that joins the network becomes operational.
#
# Formatted as a Tuple_eventMoteOperational named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
EVENTMOTEOPERATIONAL = "eventMoteOperational"
notifTupleTable[EVENTMOTEOPERATIONAL] = Tuple_eventMoteOperational = collections.namedtuple("Tuple_eventMoteOperational", ['eventId', 'macAddress'])
##
# \brief EVENTMOTELOST notification.
#
# This notification is sent when a mote's state changes to Lost , which indicates that the mote is not responding to downstream messages.
#
# Formatted as a Tuple_eventMoteLost named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
EVENTMOTELOST = "eventMoteLost"
notifTupleTable[EVENTMOTELOST] = Tuple_eventMoteLost = collections.namedtuple("Tuple_eventMoteLost", ['eventId', 'macAddress'])
##
# \brief EVENTNETWORKTIME notification.
#
# The time notification is triggered by the client asserting the TIME pin or by calling the getTime command. This notification contains the time when the TIME pin was asserted (or the getTime command was processed) expressed as:
#
# - ASN The absolute slot number (the number of timeslots since " 7/2/2002 8:00:00 PM PST" if UTC is set on manager, otherwise since Jan 1, 1970)
#
#
# - Uptime The number of seconds since the device was booted
# - Unix time The number of seconds and microseconds since Jan 1, 1970 in UTC
#
# Formatted as a Tuple_eventNetworkTime named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>uptime</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>utcSecs</tt> 8-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>utcUsecs</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>asn</tt> 5-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>asnOffset</tt> 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
EVENTNETWORKTIME = "eventNetworkTime"
notifTupleTable[EVENTNETWORKTIME] = Tuple_eventNetworkTime = collections.namedtuple("Tuple_eventNetworkTime", ['eventId', 'uptime', 'utcSecs', 'utcUsecs', 'asn', 'asnOffset'])
##
# \brief EVENTPINGRESPONSE notification.
#
# This notification is sent when a reply is received from a mote ping.
#
# Formatted as a Tuple_eventPingResponse named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>callbackId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>delay</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>voltage</tt> 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>temperature</tt> 1-byte field formatted as a ints.<br/>
# There is no restriction on the value of this field.
#
EVENTPINGRESPONSE = "eventPingResponse"
notifTupleTable[EVENTPINGRESPONSE] = Tuple_eventPingResponse = collections.namedtuple("Tuple_eventPingResponse", ['eventId', 'callbackId', 'macAddress', 'delay', 'voltage', 'temperature'])
##
# \brief EVENTPATHCREATE notification.
#
# This notification is sent when the manager creates a connection (path) between two motes.
#
# Formatted as a Tuple_eventPathCreate named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>source</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>dest</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>direction</tt> 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: none
# - 1: unused
# - 2: upstream
# - 3: downstream
#
EVENTPATHCREATE = "eventPathCreate"
notifTupleTable[EVENTPATHCREATE] = Tuple_eventPathCreate = collections.namedtuple("Tuple_eventPathCreate", ['eventId', 'source', 'dest', 'direction'])
##
# \brief EVENTPATHDELETE notification.
#
# This notification is sent when the manager removes a connection (path) between two motes.
#
# Formatted as a Tuple_eventPathDelete named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>source</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>dest</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>direction</tt> 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: none
# - 1: unused
# - 2: upstream
# - 3: downstream
#
EVENTPATHDELETE = "eventPathDelete"
notifTupleTable[EVENTPATHDELETE] = Tuple_eventPathDelete = collections.namedtuple("Tuple_eventPathDelete", ['eventId', 'source', 'dest', 'direction'])
##
# \brief EVENTPACKETSENT notification.
#
# The packetSent notification is generated when client's packet is removed from manager's queue and sent into the wireless network.
#
# Formatted as a Tuple_eventPacketSent named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>callbackId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>rc</tt> 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
EVENTPACKETSENT = "eventPacketSent"
notifTupleTable[EVENTPACKETSENT] = Tuple_eventPacketSent = collections.namedtuple("Tuple_eventPacketSent", ['eventId', 'callbackId', 'rc'])
##
# \brief EVENTMOTECREATE notification.
#
# This event is sent when a mote joins the manager for the first time.
#
# Formatted as a Tuple_eventMoteCreate named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>moteId</tt> 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
EVENTMOTECREATE = "eventMoteCreate"
notifTupleTable[EVENTMOTECREATE] = Tuple_eventMoteCreate = collections.namedtuple("Tuple_eventMoteCreate", ['eventId', 'macAddress', 'moteId'])
##
# \brief EVENTMOTEDELETE notification.
#
# This notification is sent when a mote is deleted as a result of moteDelete command.
#
# Formatted as a Tuple_eventMoteDelete named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>moteId</tt> 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
EVENTMOTEDELETE = "eventMoteDelete"
notifTupleTable[EVENTMOTEDELETE] = Tuple_eventMoteDelete = collections.namedtuple("Tuple_eventMoteDelete", ['eventId', 'macAddress', 'moteId'])
##
# \brief EVENTJOINFAILED notification.
#
# The joinFailed event is generated when a mote sends a join request to the manager but the request can not be validated. This notification is available in Manager 1.4.1 or later.
#
# Formatted as a Tuple_eventJoinFailed named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>reason</tt> 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: counter
# - 1: notOnACL
# - 2: authentication
# - 3: unexpected
#
EVENTJOINFAILED = "eventJoinFailed"
notifTupleTable[EVENTJOINFAILED] = Tuple_eventJoinFailed = collections.namedtuple("Tuple_eventJoinFailed", ['eventId', 'macAddress', 'reason'])
##
# \brief EVENTINVALIDMIC notification.
#
# The invalidMIC event is generated when a packet that the manager receives from a mote in the network fails decryption. This notification is available in Manager 1.4.1 or later.
#
# Formatted as a Tuple_eventInvalidMIC named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
EVENTINVALIDMIC = "eventInvalidMIC"
notifTupleTable[EVENTINVALIDMIC] = Tuple_eventInvalidMIC = collections.namedtuple("Tuple_eventInvalidMIC", ['eventId', 'macAddress'])
##
# \brief NOTIFLOG notification.
#
# A log notifications is generated in response to the getLog command. Each log notification contains a message from the mote's log.
#
# Formatted as a Tuple_notifLog named tuple. It contains the following fields:
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>logMsg</tt> None-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
NOTIFLOG = "notifLog"
notifTupleTable[NOTIFLOG] = Tuple_notifLog = collections.namedtuple("Tuple_notifLog", ['macAddress', 'logMsg'])
##
# \brief NOTIFDATA notification.
#
# The data notification contains a header and a variable length array of binary data. The length of the data is determined based on the length of the notification.
#
# The manager forwards all packets received on its IP address and non-manager ports as data notifications.
#
# Formatted as a Tuple_notifData named tuple. It contains the following fields:
# - <tt>utcSecs</tt> 8-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>utcUsecs</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>srcPort</tt> 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>dstPort</tt> 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>data</tt> None-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
NOTIFDATA = "notifData"
notifTupleTable[NOTIFDATA] = Tuple_notifData = collections.namedtuple("Tuple_notifData", ['utcSecs', 'utcUsecs', 'macAddress', 'srcPort', 'dstPort', 'data'])
##
# \brief NOTIFIPDATA notification.
#
# The ipData notification contains full IP packet sent by the mote, including 6LoWPAN header, UDP header, and the UDP payload. Manager generates this notification when it receives packet from a mote with destination other than manager's own IP address. The size of the data field can be calculated by subtracting the fixed header size (up to macAddress) from the size of overall notification packet.
#
# Formatted as a Tuple_notifIpData named tuple. It contains the following fields:
# - <tt>utcSecs</tt> 8-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>utcUsecs</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>data</tt> None-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
NOTIFIPDATA = "notifIpData"
notifTupleTable[NOTIFIPDATA] = Tuple_notifIpData = collections.namedtuple("Tuple_notifIpData", ['utcSecs', 'utcUsecs', 'macAddress', 'data'])
##
# \brief NOTIFHEALTHREPORT notification.
#
# The healthReport notifications include the raw payload of health reports received from devices. The payload contains one or more specific health report messages. Each message contains an identifier, length and variable-sized data. The individual healthReport message structures are defined below.
#
# Formatted as a Tuple_notifHealthReport named tuple. It contains the following fields:
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>payload</tt> None-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
NOTIFHEALTHREPORT = "notifHealthReport"
notifTupleTable[NOTIFHEALTHREPORT] = Tuple_notifHealthReport = collections.namedtuple("Tuple_notifHealthReport", ['macAddress', 'payload'])
##
# \brief NOTIFRADIOTESTSTATSPER notification.
#
# The testRadioStatsPER notification is generated by the manager when PER test in RX mode is completed.
#
# This command is available in the SmartMesh IP Manager version 1.4.2 or later.
#
# Formatted as a Tuple_notifRadiotestStatsPER named tuple. It contains the following fields:
# - <tt>numRxOK</tt> 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>numRxErr</tt> 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>numRxInv</tt> 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>numRxMiss</tt> 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>perInt</tt> 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>perFrac</tt> 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
NOTIFRADIOTESTSTATSPER = "notifRadiotestStatsPER"
notifTupleTable[NOTIFRADIOTESTSTATSPER] = Tuple_notifRadiotestStatsPER = collections.namedtuple("Tuple_notifRadiotestStatsPER", ['numRxOK', 'numRxErr', 'numRxInv', 'numRxMiss', 'perInt', 'perFrac'])
##
# \brief Get a notification from the notification queue, and returns
# it properly formatted.
#
# \exception NotificationError if unknown notification.
#
def getNotification(self, timeoutSec=-1) :
temp = self.getNotificationInternal(timeoutSec)
if not temp:
return temp
(ids, param) = temp
try :
if IpMgrConnectorMux.notifTupleTable[ids[-1]] :
return (ids[-1], IpMgrConnectorMux.notifTupleTable[ids[-1]](**param))
else :
return (ids[-1], None)
except KeyError :
raise ApiException.NotificationError(ids, param)
##
# end of IpMgrConnectorMux
# \}
#
| {
"content_hash": "1f97d9e83598ec3d012d33ee17b2f731",
"timestamp": "",
"source": "github",
"line_count": 2394,
"max_line_length": 772,
"avg_line_length": 50.225563909774436,
"alnum_prop": 0.6329258150365935,
"repo_name": "realms-team/basestation-fw",
"id": "111d5d973df233dc555e30ed60836edad12e26a9",
"size": "120240",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libs/smartmeshsdk-REL-1.3.0.1/libs/SmartMeshSDK/IpMgrConnectorMux/IpMgrConnectorMux.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1951087"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
def gen_uuid(apps, schema_editor):
    """Backfill the ``uuid`` field of every existing Layer and Service row.

    Data-migration helper run via ``migrations.RunPython``; ``schema_editor``
    is unused. Each row receives a fresh random UUID4 and is saved
    individually so any per-row ``save()`` logic still runs.
    """
    # The backfill is identical for both models: loop over the model names
    # instead of duplicating the loop body (the original repeated it twice).
    for model_name in ('Layer', 'Service'):
        model = apps.get_model('aggregator', model_name)
        for row in model.objects.all():
            row.uuid = uuid.uuid4()
            row.save()
class Migration(migrations.Migration):
    """Data migration: populate the ``uuid`` columns added by 0005_sync_models."""
    dependencies = [
        ('aggregator', '0005_sync_models'),
    ]
    # Reverse is a no-op: generated UUIDs are simply left in place.
    operations = [
        migrations.RunPython(gen_uuid, reverse_code=migrations.RunPython.noop),
    ]
| {
"content_hash": "cf20ed09bc17fe0c4075690c4c0d15ee",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.6424050632911392,
"repo_name": "cga-harvard/hypermap",
"id": "78fee73cc5c5093ac0d1e3267806694f9d7ac6a6",
"size": "656",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hypermap/aggregator/migrations/0006_populate_uuid_values.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "139270"
},
{
"name": "HTML",
"bytes": "28842"
},
{
"name": "Python",
"bytes": "81613"
},
{
"name": "Ruby",
"bytes": "794"
},
{
"name": "Shell",
"bytes": "522"
}
],
"symlink_target": ""
} |
"""Test the ref_resolver module."""
from __future__ import absolute_import
import shutil
import tempfile
import pytest # type: ignore
@pytest.fixture
def tmp_dir_fixture(request):
    """Create a temporary directory, removed automatically on test teardown."""
    temp_dir = tempfile.mkdtemp()
    def _cleanup():
        shutil.rmtree(temp_dir)
    request.addfinalizer(_cleanup)
    return temp_dir
def test_Loader_initialisation_for_HOME_env_var(tmp_dir_fixture):
    """Loader should build a requests Session when HOME is set."""
    import os
    from schema_salad.ref_resolver import Loader
    from requests import Session
    # Point HOME at a known temporary directory.
    os.environ["HOME"] = tmp_dir_fixture
    ldr = Loader(ctx={})
    assert isinstance(ldr.session, Session)
def test_Loader_initialisation_for_TMP_env_var(tmp_dir_fixture):
    """Loader should still initialise when only TMP is available."""
    import os
    from schema_salad.ref_resolver import Loader
    from requests import Session
    # Drop HOME if present, then point TMP at the temporary directory.
    os.environ.pop("HOME", None)
    os.environ["TMP"] = tmp_dir_fixture
    ldr = Loader(ctx={})
    assert isinstance(ldr.session, Session)
def test_Loader_initialisation_with_neither_TMP_HOME_set(tmp_dir_fixture):
    """Loader should still initialise when both HOME and TMP are unset."""
    import os
    from schema_salad.ref_resolver import Loader
    from requests import Session
    # Remove both variables if they exist.
    for var in ("HOME", "TMP"):
        os.environ.pop(var, None)
    ldr = Loader(ctx={})
    assert isinstance(ldr.session, Session)
| {
"content_hash": "81838f8c2ae69e652cee8f4d5cf225ac",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 74,
"avg_line_length": 25.418181818181818,
"alnum_prop": 0.6831187410586552,
"repo_name": "mr-c/common-workflow-language",
"id": "fdb5e61a7220e993d57c8667559de75b72ad5b73",
"size": "1398",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "v1.0/salad/schema_salad/tests/test_ref_resolver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "95268"
},
{
"name": "Python",
"bytes": "1552"
},
{
"name": "Shell",
"bytes": "1230"
}
],
"symlink_target": ""
} |
import logging
import random
import time
from urllib import quote_plus
import uuid
from openerp import SUPERUSER_ID
import simplejson
from openerp import tools
from openerp.osv import fields, osv
from openerp.osv import expression
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval
import openerp
_logger = logging.getLogger(__name__)
# ir.model.access permission-flag tuples, from widest to narrowest grant.
FULL_ACCESS = ('perm_read', 'perm_write', 'perm_create', 'perm_unlink')
READ_WRITE_ACCESS = ('perm_read', 'perm_write')
READ_ONLY_ACCESS = ('perm_read',)
# Superuser id, used below to perform privileged operations for sharing.
UID_ROOT = 1
# Pseudo-domain to represent an empty filter, constructed using
# osv.expression's DUMMY_LEAF
DOMAIN_ALL = [(1, '=', 1)]
# A good selection of easy to read password characters (e.g. no '0' vs 'O', etc.)
RANDOM_PASS_CHARACTERS = 'aaaabcdeeeefghjkmnpqrstuvwxyzAAAABCDEEEEFGHJKLMNPQRSTUVWXYZ23456789'
def generate_random_pass():
    """Return a 10-character random password drawn from RANDOM_PASS_CHARACTERS.

    Uses the OS CSPRNG (random.SystemRandom) rather than the default,
    seedable Mersenne Twister, and draws with replacement: the original
    ``random.sample`` never repeated a character, which both reduced the
    password space and used a predictable PRNG for a security-sensitive
    value. The interface (10-char string from the same charset) is unchanged.
    """
    rng = random.SystemRandom()
    return ''.join(rng.choice(RANDOM_PASS_CHARACTERS) for _dummy in range(10))
class share_wizard(osv.TransientModel):
_name = 'share.wizard'
_description = 'Share Wizard'
def _assert(self, condition, error_message, context=None):
"""Raise a user error with the given message if condition is not met.
The error_message should have been translated with _().
"""
if not condition:
raise osv.except_osv(_('Sharing access cannot be created.'), error_message)
def has_group(self, cr, uid, module, group_xml_id, context=None):
"""Returns True if current user is a member of the group identified by the module, group_xml_id pair."""
# if the group was deleted or does not exist, we say NO (better safe than sorry)
try:
model, group_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, module, group_xml_id)
except ValueError:
return False
return group_id in self.pool.get('res.users').read(cr, uid, uid, ['groups_id'], context=context)['groups_id']
    def has_share(self, cr, uid, unused_param, context=None):
        """Return True if the current user is in the share.group_share_user group.

        ``unused_param`` is ignored — presumably kept so the signature matches
        how callers invoke it; confirm before removing.
        """
        return self.has_group(cr, uid, module='share', group_xml_id='group_share_user', context=context)
def _user_type_selection(self, cr, uid, context=None):
"""Selection values may be easily overridden/extended via inheritance"""
return [('embedded', _('Direct link or embed code')), ('emails',_('Emails')), ]
"""Override of create() to auto-compute the action name"""
def create(self, cr, uid, values, context=None):
if 'action_id' in values and not 'name' in values:
action = self.pool.get('ir.actions.actions').browse(cr, uid, values['action_id'], context=context)
values['name'] = action.name
return super(share_wizard,self).create(cr, uid, values, context=context)
def share_url_template(self, cr, uid, _ids, context=None):
# NOTE: take _ids in parameter to allow usage through browse_record objects
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='', context=context)
if base_url:
base_url += '/login?db=%(dbname)s&login=%(login)s&key=%(password)s'
extra = context and context.get('share_url_template_extra_arguments')
if extra:
base_url += '&' + '&'.join('%s=%%(%s)s' % (x,x) for x in extra)
hash_ = context and context.get('share_url_template_hash_arguments')
if hash_:
base_url += '#' + '&'.join('%s=%%(%s)s' % (x,x) for x in hash_)
return base_url
def _share_root_url(self, cr, uid, ids, _fieldname, _args, context=None):
result = dict.fromkeys(ids, '')
data = dict(dbname=cr.dbname, login='', password='')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = this.share_url_template() % data
return result
    def _generate_embedded_code(self, wizard, options=None):
        """Build the HTML/JS snippet that embeds the shared action in a web page.

        ``wizard`` is a share.wizard browse record; the snippet bakes in the
        login/password of the wizard's first result line. ``options`` may
        override the wizard's embed flags via the 'title' and 'search' keys.
        """
        cr = wizard._cr
        uid = wizard._uid
        context = wizard._context
        if options is None:
            options = {}
        js_options = {}
        title = options['title'] if 'title' in options else wizard.embed_option_title
        # The search view is never exposed on read-only shares.
        search = (options['search'] if 'search' in options else wizard.embed_option_search) if wizard.access_mode != 'readonly' else False
        if not title:
            js_options['display_title'] = False
        if search:
            js_options['search_view'] = True
        js_options_str = (', ' + simplejson.dumps(js_options)) if js_options else ''
        base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default=None, context=context)
        user = wizard.result_line_ids[0]
        return """
        <script type="text/javascript" src="%(base_url)s/web/webclient/js"></script>
        <script type="text/javascript">
            new openerp.init(%(init)s).web.embed(%(server)s, %(dbname)s, %(login)s, %(password)s,%(action)d%(options)s);
        </script> """ % {
            'init': simplejson.dumps(openerp.conf.server_wide_modules),
            'base_url': base_url or '',
            'server': simplejson.dumps(base_url),
            'dbname': simplejson.dumps(cr.dbname),
            'login': simplejson.dumps(user.login),
            'password': simplejson.dumps(user.password),
            'action': user.user_id.action_id.id,
            'options': js_options_str,
        }
def _embed_code(self, cr, uid, ids, _fn, _args, context=None):
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = self._generate_embedded_code(this)
return result
def _embed_url(self, cr, uid, ids, _fn, _args, context=None):
if context is None:
context = {}
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
if this.result_line_ids:
ctx = dict(context, share_url_template_hash_arguments=['action'])
user = this.result_line_ids[0]
data = dict(dbname=cr.dbname, login=user.login, password=user.password, action=this.action_id.id)
result[this.id] = this.share_url_template(context=ctx) % data
return result
_columns = {
'action_id': fields.many2one('ir.actions.act_window', 'Action to share', required=True,
help="The action that opens the screen containing the data you wish to share."),
'view_type': fields.char('Current View Type', size=32, required=True),
'domain': fields.char('Domain', size=256, help="Optional domain for further data filtering"),
'user_type': fields.selection(lambda s, *a, **k: s._user_type_selection(*a, **k),'Sharing method', required=True,
help="Select the type of user(s) you would like to share data with."),
'new_users': fields.text("Emails"),
'email_1': fields.char('New user email', size=64),
'email_2': fields.char('New user email', size=64),
'email_3': fields.char('New user email', size=64),
'invite': fields.boolean('Invite users to OpenSocial record'),
'access_mode': fields.selection([('readonly','Can view'),('readwrite','Can edit')],'Access Mode', required=True,
help="Access rights to be granted on the shared documents."),
'result_line_ids': fields.one2many('share.wizard.result.line', 'share_wizard_id', 'Summary', readonly=True),
'share_root_url': fields.function(_share_root_url, string='Share Access URL', type='char', size=512, readonly=True,
help='Main access page for users that are granted shared access'),
'name': fields.char('Share Title', size=64, required=True, help="Title for the share (displayed to users as menu and shortcut name)"),
'record_name': fields.char('Record name', size=128, help="Name of the shared record, if sharing a precise record"),
'message': fields.text("Personal Message", help="An optional personal message, to be included in the email notification."),
'embed_code': fields.function(_embed_code, type='text', string='Code',
help="Embed this code in your documents to provide a link to the "\
"shared document."),
'embed_option_title': fields.boolean('Display title'),
'embed_option_search': fields.boolean('Display search view'),
'embed_url': fields.function(_embed_url, string='Share URL', type='char', size=512, readonly=True),
}
_defaults = {
'view_type': 'page',
'user_type' : 'embedded',
'invite': False,
'domain': lambda self, cr, uid, context, *a: context.get('domain', '[]'),
'action_id': lambda self, cr, uid, context, *a: context.get('action_id'),
'access_mode': 'readwrite',
'embed_option_title': True,
'embed_option_search': True,
}
def has_email(self, cr, uid, context=None):
return bool(self.pool.get('res.users').browse(cr, uid, uid, context=context).email)
def go_step_1(self, cr, uid, ids, context=None):
wizard_data = self.browse(cr,uid,ids,context)[0]
if wizard_data.user_type == 'emails' and not self.has_email(cr, uid, context=context):
raise osv.except_osv(_('No email address configured'),
_('You must configure your email address in the user preferences before using the Share button.'))
model, res_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'share', 'action_share_wizard_step1')
action = self.pool.get(model).read(cr, uid, res_id, context=context)
action['res_id'] = ids[0]
action.pop('context', '')
return action
def _create_share_group(self, cr, uid, wizard_data, context=None):
group_obj = self.pool.get('res.groups')
share_group_name = '%s: %s (%d-%s)' %('Shared', wizard_data.name, uid, time.time())
# create share group without putting admin in it
return group_obj.create(cr, UID_ROOT, {'name': share_group_name, 'share': True}, {'noadmin': True})
    def _create_new_share_users(self, cr, uid, wizard_data, group_id, context=None):
        """Create one new res.users record for each email address provided in
        wizard_data.new_users, ignoring already existing users.

        Populates wizard_data.result_line_ids with one new line for each user
        (existing or not).  New users will also have a value for the password
        field, so they can receive it by email.

        :param group_id: id of the share group the users must belong to
        :return: (ids of the created users, ids of the ignored, existing ones)
        """
        if context is None:
            context = {}
        user_obj = self.pool.get('res.users')
        current_user = user_obj.browse(cr, UID_ROOT, uid, context=context)
        # modify context to disable shortcuts when creating share users
        # NOTE(review): this mutates the caller's context dict in place
        context['noshortcut'] = True
        created_ids = []
        existing_ids = []
        if wizard_data.user_type == 'emails':
            # get new user list from email data (free-form textarea + 3 fields)
            new_users = (wizard_data.new_users or '').split('\n')
            new_users += [wizard_data.email_1 or '', wizard_data.email_2 or '', wizard_data.email_3 or '']
            for new_user in new_users:
                # Ignore blank lines
                new_user = new_user.strip()
                if not new_user: continue
                # Ignore the user if it already exists.
                # invite mode matches on email, plain share mode on login
                if not wizard_data.invite:
                    existing = user_obj.search(cr, UID_ROOT, [('login', '=', new_user)])
                else:
                    existing = user_obj.search(cr, UID_ROOT, [('email', '=', new_user)])
                existing_ids.extend(existing)
                if existing:
                    # existing users get a result line without a password
                    new_line = { 'user_id': existing[0],
                                 'newly_created': False}
                    wizard_data.write({'result_line_ids': [(0,0,new_line)]})
                    continue
                new_pass = generate_random_pass()
                user_id = user_obj.create(cr, UID_ROOT, {
                        'login': new_user,
                        'password': new_pass,
                        'name': new_user,
                        'email': new_user,
                        'groups_id': [(6,0,[group_id])],
                        'share': True,
                        'company_id': current_user.company_id.id,
                        'company_ids': [(6, 0, [current_user.company_id.id])],
                }, context)
                # keep the clear-text password on the line so it can be emailed
                new_line = { 'user_id': user_id,
                             'password': new_pass,
                             'newly_created': True}
                wizard_data.write({'result_line_ids': [(0,0,new_line)]})
                created_ids.append(user_id)
        elif wizard_data.user_type == 'embedded':
            # embedded sharing: one anonymous technical user per wizard run
            new_login = 'embedded-%s' % (uuid.uuid4().hex,)
            new_pass = generate_random_pass()
            user_id = user_obj.create(cr, UID_ROOT, {
                'login': new_login,
                'password': new_pass,
                'name': new_login,
                'groups_id': [(6,0,[group_id])],
                'share': True,
                'company_id': current_user.company_id.id,
                'company_ids': [(6, 0, [current_user.company_id.id])],
            }, context)
            new_line = { 'user_id': user_id,
                         'password': new_pass,
                         'newly_created': True}
            wizard_data.write({'result_line_ids': [(0,0,new_line)]})
            created_ids.append(user_id)
        return created_ids, existing_ids
    def _create_shortcut(self, cr, uid, values, context=None):
        """Create a window action from ``values``, a menu entry under the
        administration shortcuts menu, and an ir.ui.view_sc shortcut for the
        given user.

        :param values: field values for the new ir.actions.act_window
                       (must at least contain 'name')
        :return: id of the newly created window action
        """
        if context is None:
            context = {}
        new_context = context.copy()
        # strip 'default_*' keys so they don't leak into the created action
        for key in context:
            if key.startswith('default_'):
                del new_context[key]
        dataobj = self.pool.get('ir.model.data')
        menu_id = dataobj._get_id(cr, uid, 'base', 'menu_administration_shortcut')
        shortcut_menu_id = int(dataobj.read(cr, uid, menu_id, ['res_id'], new_context)['res_id'])
        action_id = self.pool.get('ir.actions.act_window').create(cr, UID_ROOT, values, new_context)
        menu_data = {'name': values['name'],
                     'sequence': 10,
                     'action': 'ir.actions.act_window,'+str(action_id),
                     'parent_id': shortcut_menu_id,
                     'icon': 'STOCK_JUSTIFY_FILL'}
        menu_obj = self.pool.get('ir.ui.menu')
        menu_id = menu_obj.create(cr, UID_ROOT, menu_data)
        # NOTE(review): 'sequence': UID_ROOT looks odd (UID_ROOT is a user-id
        # constant, not a sequence) -- presumably just reused as a small int;
        # confirm intent before changing
        sc_data = {'name': values['name'], 'sequence': UID_ROOT,'res_id': menu_id }
        self.pool.get('ir.ui.view_sc').create(cr, uid, sc_data, new_context)
        # update menu cache so the new menu shows up without a cache flush
        user_groups = set(self.pool.get('res.users').read(cr, UID_ROOT, uid, ['groups_id'])['groups_id'])
        key = (cr.dbname, shortcut_menu_id, tuple(user_groups))
        menu_obj._cache[key] = True
        return action_id
def _cleanup_action_context(self, context_str, user_id):
"""Returns a dict representing the context_str evaluated (safe_eval) as
a dict where items that are not useful for shared actions
have been removed. If the evaluation of context_str as a
dict fails, context_str is returned unaltered.
:param user_id: the integer uid to be passed as 'uid' in the
evaluation context
"""
result = False
if context_str:
try:
context = safe_eval(context_str, tools.UnquoteEvalContext(), nocopy=True)
result = dict(context)
for key in context:
# Remove all context keys that seem to toggle default
# filters based on the current user, as it makes no sense
# for shared users, who would not see any data by default.
if key and key.startswith('search_default_') and 'user_id' in key:
result.pop(key)
except Exception:
# Note: must catch all exceptions, as UnquoteEvalContext may cause many
# different exceptions, as it shadows builtins.
_logger.debug("Failed to cleanup action context as it does not parse server-side", exc_info=True)
result = context_str
return result
    def _shared_action_def(self, cr, uid, wizard_data, context=None):
        """Build the field values of the window action that share users will see.

        In readonly mode, the action is restricted to the single view type
        chosen in the wizard; otherwise the copied action's view setup is
        kept as-is.

        :return: dict of ir.actions.act_window field values
        """
        copied_action = wizard_data.action_id
        if wizard_data.access_mode == 'readonly':
            view_mode = wizard_data.view_type
            # keep the explicit view only if it matches the chosen view type
            view_id = copied_action.view_id.id if copied_action.view_id.type == wizard_data.view_type else False
        else:
            view_mode = copied_action.view_mode
            view_id = copied_action.view_id.id
        action_def = {
            'name': wizard_data.name,
            'domain': copied_action.domain,
            'context': self._cleanup_action_context(wizard_data.action_id.context, uid),
            'res_model': copied_action.res_model,
            'view_mode': view_mode,
            'view_type': copied_action.view_type,
            # no search view in readonly mode (prevents filter changes)
            'search_view_id': copied_action.search_view_id.id if wizard_data.access_mode != 'readonly' else False,
            'view_id': view_id,
            'auto_search': True,
        }
        if copied_action.view_ids:
            # copy the explicit view list, filtered to the shared view type
            # when access is readonly
            action_def['view_ids'] = [(0,0,{'sequence': x.sequence,
                                            'view_mode': x.view_mode,
                                            'view_id': x.view_id.id })
                                      for x in copied_action.view_ids
                                      if (wizard_data.access_mode != 'readonly' or x.view_mode == wizard_data.view_type)
                                      ]
        return action_def
def _setup_action_and_shortcut(self, cr, uid, wizard_data, user_ids, make_home, context=None):
"""Create a shortcut to reach the shared data, as well as the corresponding action, for
each user in ``user_ids``, and assign it as their home action if ``make_home`` is True.
Meant to be overridden for special cases.
"""
values = self._shared_action_def(cr, uid, wizard_data, context=None)
user_obj = self.pool.get('res.users')
for user_id in user_ids:
action_id = self._create_shortcut(cr, user_id, values)
if make_home:
# We do this only for new share users, as existing ones already have their initial home
# action. Resetting to the default menu does not work well as the menu is rather empty
# and does not contain the shortcuts in most cases.
user_obj.write(cr, UID_ROOT, [user_id], {'action_id': action_id})
    def _get_recursive_relations(self, cr, uid, model, ttypes, relation_fields=None, suffix=None, context=None):
        """Returns list of tuples representing recursive relationships of type ``ttypes`` starting from
        the given ``model``.

        Each tuple is (reverse_field_path_or_None, ir.model browse_record).

        :param model: browsable model to start loading relationships from
        :param ttypes: list of relationship types to follow (e.g: ['one2many','many2many'])
        :param relation_fields: list of previously followed relationship tuples - to avoid duplicates
                                during recursion
        :param suffix: optional suffix to append to the field path to reach the main object
        """
        if relation_fields is None:
            relation_fields = []
        local_rel_fields = []
        # models already visited on this path (avoids infinite recursion)
        models = [x[1].model for x in relation_fields]
        model_obj = self.pool.get('ir.model')
        model_osv = self.pool.get(model.model)
        for colinfo in model_osv._all_columns.itervalues():
            coldef = colinfo.column
            coltype = coldef._type
            relation_field = None
            if coltype in ttypes and colinfo.column._obj not in models:
                relation_model_id = model_obj.search(cr, UID_ROOT, [('model','=',coldef._obj)])[0]
                relation_model_browse = model_obj.browse(cr, UID_ROOT, relation_model_id, context=context)
                relation_osv = self.pool.get(coldef._obj)
                #skip virtual one2many fields (related, ...) as there is no reverse relationship
                if coltype == 'one2many' and hasattr(coldef, '_fields_id'):
                    # don't record reverse path if it's not a real m2o (that happens, but rarely)
                    dest_model_ci = relation_osv._all_columns
                    reverse_rel = coldef._fields_id
                    if reverse_rel in dest_model_ci and dest_model_ci[reverse_rel].column._type == 'many2one':
                        # build dotted path back to the original model
                        relation_field = ('%s.%s'%(reverse_rel, suffix)) if suffix else reverse_rel
                local_rel_fields.append((relation_field, relation_model_browse))
                # also cover _inherits parents of the related model
                for parent in relation_osv._inherits:
                    if parent not in models:
                        parent_model = self.pool.get(parent)
                        parent_colinfos = parent_model._all_columns
                        parent_model_browse = model_obj.browse(cr, UID_ROOT,
                                                               model_obj.search(cr, UID_ROOT, [('model','=',parent)]))[0]
                        if relation_field and coldef._fields_id in parent_colinfos:
                            # inverse relationship is available in the parent
                            local_rel_fields.append((relation_field, parent_model_browse))
                        else:
                            # TODO: can we setup a proper rule to restrict inherited models
                            # in case the parent does not contain the reverse m2o?
                            local_rel_fields.append((None, parent_model_browse))
                # recurse through o2m/m2m chains (not plain m2o)
                if relation_model_id != model.id and coltype in ['one2many', 'many2many']:
                    local_rel_fields += self._get_recursive_relations(cr, uid, relation_model_browse,
                        [coltype], relation_fields + local_rel_fields, suffix=relation_field, context=context)
        return local_rel_fields
    def _get_relationship_classes(self, cr, uid, model, context=None):
        """Computes the *relationship classes* reachable from the given
        model. The 4 relationship classes are:
        - [obj0]: the given model itself (and its parents via _inherits, if any)
        - [obj1]: obj0 and all other models recursively accessible from
                  obj0 via one2many relationships
        - [obj2]: obj0 and all other models recursively accessible from
                  obj0 via one2many and many2many relationships
        - [obj3]: all models recursively accessible from obj1 via many2one
                  relationships
        Each class is returned as a list of pairs [(field,model_browse)], where
        ``model`` is the browse_record of a reachable ir.model, and ``field`` is
        the dot-notation reverse relationship path coming from that model to obj0,
        or None if there is no reverse path.

        :return: ([obj0], [obj1], [obj2], [obj3])
        """
        # obj0 class and its parents
        obj0 = [(None, model)]
        model_obj = self.pool.get(model.model)
        ir_model_obj = self.pool.get('ir.model')
        for parent in model_obj._inherits:
            parent_model_browse = ir_model_obj.browse(cr, UID_ROOT,
                                                      ir_model_obj.search(cr, UID_ROOT, [('model','=',parent)]))[0]
            obj0 += [(None, parent_model_browse)]
        obj1 = self._get_recursive_relations(cr, uid, model, ['one2many'], relation_fields=obj0, context=context)
        obj2 = self._get_recursive_relations(cr, uid, model, ['one2many', 'many2many'], relation_fields=obj0, context=context)
        obj3 = self._get_recursive_relations(cr, uid, model, ['many2one'], relation_fields=obj0, context=context)
        # NOTE: the loop variable deliberately rebinds ``model`` -- each obj1
        # member's m2o closure is merged into obj3
        for dummy, model in obj1:
            obj3 += self._get_recursive_relations(cr, uid, model, ['many2one'], relation_fields=obj0, context=context)
        return obj0, obj1, obj2, obj3
def _get_access_map_for_groups_and_models(self, cr, uid, group_ids, model_ids, context=None):
model_access_obj = self.pool.get('ir.model.access')
user_right_ids = model_access_obj.search(cr, uid,
[('group_id', 'in', group_ids), ('model_id', 'in', model_ids)],
context=context)
user_access_matrix = {}
if user_right_ids:
for access_right in model_access_obj.browse(cr, uid, user_right_ids, context=context):
access_line = user_access_matrix.setdefault(access_right.model_id.model, set())
for perm in FULL_ACCESS:
if getattr(access_right, perm, 0):
access_line.add(perm)
return user_access_matrix
    def _add_access_rights_for_share_group(self, cr, uid, group_id, mode, fields_relations, context=None):
        """Adds access rights to group_id on object models referenced in ``fields_relations``,
        intersecting with access rights of current user to avoid granting too much rights

        :param mode: 'readonly' grants read; anything else grants read/write
        :param fields_relations: list of (field_path, ir.model browse_record)
        """
        model_access_obj = self.pool.get('ir.model.access')
        user_obj = self.pool.get('res.users')
        target_model_ids = [x[1].id for x in fields_relations]
        perms_to_add = (mode == 'readonly') and READ_ONLY_ACCESS or READ_WRITE_ACCESS
        current_user = user_obj.browse(cr, uid, uid, context=context)
        # what the sharing user may do vs. what the share group already has
        current_user_access_map = self._get_access_map_for_groups_and_models(cr, uid,
            [x.id for x in current_user.groups_id], target_model_ids, context=context)
        group_access_map = self._get_access_map_for_groups_and_models(cr, uid,
            [group_id], target_model_ids, context=context)
        _logger.debug("Current user access matrix: %r", current_user_access_map)
        _logger.debug("New group current access matrix: %r", group_access_map)
        # Create required rights if allowed by current user rights and not
        # already granted
        for dummy, model in fields_relations:
            # mail.message is transversal: it should not received directly the access rights
            if model.model in ['mail.message']: continue
            values = {
                'name': _('Copied access for sharing'),
                'group_id': group_id,
                'model_id': model.id,
            }
            current_user_access_line = current_user_access_map.get(model.model,set())
            existing_group_access_line = group_access_map.get(model.model,set())
            need_creation = False
            for perm in perms_to_add:
                if perm in current_user_access_line \
                   and perm not in existing_group_access_line:
                    values.update({perm:True})
                    # update the map so duplicate models in the list don't
                    # re-create the same right
                    group_access_map.setdefault(model.model, set()).add(perm)
                    need_creation = True
            if need_creation:
                model_access_obj.create(cr, UID_ROOT, values)
                _logger.debug("Creating access right for model %s with values: %r", model.model, values)
    def _link_or_copy_current_user_rules(self, cr, current_user, group_id, fields_relations, context=None):
        """Propagate the sharing user's ir.rule record rules to the share group.

        Rules whose domain references the current user ('user.' pattern) are
        copied with the evaluated domain; the others are simply linked to the
        group so they stay dynamic.
        """
        rule_obj = self.pool.get('ir.rule')
        rules_done = set()
        for group in current_user.groups_id:
            for dummy, model in fields_relations:
                for rule in group.rule_groups:
                    # NOTE(review): a rule is marked done on first sight even
                    # if its model doesn't match this pair -- it will be
                    # skipped for later (field, model) pairs; confirm intended
                    if rule.id in rules_done:
                        continue
                    rules_done.add(rule.id)
                    if rule.model_id.id == model.id:
                        if 'user.' in rule.domain_force:
                            # Above pattern means there is likely a condition
                            # specific to current user, so we must copy the rule using
                            # the evaluated version of the domain.
                            # And it's better to copy one time too much than too few
                            rule_obj.copy(cr, UID_ROOT, rule.id, default={
                                'name': '%s %s' %(rule.name, _('(Copy for sharing)')),
                                'groups': [(6,0,[group_id])],
                                'domain_force': rule.domain, # evaluated version!
                            })
                            _logger.debug("Copying rule %s (%s) on model %s with domain: %s", rule.name, rule.id, model.model, rule.domain_force)
                        else:
                            # otherwise we can simply link the rule to keep it dynamic
                            rule_obj.write(cr, SUPERUSER_ID, [rule.id], {
                                    'groups': [(4,group_id)]
                                })
                            _logger.debug("Linking rule %s (%s) on model %s with domain: %s", rule.name, rule.id, model.model, rule.domain_force)
def _check_personal_rule_or_duplicate(self, cr, group_id, rule, context=None):
"""Verifies that the given rule only belongs to the given group_id, otherwise
duplicate it for the current group, and unlink the previous one.
The duplicated rule has the original domain copied verbatim, without
any evaluation.
Returns the final rule to use (browse_record), either the original one if it
only belongs to this group, or the copy."""
if len(rule.groups) == 1:
return rule
# duplicate it first:
rule_obj = self.pool.get('ir.rule')
new_id = rule_obj.copy(cr, UID_ROOT, rule.id,
default={
'name': '%s %s' %(rule.name, _('(Duplicated for modified sharing permissions)')),
'groups': [(6,0,[group_id])],
'domain_force': rule.domain_force, # non evaluated!
})
_logger.debug("Duplicating rule %s (%s) (domain: %s) for modified access ", rule.name, rule.id, rule.domain_force)
# then disconnect from group_id:
rule.write({'groups':[(3,group_id)]}) # disconnects, does not delete!
return rule_obj.browse(cr, UID_ROOT, new_id, context=context)
    def _create_or_combine_sharing_rule(self, cr, current_user, wizard_data, group_id, model_id, domain, restrict=False, rule_name=None, context=None):
        """Add a new ir.rule entry for model_id and domain on the target group_id.
        If ``restrict`` is True, instead of adding a rule, the domain is
        combined with AND operator with all existing rules in the group, to implement
        an additional restriction (as of 6.1, multiple rules in the same group are
        OR'ed by default, so a restriction must alter all existing rules)

        This is necessary because the personal rules of the user that is sharing
        are first copied to the new share group. Afterwards the filters used for
        sharing are applied as an additional layer of rules, which are likely to
        apply to the same model. The default rule algorithm would OR them (as of 6.1),
        which would result in a combined set of permission that could be larger
        than those of the user that is sharing! Hence we must forcefully AND the
        rules at this stage.

        One possibly undesirable effect can appear when sharing with a
        pre-existing group, in which case altering pre-existing rules would not
        be desired. This is addressed in the portal module.
        """
        if rule_name is None:
            rule_name = _('Sharing filter created by user %s (%s) for group %s') % \
                            (current_user.name, current_user.login, group_id)
        rule_obj = self.pool.get('ir.rule')
        rule_ids = rule_obj.search(cr, UID_ROOT, [('groups', 'in', group_id), ('model_id', '=', model_id)])
        if rule_ids:
            for rule in rule_obj.browse(cr, UID_ROOT, rule_ids, context=context):
                if rule.domain_force == domain:
                    # don't create it twice!
                    if restrict:
                        continue
                    else:
                        _logger.debug("Ignoring sharing rule on model %s with domain: %s the same rule exists already", model_id, domain)
                        return
                if restrict:
                    # restricting existing rules is done by adding the clause
                    # with an AND, but we can't alter the rule if it belongs to
                    # other groups, so we duplicate if needed
                    rule = self._check_personal_rule_or_duplicate(cr, group_id, rule, context=context)
                    eval_ctx = rule_obj._eval_context_for_combinations()
                    # NOTE(review): raw eval() on rule/filter domains; these
                    # come from trusted ir.rule records and the wizard itself,
                    # but safe_eval would be a safer choice here
                    org_domain = expression.normalize_domain(eval(rule.domain_force, eval_ctx))
                    new_clause = expression.normalize_domain(eval(domain, eval_ctx))
                    combined_domain = expression.AND([new_clause, org_domain])
                    rule.write({'domain_force': combined_domain, 'name': rule.name + _('(Modified)')})
                    _logger.debug("Combining sharing rule %s on model %s with domain: %s", rule.id, model_id, domain)
        if not rule_ids or not restrict:
            # Adding the new rule in the group is ok for normal cases, because rules
            # in the same group and for the same model will be combined with OR
            # (as of v6.1), so the desired effect is achieved.
            rule_obj.create(cr, UID_ROOT, {
                'name': rule_name,
                'model_id': model_id,
                'domain_force': domain,
                'groups': [(4,group_id)]
                })
            _logger.debug("Created sharing rule on model %s with domain: %s", model_id, domain)
    def _create_indirect_sharing_rules(self, cr, current_user, wizard_data, group_id, fields_relations, context=None):
        """Create one restricting ir.rule per related model in
        ``fields_relations``, by prefixing each leaf of the wizard's sharing
        domain with the reverse relation path (e.g. ('id','in',[1,2]) on
        project.project becomes ('project_id.id','in',[1,2]) on project.task).
        """
        rule_name = _('Indirect sharing filter created by user %s (%s) for group %s') % \
                        (current_user.name, current_user.login, group_id)
        try:
            domain = safe_eval(wizard_data.domain)
            if domain:
                for rel_field, model in fields_relations:
                    # mail.message is transversal: it should not received directly the access rights
                    if model.model in ['mail.message']: continue
                    related_domain = []
                    # no reverse path -> cannot express the restriction
                    if not rel_field: continue
                    for element in domain:
                        if expression.is_leaf(element):
                            # prefix the left operand with the reverse path
                            left, operator, right = element
                            left = '%s.%s'%(rel_field, left)
                            element = left, operator, right
                        related_domain.append(element)
                    self._create_or_combine_sharing_rule(cr, current_user, wizard_data,
                         group_id, model_id=model.id, domain=str(related_domain),
                         rule_name=rule_name, restrict=True, context=context)
        except Exception:
            # any failure here must abort the whole sharing operation
            _logger.exception('Failed to create share access')
            raise osv.except_osv(_('Sharing access cannot be created.'),
                                 _('Sorry, the current screen and filter you are trying to share are not supported at the moment.\nYou may want to try a simpler filter.'))
def _check_preconditions(self, cr, uid, wizard_data, context=None):
self._assert(wizard_data.action_id and wizard_data.access_mode,
_('Action and Access Mode are required to create a shared access.'),
context=context)
self._assert(self.has_share(cr, uid, wizard_data, context=context),
_('You must be a member of the Share/User group to use the share wizard.'),
context=context)
if wizard_data.user_type == 'emails':
self._assert((wizard_data.new_users or wizard_data.email_1 or wizard_data.email_2 or wizard_data.email_3),
_('Please indicate the emails of the persons to share with, one per line.'),
context=context)
def _create_share_users_group(self, cr, uid, wizard_data, context=None):
"""Creates the appropriate share group and share users, and populates
result_line_ids of wizard_data with one line for each user.
:return: a tuple composed of the new group id (to which the shared access should be granted),
the ids of the new share users that have been created and the ids of the existing share users
"""
group_id = self._create_share_group(cr, uid, wizard_data, context=context)
# First create any missing user, based on the email addresses provided
new_ids, existing_ids = self._create_new_share_users(cr, uid, wizard_data, group_id, context=context)
# Finally, setup the new action and shortcut for the users.
if existing_ids:
# existing users still need to join the new group
self.pool.get('res.users').write(cr, UID_ROOT, existing_ids, {
'groups_id': [(4,group_id)],
})
# existing user don't need their home action replaced, only a new shortcut
self._setup_action_and_shortcut(cr, uid, wizard_data, existing_ids, make_home=False, context=context)
if new_ids:
# new users need a new shortcut AND a home action
self._setup_action_and_shortcut(cr, uid, wizard_data, new_ids, make_home=True, context=context)
return group_id, new_ids, existing_ids
    def go_step_2(self, cr, uid, ids, context=None):
        """Perform the actual sharing: create the group/users, grant access
        rights and record rules, notify recipients, then return the closing
        action (summary dialog, or the shared record in invite mode).
        """
        wizard_data = self.browse(cr, uid, ids[0], context=context)
        self._check_preconditions(cr, uid, wizard_data, context=context)
        # Create shared group and users
        group_id, new_ids, existing_ids = self._create_share_users_group(cr, uid, wizard_data, context=context)
        current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        model_obj = self.pool.get('ir.model')
        model_id = model_obj.search(cr, uid, [('model','=', wizard_data.action_id.res_model)])[0]
        model = model_obj.browse(cr, uid, model_id, context=context)
        # ACCESS RIGHTS
        # We have several classes of objects that should receive different access rights:
        # Let:
        #   - [obj0] be the target model itself (and its parents via _inherits, if any)
        #   - [obj1] be the target model and all other models recursively accessible from
        #            obj0 via one2many relationships
        #   - [obj2] be the target model and all other models recursively accessible from
        #            obj0 via one2many and many2many relationships
        #   - [obj3] be all models recursively accessible from obj1 via many2one relationships
        #            (currently not used)
        obj0, obj1, obj2, obj3 = self._get_relationship_classes(cr, uid, model, context=context)
        mode = wizard_data.access_mode
        # Add access to [obj0] and [obj1] according to chosen mode
        self._add_access_rights_for_share_group(cr, uid, group_id, mode, obj0, context=context)
        self._add_access_rights_for_share_group(cr, uid, group_id, mode, obj1, context=context)
        # Add read-only access (always) to [obj2]
        self._add_access_rights_for_share_group(cr, uid, group_id, 'readonly', obj2, context=context)
        # IR.RULES
        #   A. On [obj0], [obj1], [obj2]: add all rules from all groups of
        #     the user that is sharing
        #     Warning: rules must be copied instead of linked if they contain a reference
        #     to uid or if the rule is shared with other groups (and it must be replaced correctly)
        #   B. On [obj0]: 1 rule with domain of shared action
        #   C. For each model in [obj1]: 1 rule in the form:
        #           many2one_rel.domain_of_obj0
        #        where many2one_rel is the many2one used in the definition of the
        #        one2many, and domain_of_obj0 is the sharing domain
        #        For example if [obj0] is project.project with a domain of
        #                ['id', 'in', [1,2]]
        #        then we will have project.task in [obj1] and we need to create this
        #        ir.rule on project.task:
        #                ['project_id.id', 'in', [1,2]]
        # A.
        all_relations = obj0 + obj1 + obj2
        self._link_or_copy_current_user_rules(cr, current_user, group_id, all_relations, context=context)
        # B.
        main_domain = wizard_data.domain if wizard_data.domain != '[]' else str(DOMAIN_ALL)
        self._create_or_combine_sharing_rule(cr, current_user, wizard_data,
                     group_id, model_id=model.id, domain=main_domain,
                     restrict=True, context=context)
        # C.
        self._create_indirect_sharing_rules(cr, current_user, wizard_data, group_id, obj1, context=context)
        # refresh wizard_data
        wizard_data = self.browse(cr, uid, ids[0], context=context)
        # EMAILS AND NOTIFICATIONS
        #  A. Not invite: as before
        #     -> send emails to destination users
        #  B. Invite (OpenSocial)
        #     -> subscribe all users (existing and new) to the record
        #     -> send a notification with a summary to the current record
        #     -> send a notification to all users; users allowing to receive
        #        emails in preferences will receive it
        #        new users by default receive all notifications by email
        # A.
        if not wizard_data.invite:
            self.send_emails(cr, uid, wizard_data, context=context)
        # B.
        else:
            # Invite (OpenSocial): automatically subscribe users to the record
            # main_domain must contain an ('id', op, value) leaf to identify
            # the single shared record
            res_id = 0
            for cond in safe_eval(main_domain):
                if cond[0] == 'id':
                    res_id = cond[2]
            # Record id not found: issue
            if res_id <= 0:
                raise osv.except_osv(_('Record id not found'), _('The share engine has not been able to fetch a record_id for your invitation.'))
            self.pool.get(model.model).message_subscribe(cr, uid, [res_id], new_ids + existing_ids, context=context)
            # self.send_invite_email(cr, uid, wizard_data, context=context)
            # self.send_invite_note(cr, uid, model.model, res_id, wizard_data, context=context)
        # CLOSE
        #  A. Not invite: as before
        #  B. Invite: skip summary screen, get back to the record
        # A.
        if not wizard_data.invite:
            dummy, step2_form_view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'share', 'share_step2_form')
            return {
                'name': _('Shared access created!'),
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': 'share.wizard',
                'view_id': False,
                'res_id': ids[0],
                'views': [(step2_form_view_id, 'form'), (False, 'tree'), (False, 'calendar'), (False, 'graph')],
                'type': 'ir.actions.act_window',
                'target': 'new'
            }
        # B.
        else:
            return {
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': model.model,
                'view_id': False,
                'res_id': res_id,
                'views': [(False, 'form'), (False, 'tree'), (False, 'calendar'), (False, 'graph')],
                'type': 'ir.actions.act_window',
            }
def send_invite_note(self, cr, uid, model_name, res_id, wizard_data, context=None):
subject = _('Invitation')
body = 'has been <b>shared</b> with'
tmp_idx = 0
for result_line in wizard_data.result_line_ids:
body += ' @%s' % (result_line.user_id.login)
if tmp_idx < len(wizard_data.result_line_ids)-2:
body += ','
elif tmp_idx == len(wizard_data.result_line_ids)-2:
body += ' and'
body += '.'
return self.pool.get(model_name).message_post(cr, uid, [res_id], body=body, context=context)
    def send_invite_email(self, cr, uid, wizard_data, context=None):
        """Send one invitation email per result line (deprecated path).

        Raises if the current user has no email configured; lines whose user
        has no email address are silently skipped.
        """
        # TDE Note: not updated because will disappear
        message_obj = self.pool.get('mail.message')
        notification_obj = self.pool.get('mail.notification')
        user = self.pool.get('res.users').browse(cr, UID_ROOT, uid)
        if not user.email:
            raise osv.except_osv(_('Email Required'), _('The current user must have an email address configured in User Preferences to be able to send outgoing emails.'))
        # TODO: also send an HTML version of this mail
        for result_line in wizard_data.result_line_ids:
            email_to = result_line.user_id.email
            if not email_to:
                continue
            subject = _('Invitation to collaborate about %s') % (wizard_data.record_name)
            body = _("Hello,\n\n")
            body += _("I have shared %s (%s) with you!\n\n") % (wizard_data.record_name, wizard_data.name)
            if wizard_data.message:
                body += "%s\n\n" % (wizard_data.message)
            if result_line.newly_created:
                # new users get their freshly generated credentials
                body += _("The documents are not attached, you can view them online directly on my OpenERP server at:\n    %s\n\n") % (result_line.share_url)
                body += _("These are your credentials to access this protected area:\n")
                body += "%s: %s" % (_("Username"), result_line.user_id.login) + "\n"
                body += "%s: %s" % (_("Password"), result_line.password) + "\n"
                body += "%s: %s" % (_("Database"), cr.dbname) + "\n"
            body += _("The documents have been automatically added to your subscriptions.\n\n")
            body += '%s\n\n' % ((user.signature or ''))
            body += "--\n"
            body += _("OpenERP is a powerful and user-friendly suite of Business Applications (CRM, Sales, HR, etc.)\n"
                      "It is open source and can be found on http://www.openerp.com.")
            # schedule + immediately notify the recipient user
            msg_id = message_obj.schedule_with_attach(cr, uid, user.email, [email_to], subject, body, model='', context=context)
            notification_obj.create(cr, uid, {'user_id': result_line.user_id.id, 'message_id': msg_id}, context=context)
    def send_emails(self, cr, uid, wizard_data, context=None):
        """Send one share-notification email per result line, with credentials
        for newly created users, and force immediate delivery.

        Raises if the current user has no email configured; lines whose user
        has no email address are silently skipped.
        """
        _logger.info('Sending share notifications by email...')
        mail_mail = self.pool.get('mail.mail')
        user = self.pool.get('res.users').browse(cr, UID_ROOT, uid)
        if not user.email:
            raise osv.except_osv(_('Email Required'), _('The current user must have an email address configured in User Preferences to be able to send outgoing emails.'))
        # TODO: also send an HTML version of this mail
        mail_ids = []
        for result_line in wizard_data.result_line_ids:
            email_to = result_line.user_id.email
            if not email_to:
                continue
            subject = wizard_data.name
            body = _("Hello,\n\n")
            body += _("I've shared %s with you!\n\n") % wizard_data.name
            body += _("The documents are not attached, you can view them online directly on my OpenERP server at:\n    %s\n\n") % (result_line.share_url)
            if wizard_data.message:
                body += '%s\n\n' % (wizard_data.message)
            if result_line.newly_created:
                # new users get their freshly generated credentials
                body += _("These are your credentials to access this protected area:\n")
                body += "%s: %s\n" % (_("Username"), result_line.user_id.login)
                body += "%s: %s\n" % (_("Password"), result_line.password)
                body += "%s: %s\n" % (_("Database"), cr.dbname)
            else:
                body += _("The documents have been automatically added to your current OpenERP documents.\n")
                body += _("You may use your current login (%s) and password to view them.\n") % result_line.user_id.login
            body += "\n\n%s\n\n" % ( (user.signature or '') )
            body += "--\n"
            body += _("OpenERP is a powerful and user-friendly suite of Business Applications (CRM, Sales, HR, etc.)\n"
                      "It is open source and can be found on http://www.openerp.com.")
            mail_ids.append(mail_mail.create(cr, uid, {
                    'email_from': user.email,
                    'email_to': email_to,
                    'subject': subject,
                    'body_html': '<pre>%s</pre>' % body}, context=context))
        # force direct delivery, as users expect instant notification
        mail_mail.send(cr, uid, mail_ids, context=context)
        _logger.info('%d share notification(s) sent.', len(mail_ids))
def onchange_embed_options(self, cr, uid, ids, opt_title, opt_search, context=None):
wizard = self.browse(cr, uid, ids[0], context)
options = dict(title=opt_title, search=opt_search)
return {'value': {'embed_code': self._generate_embedded_code(wizard, options)}}
# instantiate the model class so it registers itself in the ORM pool
# (pre-OpenERP-8 registration style)
share_wizard()
class share_result_line(osv.osv_memory):
    """One line per user the wizard shared with: holds the user's login,
    generated password (for new users) and the ready-to-use share URL."""
    _name = 'share.wizard.result.line'
    _rec_name = 'user_id'
    def _share_url(self, cr, uid, ids, _fieldname, _args, context=None):
        """Compute the share URL for each line by filling the parent wizard's
        URL template with this line's credentials (and action, if any)."""
        result = dict.fromkeys(ids, '')
        for this in self.browse(cr, uid, ids, context=context):
            data = dict(dbname=cr.dbname, login=this.login, password=this.password)
            if this.share_wizard_id and this.share_wizard_id.action_id:
                data['action_id'] = this.share_wizard_id.action_id.id
            # put action_id in the URL hash part rather than the query string
            ctx = dict(context, share_url_template_hash_arguments=['action_id'])
            result[this.id] = this.share_wizard_id.share_url_template(context=ctx) % data
        return result
    _columns = {
        'user_id': fields.many2one('res.users', required=True, readonly=True),
        'login': fields.related('user_id', 'login', string='Login', type='char', size=64, required=True, readonly=True),
        # clear-text password, only set for newly created share users so it
        # can be sent to them by email
        'password': fields.char('Password', size=64, readonly=True),
        'share_url': fields.function(_share_url, string='Share URL', type='char', size=512),
        'share_wizard_id': fields.many2one('share.wizard', 'Share Wizard', required=True, ondelete='cascade'),
        'newly_created': fields.boolean('Newly created', readonly=True),
    }
    _defaults = {
        'newly_created': True,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| {
"content_hash": "b311d34e703217a96bf742f84ccc9f9c",
"timestamp": "",
"source": "github",
"line_count": 915,
"max_line_length": 171,
"avg_line_length": 55.83169398907104,
"alnum_prop": 0.5739928747602083,
"repo_name": "ntiufalara/openerp7",
"id": "e9d83be85cba014758ddf950123e0f6e77525834",
"size": "52064",
"binary": false,
"copies": "20",
"ref": "refs/heads/master",
"path": "openerp/addons/share/wizard/share_wizard.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "C#",
"bytes": "93691"
},
{
"name": "C++",
"bytes": "108790"
},
{
"name": "CSS",
"bytes": "583265"
},
{
"name": "Groff",
"bytes": "8138"
},
{
"name": "HTML",
"bytes": "125159"
},
{
"name": "JavaScript",
"bytes": "5109152"
},
{
"name": "Makefile",
"bytes": "14036"
},
{
"name": "NSIS",
"bytes": "14114"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9373763"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "6430"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
} |
import logging
import urllib2
import re
import pickle
# http://www.crummy.com/software/BeautifulSoup/
from bs4 import BeautifulSoup
# https://code.google.com/p/prettytable/
from prettytable import PrettyTable
class Impactor(object):
    """Scraper/search tool for the citefactor.org journal impact factor list.

    Journal rows live in ``self.journal_data`` as a dict keyed by ISSN
    (each value is a column-name -> string dict), optionally cached on disk
    with pickle; matched ISSNs accumulate in ``self.matches``.
    """
    BASE_URL_PREFIX=r'http://www.citefactor.org/journal-impact-factor-list-'
    BASE_URL_SUFFIX=r'.html'
    URL_REGEX_PREFIX=r'http://www\.citefactor\.org/journal-impact-factor-list-'
    URL_REGEX_SUFFIX=r'_?[A-Z]?\.html'
    def __init__(self, journal_db_file=None, year=2014):
        """Build the year-specific URL/regex, then load (and re-save) data."""
        logging.debug('journal_db_file={}, year={}'.format(journal_db_file, year))
        self.journal_data = None
        self.journal_db_file = journal_db_file
        self.matches = set()
        self.year = year
        assert year in (2014, ) # won't bother doing past years, but might updated for future years...
        self.base_url = self.BASE_URL_PREFIX + str(year) + self.BASE_URL_SUFFIX
        self.url_regex = self.URL_REGEX_PREFIX + str(year) + self.URL_REGEX_SUFFIX
        self.re = re.compile(self.url_regex)
        self.load()
        self.save()
    def match(self, search_terms):
        """Add to self.matches every ISSN matching *search_terms* — a list
        of ISSNs and/or case-insensitive title substrings; an empty list
        selects every journal.

        NOTE(review): search_terms=None adds every journal but then crashes
        iterating None below; in practice callers pass a list (argparse
        nargs='*' yields []) — confirm before relying on None.
        """
        # If no terms specified, show all entries
        if search_terms is None or len(search_terms) == 0:
            for j in self.journal_data.values():
                self.matches.add(j['ISSN'])
        # Otherwise do search
        issn_re = re.compile(r'\d{4}-\d{4}')
        for s in search_terms:
            if issn_re.match(s):
                # Term already looks like an ISSN: use it as a key directly.
                self.matches.add(s)
            else:
                for j in self.journal_data.values():
                    if j['JOURNAL'].lower().find(s.lower()) >= 0:
                        self.matches.add(j['ISSN'])
    def print_table(self, sort_field='JOURNAL'):
        """Pretty-print the matched journals sorted by *sort_field*."""
        if len(self.matches) == 0:
            print 'No matches found.'
            return
        matches = list(self.matches)
        # Headers come from an arbitrary matched row; all rows share the
        # same keys because they are built from the same table headers.
        headers = self.journal_data[matches[0]].keys()
        logging.debug(headers)
        t = PrettyTable(headers, sortby=sort_field)
        #from prettytable import PLAIN_COLUMNS
        #t.set_style(PLAIN_COLUMNS)
        for j in matches:
            t.add_row(self.journal_data[j].values())
        print t
    def load(self):
        """Populate self.journal_data from the pickle cache, else the web."""
        # Try to load from file
        if self.journal_db_file is not None:
            try:
                # NOTE(review): pickle.load executes attacker-controlled code
                # when fed an untrusted file — only use trusted cache files.
                with open(self.journal_db_file, 'rb') as f:
                    self.journal_data = pickle.load(f)
                logging.debug('loaded journals from {}'.format(self.journal_db_file))
            except:
                pass
        # If cannot load from file, load from URL
        if self.journal_data is None:
            logging.info('Fetching database from citefactor.org...')
            self.journal_data = self.get_all_journal_data()
    def save(self):
        """Best-effort pickle of self.journal_data to the cache file."""
        if self.journal_db_file is not None:
            try:
                with open(self.journal_db_file, 'wb') as f:
                    pickle.dump(self.journal_data, f, -1)
                logging.debug('saved journals to {}'.format(self.journal_db_file))
            except:
                pass
    def get_all_urls(self):
        """Return the base listing URL plus every linked continuation page."""
        main_page_content = urllib2.urlopen(self.base_url).read()
        soup = BeautifulSoup(main_page_content)
        soup.prettify() # necessary?
        return [self.base_url,] + [anchor['href'] for anchor in soup.find_all('a', href=self.re)]
    def get_journal_table(self, url):
        """Return the soup <table> whose caption names this year's impact
        factor list; implicitly returns None when no table matches."""
        content = urllib2.urlopen(url).read()
        soup = BeautifulSoup(content)
        soup.prettify() # necessary?
        t = soup.table
        caption_re = re.compile(r'^Impact Factor ' + str(self.year)) # works for Year==2014 only
        while t is not None:
            if t.caption is None or t.caption.string is None or caption_re.match(t.caption.string) is None:
                t = t.find_next()
                continue
            return t
    def get_table_headers(self, table):
        """Return the first-row <td> texts as the table's column names."""
        return [str(x.string) for x in table.tr.find_all('td')]
    def get_journal_data(self, table):
        """Parse *table* rows into a dict of ISSN -> row dict."""
        headers = self.get_table_headers(table)
        journals = dict()
        for row in table.find_all('tr')[1:]:
            cells = row.find_all('td')
            j = dict( zip(headers, [str(x.string) for x in cells] ) )
            logging.debug('importing: {}'.format(j))
            journals[j['ISSN']] = j
        return journals
    def get_all_journal_data(self):
        """Scrape every listing page and merge the journal rows."""
        journals = dict()
        for url in self.get_all_urls():
            table = self.get_journal_table(url)
            journals.update(self.get_journal_data(table))
        logging.info('imported {} journal entries from citefactor.org'.format(len(journals)))
        return journals
if __name__ == '__main__':
    import argparse

    # Command line: search terms plus cache/sort/year/verbosity options.
    parser = argparse.ArgumentParser()
    parser.add_argument('search', nargs='*', help='journal ISSNs and/or partial title matches (case insensitive)')
    parser.add_argument('--db', default=None, type=str, help='journal database file (created from online data if missing/invalid)')
    parser.add_argument('--sort', '-s', default='JOURNAL', help='sort by column')
    parser.add_argument('--year', '-y', default=2014, type=int, help='year for citefactor URL (currently only 2014 supported)')
    parser.add_argument('--debug', '-d', default=False, action='store_true')
    args = parser.parse_args()

    # Verbosity is driven solely by the --debug flag.
    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=log_level)
    logging.debug(args)

    # Load (cache or web), search, render.
    impactor = Impactor(year=args.year, journal_db_file=args.db)
    impactor.match(args.search)
    impactor.print_table(args.sort)
| {
"content_hash": "67ffb64cfccb0dbfe38bebe040c5470e",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 131,
"avg_line_length": 38.108108108108105,
"alnum_prop": 0.5930851063829787,
"repo_name": "andrew-hill/impactor",
"id": "d3566d168b366447820048a177c6ee92c5329066",
"size": "5663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "impactor.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5663"
}
],
"symlink_target": ""
} |
from google.cloud import aiplatform_v1beta1
async def sample_list_batch_prediction_jobs():
    """Sample: page through all batch prediction jobs under a parent."""
    # Build the async client.
    client = aiplatform_v1beta1.JobServiceAsyncClient()

    # Construct the request with the required parent resource name.
    req = aiplatform_v1beta1.ListBatchPredictionJobsRequest(
        parent="parent_value",
    )

    # Issue the call; the result is an async pager over the jobs.
    pager = client.list_batch_prediction_jobs(request=req)

    # Print each job as it is yielded.
    async for job in pager:
        print(job)
# [END aiplatform_v1beta1_generated_JobService_ListBatchPredictionJobs_async]
| {
"content_hash": "6d1ee1f9552a07086a9ea2793a2b8e08",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 77,
"avg_line_length": 28.65,
"alnum_prop": 0.7347294938917975,
"repo_name": "googleapis/python-aiplatform",
"id": "09302aa8328f7baafce502b236114efc459860f4",
"size": "1987",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from .views import Renders, Initializer
urlpatterns = [
    # Order matters: Django tries patterns top-down and none of these are
    # anchored with '$', so they match by prefix. The bare r'^' catch-all
    # must remain last or it would shadow the routes above it.
    url(r'^search/', Initializer.init),
    url(r'^references', Renders.references_render),
    url(r'^help', Renders.help_render),
    url(r'^', Renders.index),
]
| {
"content_hash": "233be1d8064db670284d11e64bdccd3a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 51,
"avg_line_length": 28.22222222222222,
"alnum_prop": 0.6850393700787402,
"repo_name": "nlx-group/LX-WordNetBrowser",
"id": "d9335e05f96155e7cecb95b4ac64ee65d5dfd6e2",
"size": "254",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "my wordnet/search/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8419"
},
{
"name": "HTML",
"bytes": "16852"
},
{
"name": "JavaScript",
"bytes": "110534"
},
{
"name": "Makefile",
"bytes": "56241"
},
{
"name": "Python",
"bytes": "98427"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """manager 0006: create the ``Properties`` key/value table with audit
    timestamps and an optional creating-user reference."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('manager', '0005_add_fields_and_set_defaults'),
    ]
    operations = [
        migrations.CreateModel(
            name='Properties',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(max_length=512, unique=True)),
                ('value', models.TextField(blank=True, null=True)),
                ('created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='date created')),
                ('updated', models.DateTimeField(auto_now=True, null=True, verbose_name='date updated')),
                # SET_NULL so deleting a user keeps the property row.
                ('created_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| {
"content_hash": "d585f0b8330ea13d56cf45d64f8780f1",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 150,
"avg_line_length": 40.74074074074074,
"alnum_prop": 0.63,
"repo_name": "EndyKaufman/django-postgres-angularjs-blog",
"id": "eae7acdb8dfb8f630b658a56cd5b658161d5637a",
"size": "1170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/manager/migrations/0006_properties.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "54052"
},
{
"name": "HTML",
"bytes": "5156"
},
{
"name": "JavaScript",
"bytes": "3507240"
},
{
"name": "Python",
"bytes": "194540"
},
{
"name": "Shell",
"bytes": "4048"
}
],
"symlink_target": ""
} |
import urllib
import urllib2
import xml.etree.ElementTree as ET
from urlutils import urlencode_no_plus
class Game(object):
    """Value object for a single game record returned by TheGamesDB.

    Only ``id`` and ``title`` are always supplied; every other attribute is
    optional and remains None when the API response omits the field.
    """
    def __init__(self, id, title, release_date=None, platform=None, overview=None, esrb_rating=None,
                 genres=None, players=None, coop=None, youtube_url=None, publisher=None, developer=None, rating=None,
                 logo_url=None):
        self.id = id
        self.title = title
        self.release_date = release_date
        self.platform = platform
        self.overview = overview
        self.esrb_rating = esrb_rating
        self.genres = genres
        self.players = players
        self.coop = coop
        self.youtube_url = youtube_url
        self.publisher = publisher
        self.developer = developer
        self.rating = rating
        self.logo_url = logo_url
    def __repr__(self):
        # Debug-friendly representation; additive only — no caller relies
        # on the default object repr.
        return '%s(id=%r, title=%r)' % (type(self).__name__, self.id, self.title)
class Platform(object):
    """Value object for a gaming platform record returned by TheGamesDB.

    Only ``id`` and ``name`` are always supplied; every other attribute is
    optional hardware/metadata detail and remains None when absent.
    """
    def __init__(self, id, name, alias=None, console=None, controller=None, graphics=None, max_controllers=None, rating=None,
                 display=None, manufacturer=None, cpu=None, memory=None, sound=None, media=None, developer=None,
                 overview=None):
        self.id = id
        self.name = name
        self.alias = alias
        self.console = console
        self.controller = controller
        self.overview = overview
        self.developer = developer
        self.manufacturer = manufacturer
        self.cpu = cpu
        self.memory = memory
        self.graphics = graphics
        self.sound = sound
        self.display = display
        self.media = media
        self.max_controllers = max_controllers
        self.rating = rating
    def __repr__(self):
        # Debug-friendly representation; additive only — no caller relies
        # on the default object repr.
        return '%s(id=%r, name=%r)' % (type(self).__name__, self.id, self.name)
class APIException(Exception):
    """Raised when TheGamesDB returns a response that is not parseable XML;
    ``msg`` carries the raw response body for diagnostics."""
    def __init__(self, msg):
        # Forward to Exception so e.args is populated — the original left
        # args empty, which breaks pickling and generic exception logging.
        super(APIException, self).__init__(msg)
        self.msg = msg
    def __str__(self):
        return self.msg
class API(object):
    """Minimal client for TheGamesDB legacy XML API (thegamesdb.net).

    Each getter builds its endpoint URL, fetches it through make_call(),
    and walks the XML into plain ``Game``/``Platform`` value objects.
    """
    @staticmethod
    def make_call(api_url, query_args=None):
        """GET *api_url* (appending urlencoded *query_args* when given) and
        return the parsed XML root element; raises APIException with the raw
        response body when it is not valid XML."""
        # api_url is expected to be the fully constructed URL, with any needed arguments appended.
        # This function will simply make the call, and return the response as an ElementTree object for parsing,
        # If response cannot be parsed because it is not valid XML, this function assumes an API error and raises an
        # APIException, passing forward the pages contents (which generally gives some indication of the error.
        opener = urllib2.build_opener()
        # Browser-like User-agent (presumably to avoid server rejection —
        # unverified).
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        if query_args is not None:
            get_params = urlencode_no_plus.urlencode_no_plus(query_args)
            response = opener.open(api_url+'%s' % get_params)
        else:
            # NOTE(review): this no-args path uses urllib, not the opener
            # above, so the custom User-agent is not applied here — confirm
            # whether that asymmetry is intentional.
            response = urllib.urlopen(api_url)
        page = response.read()
        # Make sure the XML Parser doesn't return a ParsError. If it does, it's probably and API Issue, so raise an
        # exception, printing the response from the API call.
        try:
            xml_response = ET.fromstring(page)
        except ET.ParseError:
            raise APIException(page)
        return xml_response
    def get_platforms_list(self):
        """Return every platform as ``Platform`` objects (id/name/alias only)."""
        platforms_list = []
        GET_PLATFORMS_LIST_ENDPOINT = 'http://thegamesdb.net/api/GetPlatformsList.php'
        xml_response = self.make_call(GET_PLATFORMS_LIST_ENDPOINT)
        for element in xml_response.iter(tag="Platform"):
            for subelement in element:
                if subelement.tag == 'id':
                    platform_id = subelement.text
                if subelement.tag == 'name':
                    platform_name = subelement.text
                if subelement.tag == 'alias':
                    platform_alias = subelement.text
            platforms_list.append(Platform(platform_id, platform_name, alias=platform_alias))
        return platforms_list
    def get_platform(self, id):
        """Fetch one platform by numeric *id*; returns a populated
        ``Platform`` or raises APIException when id/name are missing."""
        # TODO Add support for fetching platform images under the <Images> element
        GET_PLATFORM_ENDPOINT = 'http://thegamesdb.net/api/GetPlatform.php?'
        query_args = {'id': id}
        xml_response = self.make_call(GET_PLATFORM_ENDPOINT, query_args)
        # TODO These are all optional fields. There's probably a better way to handle this than setting them all to None.
        platform_id = None
        platform_name = None
        platform_console = None
        platform_controller = None
        platform_graphics = None
        platform_max_controllers = None
        platform_rating = None
        platform_display = None
        platform_manufacturer = None
        platform_cpu = None
        platform_memory = None
        platform_sound = None
        platform_media = None
        platform_developer = None
        platform_overview = None
        for element in xml_response.iter(tag="Platform"):
            for subelement in element:
                if subelement.tag == 'id':
                    platform_id = subelement.text
                if subelement.tag == 'Platform':
                    platform_name = subelement.text
                if subelement.tag == 'console':
                    platform_console = subelement.text
                if subelement.tag == 'controller':
                    platform_controller = subelement.text
                if subelement.tag == 'overview':
                    platform_overview = subelement.text
                if subelement.tag == 'developer':
                    platform_developer = subelement.text
                if subelement.tag == 'manufacturer':
                    platform_manufacturer = subelement.text
                if subelement.tag == 'cpu':
                    platform_cpu = subelement.text
                if subelement.tag == 'memory':
                    platform_memory = subelement.text
                if subelement.tag == 'graphics':
                    platform_graphics = subelement.text
                if subelement.tag == 'sound':
                    platform_sound = subelement.text
                if subelement.tag == 'display':
                    platform_display = subelement.text
                if subelement.tag == 'media':
                    platform_media = subelement.text
                if subelement.tag == 'max_controllers':
                    platform_max_controllers = subelement.text
                if subelement.tag == 'rating':
                    platform_rating = subelement.text
        # NOTE(review): '== None' should idiomatically be 'is None'.
        if (platform_id == None or platform_name == None):
            raise APIException("get_platform returned a result without required fields id or platform")
        return Platform(platform_id, platform_name, console=platform_console, controller=platform_controller,
                        graphics=platform_graphics, max_controllers=platform_max_controllers, rating=platform_rating,
                        display=platform_display, manufacturer=platform_manufacturer, cpu=platform_cpu,
                        memory=platform_memory, sound=platform_sound, media=platform_media, developer=platform_developer,
                        overview=platform_overview)
    def get_platform_games(self, platform_id):
        """List the games for *platform_id* as ``Game`` objects holding id,
        title and (string) release date only."""
        GET_PLATFORM_GAMES_LIST_ENDPOINT = 'http://thegamesdb.net/api/GetPlatformGames.php?'
        query_args = {'platform': platform_id}
        xml_response = self.make_call(GET_PLATFORM_GAMES_LIST_ENDPOINT, query_args)
        platform_games_list = []
        for element in xml_response.iter(tag="Game"):
            platform_games_list_release_date = None
            for subelement in element:
                if subelement.tag == 'id':
                    platform_games_list_id = subelement.text
                if subelement.tag == 'GameTitle':
                    platform_games_list_name = subelement.text
                if subelement.tag == 'ReleaseDate':
                    # platform_games_list_release_date = datetime.strptime(subelement.text, "%m/%d/%Y")
                    # Omitting line above since date comes back in an inconsistent format, for example only %Y
                    platform_games_list_release_date = subelement.text
            platform_games_list.append(Game(platform_games_list_id, platform_games_list_name,
                                            release_date=platform_games_list_release_date))
        return platform_games_list
    def get_game(self, id=None, name=None, platform=None):
        """Fetch game(s) by *id* or *name*, optionally filtered by *platform*.

        Returns a single ``Game``, a list of ``Game`` objects, or None.
        NOTE(review): the guard below returns None when BOTH id and name are
        supplied, while its trailing comment says one of them is required —
        the intended check looks like ``id is None and name is None``;
        confirm against the GetGame endpoint before changing.
        """
        if id is not None and name is not None: # One of these arguments must be passed
            return None
        else:
            query_args = {}
            if id is not None:
                query_args['id'] = id
            if name is not None:
                query_args['name'] = name
            if platform is not None:
                query_args['platform'] = platform
            GET_GAME_ENDPOINT = 'http://thegamesdb.net/api/GetGame.php?'
            xml_response = self.make_call(GET_GAME_ENDPOINT, query_args)
            games_list = []
            # baseImgUrl prefixes every relative image path in the response.
            game_base_img_url = None
            for element in xml_response.iter(tag="baseImgUrl"):
                game_base_img_url = element.text
            for element in xml_response.iter(tag="Game"):
                game_overview = None
                game_release_date = None
                game_esrb_rating = None
                game_youtube_url = None
                game_rating = None
                game_logo_url = None
                game_players = None
                game_coop = None
                game_genres = None
                game_publisher = None
                game_developer = None
                for subelement in element:
                    if subelement.tag == 'id':
                        game_id = subelement.text
                    if subelement.tag == 'GameTitle':
                        game_title = subelement.text
                    if subelement.tag == 'Platform':
                        game_platform = subelement.text
                    if subelement.tag == 'ReleaseDate':
                        # games_release_date = datetime.strptime(subelement.text, "%m/%d/%Y")
                        game_release_date = subelement.text
                    if subelement.tag == 'Overview':
                        game_overview = subelement.text
                    if subelement.tag == 'ESRB':
                        game_esrb_rating = subelement.text
                    if subelement.tag == 'Genres':
                        game_genres = ''
                        for genre_element in subelement.iter(tag="genre"):
                            # TODO put elements in a more appropriate data structure
                            game_genres += genre_element.text
                    if subelement.tag == 'Players':
                        game_players = subelement.text
                    if subelement.tag == 'Co-op':
                        if subelement.text == 'No':
                            game_coop = False
                        elif subelement.text == 'Yes':
                            game_coop = True
                    if subelement.tag == 'Youtube':
                        game_youtube_url = subelement.text
                    if subelement.tag == 'Publisher':
                        game_publisher = subelement.text
                    if subelement.tag == 'Developer':
                        game_developer = subelement.text
                    if subelement.tag == 'Rating':
                        game_rating = subelement.text
                    if subelement.tag == 'clearlogo':
                        # TODO Capture image dimensions from API resposne
                        game_logo_url = game_base_img_url + subelement.text
                games_list.append(Game(game_id, game_title, release_date=game_release_date, platform=game_platform,
                                       overview=game_overview, esrb_rating=game_esrb_rating, genres=game_genres,
                                       players=game_players, coop=game_coop, youtube_url=game_youtube_url,
                                       publisher=game_publisher, developer=game_developer, rating=game_rating,
                                       logo_url=game_logo_url))
            if len(games_list) == 0:
                return None
            elif len(games_list) == 1:
                return games_list[0]
            else:
                return games_list
    def get_games_list(self, name, platform=None, genre=None):
        """Search games by *name* (optional platform/genre filters); returns
        a list of lightweight ``Game`` objects."""
        query_args = {'name': name}
        if platform is not None:
            query_args['platform'] = platform
        if genre is not None:
            query_args['genre'] = genre
        games_list = []
        GET_GAMES_LIST_ENDPOINT = 'http://thegamesdb.net/api/GetGamesList.php?'
        xml_response = self.make_call(GET_GAMES_LIST_ENDPOINT, query_args)
        for element in xml_response.iter(tag="Game"):
            game_release_date = None
            game_platform = None
            for subelement in element:
                if subelement.tag == 'id':
                    game_id = subelement.text
                if subelement.tag == 'GameTitle':
                    game_title = subelement.text
                if subelement.tag == 'ReleaseDate':
                    game_release_date = subelement.text
                if subelement.tag == 'Platform':
                    game_platform = subelement.text
            games_list.append(Game(game_id, game_title, release_date=game_release_date, platform=game_platform))
        return games_list
| {
"content_hash": "c7873699a517fa9becc90eeb9231fb7a",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 124,
"avg_line_length": 46.07420494699647,
"alnum_prop": 0.5684485006518905,
"repo_name": "jameserrico/python-gamesdb",
"id": "b4bb10c67173407f00cc993cdda02953a01bb1a0",
"size": "13039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gamesdb/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16944"
}
],
"symlink_target": ""
} |
import os
from setuptools import find_packages, setup
# Distribution metadata shared by setup() and the download URL below.
package_name = "culqi"
package_path = os.path.abspath(os.path.dirname(__file__))
repositorty_url = "https://github.com/culqi/culqi"
long_description_file_path = os.path.join(package_path, "README.md")
long_description = ""
try:
    # Use README.md as the PyPI long description when present.
    with open(long_description_file_path) as f:
        long_description = f.read()
except IOError:
    # Best-effort: building without a README just yields an empty
    # long description.
    pass
setup(
    name=package_name,
    packages=find_packages(exclude=[".*", "docs", "scripts", "tests*", "legacy",]),
    include_package_data=True,
    # Version is read from the package's __version__ attribute.
    version=__import__("culqi").__version__,
    description="""Biblioteca de Culqi en Python""",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Willy Aguirre, Joel Ibaceta, Martin Josemaría",
    zip_safe=False,
    keywords=["Api Client", "Payment Integration", "Culqi", "Python 3", "Python 2",],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    url=repositorty_url,
    # Tarball for this exact version, derived from the repository URL.
    download_url="%(url)s/-/archive/%(version)s/%(package)s-%(version)s.tar.gz"
    % {
        "url": repositorty_url,
        "version": __import__("culqi").__version__,
        "package": package_name,
    },
    # 'requires' is legacy distutils metadata; pip uses 'install_requires'.
    requires=["requests", "jsonschema"],
    install_requires=["requests", "jsonschema"],
)
| {
"content_hash": "059b3c0cf23776827262561702b3c482",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 85,
"avg_line_length": 35.06,
"alnum_prop": 0.6274957216200798,
"repo_name": "culqi/culqi-python",
"id": "f2895adb1bb80717f13608a90d6fce29da52aa45",
"size": "1800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49720"
}
],
"symlink_target": ""
} |
import scrapy
class VReviewsItem(scrapy.Item):
    """Scrapy item holding one scraped museum review; field names mirror
    the attributes the spider extracts (reviewer, date, rating, body text,
    source link, museum)."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    username = scrapy.Field()
    date = scrapy.Field()
    rate = scrapy.Field()
    content = scrapy.Field()
    link = scrapy.Field()
    museum = scrapy.Field()
| {
"content_hash": "8925268ab66737c83081bf3ee2f612af",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 48,
"avg_line_length": 26.363636363636363,
"alnum_prop": 0.6448275862068965,
"repo_name": "dollymsq/SI_Review_Scrapy",
"id": "8331d292fe064bd4f7070c57161f1e812e292b3d",
"size": "442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "v_reviews/items.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2927"
}
],
"symlink_target": ""
} |
from .datalog2sql.datalog2sqlconverter import Datalog2SqlConverter
from .sql_engine.query_evaluator import QueryEvaluator
from .sql_engine.db_state_tracker import DBStateTracker
from colorama import *
from .interpreter.interpreter_parse import *
from .interpreter.syntax_highlighter import SyntaxHighlight
import sys
import os
# Shared singletons wired together at import time: the SQL evaluator, the
# DB state tracker layered on it, and the Datalog->SQL converter.
qe = QueryEvaluator()
db_state_tracker = DBStateTracker(qe)
converter = Datalog2SqlConverter(db_state_tracker)
def help():
    """Print the list of interactive commands supported by YADI.

    Intentionally shadows the ``help`` builtin at module scope: the
    interpreter loop dispatches the '/help' command to this function.
    """
    print("========HELP========")
    print("\nThe following commands are supported by YADI:")
    print("/assert - Used to interactively add new rules to the database. Usage example: /assert a(a4)\n")
    print("/clrscr - Clears the terminal.\n")
    print("/curdb - Shows the current database path.\n")
    print("/dbschema - Lists the database schema.\n")
    print("/dropview <view name> - Drops the view from the database.\n")
    # The backslash continues the string literal across the source line, so
    # the continuation line's leading spaces are printed verbatim.
    print("/droptable <table name> - Drops the table from the database. \
    (WARNING: Drop cascades to dependent views.)\n")
    print("/help - Prints a list of available commands with their descriptions.\n")
    print("/script \"path\" - Loads a Datalog program from a file in the specified path."
          " Usage example: /script \"C:/file.txt\" \n")
    print("/setdb - Set the database to use with YADI. The last configuration before quitting YADI will persist"
          " for the next session.\n")
    print("/quit - Quits YADI.\n")
    print("====================")
def dbschema():
    """Show the current database schema (delegates to the query evaluator)."""
    qe.get_schema()
def set_db():
    """Interactively collect PostgreSQL connection settings and point the
    query engine at the resulting database URL.

    The password prompt (index 1) may be left blank; every other field is
    required. Typing /quitset at any prompt aborts without touching the
    engine configuration.
    """
    print("Setting database parameters. To quit at any time, type /quitset")
    prompts = ("Username> ", "Password> ", "Host> ", "Port> ", "Database> ")
    values = []
    for i, prompt in enumerate(prompts):
        x = ""
        while x == "":
            print(Fore.YELLOW + prompt + Fore.RESET, end="")
            x = input().strip()
            # Value comparisons use ==/!=; the original compared small ints
            # and "" with 'is', which only works by CPython interning accident.
            if x == "" and i != 1:
                print(Fore.RED + "Error: Field cannot be left blank." + Fore.RESET)
            if i == 1 and x == "":
                break  # empty password is allowed
        if x == "/quitset":
            return
        values.append(x)
    # Build a SQLAlchemy-style URL: postgresql://user[:password]@host:port/db
    str_engine = 'postgresql://' + values[0]
    if values[1] != "":
        str_engine += ':' + values[1]
    str_engine += '@' + values[2] + ':' + values[3] + '/' + values[4]
    qe.set_engine(str_engine)
def clrscr():
    """Clear the terminal: 'cls' on Windows, 'clear' elsewhere."""
    if os.name == 'nt':
        command = 'cls'
    else:
        command = 'clear'
    os.system(command)
def do_assert(input_line):
    """Handle the '/assert' command: drop the command token and run the
    remainder through the translator in assertion mode."""
    prefix_length = len('/assert')
    execute_translation(input_line[prefix_length:], True)
def loadscript(path):
    """Load and interpret a Datalog program from *path*.

    The path may arrive wrapped in double quotes (as typed after /script);
    they are stripped first. Lines starting with '%' are skipped entirely
    (treated as comments). I/O and interpreter errors are reported in red,
    never raised.
    """
    np = path.replace('\"', '')
    print("Loading file from: "+np)
    str_concat = ""
    try:
        with open(np, 'r') as f:
            for data_line in f:
                if not data_line.startswith('%'):
                    interpret_line(data_line)
                    str_concat += data_line
        print("Datalog program read:\n"+SyntaxHighlight().highlight(str_concat))
    except Exception as e:
        print(Fore.RED+str(e)+Fore.RESET)
def execute_translation(input_line, is_assertion = False):
    """Translate a Datalog statement into SQL and run each resulting query.

    input_line: raw Datalog text typed by the user (or read from a script).
    is_assertion: True when the statement came from /assert and should
        persist a new rule rather than only query.

    Evaluation errors are reported, not raised; errors containing
    'not return rows' are expected for row-less statements and ignored.
    """
    sql_queries = converter.convertDatalog2Sql(
        input_line,
        is_assertion
    )
    for s in sql_queries:
        try:
            qe.evaluate(s)
        except Exception as e:
            # PEP 8 membership test: 'x not in y', not 'not x in y'.
            if 'not return rows' not in str(e):
                print(Fore.RED+'Query evaluation error: '+str(e)+Fore.RESET)
def get_db_url():
    """Display the database URL the query evaluator currently uses."""
    print(Fore.YELLOW+"Current configuration:")
    qe.print_engine_url()
    print(Fore.RESET)
def drop_view(relation_name):
    """Drop the named SQL view if it exists; errors are reported, not raised.

    NOTE(review): relation_name is concatenated into the SQL statement
    unescaped and comes from interactive user input — a quoted/validated
    identifier would be safer.
    """
    try:
        qe.execute('DROP VIEW IF EXISTS ' + relation_name + ';')
    except Exception as e:
        print(Fore.RED+'Query evaluation error: '+str(e)+Fore.RESET)
def drop_relation(relation_name):
    """Drop the named table if it exists; CASCADE also removes dependent
    views. Errors are reported, not raised.

    NOTE(review): relation_name is concatenated into the SQL statement
    unescaped and comes from interactive user input — a quoted/validated
    identifier would be safer.
    """
    try:
        qe.execute('DROP TABLE IF EXISTS ' + relation_name + ' CASCADE;')
    except Exception as e:
        print(Fore.RED+'Query evaluation error: '+str(e)+Fore.RESET)
# TODO: ensure called on ctrl-C
def quit_yadi():
    """Release the evaluator's last resources, persist its configuration,
    then exit the process with status 0."""
    qe.dispose_last()
    qe.saveconf()
    # del db_state_tracker
    sys.exit(0)
def start():
    """Print the banner, show the current DB config when one initializes,
    then run the interactive read-eval loop forever (until /quit)."""
    # ASCII-art banner: backslashes are escaped, so it must stay verbatim.
    introString = """\
    =========================================
                  Welcome to
     __  __   ______   ____    ______
    /\ \ /\ \/\ _ \ /\ _`\ /\__ _\\
    \ `\`\\\\/'/\ \ \_\ \\\\ \ \/\ \\/_/\ \/
    `\ `\ /' \ \ __ \\\\ \ \ \ \ \ \\
    `\ \ \ \ \ \/\ \\\\ \ \_\ \ \_\ \__
    \ \_\ \ \_\ \_\\\\ \____/ /\_____\\
    \/_/ \/_/\/_/ \/___/ \/_____/
    Yet Another Datalog Intepreter v1.0
    =========================================
    To begin, type a Datalog query. For a list of commands, type /help"""
    print(Fore.YELLOW+introString+Fore.RESET)
    if qe.initialize_db():
        get_db_url()
    while True:
        print(Fore.YELLOW+'\nyadi> '+Fore.RESET, end="")
        read_line = input().strip()
        interpret_line(read_line)
def interpret_line(read_line):
    """Dispatch one line of user input.

    Lines starting with '/' are parsed as interpreter commands and routed
    to their handlers; anything else is treated as a Datalog statement and
    translated/executed. Blank lines are ignored.
    """
    read_line = read_line.strip()
    if read_line == "":
        return
    int_parser = IntParse()
    if read_line[0] == "/":
        try:
            parsed_statement = int_parser.parse_command(read_line)
            # NOTE: "/assert " and "/script " include a trailing space —
            # presumably the parser's convention for commands that take an
            # argument; confirm in IntParse.parse_command.
            if parsed_statement[0] == "/quit":
                quit_yadi()
            elif parsed_statement[0] == "/help":
                help()
            elif parsed_statement[0] == "/assert ":
                do_assert(read_line)
            elif parsed_statement[0] == "/script ":
                loadscript(parsed_statement[1])
            elif parsed_statement[0] == "/clrscr":
                clrscr()
            elif parsed_statement[0] == "/setdb":
                set_db()
            elif parsed_statement[0] == "/curdb":
                get_db_url()
            elif parsed_statement[0] == "/dbschema":
                dbschema()
            elif parsed_statement[0] == "/dropview":
                drop_view(parsed_statement[1])
            elif parsed_statement[0] == "/droptable":
                drop_relation(parsed_statement[1])
        except InterpreterException as e:
            print(Fore.RED+"Interpreter error: "+str(e)+Fore.RESET)
    else:
        execute_translation(read_line)
init()  # colorama init: enables ANSI colour handling (needed on Windows)
start()  # enter the interactive read-eval loop
| {
"content_hash": "d83fdd641b6ff66e82aa98d163769484",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 112,
"avg_line_length": 31.642857142857142,
"alnum_prop": 0.5428893905191874,
"repo_name": "saltzm/yadi",
"id": "a5cc523c041f8938d3661a38f96740e76b24a69e",
"size": "6202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yadi/yadi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "137182"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
} |
from .TupleBanditsPureExploration import *
from .Info import *
| {
"content_hash": "1949e4272b04626f35d6fd684c2a104b",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 42,
"avg_line_length": 31.5,
"alnum_prop": 0.8095238095238095,
"repo_name": "kgjamieson/NEXT-psych",
"id": "14a6b7291507998dd873eb437718ad4ff20f3a10",
"size": "63",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "next/apps/TupleBanditsPureExploration/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "146431"
},
{
"name": "HTML",
"bytes": "276926"
},
{
"name": "Java",
"bytes": "1496"
},
{
"name": "JavaScript",
"bytes": "2060070"
},
{
"name": "Makefile",
"bytes": "2880"
},
{
"name": "Perl",
"bytes": "5546"
},
{
"name": "Python",
"bytes": "870716"
},
{
"name": "Shell",
"bytes": "8508"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from reddit.forms import SubmissionForm
from reddit.models import Submission
from users.models import RedditUser
class TestSubmissionForm(TestCase):
    """Unit tests for SubmissionForm field validation."""

    def test_full_valid_submission(self):
        """A form with title, url and text validates."""
        form = SubmissionForm(data={
            'title': 'submission_title',
            'url': 'http://example.com',
            'text': 'submission text'
        })
        self.assertTrue(form.is_valid())

    def test_minimum_data_required(self):
        """A title alone is enough to validate."""
        form = SubmissionForm(data={'title': 'submission title'})
        self.assertTrue(form.is_valid())

    def test_invalid_data(self):
        """Over-long title/text and a malformed URL each produce errors."""
        form = SubmissionForm(data={
            'title': '.' * 300,
            'url': 'notaurl',
            'text': '.' * 5001
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['title'], ["Ensure this value has at most 250 characters (it has 300)."])
        self.assertEqual(form.errors['url'], ["Enter a valid URL."])
        self.assertEqual(form.errors['text'], ["Ensure this value has at most 5000 characters (it has 5001)."])
class TestSubmissionRequests(TestCase):
    """Integration tests for the /submit view: login gate, form rendering
    and submission creation."""
    def setUp(self):
        # Fresh test client plus one registered user to log in with.
        self.c = Client()
        self.login_data = {
            'username': 'submissiontest',
            'password': 'password'
        }
        RedditUser.objects.create(
            user=User.objects.create_user(**self.login_data)
        )
    def test_logged_out(self):
        # Anonymous users are redirected to login with ?next=<submit url>.
        r = self.c.get(reverse('submit'))
        self.assertRedirects(r, "{}?next={}".format(
            reverse('login'), reverse('submit')
        ))
    def test_logged_in_GET(self):
        # Authenticated GET renders the page with a SubmissionForm in context.
        self.c.login(**self.login_data)
        r = self.c.get(reverse('submit'))
        self.assertIsInstance(r.context['form'], SubmissionForm)
    def test_making_a_submission(self):
        # A valid POST persists the Submission and redirects to its thread.
        self.c.login(**self.login_data)
        test_data = {
            'title': 'submission title',
            'url': 'http://example.com',
            'text': 'submission text'
        }
        r = self.c.post(reverse('submit'), data=test_data, follow=True)
        submission = Submission.objects.filter(**test_data).first()
        self.assertIsNotNone(submission)
        self.assertRedirects(r, reverse('thread', args=(submission.id,)))
        self.assertContains(r, 'Submission created')
    def test_missing_fields(self):
        # POST without a title re-renders the form with a required-field error.
        self.c.login(**self.login_data)
        test_data = {
            'url': 'http://example.com',
            'text': 'submission text'
        }
        r = self.c.post(reverse('submit'), data=test_data)
        self.assertNotContains(r, 'Submission created')
        self.assertContains(r, 'This field is required.')
| {
"content_hash": "c472e50f960c98f84319b08ea1fb4593",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 111,
"avg_line_length": 35.146341463414636,
"alnum_prop": 0.5919500346981263,
"repo_name": "Nikola-K/django_reddit",
"id": "50e36fe769217ec9d11a693db74e906df3636aaa",
"size": "2882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reddit/tests/test_submission.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2416"
},
{
"name": "HTML",
"bytes": "22856"
},
{
"name": "JavaScript",
"bytes": "6902"
},
{
"name": "Python",
"bytes": "92178"
}
],
"symlink_target": ""
} |
import json
import os
import requests
import sys
REST_API="http://alpha.opencloud.us:8000/xos/"
NODES_API = REST_API + "nodes/"
SLICES_API = REST_API + "slices/"
SLIVERS_API = REST_API + "slivers/"
opencloud_auth=("demo@onlab.us", "demo")
def get_slice_id(slice_name):
    """Return the numeric id of the slice named *slice_name* via the REST API."""
    response = requests.get(
        SLICES_API + "?name=%s" % slice_name, auth=opencloud_auth
    )
    first_match = response.json()[0]
    return first_match["id"]
def get_node_id(host_name):
    """Return the id of the node whose name equals *host_name* (case-insensitive).

    Exits the process with status -1 when no node matches.
    """
    # NOTE(review): unlike the slice/sliver requests, no auth is passed
    # here — confirm the nodes endpoint is meant to be unauthenticated.
    r = requests.get(NODES_API)
    nodes = r.json()
    for node in nodes:
        if node["name"].lower() == host_name.lower():
            return node["id"]
    # Python 2 print-to-stderr syntax; this script targets Python 2.
    print >> sys.stderr, "Error: failed to find node %s" % host_name
    sys.exit(-1)
def get_slivers(slice_id=None, node_id=None):
    """Fetch slivers from the REST API, optionally filtered by slice/node id.

    Either filter may be omitted; with neither, all slivers are returned.
    """
    filters = [
        "%s=%s" % (field, str(value))
        for field, value in (("slice", slice_id), ("node", node_id))
        if value
    ]
    query_string = "?" + "&".join(filters) if filters else ""
    response = requests.get(SLIVERS_API + query_string, auth=opencloud_auth)
    return response.json()
def main():
    """CLI entry point: print the newest instance name for a slice on a host.

    Usage: get_instance_name.py <username> <password> <hostname> <slicename>
    Exits with -1 on bad arguments (helpers exit on lookup failure).
    """
    global opencloud_auth
    if len(sys.argv)!=5:
        # Python 2 print-to-stderr syntax; this script targets Python 2.
        print >> sys.stderr, "syntax: get_instance_name.py <username>, <password>, <hostname> <slicename>"
        sys.exit(-1)
    username = sys.argv[1]
    password = sys.argv[2]
    hostname = sys.argv[3]
    slice_name = sys.argv[4]
    # Replace the module-level default credentials with the CLI ones.
    opencloud_auth=(username, password)
    slice_id = get_slice_id(slice_name)
    node_id = get_node_id(hostname)
    slivers = get_slivers(slice_id, node_id)
    # Keep only slivers that actually have an instance name assigned.
    instance_names = [x["instance_name"] for x in slivers if x["instance_name"]]
    # return the last one in the list (i.e. the newest one)
    print sorted(instance_names)[-1]
if __name__ == "__main__":
    main()
| {
"content_hash": "ac68b643aa5fa4e0cef413e50bc78da2",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 106,
"avg_line_length": 25.58823529411765,
"alnum_prop": 0.6068965517241379,
"repo_name": "wathsalav/xos",
"id": "3450df59a241496b3062f8f611172705dc84f176",
"size": "1764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xos/tools/get_instance_name.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "370"
},
{
"name": "CSS",
"bytes": "37088"
},
{
"name": "HTML",
"bytes": "636864"
},
{
"name": "JavaScript",
"bytes": "760492"
},
{
"name": "Makefile",
"bytes": "2717"
},
{
"name": "Python",
"bytes": "1160110"
},
{
"name": "Shell",
"bytes": "10483"
}
],
"symlink_target": ""
} |
from flask import Flask
from graphql_server.flask import GraphQLView
from tests.flask.schema import Schema
def create_app(path="/graphql", **kwargs):
    """Build a debug Flask app serving the test GraphQL schema at *path*.

    Extra keyword arguments are forwarded to ``GraphQLView.as_view``.
    """
    flask_app = Flask(__name__)
    flask_app.debug = True
    graphql_view = GraphQLView.as_view("graphql", schema=Schema, **kwargs)
    flask_app.add_url_rule(path, view_func=graphql_view)
    return flask_app


if __name__ == "__main__":
    # Standalone run with the GraphiQL browser UI enabled.
    app = create_app(graphiql=True)
    app.run()
| {
"content_hash": "3bb83726e7721ade1cd66abc30bfa97c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 79,
"avg_line_length": 23,
"alnum_prop": 0.6570048309178744,
"repo_name": "graphql-python/graphql-server-core",
"id": "ec9e9d0661c2d6d7b1d1dd0b1fca0c5b63c09cae",
"size": "414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/flask/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "218628"
},
{
"name": "Shell",
"bytes": "329"
}
],
"symlink_target": ""
} |
import hashlib
from io import BytesIO
import logging
import socket
import pycurl
from common import DCOSAuth
from lrucache import LRUCache
logger = logging.getLogger('utils')
# The maximum number of clashes to allow when assigning a port.
MAX_CLASHES = 50
class ServicePortAssigner(object):
    """
    Helper class to assign service ports.
    Ordinarily Marathon should assign the service ports, but Marathon issue
    https://github.com/mesosphere/marathon/issues/3636 means that service
    ports are not returned for applications using IP-per-task. We work around
    that here by assigning deterministic ports from a configurable range when
    required.
    Note that auto-assigning ports is only useful when using vhost: the ports
    that we assign here are not exposed to the client.
    The LB command line options --min-serv-port-ip-per-task and
    --max-serv-port-ip-per-task specify the allowed range of ports to
    auto-assign from. The range of ports used for auto-assignment should be
    selected to ensure no clashes with the exposed LB ports and the
    Marathon-assigned services ports.
    The service port assigner provides a mechanism to auto assign service ports
    using the application name to generate service port (while preventing
    clashes when the port is already claimed by another app). The assigner
    provides a deterministic set of ports for a given ordered set of port
    requests.
    """
    def __init__(self):
        # Port range bounds and size; populated by set_ports().
        self.min_port = None
        self.max_port = None
        self.max_ports = None
        # True only once a usable (truthy) range has been configured.
        self.can_assign = False
        self.next_port = None
        # (app id, task port) -> assigned service port.
        self.ports_by_app = {}
    def _assign_new_service_port(self, app, task_port):
        """Pick an unused port for (app, task_port), or None when exhausted.

        Derives candidate ports from a SHA-1 of "<app id>-<task port>-<i>",
        so repeated runs produce the same assignments; falls back to a
        linear scan of the range after MAX_CLASHES hash collisions.
        """
        assert self.can_assign
        if self.max_ports <= len(self.ports_by_app):
            logger.warning("Service ports are exhausted")
            return None
        # We don't want to be searching forever, so limit the number of times
        # we clash to the number of remaining ports.
        ports = self.ports_by_app.values()
        port = None
        for i in range(MAX_CLASHES):
            hash_str = "%s-%s-%s" % (app['id'], task_port, i)
            hash_val = hashlib.sha1(hash_str.encode("utf-8")).hexdigest()
            # First 8 hex digits of the digest, folded into the port range.
            hash_int = int(hash_val[:8], 16)
            trial_port = self.min_port + (hash_int % self.max_ports)
            if trial_port not in ports:
                port = trial_port
                break
        if port is None:
            for port in range(self.min_port, self.max_port + 1):
                if port not in ports:
                    break
        # We must have assigned a unique port by now since we know there were
        # some available.
        assert port and port not in ports, port
        logger.debug("Assigned new port: %d", port)
        return port
    def _get_service_port(self, app, task_port):
        """Return the cached port for (app, task_port), assigning on a miss.

        A cached None (earlier exhaustion) is falsy, so assignment is retried.
        """
        key = (app['id'], task_port)
        port = (self.ports_by_app.get(key) or
                self._assign_new_service_port(app, task_port))
        self.ports_by_app[key] = port
        return port
    def set_ports(self, min_port, max_port):
        """
        Set the range of ports that we can use for auto-assignment of
        service ports - just for IP-per-task apps.
        :param min_port: The minimum port value
        :param max_port: The maximum port value
        """
        assert not self.ports_by_app
        assert max_port >= min_port
        self.min_port = min_port
        self.max_port = max_port
        self.max_ports = max_port - min_port + 1
        # NOTE: a min_port of 0 is truthy-false and disables assignment.
        self.can_assign = self.min_port and self.max_port
    def reset(self):
        """
        Reset the assigner so that ports are newly assigned.
        """
        self.ports_by_app = {}
    def get_service_ports(self, app):
        """
        Return a list of service ports for this app.
        :param app: The application.
        :return: The list of ports. Note that if auto-assigning and ports
        become exhausted, a port may be returned as None.
        """
        mode = get_app_networking_mode(app)
        if mode == "container" or mode == "container/bridge":
            # Here we must use portMappings
            portMappings = get_app_port_mappings(app)
            if len(portMappings) > 0:
                ports = filter(lambda p: p is not None,
                               map(lambda p: p.get('servicePort', None),
                                   portMappings))
                ports = list(ports)
                if ports:
                    return list(ports)
        ports = app.get('ports', [])
        if 'portDefinitions' in app:
            ports = filter(lambda p: p is not None,
                           map(lambda p: p.get('port', None),
                               app.get('portDefinitions', []))
                           )
            # Materialize the filter iterator so the truthiness check below
            # works on Python 3 (a lazy filter object is always truthy).
            ports = list(ports)  # wtf python?
        # This supports legacy ip-per-container for Marathon 1.4.x and prior
        if not ports and mode == "container" and self.can_assign \
                and len(app['tasks']) > 0:
            task = app['tasks'][0]
            task_ports = get_app_task_ports(app, task, mode)
            if len(task_ports) > 0:
                ports = [self._get_service_port(app, task_port)
                         for task_port in task_ports]
        logger.debug("Service ports: %r", ports)
        return ports
class CurlHttpEventStream(object):
    """Stream lines from an HTTP server-sent-events endpoint via pycurl.

    Drives a CurlMulti handle manually so reads can be interleaved with
    timeout/error checks; iterate with ``iter_lines()``.
    """
    def __init__(self, url, auth, verify):
        """Configure the curl handle for *url*.

        :param auth: DCOSAuth instance, a (user, password) tuple, or falsy.
        :param verify: CA bundle path, or falsy to disable TLS verification.
        """
        self.url = url
        # All received bytes are accumulated here by curl's WRITEDATA.
        self.received_buffer = BytesIO()
        headers = ['Cache-Control: no-cache', 'Accept: text/event-stream']
        self.curl = pycurl.Curl()
        self.curl.setopt(pycurl.URL, url)
        self.curl.setopt(pycurl.ENCODING, 'gzip')
        self.curl.setopt(pycurl.CONNECTTIMEOUT, 10)
        self.curl.setopt(pycurl.WRITEDATA, self.received_buffer)
        # Marathon >= 1.7.x returns 30x responses for /v2/events responses
        # when they're coming from a non-leader. So we follow redirects.
        self.curl.setopt(pycurl.FOLLOWLOCATION, True)
        self.curl.setopt(pycurl.MAXREDIRS, 1)
        self.curl.setopt(pycurl.UNRESTRICTED_AUTH, True)
        # The below settings are to prevent the connection from hanging if the
        # connection breaks silently. Since marathon-lb only listens, silent
        # connection failure results in marathon-lb waiting infinitely.
        #
        # Minimum bytes/second below which it is considered "low speed". So
        # "low speed" here refers to 0 bytes/second.
        self.curl.setopt(pycurl.LOW_SPEED_LIMIT, 1)
        # How long (in seconds) it's allowed to go below the speed limit
        # before it times out
        self.curl.setopt(pycurl.LOW_SPEED_TIME, 300)
        if auth and type(auth) is DCOSAuth:
            auth.refresh_auth_header()
            headers.append('Authorization: %s' % auth.auth_header)
        elif auth:
            # Plain (user, password) tuple -> HTTP basic auth.
            self.curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
            self.curl.setopt(pycurl.USERPWD, '%s:%s' % auth)
        if verify:
            self.curl.setopt(pycurl.CAINFO, verify)
        else:
            self.curl.setopt(pycurl.SSL_VERIFYHOST, 0)
            self.curl.setopt(pycurl.SSL_VERIFYPEER, 0)
        self.curl.setopt(pycurl.HTTPHEADER, headers)
        self.curlmulti = pycurl.CurlMulti()
        self.curlmulti.add_handle(self.curl)
        # 0 means "not yet known"; filled in lazily by _check_status_code().
        self.status_code = 0
    # Seconds to wait in curlmulti.select() between perform() rounds.
    SELECT_TIMEOUT = 10
    def _any_data_received(self):
        """True when the receive buffer holds unread bytes."""
        return self.received_buffer.tell() != 0
    def _get_received_data(self):
        """Return and clear everything accumulated in the receive buffer."""
        result = self.received_buffer.getvalue()
        self.received_buffer.truncate(0)
        self.received_buffer.seek(0)
        return result
    def _check_status_code(self):
        """Fetch the HTTP status once known; raise unless it is 200."""
        if self.status_code == 0:
            self.status_code = self.curl.getinfo(pycurl.HTTP_CODE)
        if self.status_code != 0 and self.status_code != 200:
            raise Exception(str(self.status_code) + ' ' + self.url)
    def _perform_on_curl(self):
        """Run perform() until curl stops asking to be called again."""
        while True:
            ret, num_handles = self.curlmulti.perform()
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break
        return num_handles
    def _iter_chunks(self):
        """Yield raw byte chunks as they arrive; stop when curl is done."""
        while True:
            remaining = self._perform_on_curl()
            if self._any_data_received():
                self._check_status_code()
                yield self._get_received_data()
            if remaining == 0:
                break
            self.curlmulti.select(self.SELECT_TIMEOUT)
        self._check_status_code()
        self._check_curl_errors()
    def _check_curl_errors(self):
        """Re-raise any transfer errors reported by the multi handle."""
        for f in self.curlmulti.info_read()[2]:
            raise pycurl.error(*f[1:])
    def iter_lines(self):
        """Iterate over complete lines of the event stream."""
        chunks = self._iter_chunks()
        return self._split_lines_from_chunks(chunks)
    @staticmethod
    def _split_lines_from_chunks(chunks):
        # same behaviour as requests' Response.iter_lines(...)
        pending = None
        for chunk in chunks:
            if pending is not None:
                chunk = pending + chunk
            lines = chunk.splitlines()
            # A chunk ending mid-line leaves its tail pending for the next one.
            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                pending = lines.pop()
            else:
                pending = None
            for line in lines:
                yield line
        if pending is not None:
            yield pending
def resolve_ip(host):
    """Resolve *host* to an IP address, caching successful lookups.

    :return: string, an empty string indicates that no ip was found.
    """
    cache = ip_cache.get()
    cached_ip = cache.get(host, "")
    if cached_ip != "":
        return cached_ip
    try:
        logger.debug("trying to resolve ip address for host %s", host)
        resolved = socket.gethostbyname(host)
        cache.set(host, resolved)
        return resolved
    except socket.gaierror:
        return ""
class LRUCacheSingleton(object):
    """Lazily-constructed holder for a single shared LRUCache instance."""

    def __init__(self):
        # Filled in on first get(), or injected explicitly via set().
        self.lru_cache = None

    def get(self):
        """Return the held cache, creating a fresh LRUCache on first use."""
        existing = self.lru_cache
        if existing is not None:
            return existing
        self.lru_cache = LRUCache()
        return self.lru_cache

    def set(self, lru_cache):
        """Replace the held cache instance."""
        self.lru_cache = lru_cache
# Module-level hostname -> IP cache, read and written by resolve_ip().
ip_cache = LRUCacheSingleton()
def get_app_networking_mode(app):
    """Derive the Marathon networking mode for *app*.

    Defaults to 'host'. A legacy 'ipAddress' section implies 'container'
    ('container/bridge' when the docker network is BRIDGE). A modern
    'networks' list, when present, takes precedence: the last entry wins.
    """
    mode = 'host'
    if app.get('ipAddress'):
        docker_network = (
            app.get('container', {}).get('docker', {}).get('network', '')
        )
        if docker_network == 'BRIDGE':
            mode = 'container/bridge'
        else:
            mode = 'container'
    # Modes cannot be mixed, so the last listed network decides.
    for network in app.get('networks', []):
        mode = network.get('mode', 'container')
    return mode
def get_task_ip(task, mode):
    """Return the IP address for *task* under networking *mode*.

    In 'container' mode the first of the task's own ipAddresses is used;
    otherwise the task's host name is resolved via resolve_ip().

    :return: string, an empty string indicates that no ip was found.
    """
    if mode == 'container':
        addresses = task.get('ipAddresses', [])
        task_ip = addresses[0].get('ipAddress', "") if addresses else ""
        if task_ip == "":
            logger.warning("Task %s does not yet have an ip address allocated",
                           task['id'])
            return ""
        return task_ip
    host = task.get('host', "")
    if host == "":
        logger.warning("Could not find task host, ignoring")
        return ""
    task_ip = resolve_ip(host)
    if task_ip == "":
        logger.warning("Could not resolve ip for host %s, ignoring",
                       host)
        return ""
    return task_ip
def get_app_port_mappings(app):
    """Return the app's port mappings, preferring the legacy docker section.

    :return: list
    """
    container = app.get('container', {})
    docker_mappings = container.get('docker', {}).get('portMappings', [])
    if docker_mappings:
        return docker_mappings
    return container.get('portMappings', [])
def get_task_ports(task):
    """Return the host ports Marathon allocated to *task* ([] when absent).

    :return: list
    """
    return task.get('ports', [])
def get_port_definition_ports(app):
    """Return the 'port' value of every portDefinition that declares one.

    :return: list
    """
    return [
        definition['port']
        for definition in app.get('portDefinitions', [])
        if 'port' in definition
    ]
def get_ip_address_discovery_ports(app):
    """Return the discovery port numbers from the app's 'ipAddress' section.

    Reads app['ipAddress']['discovery']['ports'] and returns each entry's
    'number' coerced to int; entries without 'number' are skipped.

    :return: list (empty when the section is missing, null, or empty)
    """
    # `or {}` also covers an explicit `"ipAddress": null` in the app
    # definition, which the previous len() check crashed on; it also
    # avoids the redundant second app.get('ipAddress') lookup.
    ip_address = app.get('ipAddress') or {}
    if not ip_address:
        return []
    discovery = ip_address.get('discovery', {})
    return [int(port['number'])
            for port in discovery.get('ports', [])
            if 'number' in port]
def get_port_mapping_ports(app):
    """Return the containerPort of every port mapping that declares one.

    :return: list
    """
    return [
        mapping['containerPort']
        for mapping in get_app_port_mappings(app)
        if 'containerPort' in mapping
    ]
def get_app_task_ports(app, task, mode):
    """Return the ports for *task* of *app*, given networking *mode*.

    Tries the mode-appropriate sources in priority order and returns the
    first non-empty list (the final source is returned even when empty).

    :return: list
    """
    if mode == 'host':
        return (get_task_ports(task) or
                get_port_definition_ports(app))
    if mode == 'container/bridge':
        # portDefinitions only carry ports on Marathon < 1.5.
        return (get_task_ports(task) or
                get_port_definition_ports(app) or
                get_port_mapping_ports(app))
    return (get_ip_address_discovery_ports(app) or
            get_port_mapping_ports(app))
def get_task_ip_and_ports(app, task):
    """
    Return the IP address and list of ports used to access a task. For a
    task using IP-per-task, this is the IP address of the task, and the ports
    exposed by the task services. Otherwise, this is the IP address of the
    host and the ports exposed by the host.
    :param app: The application owning the task.
    :param task: The task.
    :return: Tuple of (ip address, [ports]). Returns (None, None) if no IP
    address could be resolved or found for the task.
    """
    mode = get_app_networking_mode(app)
    task_ip = get_task_ip(task, mode)
    task_ports = get_app_task_ports(app, task, mode)
    # An empty string / empty list from either helper means "not found".
    if not task_ip or not task_ports:
        return None, None
    logger.debug("Returning: %r, %r", task_ip, task_ports)
    return task_ip, task_ports
| {
"content_hash": "7d5532b4a8f99cd5e1077dcc6b98a449",
"timestamp": "",
"source": "github",
"line_count": 441,
"max_line_length": 79,
"avg_line_length": 32.91156462585034,
"alnum_prop": 0.5789582472095908,
"repo_name": "mesosphere/marathon-lb",
"id": "e75c75f3b76a04c72488beceaeff944fd930d9f5",
"size": "14538",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4139"
},
{
"name": "Lua",
"bytes": "4519"
},
{
"name": "Makefile",
"bytes": "9298"
},
{
"name": "Python",
"bytes": "381924"
},
{
"name": "Ruby",
"bytes": "2773"
},
{
"name": "Shell",
"bytes": "17596"
}
],
"symlink_target": ""
} |
"""Tests for Konnected Alarm Panel config flow."""
from asynctest import patch
import pytest
from homeassistant.components import konnected
from homeassistant.components.konnected import config_flow
from tests.common import MockConfigEntry
@pytest.fixture(name="mock_panel")
async def mock_panel_fixture():
    """Mock a Konnected Panel bridge."""
    with patch("konnected.Client", autospec=True) as konn_client:
        def mock_constructor(host, port, websession):
            """Fake the panel constructor."""
            # Record the constructor args on the mock so tests can assert
            # which host/port the flow connected to.
            konn_client.host = host
            konn_client.port = port
            return konn_client
        konn_client.side_effect = mock_constructor
        # Stand in for the real client's ClientError attribute so failures
        # raise the flow's CannotConnect.
        konn_client.ClientError = config_flow.CannotConnect
        yield konn_client
async def test_flow_works(hass, mock_panel):
    """Test the happy-path user config flow for a standard panel."""
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "user"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    mock_panel.get_status.return_value = {
        "mac": "11:22:33:44:55:66",
        "name": "Konnected",
    }
    # Step 1: user supplies host/port; the flow queries the panel's status.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"port": 1234, "host": "1.2.3.4"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "confirm"
    assert result["description_placeholders"] == {
        "model": "Konnected Alarm Panel",
        "host": "1.2.3.4",
        "port": 1234,
    }
    # Step 2: user confirms; an entry is created with a generated token.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "create_entry"
    assert result["data"]["host"] == "1.2.3.4"
    assert result["data"]["port"] == 1234
    assert result["data"]["model"] == "Konnected"
    assert len(result["data"]["access_token"]) == 20  # confirm generated token size
    assert result["data"]["default_options"] == config_flow.OPTIONS_SCHEMA(
        {config_flow.CONF_IO: {}}
    )
async def test_pro_flow_works(hass, mock_panel):
    """Test the happy-path user config flow for a Pro panel."""
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "user"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # The reported device name selects the Pro model in the flow.
    mock_panel.get_status.return_value = {
        "mac": "11:22:33:44:55:66",
        "name": "Konnected Pro",
    }
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"port": 1234, "host": "1.2.3.4"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "confirm"
    assert result["description_placeholders"] == {
        "model": "Konnected Alarm Panel Pro",
        "host": "1.2.3.4",
        "port": 1234,
    }
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "create_entry"
    assert result["data"]["host"] == "1.2.3.4"
    assert result["data"]["port"] == 1234
    assert result["data"]["model"] == "Konnected Pro"
    assert len(result["data"]["access_token"]) == 20  # confirm generated token size
    assert result["data"]["default_options"] == config_flow.OPTIONS_SCHEMA(
        {config_flow.CONF_IO: {}}
    )
async def test_ssdp(hass, mock_panel):
    """Test a panel being discovered."""
    mock_panel.get_status.return_value = {
        "mac": "11:22:33:44:55:66",
        "name": "Konnected",
    }
    # SSDP discovery supplies the location URL; host/port are parsed from it.
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": "ssdp"},
        data={
            "ssdp_location": "http://1.2.3.4:1234/Device.xml",
            "manufacturer": config_flow.KONN_MANUFACTURER,
            "modelName": config_flow.KONN_MODEL,
        },
    )
    # Discovery lands the user directly on the confirm step.
    assert result["type"] == "form"
    assert result["step_id"] == "confirm"
    assert result["description_placeholders"] == {
        "model": "Konnected Alarm Panel",
        "host": "1.2.3.4",
        "port": 1234,
    }
async def test_import_no_host_user_finish(hass, mock_panel):
    """Test importing a panel with no host info."""
    mock_panel.get_status.return_value = {
        "mac": "11:22:33:44:55:66",
        "name": "Konnected Pro",
    }
    # Import data carries options and a device id but no host/port.
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": "import"},
        data={
            "default_options": {
                "blink": True,
                "discovery": True,
                "io": {
                    "1": "Disabled",
                    "10": "Disabled",
                    "11": "Disabled",
                    "12": "Disabled",
                    "2": "Disabled",
                    "3": "Disabled",
                    "4": "Disabled",
                    "5": "Disabled",
                    "6": "Disabled",
                    "7": "Disabled",
                    "8": "Disabled",
                    "9": "Disabled",
                    "alarm1": "Disabled",
                    "alarm2_out2": "Disabled",
                    "out": "Disabled",
                    "out1": "Disabled",
                },
            },
            "id": "aabbccddeeff",
        },
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # confirm user is prompted to enter host
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"host": "1.1.1.1", "port": 1234}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "confirm"
    assert result["description_placeholders"] == {
        "model": "Konnected Alarm Panel Pro",
        "host": "1.1.1.1",
        "port": 1234,
    }
    # final confirmation
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "create_entry"
async def test_ssdp_already_configured(hass, mock_panel):
    """Test if a discovered panel has already been configured."""
    # Pre-existing entry with the same unique id (MAC without colons).
    MockConfigEntry(
        domain="konnected",
        data={"host": "0.0.0.0", "port": 1234},
        unique_id="112233445566",
    ).add_to_hass(hass)
    mock_panel.get_status.return_value = {
        "mac": "11:22:33:44:55:66",
        "name": "Konnected Pro",
    }
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": "ssdp"},
        data={
            "ssdp_location": "http://0.0.0.0:1234/Device.xml",
            "manufacturer": config_flow.KONN_MANUFACTURER,
            "modelName": config_flow.KONN_MODEL_PRO,
        },
    )
    # Same host/port and unique id -> discovery aborts instead of duplicating.
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
async def test_ssdp_host_update(hass, mock_panel):
    """Test if a discovered panel has already been configured but changed host."""
    # Full existing entry (config + options) for the same device id.
    device_config = config_flow.CONFIG_ENTRY_SCHEMA(
        {
            "host": "1.2.3.4",
            "port": 1234,
            "id": "112233445566",
            "model": "Konnected Pro",
            "access_token": "11223344556677889900",
            "default_options": config_flow.OPTIONS_SCHEMA({config_flow.CONF_IO: {}}),
        }
    )
    device_options = config_flow.OPTIONS_SCHEMA(
        {
            "io": {
                "2": "Binary Sensor",
                "6": "Binary Sensor",
                "10": "Binary Sensor",
                "3": "Digital Sensor",
                "7": "Digital Sensor",
                "11": "Digital Sensor",
                "4": "Switchable Output",
                "out1": "Switchable Output",
                "alarm1": "Switchable Output",
            },
            "binary_sensors": [
                {"zone": "2", "type": "door"},
                {"zone": "6", "type": "window", "name": "winder", "inverse": True},
                {"zone": "10", "type": "door"},
            ],
            "sensors": [
                {"zone": "3", "type": "dht"},
                {"zone": "7", "type": "ds18b20", "name": "temper"},
                {"zone": "11", "type": "dht"},
            ],
            "switches": [
                {"zone": "4"},
                {
                    "zone": "8",
                    "name": "switcher",
                    "activation": "low",
                    "momentary": 50,
                    "pause": 100,
                    "repeat": 4,
                },
                {"zone": "out1"},
                {"zone": "alarm1"},
            ],
        }
    )
    MockConfigEntry(
        domain="konnected",
        data=device_config,
        options=device_options,
        unique_id="112233445566",
    ).add_to_hass(hass)
    mock_panel.get_status.return_value = {
        "mac": "11:22:33:44:55:66",
        "name": "Konnected Pro",
    }
    # Same device id re-discovered at a different address.
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": "ssdp"},
        data={
            "ssdp_location": "http://1.1.1.1:1234/Device.xml",
            "manufacturer": config_flow.KONN_MANUFACTURER,
            "modelName": config_flow.KONN_MODEL_PRO,
        },
    )
    assert result["type"] == "abort"
    # confirm the host value was updated
    entry = hass.config_entries.async_entries(config_flow.DOMAIN)[0]
    assert entry.data["host"] == "1.1.1.1"
    assert entry.data["port"] == 1234
async def test_import_existing_config(hass, mock_panel):
    """Test importing a host with an existing config file."""
    mock_panel.get_status.return_value = {
        "mac": "11:22:33:44:55:66",
        "name": "Konnected Pro",
    }
    # YAML-style config is validated through DEVICE_SCHEMA_YAML before import.
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": "import"},
        data=konnected.DEVICE_SCHEMA_YAML(
            {
                "host": "1.2.3.4",
                "port": 1234,
                "id": "112233445566",
                "binary_sensors": [
                    {"zone": "2", "type": "door"},
                    {"zone": 6, "type": "window", "name": "winder", "inverse": True},
                    {"zone": "10", "type": "door"},
                ],
                "sensors": [
                    {"zone": "3", "type": "dht"},
                    {"zone": 7, "type": "ds18b20", "name": "temper"},
                    {"zone": "11", "type": "dht"},
                ],
                "switches": [
                    {"zone": "4"},
                    {
                        "zone": 8,
                        "name": "switcher",
                        "activation": "low",
                        "momentary": 50,
                        "pause": 100,
                        "repeat": 4,
                    },
                    {"zone": "out1"},
                    {"zone": "alarm1"},
                ],
            }
        ),
    )
    assert result["type"] == "form"
    assert result["step_id"] == "confirm"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "create_entry"
    # The imported zones become default_options: configured zones get their
    # roles, every other zone is "Disabled", and schema defaults are filled in
    # (inverse, poll_interval, activation). Integer zones are normalized to str.
    assert result["data"] == {
        "host": "1.2.3.4",
        "port": 1234,
        "id": "112233445566",
        "model": "Konnected Pro",
        "access_token": result["data"]["access_token"],
        "default_options": {
            "io": {
                "1": "Disabled",
                "5": "Disabled",
                "9": "Disabled",
                "12": "Disabled",
                "out": "Disabled",
                "alarm2_out2": "Disabled",
                "2": "Binary Sensor",
                "6": "Binary Sensor",
                "10": "Binary Sensor",
                "3": "Digital Sensor",
                "7": "Digital Sensor",
                "11": "Digital Sensor",
                "4": "Switchable Output",
                "8": "Switchable Output",
                "out1": "Switchable Output",
                "alarm1": "Switchable Output",
            },
            "blink": True,
            "discovery": True,
            "binary_sensors": [
                {"zone": "2", "type": "door", "inverse": False},
                {"zone": "6", "type": "window", "name": "winder", "inverse": True},
                {"zone": "10", "type": "door", "inverse": False},
            ],
            "sensors": [
                {"zone": "3", "type": "dht", "poll_interval": 3},
                {"zone": "7", "type": "ds18b20", "name": "temper", "poll_interval": 3},
                {"zone": "11", "type": "dht", "poll_interval": 3},
            ],
            "switches": [
                {"activation": "high", "zone": "4"},
                {
                    "zone": "8",
                    "name": "switcher",
                    "activation": "low",
                    "momentary": 50,
                    "pause": 100,
                    "repeat": 4,
                },
                {"activation": "high", "zone": "out1"},
                {"activation": "high", "zone": "alarm1"},
            ],
        },
    }
async def test_import_existing_config_entry(hass, mock_panel):
    """Test importing a host that has an existing config entry."""
    # Existing entry for the same device id, with an extra custom key.
    MockConfigEntry(
        domain="konnected",
        data={
            "host": "0.0.0.0",
            "port": 1111,
            "id": "112233445566",
            "extra": "something",
        },
        unique_id="112233445566",
    ).add_to_hass(hass)
    mock_panel.get_status.return_value = {
        "mac": "11:22:33:44:55:66",
        "name": "Konnected Pro",
    }
    # utilize a global access token this time
    hass.data[config_flow.DOMAIN] = {"access_token": "SUPERSECRETTOKEN"}
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": "import"},
        data={
            "host": "1.2.3.4",
            "port": 1234,
            "id": "112233445566",
            "default_options": {
                "blink": True,
                "discovery": True,
                "io": {
                    "1": "Disabled",
                    "10": "Binary Sensor",
                    "11": "Disabled",
                    "12": "Disabled",
                    "2": "Binary Sensor",
                    "3": "Disabled",
                    "4": "Disabled",
                    "5": "Disabled",
                    "6": "Binary Sensor",
                    "7": "Disabled",
                    "8": "Disabled",
                    "9": "Disabled",
                    "alarm1": "Disabled",
                    "alarm2_out2": "Disabled",
                    "out": "Disabled",
                    "out1": "Disabled",
                },
                "binary_sensors": [
                    {"inverse": False, "type": "door", "zone": "2"},
                    {"inverse": True, "type": "Window", "name": "winder", "zone": "6"},
                    {"inverse": False, "type": "door", "zone": "10"},
                ],
            },
        },
    )
    assert result["type"] == "abort"
    # We should have updated the entry
    assert len(hass.config_entries.async_entries("konnected")) == 1
    # Host/port are refreshed, the global token is applied, and unrelated
    # existing keys ("extra") are preserved.
    assert hass.config_entries.async_entries("konnected")[0].data == {
        "host": "1.2.3.4",
        "port": 1234,
        "id": "112233445566",
        "model": "Konnected Pro",
        "access_token": "SUPERSECRETTOKEN",
        "extra": "something",
    }
async def test_import_pin_config(hass, mock_panel):
    """Test importing a host with an existing config file that specifies pin configs."""
    mock_panel.get_status.return_value = {
        "mac": "11:22:33:44:55:66",
        "name": "Konnected Pro",
    }
    # Legacy configs may address IO by "pin" instead of "zone"; both forms
    # are mixed here deliberately.
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": "import"},
        data=konnected.DEVICE_SCHEMA_YAML(
            {
                "host": "1.2.3.4",
                "port": 1234,
                "id": "112233445566",
                "binary_sensors": [
                    {"pin": 1, "type": "door"},
                    {"pin": "2", "type": "window", "name": "winder", "inverse": True},
                    {"zone": "3", "type": "door"},
                ],
                "sensors": [
                    {"zone": 4, "type": "dht"},
                    {"pin": "7", "type": "ds18b20", "name": "temper"},
                ],
                "switches": [
                    {
                        "pin": "8",
                        "name": "switcher",
                        "activation": "low",
                        "momentary": 50,
                        "pause": 100,
                        "repeat": 4,
                    },
                    {"zone": "6"},
                ],
            }
        ),
    )
    assert result["type"] == "form"
    assert result["step_id"] == "confirm"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "create_entry"
    # Pins are translated to their zone equivalents in the stored options
    # (e.g. pin 7 -> zone "5", pin 8 -> zone "out").
    assert result["data"] == {
        "host": "1.2.3.4",
        "port": 1234,
        "id": "112233445566",
        "model": "Konnected Pro",
        "access_token": result["data"]["access_token"],
        "default_options": {
            "io": {
                "7": "Disabled",
                "8": "Disabled",
                "9": "Disabled",
                "10": "Disabled",
                "11": "Disabled",
                "12": "Disabled",
                "out1": "Disabled",
                "alarm1": "Disabled",
                "alarm2_out2": "Disabled",
                "1": "Binary Sensor",
                "2": "Binary Sensor",
                "3": "Binary Sensor",
                "4": "Digital Sensor",
                "5": "Digital Sensor",
                "6": "Switchable Output",
                "out": "Switchable Output",
            },
            "blink": True,
            "discovery": True,
            "binary_sensors": [
                {"zone": "1", "type": "door", "inverse": False},
                {"zone": "2", "type": "window", "name": "winder", "inverse": True},
                {"zone": "3", "type": "door", "inverse": False},
            ],
            "sensors": [
                {"zone": "4", "type": "dht", "poll_interval": 3},
                {"zone": "5", "type": "ds18b20", "name": "temper", "poll_interval": 3},
            ],
            "switches": [
                {
                    "zone": "out",
                    "name": "switcher",
                    "activation": "low",
                    "momentary": 50,
                    "pause": 100,
                    "repeat": 4,
                },
                {"activation": "high", "zone": "6"},
            ],
        },
    }
async def test_option_flow(hass, mock_panel):
    """Test config flow options."""
    device_config = config_flow.CONFIG_ENTRY_SCHEMA(
        {
            "host": "1.2.3.4",
            "port": 1234,
            "id": "112233445566",
            "model": "Konnected",
            "access_token": "11223344556677889900",
            "default_options": config_flow.OPTIONS_SCHEMA({config_flow.CONF_IO: {}}),
        }
    )
    device_options = config_flow.OPTIONS_SCHEMA({"io": {}})
    entry = MockConfigEntry(
        domain="konnected",
        data=device_config,
        options=device_options,
        unique_id="112233445566",
    )
    entry.add_to_hass(hass)
    result = await hass.config_entries.options.async_init(
        entry.entry_id, context={"source": "test"}
    )
    # Step 1: assign a role to each IO zone.
    assert result["type"] == "form"
    assert result["step_id"] == "options_io"
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            "1": "Disabled",
            "2": "Binary Sensor",
            "3": "Digital Sensor",
            "4": "Switchable Output",
            "5": "Disabled",
            "6": "Binary Sensor",
            "out": "Switchable Output",
        },
    )
    # Step 2+: the flow walks each configured zone in order, asking for the
    # zone-specific details (binary sensors, then digital sensors, then
    # switches), and finishes with misc options.
    assert result["type"] == "form"
    assert result["step_id"] == "options_binary"
    # zone 2
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"type": "door"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_binary"
    # zone 6
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={"type": "window", "name": "winder", "inverse": True},
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_digital"
    # zone 3
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"type": "dht"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_switch"
    # zone 4
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_switch"
    # zone out
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            "name": "switcher",
            "activation": "low",
            "momentary": 50,
            "pause": 100,
            "repeat": 4,
        },
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_misc"
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"blink": True},
    )
    # Finished: disabled zones are dropped and defaults are filled in.
    assert result["type"] == "create_entry"
    assert result["data"] == {
        "io": {
            "2": "Binary Sensor",
            "3": "Digital Sensor",
            "4": "Switchable Output",
            "6": "Binary Sensor",
            "out": "Switchable Output",
        },
        "blink": True,
        "binary_sensors": [
            {"zone": "2", "type": "door", "inverse": False},
            {"zone": "6", "type": "window", "name": "winder", "inverse": True},
        ],
        "sensors": [{"zone": "3", "type": "dht", "poll_interval": 3}],
        "switches": [
            {"activation": "high", "zone": "4"},
            {
                "zone": "out",
                "name": "switcher",
                "activation": "low",
                "momentary": 50,
                "pause": 100,
                "repeat": 4,
            },
        ],
    }
async def test_option_flow_pro(hass, mock_panel):
    """Test config flow options for pro board."""
    device_config = config_flow.CONFIG_ENTRY_SCHEMA(
        {
            "host": "1.2.3.4",
            "port": 1234,
            "id": "112233445566",
            "model": "Konnected Pro",
            "access_token": "11223344556677889900",
            "default_options": config_flow.OPTIONS_SCHEMA({config_flow.CONF_IO: {}}),
        }
    )
    entry = MockConfigEntry(
        domain="konnected",
        data=device_config,
        options=config_flow.OPTIONS_SCHEMA({"io": {}}),
        unique_id="112233445566",
    )
    entry.add_to_hass(hass)

    result = await hass.config_entries.options.async_init(
        entry.entry_id, context={"source": "test"}
    )

    async def _submit(payload):
        # Advance the options flow one step with the given user input.
        nonlocal result
        result = await hass.config_entries.options.async_configure(
            result["flow_id"], user_input=payload
        )

    def _expect_step(step_id):
        # The flow should present another form for the given step.
        assert result["type"] == "form"
        assert result["step_id"] == step_id

    _expect_step("options_io")
    await _submit(
        {
            "1": "Disabled",
            "2": "Binary Sensor",
            "3": "Digital Sensor",
            "4": "Switchable Output",
            "5": "Disabled",
            "6": "Binary Sensor",
            "7": "Digital Sensor",
        }
    )

    _expect_step("options_io_ext")
    await _submit(
        {
            "8": "Switchable Output",
            "9": "Disabled",
            "10": "Binary Sensor",
            "11": "Digital Sensor",
            "12": "Disabled",
            "out1": "Switchable Output",
            "alarm1": "Switchable Output",
            "alarm2_out2": "Disabled",
        }
    )

    # Binary sensor zones are configured one at a time: 2, 6, 10.
    _expect_step("options_binary")
    await _submit({"type": "door"})  # zone 2
    _expect_step("options_binary")
    await _submit({"type": "window", "name": "winder", "inverse": True})  # zone 6
    _expect_step("options_binary")
    await _submit({"type": "door"})  # zone 10

    # Digital sensor zones: 3, 7, 11.
    _expect_step("options_digital")
    await _submit({"type": "dht"})  # zone 3
    _expect_step("options_digital")
    await _submit({"type": "ds18b20", "name": "temper"})  # zone 7
    _expect_step("options_digital")
    await _submit({"type": "dht"})  # zone 11

    # Switchable output zones: 4, 8, out1, alarm1.
    _expect_step("options_switch")
    await _submit({})  # zone 4
    _expect_step("options_switch")
    await _submit(
        {
            "name": "switcher",
            "activation": "low",
            "momentary": 50,
            "pause": 100,
            "repeat": 4,
        }
    )  # zone 8
    _expect_step("options_switch")
    await _submit({})  # zone out1
    _expect_step("options_switch")
    await _submit({})  # zone alarm1

    _expect_step("options_misc")
    await _submit({"blink": True})

    assert result["type"] == "create_entry"
    assert result["data"] == {
        "io": {
            "10": "Binary Sensor",
            "11": "Digital Sensor",
            "2": "Binary Sensor",
            "3": "Digital Sensor",
            "4": "Switchable Output",
            "6": "Binary Sensor",
            "7": "Digital Sensor",
            "8": "Switchable Output",
            "alarm1": "Switchable Output",
            "out1": "Switchable Output",
        },
        "blink": True,
        "binary_sensors": [
            {"zone": "2", "type": "door", "inverse": False},
            {"zone": "6", "type": "window", "name": "winder", "inverse": True},
            {"zone": "10", "type": "door", "inverse": False},
        ],
        "sensors": [
            {"zone": "3", "type": "dht", "poll_interval": 3},
            {"zone": "7", "type": "ds18b20", "name": "temper", "poll_interval": 3},
            {"zone": "11", "type": "dht", "poll_interval": 3},
        ],
        "switches": [
            {"activation": "high", "zone": "4"},
            {
                "zone": "8",
                "name": "switcher",
                "activation": "low",
                "momentary": 50,
                "pause": 100,
                "repeat": 4,
            },
            {"activation": "high", "zone": "out1"},
            {"activation": "high", "zone": "alarm1"},
        ],
    }
async def test_option_flow_import(hass, mock_panel):
    """Test config flow options imported from configuration.yaml."""
    # Seed the entry with default_options only (no stored options) so the
    # options flow must fall back to the imported YAML defaults.
    device_options = config_flow.OPTIONS_SCHEMA(
        {
            "io": {
                "1": "Binary Sensor",
                "2": "Digital Sensor",
                "3": "Switchable Output",
            },
            "binary_sensors": [
                {"zone": "1", "type": "window", "name": "winder", "inverse": True},
            ],
            "sensors": [{"zone": "2", "type": "ds18b20", "name": "temper"}],
            "switches": [
                {
                    "zone": "3",
                    "name": "switcher",
                    "activation": "low",
                    "momentary": 50,
                    "pause": 100,
                    "repeat": 4,
                },
            ],
        }
    )
    device_config = config_flow.CONFIG_ENTRY_SCHEMA(
        {
            "host": "1.2.3.4",
            "port": 1234,
            "id": "112233445566",
            "model": "Konnected Pro",
            "access_token": "11223344556677889900",
            "default_options": device_options,
        }
    )
    entry = MockConfigEntry(
        domain="konnected", data=device_config, unique_id="112233445566"
    )
    entry.add_to_hass(hass)
    result = await hass.config_entries.options.async_init(
        entry.entry_id, context={"source": "test"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_io"
    # confirm the defaults are set based on current config - we'll spot check this throughout
    schema = result["data_schema"]({})
    assert schema["1"] == "Binary Sensor"
    assert schema["2"] == "Digital Sensor"
    assert schema["3"] == "Switchable Output"
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            "1": "Binary Sensor",
            "2": "Digital Sensor",
            "3": "Switchable Output",
        },
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_io_ext"
    # extended IO zones default to Disabled since none were imported
    schema = result["data_schema"]({})
    assert schema["8"] == "Disabled"
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={},
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_binary"
    # zone 1 - defaults come from the imported binary sensor config
    schema = result["data_schema"]({})
    assert schema["type"] == "window"
    assert schema["name"] == "winder"
    assert schema["inverse"] is True
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"type": "door"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_digital"
    # zone 2 - defaults come from the imported digital sensor config
    schema = result["data_schema"]({})
    assert schema["type"] == "ds18b20"
    assert schema["name"] == "temper"
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"type": "dht"},
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_switch"
    # zone 3 - defaults come from the imported switch config
    schema = result["data_schema"]({})
    assert schema["name"] == "switcher"
    assert schema["activation"] == "low"
    assert schema["momentary"] == 50
    assert schema["pause"] == 100
    assert schema["repeat"] == 4
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"activation": "high"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_misc"
    schema = result["data_schema"]({})
    assert schema["blink"] is True
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"blink": False},
    )
    # verify the updated fields: new zone types/activation applied while
    # names imported from YAML are preserved
    assert result["type"] == "create_entry"
    assert result["data"] == {
        "io": {"1": "Binary Sensor", "2": "Digital Sensor", "3": "Switchable Output"},
        "blink": False,
        "binary_sensors": [
            {"zone": "1", "type": "door", "inverse": True, "name": "winder"},
        ],
        "sensors": [
            {"zone": "2", "type": "dht", "poll_interval": 3, "name": "temper"},
        ],
        "switches": [
            {
                "zone": "3",
                "name": "switcher",
                "activation": "high",
                "momentary": 50,
                "pause": 100,
                "repeat": 4,
            },
        ],
    }
async def test_option_flow_existing(hass, mock_panel):
    """Test config flow options with existing already in place."""
    # Existing options are stored on the entry itself (options=...), while
    # default_options on the data are empty - the flow should prefer the
    # stored options when pre-filling the form.
    device_options = config_flow.OPTIONS_SCHEMA(
        {
            "io": {
                "1": "Binary Sensor",
                "2": "Digital Sensor",
                "3": "Switchable Output",
            },
            "binary_sensors": [
                {"zone": "1", "type": "window", "name": "winder", "inverse": True},
            ],
            "sensors": [{"zone": "2", "type": "ds18b20", "name": "temper"}],
            "switches": [
                {
                    "zone": "3",
                    "name": "switcher",
                    "activation": "low",
                    "momentary": 50,
                    "pause": 100,
                    "repeat": 4,
                },
            ],
        }
    )
    device_config = config_flow.CONFIG_ENTRY_SCHEMA(
        {
            "host": "1.2.3.4",
            "port": 1234,
            "id": "112233445566",
            "model": "Konnected Pro",
            "access_token": "11223344556677889900",
            "default_options": config_flow.OPTIONS_SCHEMA({"io": {}}),
        }
    )
    entry = MockConfigEntry(
        domain="konnected",
        data=device_config,
        options=device_options,
        unique_id="112233445566",
    )
    entry.add_to_hass(hass)
    result = await hass.config_entries.options.async_init(
        entry.entry_id, context={"source": "test"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "options_io"
    # confirm the defaults are pulled in from the existing options
    schema = result["data_schema"]({})
    assert schema["1"] == "Binary Sensor"
    assert schema["2"] == "Digital Sensor"
    assert schema["3"] == "Switchable Output"
| {
"content_hash": "11d7018f18c99dff67aabfbb9fbccf31",
"timestamp": "",
"source": "github",
"line_count": 1052,
"max_line_length": 93,
"avg_line_length": 32.599809885931556,
"alnum_prop": 0.47662924624580844,
"repo_name": "postlund/home-assistant",
"id": "9b7a498731d0932d17da982a5a2eac4216e213ff",
"size": "34295",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/konnected/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
} |
import pytest
from flask_appbuilder.security.sqla.models import Role
from parameterized import parameterized
from airflow.api_connexion.exceptions import EXCEPTIONS_LINK_MAP
from airflow.security import permissions
from airflow.www.security import EXISTING_ROLES
from tests.test_utils.api_connexion_utils import (
assert_401,
create_role,
create_user,
delete_role,
delete_user,
)
@pytest.fixture(scope="module")
def configured_app(minimal_app_for_api):
    # Module-scoped app with two users: "test" holds every role/permission
    # action exercised by these endpoints, "test_no_permissions" holds none
    # (used for the 403 checks).
    app = minimal_app_for_api
    create_user(
        app, # type: ignore
        username="test",
        role_name="Test",
        permissions=[
            (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_ROLE),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE),
            (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_ROLE),
            (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_ROLE),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_PERMISSION),
        ],
    )
    create_user(app, username="test_no_permissions", role_name="TestNoPermissions")  # type: ignore
    yield app
    # Teardown after all module tests: remove both users (their roles are
    # cleaned up by delete_user).
    delete_user(app, username="test")  # type: ignore
    delete_user(app, username="test_no_permissions")  # type: ignore
class TestRoleEndpoint:
    """Shared client setup and role cleanup for the role API tests."""

    @pytest.fixture(autouse=True)
    def setup_attrs(self, configured_app) -> None:
        self.app = configured_app
        self.client = self.app.test_client()  # type:ignore

    def teardown_method(self):
        """
        Delete all roles except these ones.
        Test and TestNoPermissions are deleted by delete_user above
        """
        session = self.app.appbuilder.get_session
        keep = set(EXISTING_ROLES) | {'Test', 'TestNoPermissions'}
        for role in session.query(Role).filter(~Role.name.in_(keep)).all():
            delete_role(self.app, role.name)
class TestGetRoleEndpoint(TestRoleEndpoint):
    """GET /api/v1/roles/{name}."""

    def test_should_response_200(self):
        resp = self.client.get("/api/v1/roles/Admin", environ_overrides={'REMOTE_USER': "test"})
        assert resp.status_code == 200
        assert resp.json['name'] == "Admin"

    def test_should_respond_404(self):
        resp = self.client.get("/api/v1/roles/invalid-role", environ_overrides={'REMOTE_USER': "test"})
        assert resp.status_code == 404
        expected = {
            'detail': "The Role with name `invalid-role` was not found",
            'status': 404,
            'title': 'Role not found',
            'type': EXCEPTIONS_LINK_MAP[404],
        }
        assert resp.json == expected

    def test_should_raises_401_unauthenticated(self):
        assert_401(self.client.get("/api/v1/roles/Admin"))

    def test_should_raise_403_forbidden(self):
        resp = self.client.get(
            "/api/v1/roles/Admin", environ_overrides={'REMOTE_USER': "test_no_permissions"}
        )
        assert resp.status_code == 403
class TestGetRolesEndpoint(TestRoleEndpoint):
    """GET /api/v1/roles (list)."""

    def test_should_response_200(self):
        resp = self.client.get("/api/v1/roles", environ_overrides={'REMOTE_USER': "test"})
        assert resp.status_code == 200
        # All builtin roles plus the two created by the configured_app fixture.
        expected_roles = set(EXISTING_ROLES) | {'Test', 'TestNoPermissions'}
        assert resp.json['total_entries'] == len(expected_roles)
        assert {role['name'] for role in resp.json['roles']} == expected_roles

    def test_should_raises_401_unauthenticated(self):
        assert_401(self.client.get("/api/v1/roles"))

    def test_should_raises_400_for_invalid_order_by(self):
        resp = self.client.get(
            "/api/v1/roles?order_by=invalid", environ_overrides={'REMOTE_USER': "test"}
        )
        assert resp.status_code == 400
        assert resp.json['detail'] == (
            "Ordering with 'invalid' is disallowed or the attribute does not exist on the model"
        )

    def test_should_raise_403_forbidden(self):
        resp = self.client.get("/api/v1/roles", environ_overrides={'REMOTE_USER': "test_no_permissions"})
        assert resp.status_code == 403
class TestGetRolesEndpointPaginationandFilter(TestRoleEndpoint):
    """Pagination of GET /api/v1/roles via limit/offset query parameters."""
    # Roles are returned sorted by name: Admin, Op, Public, Test,
    # TestNoPermissions, User, Viewer.
    @parameterized.expand(
        [
            ("/api/v1/roles?limit=1", ['Admin']),
            ("/api/v1/roles?limit=2", ['Admin', "Op"]),
            (
                "/api/v1/roles?offset=1",
                ['Op', 'Public', 'Test', 'TestNoPermissions', 'User', 'Viewer'],
            ),
            (
                "/api/v1/roles?offset=0",
                ['Admin', 'Op', 'Public', 'Test', 'TestNoPermissions', 'User', 'Viewer'],
            ),
            ("/api/v1/roles?limit=1&offset=2", ["Public"]),
            ("/api/v1/roles?limit=1&offset=1", ["Op"]),
            (
                "/api/v1/roles?limit=2&offset=2",
                ['Public', 'Test'],
            ),
        ]
    )
    def test_can_handle_limit_and_offset(self, url, expected_roles):
        response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
        assert response.status_code == 200
        existing_roles = set(EXISTING_ROLES)
        existing_roles.update(['Test', 'TestNoPermissions'])
        # total_entries reflects the full count regardless of pagination
        assert response.json['total_entries'] == len(existing_roles)
        roles = [role['name'] for role in response.json['roles'] if role]
        assert roles == expected_roles
class TestGetPermissionsEndpoint(TestRoleEndpoint):
    """GET /api/v1/permissions."""

    def test_should_response_200(self):
        resp = self.client.get("/api/v1/permissions", environ_overrides={'REMOTE_USER': "test"})
        assert resp.status_code == 200
        # The endpoint must mirror the action names known to the security manager.
        expected_actions = {item[0] for item in self.app.appbuilder.sm.get_all_permissions() if item}
        assert resp.json['total_entries'] == len(expected_actions)
        assert {perm['name'] for perm in resp.json['actions']} == expected_actions

    def test_should_raises_401_unauthenticated(self):
        assert_401(self.client.get("/api/v1/permissions"))

    def test_should_raise_403_forbidden(self):
        resp = self.client.get(
            "/api/v1/permissions", environ_overrides={'REMOTE_USER': "test_no_permissions"}
        )
        assert resp.status_code == 403
class TestPostRole(TestRoleEndpoint):
    """POST /api/v1/roles."""
    def test_post_should_respond_200(self):
        payload = {
            'name': 'Test2',
            'actions': [{'resource': {'name': 'Connections'}, 'action': {'name': 'can_create'}}],
        }
        response = self.client.post("/api/v1/roles", json=payload, environ_overrides={'REMOTE_USER': "test"})
        assert response.status_code == 200
        # The role must actually exist in the security manager afterwards.
        role = self.app.appbuilder.sm.find_role('Test2')
        assert role is not None
    # Each case: (invalid payload, expected error detail string).
    @parameterized.expand(
        [
            (
                {
                    'actions': [{'resource': {'name': 'Connections'}, 'action': {'name': 'can_create'}}],
                },
                "{'name': ['Missing data for required field.']}",
            ),
            (
                {
                    'name': "TestRole",
                    'actionss': [
                        {
                            'resource': {'name': 'Connections'},  # actionss not correct
                            'action': {'name': 'can_create'},
                        }
                    ],
                },
                "{'actionss': ['Unknown field.']}",
            ),
            (
                {
                    'name': "TestRole",
                    'actions': [
                        {
                            'resources': {'name': 'Connections'},  # resources is invalid, should be resource
                            'action': {'name': 'can_create'},
                        }
                    ],
                },
                "{'actions': {0: {'resources': ['Unknown field.']}}}",
            ),
            (
                {
                    'name': "TestRole",
                    'actions': [
                        {'resource': {'name': 'Connections'}, 'actions': {'name': 'can_create'}}
                    ],  # actions is invalid, should be action
                },
                "{'actions': {0: {'actions': ['Unknown field.']}}}",
            ),
            (
                {
                    'name': "TestRole",
                    'actions': [
                        {
                            'resource': {'name': 'FooBars'},  # FooBars is not a resource
                            'action': {'name': 'can_create'},
                        }
                    ],
                },
                "The specified resource: 'FooBars' was not found",
            ),
            (
                {
                    'name': "TestRole",
                    'actions': [
                        {'resource': {'name': 'Connections'}, 'action': {'name': 'can_amend'}}
                    ],  # can_amend is not an action
                },
                "The specified action: 'can_amend' was not found",
            ),
        ]
    )
    def test_post_should_respond_400_for_invalid_payload(self, payload, error_message):
        response = self.client.post("/api/v1/roles", json=payload, environ_overrides={'REMOTE_USER': "test"})
        assert response.status_code == 400
        assert response.json == {
            'detail': error_message,
            'status': 400,
            'title': 'Bad Request',
            'type': EXCEPTIONS_LINK_MAP[400],
        }
    def test_post_should_respond_409_already_exist(self):
        # "Test" already exists (created by the configured_app fixture).
        payload = {
            'name': 'Test',
            'actions': [{'resource': {'name': 'Connections'}, 'action': {'name': 'can_create'}}],
        }
        response = self.client.post("/api/v1/roles", json=payload, environ_overrides={'REMOTE_USER': "test"})
        assert response.status_code == 409
        assert response.json == {
            'detail': "Role with name `Test` already exist. Please update with patch endpoint",
            'status': 409,
            'title': 'Conflict',
            'type': EXCEPTIONS_LINK_MAP[409],
        }
    def test_should_raises_401_unauthenticated(self):
        response = self.client.post(
            "/api/v1/roles",
            json={
                'name': 'Test2',
                'actions': [{'resource': {'name': 'Connections'}, 'action': {'name': 'can_create'}}],
            },
        )
        assert_401(response)
    def test_should_raise_403_forbidden(self):
        response = self.client.post(
            "/api/v1/roles",
            json={
                "name": "mytest2",
                "actions": [{"resource": {"name": "Connections"}, "action": {"name": "can_create"}}],
            },
            environ_overrides={'REMOTE_USER': "test_no_permissions"},
        )
        assert response.status_code == 403
class TestDeleteRole(TestRoleEndpoint):
    """DELETE /api/v1/roles/{name}."""

    def test_delete_should_respond_204(self, session):
        role = create_role(self.app, "mytestrole")
        resp = self.client.delete(f"/api/v1/roles/{role.name}", environ_overrides={'REMOTE_USER': "test"})
        assert resp.status_code == 204
        # The role must be gone from the database as well.
        remaining = session.query(Role).filter(Role.name == role.name).all()
        assert remaining == []

    def test_delete_should_respond_404(self):
        resp = self.client.delete(
            "/api/v1/roles/invalidrolename", environ_overrides={'REMOTE_USER': "test"}
        )
        assert resp.status_code == 404
        expected = {
            'detail': "The Role with name `invalidrolename` was not found",
            'status': 404,
            'title': 'Role not found',
            'type': EXCEPTIONS_LINK_MAP[404],
        }
        assert resp.json == expected

    def test_should_raises_401_unauthenticated(self):
        assert_401(self.client.delete("/api/v1/roles/test"))

    def test_should_raise_403_forbidden(self):
        resp = self.client.delete(
            "/api/v1/roles/test", environ_overrides={'REMOTE_USER': "test_no_permissions"}
        )
        assert resp.status_code == 403
class TestPatchRole(TestRoleEndpoint):
    """PATCH /api/v1/roles/{name}."""
    # Each case: (payload, expected name, expected actions after patch).
    @parameterized.expand(
        [
            ({"name": "mytest"}, "mytest", []),
            (
                {
                    "name": "mytest2",
                    "actions": [{"resource": {"name": "Connections"}, "action": {"name": "can_create"}}],
                },
                "mytest2",
                [{"resource": {"name": "Connections"}, "action": {"name": "can_create"}}],
            ),
        ]
    )
    def test_patch_should_respond_200(self, payload, expected_name, expected_actions):
        role = create_role(self.app, 'mytestrole')
        response = self.client.patch(
            f"/api/v1/roles/{role.name}", json=payload, environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 200
        assert response.json['name'] == expected_name
        assert response.json["actions"] == expected_actions
    # Each case: (update_mask query string, payload, expected name,
    # expected actions) - fields outside the mask must be ignored.
    @parameterized.expand(
        [
            (
                "?update_mask=name",
                {
                    "name": "mytest2",
                    "actions": [{"resource": {"name": "Connections"}, "action": {"name": "can_create"}}],
                },
                "mytest2",
                [],
            ),
            (
                "?update_mask=name, actions",  # both name and actions in update mask
                {
                    "name": "mytest2",
                    "actions": [{"resource": {"name": "Connections"}, "action": {"name": "can_create"}}],
                },
                "mytest2",
                [{"resource": {"name": "Connections"}, "action": {"name": "can_create"}}],
            ),
        ]
    )
    def test_patch_should_respond_200_with_update_mask(
        self, update_mask, payload, expected_name, expected_actions
    ):
        role = create_role(self.app, "mytestrole")
        assert role.permissions == []
        response = self.client.patch(
            f"/api/v1/roles/{role.name}{update_mask}",
            json=payload,
            environ_overrides={'REMOTE_USER': "test"},
        )
        assert response.status_code == 200
        assert response.json['name'] == expected_name
        assert response.json['actions'] == expected_actions
    def test_patch_should_respond_400_for_invalid_fields_in_update_mask(self):
        role = create_role(self.app, "mytestrole")
        payload = {"name": "testme"}
        response = self.client.patch(
            f"/api/v1/roles/{role.name}?update_mask=invalid_name",
            json=payload,
            environ_overrides={'REMOTE_USER': "test"},
        )
        assert response.status_code == 400
        assert response.json['detail'] == "'invalid_name' in update_mask is unknown"
    # Each case: (invalid payload, expected error detail string).
    @parameterized.expand(
        [
            (
                {
                    "name": "testme",
                    "permissions": [  # Using permissions instead of actions should raise
                        {"resource": {"name": "Connections"}, "action": {"name": "can_create"}}
                    ],
                },
                "{'permissions': ['Unknown field.']}",
            ),
            (
                {
                    "name": "testme",
                    "actions": [
                        {
                            "view_menu": {"name": "Connections"},  # Using view_menu instead of resource
                            "action": {"name": "can_create"},
                        }
                    ],
                },
                "{'actions': {0: {'view_menu': ['Unknown field.']}}}",
            ),
            (
                {
                    "name": "testme",
                    "actions": [
                        {
                            "resource": {"name": "FooBars"},  # Using wrong resource name
                            "action": {"name": "can_create"},
                        }
                    ],
                },
                "The specified resource: 'FooBars' was not found",
            ),
            (
                {
                    "name": "testme",
                    "actions": [
                        {
                            "resource": {"name": "Connections"},  # Using wrong action name
                            "action": {"name": "can_invalid"},
                        }
                    ],
                },
                "The specified action: 'can_invalid' was not found",
            ),
        ]
    )
    def test_patch_should_respond_400_for_invalid_update(self, payload, expected_error):
        role = create_role(self.app, "mytestrole")
        response = self.client.patch(
            f"/api/v1/roles/{role.name}",
            json=payload,
            environ_overrides={'REMOTE_USER': "test"},
        )
        assert response.status_code == 400
        assert response.json['detail'] == expected_error
    def test_should_raises_401_unauthenticated(self):
        response = self.client.patch(
            "/api/v1/roles/test",
            json={
                "name": "mytest2",
                "actions": [{"resource": {"name": "Connections"}, "action": {"name": "can_create"}}],
            },
        )
        assert_401(response)
    def test_should_raise_403_forbidden(self):
        response = self.client.patch(
            "/api/v1/roles/test",
            json={
                "name": "mytest2",
                "actions": [{"resource": {"name": "Connections"}, "action": {"name": "can_create"}}],
            },
            environ_overrides={'REMOTE_USER': "test_no_permissions"},
        )
        assert response.status_code == 403
| {
"content_hash": "4564ba8dc99b0f9e4dc99cefa5027a4e",
"timestamp": "",
"source": "github",
"line_count": 467,
"max_line_length": 110,
"avg_line_length": 38.143468950749465,
"alnum_prop": 0.5104137427721327,
"repo_name": "sekikn/incubator-airflow",
"id": "0d41c927ac69e8e7faabe0876ef17dfbf69c6f33",
"size": "18599",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/api_connexion/endpoints/test_role_and_permission_endpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
import sys
import numpy as np
from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
from numpy import asarray, dot, vdot
import scipy.sparse.linalg
import scipy.sparse
from scipy.linalg import get_blas_funcs
import inspect
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
from ._linesearch import scalar_search_wolfe1, scalar_search_armijo
# Public API: the solver front-ends generated from the Jacobian
# approximations defined in this module.
__all__ = [
    'broyden1', 'broyden2', 'anderson', 'linearmixing',
    'diagbroyden', 'excitingmixing', 'newton_krylov']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
    """Raised when `nonlin_solve` fails to find a solution within `maxiter`."""
    pass
def maxnorm(x):
    """Return the maximum (infinity) norm of `x`."""
    return np.max(np.absolute(x))
def _as_inexact(x):
    """Return `x` as an array, of either floats or complex floats.

    Integer/bool inputs are promoted to float64; float and complex inputs
    are returned unchanged (dtype preserved).
    """
    x = asarray(x)
    if not np.issubdtype(x.dtype, np.inexact):
        # np.float64 instead of the np.float_ alias, which was deprecated
        # and removed in NumPy 2.0 (same dtype, forward-compatible).
        return asarray(x, dtype=np.float64)
    return x
def _array_like(x, x0):
    """Return ndarray `x` as same array subclass and shape as `x0`"""
    reshaped = np.reshape(x, np.shape(x0))
    # Prefer x0's own wrapper so subclasses (e.g. np.matrix) are preserved;
    # fall back to the plain ndarray wrapper otherwise.
    wrap = getattr(x0, '__array_wrap__', reshaped.__array_wrap__)
    return wrap(reshaped)
def _safe_norm(v):
    # Report +inf for any non-finite entry rather than letting `norm`
    # propagate NaNs into the line-search comparisons.
    if np.isfinite(v).all():
        return norm(v)
    return np.array(np.inf)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
xin : array_like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : ndarray
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
    """Interpolate the shared `_doc_parts` fragments into `obj`'s docstring."""
    doc = obj.__doc__
    if doc:
        obj.__doc__ = doc % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
                 maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
                 tol_norm=None, line_search='armijo', callback=None,
                 full_output=False, raise_exception=True):
    """
    Find a root of a function, in a way suitable for large-scale problems.

    Parameters
    ----------
    %(params_basic)s
    jacobian : Jacobian
        A Jacobian approximation: `Jacobian` object or something that
        `asjacobian` can transform to one. Alternatively, a string specifying
        which of the builtin Jacobian approximations to use:

            krylov, broyden1, broyden2, anderson
            diagbroyden, linearmixing, excitingmixing

    %(params_extra)s
    full_output : bool
        If true, returns a dictionary `info` containing convergence
        information.
    raise_exception : bool
        If True, a `NoConvergence` exception is raise if no solution is found.

    See Also
    --------
    asjacobian, Jacobian

    Notes
    -----
    This algorithm implements the inexact Newton method, with
    backtracking or full line searches. Several Jacobian
    approximations are available, including Krylov and Quasi-Newton
    methods.

    References
    ----------
    .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
       Equations\". Society for Industrial and Applied Mathematics. (1995)
       https://archive.siam.org/books/kelley/fr16/

    """
    # Can't use default parameters because it's being explicitly passed as None
    # from the calling function, so we need to set it here.
    tol_norm = maxnorm if tol_norm is None else tol_norm
    condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
                                     x_tol=x_tol, x_rtol=x_rtol,
                                     iter=iter, norm=tol_norm)
    # Work on flattened float/complex arrays; reshape back on return.
    x0 = _as_inexact(x0)
    func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
    x = x0.flatten()

    # dx starts at inf so the first termination check cannot pass on x_tol.
    dx = np.full_like(x, np.inf)
    Fx = func(x)
    Fx_norm = norm(Fx)

    jacobian = asjacobian(jacobian)
    jacobian.setup(x.copy(), Fx, func)

    if maxiter is None:
        if iter is not None:
            maxiter = iter + 1
        else:
            maxiter = 100*(x.size+1)

    # Accept the legacy boolean forms of `line_search`.
    if line_search is True:
        line_search = 'armijo'
    elif line_search is False:
        line_search = None

    if line_search not in (None, 'armijo', 'wolfe'):
        raise ValueError("Invalid line search")

    # Solver tolerance selection (forcing-term parameters, Eisenstat-Walker
    # style: eta controls how accurately the Jacobian solve is done).
    gamma = 0.9
    eta_max = 0.9999
    eta_treshold = 0.1
    eta = 1e-3

    for n in range(maxiter):
        status = condition.check(Fx, x, dx)
        if status:
            break

        # The tolerance, as computed for scipy.sparse.linalg.* routines
        tol = min(eta, eta*Fx_norm)
        dx = -jacobian.solve(Fx, tol=tol)

        if norm(dx) == 0:
            raise ValueError("Jacobian inversion yielded zero vector. "
                             "This indicates a bug in the Jacobian "
                             "approximation.")

        # Line search, or Newton step
        if line_search:
            s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
                                                        line_search)
        else:
            s = 1.0
            x = x + dx
            Fx = func(x)
            Fx_norm_new = norm(Fx)

        jacobian.update(x.copy(), Fx)

        if callback:
            callback(x, Fx)

        # Adjust forcing parameters for inexact methods
        eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
        if gamma * eta**2 < eta_treshold:
            eta = min(eta_max, eta_A)
        else:
            # Safeguard: don't let eta drop faster than the residual shrinks.
            eta = min(eta_max, max(eta_A, gamma*eta**2))

        Fx_norm = Fx_norm_new

        # Print status
        if verbose:
            sys.stdout.write("%d:  |F(x)| = %g; step %g\n" % (
                n, tol_norm(Fx), s))
            sys.stdout.flush()
    else:
        # Loop exhausted without the termination condition triggering.
        if raise_exception:
            raise NoConvergence(_array_like(x, x0))
        else:
            status = 2

    if full_output:
        info = {'nit': condition.iteration,
                'fun': Fx,
                'status': status,
                'success': status == 1,
                'message': {1: 'A solution was found at the specified '
                               'tolerance.',
                            2: 'The maximum number of iterations allowed '
                               'has been reached.'
                            }[status]
                }
        return _array_like(x, x0), info
    else:
        return _array_like(x, x0)

# Interpolate the shared parameter documentation into the docstring above.
_set_doc(nonlin_solve)
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
                        smin=1e-2):
    # One-element lists cache the most recent evaluation of phi so the
    # residual at the accepted step is not recomputed after the search.
    tmp_s = [0]
    tmp_Fx = [Fx]
    tmp_phi = [norm(Fx)**2]
    # Scale for the finite-difference step in derphi below.
    s_norm = norm(x) / norm(dx)

    def phi(s, store=True):
        # Squared residual norm along the search direction; phi(s) = |F(x+s*dx)|^2.
        if s == tmp_s[0]:
            return tmp_phi[0]
        xt = x + s*dx
        v = func(xt)
        p = _safe_norm(v)**2
        if store:
            tmp_s[0] = s
            tmp_phi[0] = p
            tmp_Fx[0] = v
        return p

    def derphi(s):
        # Forward finite-difference approximation of phi'(s).
        ds = (abs(s) + s_norm + 1) * rdiff
        return (phi(s+ds, store=False) - phi(s)) / ds

    if search_type == 'wolfe':
        s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
                                             xtol=1e-2, amin=smin)
    elif search_type == 'armijo':
        s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
                                       amin=smin)

    if s is None:
        # XXX: No suitable step length found. Take the full Newton step,
        #      and hope for the best.
        s = 1.0

    x = x + s*dx
    # Reuse the cached residual when the accepted step was the last evaluated.
    if s == tmp_s[0]:
        Fx = tmp_Fx[0]
    else:
        Fx = func(x)
    Fx_norm = norm(Fx)

    return s, x, Fx, Fx_norm
class TerminationCondition:
    """
    Termination condition for an iteration. It is terminated if

    - |F| < f_rtol*|F_0|, AND
    - |F| < f_tol

    AND

    - |dx| < x_rtol*|x|, AND
    - |dx| < x_tol

    `check` returns 0 (continue), 1 (converged) or 2 (iteration limit hit).
    """
    def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
                 iter=None, norm=maxnorm):
        # Default absolute residual tolerance ~ eps**(1/3); np.float64
        # instead of the np.float_ alias removed in NumPy 2.0 (same dtype).
        if f_tol is None:
            f_tol = np.finfo(np.float64).eps ** (1./3)
        # Unspecified tolerances are disabled by setting them to infinity,
        # so the corresponding comparisons below always pass.
        if f_rtol is None:
            f_rtol = np.inf
        if x_tol is None:
            x_tol = np.inf
        if x_rtol is None:
            x_rtol = np.inf

        self.x_tol = x_tol
        self.x_rtol = x_rtol
        self.f_tol = f_tol
        self.f_rtol = f_rtol

        self.norm = norm
        self.iter = iter

        # |F(x0)|, recorded on the first call to `check` (for f_rtol).
        self.f0_norm = None
        self.iteration = 0

    def check(self, f, x, dx):
        """Return a nonzero status if the iteration should terminate."""
        self.iteration += 1
        f_norm = self.norm(f)
        x_norm = self.norm(x)
        dx_norm = self.norm(dx)

        if self.f0_norm is None:
            self.f0_norm = f_norm

        # Exact root: converged regardless of tolerances.
        if f_norm == 0:
            return 1

        if self.iter is not None:
            # backwards compatibility with SciPy 0.6.0
            return 2 * (self.iteration > self.iter)

        # NB: condition must succeed for rtol=inf even if norm == 0
        return int((f_norm <= self.f_tol
                    and f_norm/self.f_rtol <= self.f0_norm)
                   and (dx_norm <= self.x_tol
                        and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian:
    """
    Common interface for Jacobians and Jacobian approximations.

    Subclasses (or keyword arguments) must provide ``solve`` (returning
    ``J^-1 v``) and ``update``; the remaining operations are optional and
    are useful for algorithms, such as trust-region methods, that also need
    products with the Jacobian or its adjoint.

    Methods
    -------
    solve
        Returns J^-1 * v
    update
        Updates Jacobian to point `x` (where the function has residual `Fx`)
    matvec : optional
        Returns J * v
    rmatvec : optional
        Returns A^H * v
    rsolve : optional
        Returns A^-H * v
    matmat : optional
        Returns A * V, where V is a dense matrix with dimensions (N,K).
    todense : optional
        Form the dense Jacobian matrix. Necessary for dense trust region
        algorithms, and useful for testing.

    Attributes
    ----------
    shape
        Matrix dimensions (M, N)
    dtype
        Data type of the matrix.
    func : callable, optional
        Function the Jacobian corresponds to
    """

    def __init__(self, **kw):
        allowed = ("solve", "update", "matvec", "rmatvec", "rsolve",
                   "matmat", "todense", "shape", "dtype")
        for key, val in kw.items():
            if key not in allowed:
                raise ValueError("Unknown keyword argument %s" % key)
            # None means "not provided"; don't shadow class-level defaults.
            if val is not None:
                setattr(self, key, val)
        # Expose a dense view through np.asarray() when one is available.
        if hasattr(self, 'todense'):
            self.__array__ = lambda: self.todense()

    def aspreconditioner(self):
        """Return an operator applying J^-1, usable as a preconditioner."""
        return InverseJacobian(self)

    def solve(self, v, tol=0):
        """Return ``J^-1 v``; must be supplied by subclass or keyword."""
        raise NotImplementedError

    def update(self, x, F):
        """Update the approximation to point `x` with residual `F`."""
        pass

    def setup(self, x, F, func):
        """Record problem metadata before the first nonlinear iteration."""
        self.func = func
        self.shape = (F.size, x.size)
        self.dtype = F.dtype
        if self.__class__.setup is Jacobian.setup:
            # Call on the first point unless overridden
            self.update(x, F)
class InverseJacobian:
    """Adapter presenting the *inverse* of a `Jacobian` as a linear operator.

    Applying this operator (``matvec``) solves with the wrapped Jacobian,
    which makes an (adaptive) inverse Jacobian directly usable as a
    preconditioner for Krylov methods.
    """
    def __init__(self, jacobian):
        self.jacobian = jacobian
        # J^-1 * v is evaluated by solving with the wrapped Jacobian.
        self.matvec = jacobian.solve
        self.update = jacobian.update
        if hasattr(jacobian, 'rsolve'):
            # Adjoint product J^-H * v, when the wrapped object supports it.
            self.rmatvec = jacobian.rsolve
        if hasattr(jacobian, 'setup'):
            self.setup = jacobian.setup

    @property
    def shape(self):
        # Delegate to the wrapped Jacobian.
        return self.jacobian.shape

    @property
    def dtype(self):
        # Delegate to the wrapped Jacobian.
        return self.jacobian.dtype
def asjacobian(J):
    """
    Convert given object to one suitable for use as a Jacobian.
    Accepted inputs: a `Jacobian` instance or subclass, a dense square
    array, a square sparse matrix, a duck-typed operator with
    shape/dtype/solve, a callable ``J(x)`` returning the Jacobian matrix,
    or the string name of a built-in approximation (e.g. ``'broyden1'``).
    """
    spsolve = scipy.sparse.linalg.spsolve
    if isinstance(J, Jacobian):
        # Already a Jacobian -- pass through unchanged.
        return J
    elif inspect.isclass(J) and issubclass(J, Jacobian):
        # A Jacobian *class*: instantiate it with default parameters.
        return J()
    elif isinstance(J, np.ndarray):
        # Dense array: must be (promotable to) a square 2-D matrix.
        if J.ndim > 2:
            raise ValueError('array must have rank <= 2')
        J = np.atleast_2d(np.asarray(J))
        if J.shape[0] != J.shape[1]:
            raise ValueError('array must be square')
        return Jacobian(matvec=lambda v: dot(J, v),
                        rmatvec=lambda v: dot(J.conj().T, v),
                        solve=lambda v: solve(J, v),
                        rsolve=lambda v: solve(J.conj().T, v),
                        dtype=J.dtype, shape=J.shape)
    elif scipy.sparse.isspmatrix(J):
        # Sparse matrix: same shape requirement, solved via spsolve.
        if J.shape[0] != J.shape[1]:
            raise ValueError('matrix must be square')
        return Jacobian(matvec=lambda v: J*v,
                        rmatvec=lambda v: J.conj().T * v,
                        solve=lambda v: spsolve(J, v),
                        rsolve=lambda v: spsolve(J.conj().T, v),
                        dtype=J.dtype, shape=J.shape)
    elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
        # Duck-typed operator: pick up whatever optional methods it exposes.
        return Jacobian(matvec=getattr(J, 'matvec'),
                        rmatvec=getattr(J, 'rmatvec'),
                        solve=J.solve,
                        rsolve=getattr(J, 'rsolve'),
                        update=getattr(J, 'update'),
                        setup=getattr(J, 'setup'),
                        dtype=J.dtype,
                        shape=J.shape)
    elif callable(J):
        # Assume it's a function J(x) that returns the Jacobian
        class Jac(Jacobian):
            def update(self, x, F):
                # Only remember the point; the matrix is formed on demand.
                self.x = x
            def solve(self, v, tol=0):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return solve(m, v)
                elif scipy.sparse.isspmatrix(m):
                    return spsolve(m, v)
                else:
                    raise ValueError("Unknown matrix type")
            def matvec(self, v):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return dot(m, v)
                elif scipy.sparse.isspmatrix(m):
                    return m*v
                else:
                    raise ValueError("Unknown matrix type")
            def rsolve(self, v, tol=0):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return solve(m.conj().T, v)
                elif scipy.sparse.isspmatrix(m):
                    return spsolve(m.conj().T, v)
                else:
                    raise ValueError("Unknown matrix type")
            def rmatvec(self, v):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return dot(m.conj().T, v)
                elif scipy.sparse.isspmatrix(m):
                    return m.conj().T * v
                else:
                    raise ValueError("Unknown matrix type")
        return Jac()
    elif isinstance(J, str):
        # Name of a built-in approximation, e.g. 'broyden1' or 'krylov'.
        return dict(broyden1=BroydenFirst,
                    broyden2=BroydenSecond,
                    anderson=Anderson,
                    diagbroyden=DiagBroyden,
                    linearmixing=LinearMixing,
                    excitingmixing=ExcitingMixing,
                    krylov=KrylovJacobian)[J]()
    else:
        raise TypeError('Cannot convert object to a Jacobian')
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# Autoscale the initial Jacobian parameter
# unless we have already guessed the solution.
normf0 = norm(f0)
if normf0:
self.alpha = 0.5*max(norm(x0), 1) / normf0
else:
self.alpha = 1.0
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
class LowRankMatrix:
    r"""
    A matrix represented as
    .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
    However, if the rank of the matrix reaches the dimension of the vectors,
    full matrix representation will be used thereon.
    """
    def __init__(self, alpha, n, dtype):
        # alpha: scalar multiplying the identity; cs/ds hold the rank-1
        # term vectors; `collapsed`, once set, is a dense ndarray that
        # supersedes (alpha, cs, ds).
        self.alpha = alpha
        self.cs = []
        self.ds = []
        self.n = n
        self.dtype = dtype
        self.collapsed = None
    @staticmethod
    def _matvec(v, alpha, cs, ds):
        """Evaluate w = M v with M = alpha*I + sum(c_n d_n^H), via BLAS."""
        axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
                                          cs[:1] + [v])
        w = alpha * v
        for c, d in zip(cs, ds):
            # w += (d^H v) * c
            a = dotc(d, v)
            w = axpy(c, w, w.size, a)
        return w
    @staticmethod
    def _solve(v, alpha, cs, ds):
        """Evaluate w = M^-1 v"""
        if len(cs) == 0:
            # Pure scaled identity.
            return v/alpha
        # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
        axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
        c0 = cs[0]
        A = alpha * np.identity(len(cs), dtype=c0.dtype)
        for i, d in enumerate(ds):
            for j, c in enumerate(cs):
                A[i,j] += dotc(d, c)
        q = np.zeros(len(cs), dtype=c0.dtype)
        for j, d in enumerate(ds):
            q[j] = dotc(d, v)
        q /= alpha
        q = solve(A, q)
        w = v/alpha
        for c, qc in zip(cs, q):
            w = axpy(c, w, w.size, -qc)
        return w
    def matvec(self, v):
        """Evaluate w = M v"""
        if self.collapsed is not None:
            return np.dot(self.collapsed, v)
        return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
    def rmatvec(self, v):
        """Evaluate w = M^H v"""
        if self.collapsed is not None:
            return np.dot(self.collapsed.T.conj(), v)
        # Adjoint: conjugate alpha and swap the roles of cs and ds.
        return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
    def solve(self, v, tol=0):
        """Evaluate w = M^-1 v"""
        if self.collapsed is not None:
            return solve(self.collapsed, v)
        return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
    def rsolve(self, v, tol=0):
        """Evaluate w = M^-H v"""
        if self.collapsed is not None:
            return solve(self.collapsed.T.conj(), v)
        return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
    def append(self, c, d):
        """Add the rank-1 term c d^H to the matrix."""
        if self.collapsed is not None:
            self.collapsed += c[:,None] * d[None,:].conj()
            return
        self.cs.append(c)
        self.ds.append(d)
        # Once the rank exceeds the dimension, dense storage is cheaper.
        if len(self.cs) > c.size:
            self.collapse()
    def __array__(self):
        """Form the dense representation (used by np.array and collapse)."""
        if self.collapsed is not None:
            return self.collapsed
        Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
        for c, d in zip(self.cs, self.ds):
            Gm += c[:,None]*d[None,:].conj()
        return Gm
    def collapse(self):
        """Collapse the low-rank matrix to a full-rank one."""
        self.collapsed = np.array(self)
        self.cs = None
        self.ds = None
        self.alpha = None
    def restart_reduce(self, rank):
        """
        Reduce the rank of the matrix by dropping all vectors.
        """
        if self.collapsed is not None:
            return
        assert rank > 0
        if len(self.cs) > rank:
            del self.cs[:]
            del self.ds[:]
    def simple_reduce(self, rank):
        """
        Reduce the rank of the matrix by dropping oldest vectors.
        """
        if self.collapsed is not None:
            return
        assert rank > 0
        while len(self.cs) > rank:
            del self.cs[0]
            del self.ds[0]
    def svd_reduce(self, max_rank, to_retain=None):
        """
        Reduce the rank of the matrix by retaining some SVD components.
        This corresponds to the \"Broyden Rank Reduction Inverse\"
        algorithm described in [1]_.
        Note that the SVD decomposition can be done by solving only a
        problem whose size is the effective rank of this matrix, which
        is viable even for large problems.
        Parameters
        ----------
        max_rank : int
            Maximum rank of this matrix after reduction.
        to_retain : int, optional
            Number of SVD components to retain when reduction is done
            (ie. rank > max_rank). Default is ``max_rank - 2``.
        References
        ----------
        .. [1] B.A. van der Rotten, PhD thesis,
           \"A limited memory Broyden method to solve high-dimensional
           systems of nonlinear equations\". Mathematisch Instituut,
           Universiteit Leiden, The Netherlands (2003).
           https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
        """
        if self.collapsed is not None:
            return
        p = max_rank
        if to_retain is not None:
            q = to_retain
        else:
            q = p - 2
        if self.cs:
            p = min(p, len(self.cs[0]))
        q = max(0, min(q, p-1))
        m = len(self.cs)
        if m < p:
            # nothing to do
            return
        # SVD of the effective rank-m factorization only: orthonormalize D,
        # rotate C accordingly, then keep the q leading components.
        C = np.array(self.cs).T
        D = np.array(self.ds).T
        D, R = qr(D, mode='economic')
        C = dot(C, R.T.conj())
        U, S, WH = svd(C, full_matrices=False)
        C = dot(C, inv(WH))
        D = dot(D, WH.T.conj())
        for k in range(q):
            self.cs[k] = C[:,k].copy()
            self.ds[k] = D[:,k].copy()
        del self.cs[q:]
        del self.ds[q:]
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is ``(-1/alpha)``.
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Takes an extra parameter, ``to_retain``, which determines the
number of SVD components to retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (i.e., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
    r"""
    Find a root of a function, using Broyden's first Jacobian approximation.
    This method is also known as \"Broyden's good method\".
    Parameters
    ----------
    %(params_basic)s
    %(broyden_params)s
    %(params_extra)s
    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method=='broyden1'`` in particular.
    Notes
    -----
    This algorithm implements the inverse Jacobian Quasi-Newton update
    .. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
    which corresponds to Broyden's first Jacobian update
    .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
    References
    ----------
    .. [1] B.A. van der Rotten, PhD thesis,
       \"A limited memory Broyden method to solve high-dimensional
       systems of nonlinear equations\". Mathematisch Instituut,
       Universiteit Leiden, The Netherlands (2003).
       https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
    Examples
    --------
    The following functions define a system of nonlinear equations
    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]
    A solution can be obtained as follows.
    >>> from scipy import optimize
    >>> sol = optimize.broyden1(fun, [0, 0])
    >>> sol
    array([0.84116396, 0.15883641])
    """
    def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
        GenericBroyden.__init__(self)
        self.alpha = alpha
        # Gm holds the *inverse* Jacobian approximation (a LowRankMatrix,
        # created in setup()).
        self.Gm = None
        if max_rank is None:
            max_rank = np.inf
        self.max_rank = max_rank
        # reduction_method may be 'name' or ('name', param1, ...).
        if isinstance(reduction_method, str):
            reduce_params = ()
        else:
            reduce_params = reduction_method[1:]
            reduction_method = reduction_method[0]
        # First parameter of every reduce method is the target rank.
        reduce_params = (max_rank - 1,) + reduce_params
        if reduction_method == 'svd':
            self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
        elif reduction_method == 'simple':
            self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
        elif reduction_method == 'restart':
            self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
        else:
            raise ValueError("Unknown rank reduction method '%s'" %
                             reduction_method)
    def setup(self, x, F, func):
        # Initial inverse Jacobian approximation: H = -alpha * I.
        GenericBroyden.setup(self, x, F, func)
        self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
    def todense(self):
        # Dense Jacobian = inverse of the stored inverse approximation.
        return inv(self.Gm)
    def solve(self, f, tol=0):
        r = self.Gm.matvec(f)
        if not np.isfinite(r).all():
            # singular; reset the Jacobian approximation
            self.setup(self.last_x, self.last_f, self.func)
            return self.Gm.matvec(f)
        return r
    def matvec(self, f):
        return self.Gm.solve(f)
    def rsolve(self, f, tol=0):
        return self.Gm.rmatvec(f)
    def rmatvec(self, f):
        return self.Gm.rsolve(f)
    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Rank-1 "good Broyden" update of the inverse Jacobian:
        # H += (dx - H df) (dx^H H) / (dx^H H df)
        self._reduce()  # reduce first to preserve secant condition
        v = self.Gm.rmatvec(dx)
        c = dx - self.Gm.matvec(df)
        d = v / vdot(df, v)
        self.Gm.append(c, d)
class BroydenSecond(BroydenFirst):
    """
    Find a root of a function, using Broyden\'s second Jacobian approximation.
    This method is also known as \"Broyden's bad method\".
    Parameters
    ----------
    %(params_basic)s
    %(broyden_params)s
    %(params_extra)s
    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method=='broyden2'`` in particular.
    Notes
    -----
    This algorithm implements the inverse Jacobian Quasi-Newton update
    .. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df)
    corresponding to Broyden's second method.
    References
    ----------
    .. [1] B.A. van der Rotten, PhD thesis,
       \"A limited memory Broyden method to solve high-dimensional
       systems of nonlinear equations\". Mathematisch Instituut,
       Universiteit Leiden, The Netherlands (2003).
       https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
    Examples
    --------
    The following functions define a system of nonlinear equations
    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]
    A solution can be obtained as follows.
    >>> from scipy import optimize
    >>> sol = optimize.broyden2(fun, [0, 0])
    >>> sol
    array([0.84116365, 0.15883529])
    """
    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Rank-1 "bad Broyden" update of the inverse Jacobian:
        # H += (dx - H df) df^H / (df^H df)
        self._reduce()  # reduce first to preserve secant condition
        v = df
        c = dx - self.Gm.matvec(df)
        d = v / df_norm**2
        self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
    """
    Find a root of a function, using (extended) Anderson mixing.
    The Jacobian is formed by for a 'best' solution in the space
    spanned by last `M` vectors. As a result, only a MxM matrix
    inversions and MxN multiplications are required. [Ey]_
    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        Initial guess for the Jacobian is (-1/alpha).
    M : float, optional
        Number of previous vectors to retain. Defaults to 5.
    w0 : float, optional
        Regularization parameter for numerical stability.
        Compared to unity, good values of the order of 0.01.
    %(params_extra)s
    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method=='anderson'`` in particular.
    References
    ----------
    .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
    Examples
    --------
    The following functions define a system of nonlinear equations
    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]
    A solution can be obtained as follows.
    >>> from scipy import optimize
    >>> sol = optimize.anderson(fun, [0, 0])
    >>> sol
    array([0.84116588, 0.15883789])
    """
    # Note:
    #
    # Anderson method maintains a rank M approximation of the inverse Jacobian,
    #
    #     J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
    #     A      = W + dF^H dF
    #     W      = w0^2 diag(dF^H dF)
    #
    # so that for w0 = 0 the secant condition applies for last M iterates, i.e.,
    #
    #     J^-1 df_j = dx_j
    #
    # for all j = 0 ... M-1.
    #
    # Moreover, (from Sherman-Morrison-Woodbury formula)
    #
    #     J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
    #     C   = (dX + alpha dF) A^-1
    #     b   = -1/alpha
    #
    # and after simplification
    #
    #     J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
    #
    def __init__(self, alpha=None, w0=0.01, M=5):
        GenericBroyden.__init__(self)
        self.alpha = alpha
        self.M = M
        # History of the last <= M increments (dx, df).
        self.dx = []
        self.df = []
        self.gamma = None
        self.w0 = w0
    def solve(self, f, tol=0):
        # Apply the approximate inverse Jacobian (see note above).
        dx = -self.alpha*f
        n = len(self.dx)
        if n == 0:
            return dx
        df_f = np.empty(n, dtype=f.dtype)
        for k in range(n):
            df_f[k] = vdot(self.df[k], f)
        try:
            # self.a is the regularized Gram matrix built in _update().
            gamma = solve(self.a, df_f)
        except LinAlgError:
            # singular; reset the Jacobian approximation
            del self.dx[:]
            del self.df[:]
            return dx
        for m in range(n):
            dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
        return dx
    def matvec(self, f):
        # Apply the approximate (forward) Jacobian (see note above).
        dx = -f/self.alpha
        n = len(self.dx)
        if n == 0:
            return dx
        df_f = np.empty(n, dtype=f.dtype)
        for k in range(n):
            df_f[k] = vdot(self.df[k], f)
        b = np.empty((n, n), dtype=f.dtype)
        for i in range(n):
            for j in range(n):
                b[i,j] = vdot(self.df[i], self.dx[j])
                if i == j and self.w0 != 0:
                    b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
        gamma = solve(b, df_f)
        for m in range(n):
            dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
        return dx
    def _update(self, x, f, dx, df, dx_norm, df_norm):
        if self.M == 0:
            return
        # Keep only the most recent M increments.
        self.dx.append(dx)
        self.df.append(df)
        while len(self.dx) > self.M:
            self.dx.pop(0)
            self.df.pop(0)
        # Rebuild the regularized Gram matrix A = W + dF^H dF, filling the
        # upper triangle and mirroring it (it is Hermitian).
        n = len(self.dx)
        a = np.zeros((n, n), dtype=f.dtype)
        for i in range(n):
            for j in range(i, n):
                if i == j:
                    wd = self.w0**2
                else:
                    wd = 0
                a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
        a += np.triu(a, 1).T.conj()
        self.a = a
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
    """
    Find a root of a function, using diagonal Broyden Jacobian approximation.
    The Jacobian approximation is derived from previous iterations, by
    retaining only the diagonal of Broyden matrices.
    .. warning::
       This algorithm may be useful for specific problems, but whether
       it will work may depend strongly on the problem.
    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        Initial guess for the Jacobian is (-1/alpha).
    %(params_extra)s
    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method=='diagbroyden'`` in particular.
    Examples
    --------
    The following functions define a system of nonlinear equations
    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]
    A solution can be obtained as follows.
    >>> from scipy import optimize
    >>> sol = optimize.diagbroyden(fun, [0, 0])
    >>> sol
    array([0.84116403, 0.15883384])
    """
    def __init__(self, alpha=None):
        GenericBroyden.__init__(self)
        self.alpha = alpha
    def setup(self, x, F, func):
        GenericBroyden.setup(self, x, F, func)
        # self.d holds the (negated) diagonal of the Jacobian: J = -diag(d).
        self.d = np.full((self.shape[0],), 1 / self.alpha, dtype=self.dtype)
    def solve(self, f, tol=0):
        # J^-1 f for a diagonal J = -diag(d).
        return -f / self.d
    def matvec(self, f):
        return -f * self.d
    def rsolve(self, f, tol=0):
        return -f / self.d.conj()
    def rmatvec(self, f):
        return -f * self.d.conj()
    def todense(self):
        return np.diag(-self.d)
    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Broyden update restricted to the diagonal.
        self.d -= (df + self.d*dx)*dx/dx_norm**2
class LinearMixing(GenericBroyden):
    """
    Find a root of a function, using a scalar Jacobian approximation.
    .. warning::
       This algorithm may be useful for specific problems, but whether
       it will work may depend strongly on the problem.
    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        The Jacobian approximation is (-1/alpha).
    %(params_extra)s
    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method=='linearmixing'`` in particular.
    """
    def __init__(self, alpha=None):
        GenericBroyden.__init__(self)
        self.alpha = alpha
    def solve(self, f, tol=0):
        # J^-1 f for the fixed scalar Jacobian J = -1/alpha * I.
        return -f*self.alpha
    def matvec(self, f):
        return -f/self.alpha
    def rsolve(self, f, tol=0):
        return -f*np.conj(self.alpha)
    def rmatvec(self, f):
        return -f/np.conj(self.alpha)
    def todense(self):
        return np.diag(np.full(self.shape[0], -1/self.alpha))
    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # The scalar approximation is never updated.
        pass
class ExcitingMixing(GenericBroyden):
    """
    Find a root of a function, using a tuned diagonal Jacobian approximation.
    The Jacobian matrix is diagonal and is tuned on each iteration.
    .. warning::
       This algorithm may be useful for specific problems, but whether
       it will work may depend strongly on the problem.
    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method=='excitingmixing'`` in particular.
    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        Initial Jacobian approximation is (-1/alpha).
    alphamax : float, optional
        The entries of the diagonal Jacobian are kept in the range
        ``[alpha, alphamax]``.
    %(params_extra)s
    """
    def __init__(self, alpha=None, alphamax=1.0):
        GenericBroyden.__init__(self)
        self.alpha = alpha
        self.alphamax = alphamax
        # Per-component mixing parameters; allocated in setup().
        self.beta = None
    def setup(self, x, F, func):
        GenericBroyden.setup(self, x, F, func)
        self.beta = np.full((self.shape[0],), self.alpha, dtype=self.dtype)
    def solve(self, f, tol=0):
        # J^-1 f for the diagonal Jacobian J = -diag(1/beta).
        return -f*self.beta
    def matvec(self, f):
        return -f/self.beta
    def rsolve(self, f, tol=0):
        return -f*self.beta.conj()
    def rmatvec(self, f):
        return -f/self.beta.conj()
    def todense(self):
        return np.diag(-1/self.beta)
    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Grow beta where the residual component kept its sign, reset it
        # where the sign flipped, and clip to [0, alphamax].
        incr = f*self.last_f > 0
        self.beta[incr] += self.alpha
        self.beta[~incr] = self.alpha
        np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
    r"""
    Find a root of a function, using Krylov approximation for inverse Jacobian.
    This method is suitable for solving large-scale problems.
    Parameters
    ----------
    %(params_basic)s
    rdiff : float, optional
        Relative step size to use in numerical differentiation.
    method : str or callable, optional
        Krylov method to use to approximate the Jacobian. Can be a string,
        or a function implementing the same interface as the iterative
        solvers in `scipy.sparse.linalg`. If a string, needs to be one of:
        ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``,
        ``'tfqmr'``.
        The default is `scipy.sparse.linalg.lgmres`.
    inner_maxiter : int, optional
        Parameter to pass to the "inner" Krylov solver: maximum number of
        iterations. Iteration will stop after maxiter steps even if the
        specified tolerance has not been achieved.
    inner_M : LinearOperator or InverseJacobian
        Preconditioner for the inner Krylov iteration.
        Note that you can use also inverse Jacobians as (adaptive)
        preconditioners. For example,
        >>> from scipy.optimize.nonlin import BroydenFirst, KrylovJacobian
        >>> from scipy.optimize.nonlin import InverseJacobian
        >>> jac = BroydenFirst()
        >>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))
        If the preconditioner has a method named 'update', it will be called
        as ``update(x, f)`` after each nonlinear step, with ``x`` giving
        the current point, and ``f`` the current function value.
    outer_k : int, optional
        Size of the subspace kept across LGMRES nonlinear iterations.
        See `scipy.sparse.linalg.lgmres` for details.
    inner_kwargs : kwargs
        Keyword parameters for the "inner" Krylov solver
        (defined with `method`). Parameter names must start with
        the `inner_` prefix which will be stripped before passing on
        the inner method. See, e.g., `scipy.sparse.linalg.gmres` for details.
    %(params_extra)s
    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method=='krylov'`` in particular.
    scipy.sparse.linalg.gmres
    scipy.sparse.linalg.lgmres
    Notes
    -----
    This function implements a Newton-Krylov solver. The basic idea is
    to compute the inverse of the Jacobian with an iterative Krylov
    method. These methods require only evaluating the Jacobian-vector
    products, which are conveniently approximated by a finite difference:
    .. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
    Due to the use of iterative matrix inverses, these methods can
    deal with large nonlinear problems.
    SciPy's `scipy.sparse.linalg` module offers a selection of Krylov
    solvers to choose from. The default here is `lgmres`, which is a
    variant of restarted GMRES iteration that reuses some of the
    information obtained in the previous Newton steps to invert
    Jacobians in subsequent steps.
    For a review on Newton-Krylov methods, see for example [1]_,
    and for the LGMRES sparse inverse method, see [2]_.
    References
    ----------
    .. [1] C. T. Kelley, Solving Nonlinear Equations with Newton's Method,
           SIAM, pp.57-83, 2003.
           :doi:`10.1137/1.9780898718898.ch3`
    .. [2] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004).
           :doi:`10.1016/j.jcp.2003.08.010`
    .. [3] A.H. Baker and E.R. Jessup and T. Manteuffel,
           SIAM J. Matrix Anal. Appl. 26, 962 (2005).
           :doi:`10.1137/S0895479803422014`
    Examples
    --------
    The following functions define a system of nonlinear equations
    >>> def fun(x):
    ...     return [x[0] + 0.5 * x[1] - 1.0,
    ...             0.5 * (x[1] - x[0]) ** 2]
    A solution can be obtained as follows.
    >>> from scipy import optimize
    >>> sol = optimize.newton_krylov(fun, [0, 0])
    >>> sol
    array([0.66731771, 0.66536458])
    """
    def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
                 inner_M=None, outer_k=10, **kw):
        self.preconditioner = inner_M
        self.rdiff = rdiff
        # Note that this retrieves one of the named functions, or otherwise
        # uses `method` as is (i.e., for a user-provided callable).
        self.method = dict(
            bicgstab=scipy.sparse.linalg.bicgstab,
            gmres=scipy.sparse.linalg.gmres,
            lgmres=scipy.sparse.linalg.lgmres,
            cgs=scipy.sparse.linalg.cgs,
            minres=scipy.sparse.linalg.minres,
            tfqmr=scipy.sparse.linalg.tfqmr,
            ).get(method, method)
        self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
        if self.method is scipy.sparse.linalg.gmres:
            # Replace GMRES's outer iteration with Newton steps
            self.method_kw['restart'] = inner_maxiter
            self.method_kw['maxiter'] = 1
            self.method_kw.setdefault('atol', 0)
        elif self.method in (scipy.sparse.linalg.gcrotmk,
                             scipy.sparse.linalg.bicgstab,
                             scipy.sparse.linalg.cgs):
            self.method_kw.setdefault('atol', 0)
        elif self.method is scipy.sparse.linalg.lgmres:
            self.method_kw['outer_k'] = outer_k
            # Replace LGMRES's outer iteration with Newton steps
            self.method_kw['maxiter'] = 1
            # Carry LGMRES's `outer_v` vectors across nonlinear iterations
            self.method_kw.setdefault('outer_v', [])
            self.method_kw.setdefault('prepend_outer_v', True)
            # But don't carry the corresponding Jacobian*v products, in case
            # the Jacobian changes a lot in the nonlinear step
            #
            # XXX: some trust-region inspired ideas might be more efficient...
            #      See e.g., Brown & Saad. But needs to be implemented separately
            #      since it's not an inexact Newton method.
            self.method_kw.setdefault('store_outer_Av', False)
            self.method_kw.setdefault('atol', 0)
        # Remaining keyword args are forwarded to the inner solver after
        # stripping the mandatory 'inner_' prefix.
        for key, value in kw.items():
            if not key.startswith('inner_'):
                raise ValueError("Unknown parameter %s" % key)
            self.method_kw[key[6:]] = value
    def _update_diff_step(self):
        # Finite-difference step, scaled to the magnitudes of x0 and f0.
        mx = abs(self.x0).max()
        mf = abs(self.f0).max()
        self.omega = self.rdiff * max(1, mx) / max(1, mf)
    def matvec(self, v):
        """Approximate J v by a forward finite difference of func."""
        nv = norm(v)
        if nv == 0:
            return 0*v
        sc = self.omega / nv
        r = (self.func(self.x0 + sc*v) - self.f0) / sc
        if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
            raise ValueError('Function returned non-finite results')
        return r
    def solve(self, rhs, tol=0):
        """Approximate J^-1 rhs with the configured inner Krylov solver."""
        # Pass `tol` through only if the caller did not fix it via kwargs.
        if 'tol' in self.method_kw:
            sol, info = self.method(self.op, rhs, **self.method_kw)
        else:
            sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
        return sol
    def update(self, x, f):
        self.x0 = x
        self.f0 = f
        self._update_diff_step()
        # Update also the preconditioner, if possible
        if self.preconditioner is not None:
            if hasattr(self.preconditioner, 'update'):
                self.preconditioner.update(x, f)
    def setup(self, x, f, func):
        Jacobian.setup(self, x, f, func)
        self.x0 = x
        self.f0 = f
        # Wrap self so the sparse solvers see a LinearOperator.
        self.op = scipy.sparse.linalg.aslinearoperator(self)
        if self.rdiff is None:
            self.rdiff = np.finfo(x.dtype).eps ** (1./2)
        self._update_diff_step()
        # Setup also the preconditioner, if possible
        if self.preconditioner is not None:
            if hasattr(self.preconditioner, 'setup'):
                self.preconditioner.setup(x, f, func)
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
    """
    Construct a solver wrapper with given name and Jacobian approx.
    It inspects the keyword arguments of ``jac.__init__``, and allows to
    use the same arguments in the wrapper function, in addition to the
    keyword arguments of `nonlin_solve`
    """
    signature = _getfullargspec(jac.__init__)
    args, varargs, varkw, defaults, kwonlyargs, kwdefaults, _ = signature
    # Pair each defaulted __init__ parameter with its default ('self' never
    # has a default and is thus excluded by the negative slice).
    kwargs = list(zip(args[-len(defaults):], defaults))
    kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
    if kw_str:
        kw_str = ", " + kw_str
    # Same parameters again, as pass-through 'name=name' arguments.
    kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
    if kwkw_str:
        kwkw_str = kwkw_str + ", "
    if kwonlyargs:
        raise ValueError('Unexpected signature %s' % signature)
    # Construct the wrapper function so that its keyword arguments
    # are visible in pydoc.help etc.
    wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
             f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
             tol_norm=None, line_search='armijo', callback=None, **kw):
    jac = %(jac)s(%(kwkw)s **kw)
    return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
                        f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
                        callback)
"""
    wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
                             kwkw=kwkw_str)
    # Execute the generated source in a copy of the module globals so the
    # wrapper can reach nonlin_solve and the Jacobian class.
    ns = {}
    ns.update(globals())
    exec(wrapper, ns)
    func = ns[name]
    func.__doc__ = jac.__doc__
    _set_doc(func)
    return func
# Public solver entry points: one thin wrapper around nonlin_solve per
# Jacobian approximation class defined above.
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
| {
"content_hash": "0afd6c22b29dff648ee790f42673daeb",
"timestamp": "",
"source": "github",
"line_count": 1562,
"max_line_length": 104,
"avg_line_length": 31.303457106274006,
"alnum_prop": 0.5429073952879581,
"repo_name": "perimosocordiae/scipy",
"id": "c334f81f2e32408fccb639f710c6d2a39f71d049",
"size": "48994",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "scipy/optimize/_nonlin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4620237"
},
{
"name": "C++",
"bytes": "959068"
},
{
"name": "Cython",
"bytes": "1059810"
},
{
"name": "Dockerfile",
"bytes": "16894"
},
{
"name": "Fortran",
"bytes": "5211680"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Meson",
"bytes": "143727"
},
{
"name": "Python",
"bytes": "15434780"
},
{
"name": "R",
"bytes": "3059"
},
{
"name": "Shell",
"bytes": "18009"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
def func():
    """Test that a variable defined in a finally clause does not trigger a false positive"""
    # pylint functional-test fixture: `variable` is bound before the first
    # yield, and rebound in the finally block before the second yield, so
    # used-before-assignment must NOT be reported here. The exact statement
    # layout is the thing under test -- do not "simplify" this generator.
    try:
        variable = 1
        yield variable
    finally:
        variable = 2
        yield variable
| {
"content_hash": "e51cab2887c067439072df2096af044c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 92,
"avg_line_length": 26.875,
"alnum_prop": 0.6,
"repo_name": "arju88nair/projectCulminate",
"id": "efc133c2b0171e502c2e3ebdd7aa6a4057955f97",
"size": "251",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "venv/lib/python3.5/site-packages/pylint/test/functional/used_before_assignment_488.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "365921"
},
{
"name": "C++",
"bytes": "237910"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "JavaScript",
"bytes": "6264"
},
{
"name": "Makefile",
"bytes": "90112"
},
{
"name": "Python",
"bytes": "15199371"
},
{
"name": "Shell",
"bytes": "17795"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import lowfat.validator
class Migration(migrations.Migration):
    # Auto-generated schema migration. It tightens three existing fields:
    #   * Blog.draft_url / HistoricalBlog.draft_url gain the
    #     lowfat.validator.online_document validator (no column change).
    #   * GeneralSentMail.sender gets an explicit on_delete=CASCADE while
    #     remaining nullable.
    # Field definitions must keep matching the current models; do not edit
    # by hand except to resolve conflicts.
    dependencies = [
        ('lowfat', '0096_auto_20170316_1015'),
    ]
    operations = [
        migrations.AlterField(
            model_name='blog',
            name='draft_url',
            field=models.CharField(max_length=120, validators=[lowfat.validator.online_document]),
        ),
        migrations.AlterField(
            model_name='generalsentmail',
            name='sender',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='historicalblog',
            name='draft_url',
            field=models.CharField(max_length=120, validators=[lowfat.validator.online_document]),
        ),
    ]
| {
"content_hash": "e9a399fa5416477b5c935c8e9b1cccf4",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 121,
"avg_line_length": 31,
"alnum_prop": 0.6326742976066597,
"repo_name": "softwaresaved/fat",
"id": "dbc42c58ee4cdaa1639a455585b32895d1912dcb",
"size": "1034",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lowfat/migrations/0097_auto_20170405_1415.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3202"
},
{
"name": "HTML",
"bytes": "38552"
},
{
"name": "JavaScript",
"bytes": "653"
},
{
"name": "Python",
"bytes": "235043"
},
{
"name": "Shell",
"bytes": "1346"
}
],
"symlink_target": ""
} |
import time
class ZmqTimerManager(object):
    """Tracks a collection of timer objects and computes poll intervals.

    Each registered timer must expose ``check()`` (fire if due) and
    ``get_next_call()`` (epoch seconds of its next due time).
    """

    def __init__(self):
        self.timers = []    # registered timer objects
        self.next_call = 0  # epoch seconds of the earliest pending callback

    def add_timer(self, timer):
        """Register a timer exposing check() and get_next_call()."""
        self.timers.append(timer)

    def check(self):
        """Give every timer a chance to fire once the earliest deadline passed."""
        if time.time() > self.next_call:
            for timer in self.timers:
                timer.check()

    def get_next_interval(self):
        """Return milliseconds until the next timer is due (minimum 1).

        Recomputes ``self.next_call`` from the registered timers when the
        previous deadline has already passed. Bug fix: with no timers
        registered this used to call ``min()`` on an empty sequence and
        raise ValueError; it now returns the minimum interval of 1 ms.
        """
        if time.time() >= self.next_call:
            if not self.timers:
                # Nothing scheduled: poll again shortly instead of crashing.
                return 1
            self.next_call = min(
                timer.get_next_call() for timer in self.timers)
        # Single clamped computation replaces the duplicated branches of the
        # original; a deadline already in the past yields the 1 ms floor.
        val = (self.next_call - time.time()) * 1000
        if val < 1:
            val = 1
        return val
class ZmqTimer(object):
    """A single recurring timer: runs ``callback`` once per ``interval`` seconds."""

    def __init__(self, interval, callback):
        self.interval = interval    # seconds between invocations
        self.callback = callback    # zero-argument callable
        self.last_call = 0          # epoch seconds of the last invocation

    def check(self):
        """Invoke the callback if at least ``interval`` seconds have elapsed."""
        due_at = self.last_call + self.interval
        if time.time() > due_at:
            self.callback()
            self.last_call = time.time()

    def get_next_call(self):
        """Return the epoch time at which the callback next becomes due."""
        return self.interval + self.last_call
| {
"content_hash": "645f17a61088cc76ad66df627c00f928",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 59,
"avg_line_length": 26.695652173913043,
"alnum_prop": 0.5171009771986971,
"repo_name": "kootenpv/brightml",
"id": "2d0a555abd4747eb5a9d448aa3ff285addb701f6",
"size": "1228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brightml/timer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "7476"
},
{
"name": "Makefile",
"bytes": "89"
},
{
"name": "Python",
"bytes": "51745"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
# Use Broadcom (BCM) channel numbering rather than physical board pins.
GPIO.setmode(GPIO.BCM)
# Suppress "channel already in use" warnings when the script is re-run.
GPIO.setwarnings(False)
# Configure BCM pin 24 as an output and drive it high
# (presumably switching on an attached LED -- confirm against the wiring).
GPIO.setup(24,GPIO.OUT)
GPIO.output(24,GPIO.HIGH)
| {
"content_hash": "89db6f2988d7b6d97c71e9400aa264ae",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 25,
"avg_line_length": 24.2,
"alnum_prop": 0.7851239669421488,
"repo_name": "goatgoose1142/MorseCodeTransmitter",
"id": "230a7d930ca9f9b4ead31f3ce731fb89c35cce16",
"size": "121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/LEDON.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2042"
},
{
"name": "HTML",
"bytes": "677"
},
{
"name": "JavaScript",
"bytes": "10055"
},
{
"name": "Python",
"bytes": "240"
}
],
"symlink_target": ""
} |
import unittest
from auto_tagify2 import AutoTagify
class AutoTagifyTestCase(unittest.TestCase):
    def testTextNotEmptyStrict(self):
        """Non-empty input in strict mode yields tags and linked markup."""
        tagger = AutoTagify()
        tagger.text = 'This is a test'
        tagger.css = 'taggable'
        self.assertEqual(tagger.tag_list(), ['test'])
        self.assertEqual(tagger.generate(), 'This is a <a href="/test" class="taggable">test</a> ')

    def testTextNotEmptyNotStrict(self):
        """Non-empty input in non-strict mode links looser word matches too."""
        tagger = AutoTagify()
        tagger.text = 'These are my tests'
        tagger.css = 'taggable'
        self.assertEqual(tagger.tag_list(), ['are', 'test'])
        self.assertEqual(
            tagger.generate(strict=False),
            'These <a href="/are" class="taggable">are</a> my <a href="/tests" class="taggable">tests</a> ')

    def testTextEmpty(self):
        """Generating with no input text produces an empty string."""
        tagger = AutoTagify()
        self.assertEqual(tagger.generate(), '')

    def testTagsNotEmpty(self):
        """tag_list() extracts every taggable word from the input."""
        tagger = AutoTagify()
        tagger.text = 'This is a test with other valid tags'
        expected_tags = ['test', 'other', 'valid', 'tag']
        self.assertEqual(tagger.tag_list(), expected_tags)
if __name__ == "__main__":
    # Allow running this test module directly: python auto_tagify2tests.py
    unittest.main()
| {
"content_hash": "6a7a5c365bf6a4026ddcbccf8ecc274c",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 121,
"avg_line_length": 35.075,
"alnum_prop": 0.5801853171774768,
"repo_name": "ednapiranha/auto-tagify",
"id": "c6462b80121490271dd92f05ae373c287286c349",
"size": "1403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auto_tagify2/auto_tagify2tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6997"
}
],
"symlink_target": ""
} |
import posixpath
import traceback
from future import Gettable, Future
from path_util import (
AssertIsDirectory, AssertIsValid, IsDirectory, IsValid, SplitParent,
ToDirectory)
class _BaseFileSystemException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
@classmethod
def RaiseInFuture(cls, message):
stack = traceback.format_stack()
def boom(): raise cls('%s. Creation stack:\n%s' % (message, ''.join(stack)))
return Future(delegate=Gettable(boom))
class FileNotFoundError(_BaseFileSystemException):
  '''Raised when a file isn't found for read or stat.
  '''
  def __init__(self, filename):
    super(FileNotFoundError, self).__init__(filename)
class FileSystemError(_BaseFileSystemException):
  '''Raised on when there are errors reading or statting files, such as a
  network timeout.
  '''
  def __init__(self, filename):
    super(FileSystemError, self).__init__(filename)
class StatInfo(object):
  '''The result of calling Stat on a FileSystem: the version of the path
  itself, plus (for directories) the version of every child.
  '''
  def __init__(self, version, child_versions=None):
    if child_versions:
      # Every child path must be well-formed.
      assert all(IsValid(path) for path in child_versions.iterkeys()), \
          child_versions
    self.version = version
    self.child_versions = child_versions

  def __eq__(self, other):
    if not isinstance(other, StatInfo):
      return False
    return (self.version == other.version and
            self.child_versions == other.child_versions)

  def __ne__(self, other):
    return not self.__eq__(other)

  def __str__(self):
    return '{version: %s, child_versions: %s}' % (self.version,
                                                  self.child_versions)

  def __repr__(self):
    return str(self)
class FileSystem(object):
  '''A FileSystem interface that can read files and directories.
  '''
  def Read(self, paths):
    '''Reads each file in paths and returns a dictionary mapping the path to the
    contents. If a path in paths ends with a '/', it is assumed to be a
    directory, and a list of files in the directory is mapped to the path.
    The contents will be a str.
    If any path cannot be found, raises a FileNotFoundError. This is guaranteed
    to only happen once the Future has been resolved (Get() called).
    For any other failure, raises a FileSystemError.
    '''
    raise NotImplementedError(self.__class__)
  def ReadSingle(self, path):
    '''Reads a single file from the FileSystem. Returns a Future with the same
    rules as Read().
    '''
    AssertIsValid(path)
    # Delegate to Read() and project out the one requested path.
    read_single = self.Read([path])
    return Future(delegate=Gettable(lambda: read_single.Get()[path]))
  def Exists(self, path):
    '''Returns a Future to the existence of |path|; True if |path| exists,
    False if not. This method will not throw a FileNotFoundError unlike
    the Read* methods, however it may still throw a FileSystemError.
    There are several ways to implement this method via the interface but this
    method exists to do so in a canonical and most efficient way for caching.
    '''
    AssertIsValid(path)
    if path == '':
      # There is always a root directory.
      return Future(value=True)
    # Existence is decided by listing the parent directory and looking for
    # |base| in it, which lets caching layers answer from a dir listing.
    parent, base = SplitParent(path)
    list_future = self.ReadSingle(ToDirectory(parent))
    def resolve():
      try:
        return base in list_future.Get()
      except FileNotFoundError:
        # The parent doesn't exist, so neither can |path|.
        return False
    return Future(delegate=Gettable(resolve))
  def Refresh(self):
    '''Asynchronously refreshes the content of the FileSystem, returning a
    future to its completion.
    '''
    raise NotImplementedError(self.__class__)
  # TODO(cduvall): Allow Stat to take a list of paths like Read.
  def Stat(self, path):
    '''Returns a |StatInfo| object containing the version of |path|. If |path|
    is a directory, |StatInfo| will have the versions of all the children of
    the directory in |StatInfo.child_versions|.
    If the path cannot be found, raises a FileNotFoundError.
    For any other failure, raises a FileSystemError.
    '''
    raise NotImplementedError(self.__class__)
  def GetIdentity(self):
    '''The identity of the file system, exposed for caching classes to
    namespace their caches. this will usually depend on the configuration of
    that file system - e.g. a LocalFileSystem with a base path of /var is
    different to that of a SubversionFileSystem with a base path of /bar, is
    different to a LocalFileSystem with a base path of /usr.
    '''
    raise NotImplementedError(self.__class__)
  def Walk(self, root):
    '''Recursively walk the directories in a file system, starting with root.
    Behaviour is very similar to os.walk from the standard os module, yielding
    (base, dirs, files) recursively, where |base| is the base path of |files|,
    |dirs| relative to |root|, and |files| and |dirs| the list of files/dirs in
    |base| respectively.
    Note that directories will always end with a '/', files never will.
    If |root| cannot be found, raises a FileNotFoundError.
    For any other failure, raises a FileSystemError.
    '''
    AssertIsDirectory(root)
    basepath = root
    def walk(root):
      AssertIsDirectory(root)
      dirs, files = [], []
      # Entries ending with '/' are subdirectories (see Read()).
      for f in self.ReadSingle(root).Get():
        if IsDirectory(f):
          dirs.append(f)
        else:
          files.append(f)
      yield root[len(basepath):].rstrip('/'), dirs, files
      # Depth-first recursion into each subdirectory.
      for d in dirs:
        for walkinfo in walk(root + d):
          yield walkinfo
    for walkinfo in walk(root):
      yield walkinfo
  def __repr__(self):
    # Only the concrete class name; useful in logs and error messages.
    return '<%s>' % type(self).__name__
  def __str__(self):
    return repr(self)
| {
"content_hash": "a4aa0584d061c9705c39fdd54782efab",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 80,
"avg_line_length": 32.310344827586206,
"alnum_prop": 0.6700462468872287,
"repo_name": "ChromiumWebApps/chromium",
"id": "c2da419e9ed2e40c31d10cf54ff36e9db6892faf",
"size": "5789",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chrome/common/extensions/docs/server2/file_system.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "52960"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "42286199"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "198616766"
},
{
"name": "CSS",
"bytes": "937333"
},
{
"name": "DOT",
"bytes": "2984"
},
{
"name": "Java",
"bytes": "5695686"
},
{
"name": "JavaScript",
"bytes": "21967126"
},
{
"name": "M",
"bytes": "2190"
},
{
"name": "Matlab",
"bytes": "2262"
},
{
"name": "Objective-C",
"bytes": "7602057"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "1210885"
},
{
"name": "Python",
"bytes": "10774996"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1316721"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "TypeScript",
"bytes": "1560024"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15243"
}
],
"symlink_target": ""
} |
"""This example gets custom targeting values for the given predefined custom
targeting key. The statement retrieves up to the maximum page size limit of
500. To create custom targeting values, run
create_custom_targeting_keys_and_values.py. To determine which custom
targeting keys exist, run get_all_custom_targeting_keys_and_values.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
custom_targeting_service = client.GetService(
'CustomTargetingService', version='v201211')
key_id = 'INSERT_CUSTOM_TARGETING_KEY_ID_HERE'
values = [{
'key': 'keyId',
'value': {
'xsi_type': 'NumberValue',
'value': key_id
}
}]
filter_statement = {'query': 'WHERE customTargetingKeyId = :keyId LIMIT 500',
'values': values}
# Get custom targeting values by statement.
response = custom_targeting_service.GetCustomTargetingValuesByStatement(
filter_statement)[0]
values = []
if 'results' in response:
values = response['results']
# Display results.
if values:
for value in values:
print ('Custom targeting value with id \'%s\', name \'%s\', and display '
'name \'%s\' was found.'
% (value['id'], value['name'], value['displayName']))
else:
print 'No values were found.'
| {
"content_hash": "cc0aa7eddff57add92bcf6c7cb77ee08",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 80,
"avg_line_length": 32.68627450980392,
"alnum_prop": 0.6838632273545291,
"repo_name": "caioserra/apiAdwords",
"id": "58f74efb5011372cfd52786f9b8d7cf835ebed12",
"size": "2285",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfp/v201211/get_custom_targeting_values_by_statement.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "47375"
},
{
"name": "Python",
"bytes": "3481410"
},
{
"name": "Shell",
"bytes": "14782"
}
],
"symlink_target": ""
} |
"""installs a WSGI application in place of a real URI for testing.
Introduction
============
Testing a WSGI application normally involves starting a server at a
local host and port, then pointing your test code to that address.
Instead, this library lets you intercept calls to any specific host/port
combination and redirect them into a `WSGI application`_ importable by
your test program. Thus, you can avoid spawning multiple processes or
threads to test your Web app.
How Does It Work?
=================
``wsgi_intercept`` works by replacing ``httplib.HTTPConnection`` with a
subclass, ``wsgi_intercept.WSGI_HTTPConnection``. This class then
redirects specific server/port combinations into a WSGI application by
emulating a socket. If no intercept is registered for the host and port
requested, those requests are passed on to the standard handler.
The functions ``add_wsgi_intercept(host, port, app_create_fn,
script_name='')`` and ``remove_wsgi_intercept(host,port)`` specify
which URLs should be redirected into what applications. Note especially
that ``app_create_fn`` is a *function object* returning a WSGI
application; ``script_name`` becomes ``SCRIPT_NAME`` in the WSGI app's
environment, if set.
Install
=======
::
pip install -U wsgi_intercept
Packages Intercepted
====================
Unfortunately each of the Web testing frameworks uses its own specific
mechanism for making HTTP call-outs, so individual implementations are
needed. At this time there are implementations for ``httplib2`` and
``requests`` in both Python 2 and 3, ``urllib2`` and ``httplib``
in Python 2 and ``urllib.request`` and ``http.client`` in Python 3.
If you are using Python 2 and need support for a different HTTP
client, require a version of ``wsgi_intercept<0.6``. Earlier versions
include support for ``webtest``, ``webunit`` and ``zope.testbrowser``.
It is quite likely that support for these versions will be relatively
easy to add back in to the new version.
The best way to figure out how to use interception is to inspect
`the tests`_. More comprehensive documentation available upon
request.
.. _the tests: https://github.com/cdent/python3-wsgi-intercept/tree/master/test
History
=======
Pursuant to Ian Bicking's `"best Web testing framework"`_ post, Titus
Brown put together an `in-process HTTP-to-WSGI interception mechanism`_
for his own Web testing system, twill_. Because the mechanism is pretty
generic -- it works at the httplib level -- Titus decided to try adding
it into all of the *other* Python Web testing frameworks.
The Python 2 version of wsgi-intercept was the result. Kumar McMillan
later took over maintenance.
The current version works with Python 2.6, 2.7, 3.3 and 3.4 and was assembled
by `Chris Dent`_. Testing and documentation improvements from `Sasha Hart`_.
.. _twill: http://www.idyll.org/~t/www-tools/twill.html
.. _"best Web testing framework": http://blog.ianbicking.org/best-of-the-web-app-test-frameworks.html
.. _in-process HTTP-to-WSGI interception mechanism: http://www.advogato.org/person/titus/diary.html?start=119
.. _WSGI application: http://www.python.org/peps/pep-3333.html
.. _Chris Dent: https://github.com/cdent
.. _Sasha Hart: https://github.com/sashahart
Project Home
============
This project lives on `GitHub`_. Please submit all bugs, patches,
failing tests, et cetera using the Issue Tracker.
Additional documentation is available on `Read The Docs`_.
.. _GitHub: http://github.com/cdent/python3-wsgi-intercept
.. _Read The Docs: http://wsgi-intercept.readthedocs.org/en/latest/
"""
from __future__ import print_function
__version__ = '0.10.0'
import sys
try:
from http.client import HTTPConnection, HTTPSConnection
except ImportError:
from httplib import HTTPConnection, HTTPSConnection
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
try:
from urllib.parse import unquote_to_bytes as url_unquote
except ImportError:
from urllib import unquote as url_unquote
import traceback
# Debug verbosity for the interceptor: 0 = silent, 1 = basic tracing,
# 2 = verbose (also prints parsed headers and raw request/response data).
debuglevel = 0
####
#
# Specify which hosts/ports to target for interception to a given WSGI app.
#
# For simplicity's sake, intercept ENTIRE host/port combinations;
# intercepting only specific URL subtrees gets complicated, because we don't
# have that information in the HTTPConnection.connect() function that does the
# redirection.
#
# format: key=(host, port), value=(create_app, top_url)
#
# (top_url becomes the SCRIPT_NAME)
# Registry of intercepted endpoints: (host, port) -> (app_create_fn, script_name)
_wsgi_intercept = {}


def add_wsgi_intercept(host, port, app_create_fn, script_name=''):
    """
    Register an intercept for host:port: requests there will be handled by
    the WSGI app returned from app_create_fn, with SCRIPT_NAME set to
    'script_name' (default '').
    """
    key = (host, port)
    _wsgi_intercept[key] = (app_create_fn, script_name)
def remove_wsgi_intercept(*args):
    """
    Remove the WSGI intercept call for (host, port). If no arguments are
    given, removes all intercepts.
    """
    global _wsgi_intercept
    if not args:
        _wsgi_intercept = {}
    else:
        # Silently ignore a (host, port) that was never registered.
        _wsgi_intercept.pop((args[0], args[1]), None)
#
# make_environ: behave like a Web server. Take in 'input', and behave
# as if you're bound to 'host' and 'port'; build an environment dict
# for the WSGI app.
#
# This is where the magic happens, folks.
#
def make_environ(inp, host, port, script_name):
    """
    Take 'inp' as if it were HTTP-speak being received on host:port,
    and parse it into a WSGI-ok environment dictionary. Return the
    dictionary.
    Set 'SCRIPT_NAME' from the 'script_name' input, and, if present,
    remove it from the beginning of the PATH_INFO variable.
    """
    #
    # parse the input up to the first blank line (or its end).
    #
    environ = {}
    # NOTE(review): 'inp' is assumed to be a binary file-like object
    # (bytes lines), as produced by wsgi_fake_socket.makefile().
    method_line = inp.readline()
    if sys.version_info[0] > 2:
        method_line = method_line.decode('ISO-8859-1')
    content_type = None
    content_length = None
    cookies = []
    for line in inp:
        if not line.strip():
            break
        # Header values are decoded as ISO-8859-1, matching the method line.
        k, v = line.strip().split(b':', 1)
        v = v.lstrip()
        v = v.decode('ISO-8859-1')
        #
        # take care of special headers, and for the rest, put them
        # into the environ with HTTP_ in front.
        #
        if k.lower() == b'content-type':
            content_type = v
        elif k.lower() == b'content-length':
            content_length = v
        elif k.lower() == b'cookie' or k.lower() == b'cookie2':
            cookies.append(v)
        else:
            h = k.upper()
            h = h.replace(b'-', b'_')
            environ['HTTP_' + h.decode('ISO-8859-1')] = v
        if debuglevel >= 2:
            print('HEADER:', k, v)
    #
    # decode the method line
    #
    if debuglevel >= 2:
        print('METHOD LINE:', method_line)
    method, url, protocol = method_line.split(' ')
    # Store the URI as requested by the user, without modification
    # so that PATH_INFO munging can be corrected.
    environ['REQUEST_URI'] = url
    environ['RAW_URI'] = url
    # clean the script_name off of the url, if it's there.
    if not url.startswith(script_name):
        script_name = ''  # @CTB what to do -- bad URL. scrap?
    else:
        url = url[len(script_name):]
    url = url.split('?', 1)
    path_info = url_unquote(url[0])
    query_string = ""
    if len(url) == 2:
        query_string = url[1]
    if debuglevel:
        print("method: %s; script_name: %s; path_info: %s; query_string: %s" %
              (method, script_name, path_info, query_string))
    # Everything remaining after the headers is the request body.
    r = inp.read()
    inp = BytesIO(r)
    #
    # fill out our dictionary.
    #
    # In Python3 turn the bytes of the path info into a string of
    # latin-1 code points, because that's what the spec says we must
    # do to be like a server. Later various libraries will be forced
    # to decode and then reencode to get the UTF-8 that everyone
    # wants.
    if sys.version_info[0] > 2:
        path_info = path_info.decode('latin-1')
    environ.update({
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": "http",
        "wsgi.input": inp,  # to read for POSTs
        "wsgi.errors": BytesIO(),
        "wsgi.multithread": 0,
        "wsgi.multiprocess": 0,
        "wsgi.run_once": 0,
        "PATH_INFO": path_info,
        "REMOTE_ADDR": '127.0.0.1',
        "REQUEST_METHOD": method,
        "SCRIPT_NAME": script_name,
        "SERVER_NAME": host,
        "SERVER_PORT": port,
        "SERVER_PROTOCOL": protocol,
    })
    #
    # query_string, content_type & length are optional.
    #
    if query_string:
        environ['QUERY_STRING'] = query_string
    if content_type:
        environ['CONTENT_TYPE'] = content_type
        if debuglevel >= 2:
            print('CONTENT-TYPE:', content_type)
    if content_length:
        environ['CONTENT_LENGTH'] = content_length
        if debuglevel >= 2:
            print('CONTENT-LENGTH:', content_length)
    #
    # handle cookies.
    #
    if cookies:
        environ['HTTP_COOKIE'] = "; ".join(cookies)
    if debuglevel:
        print('WSGI environ dictionary:', environ)
    return environ
class WSGIAppError(Exception):
"""
An exception that wraps any Exception raised by the WSGI app
that is called. This is done for two reasons: it ensures that
intercepted libraries (such as requests) which use exceptions
to trigger behaviors are not interfered with by exceptions from
the WSGI app. It also helps to define a solid boundary, akin
to the network boundary between server and client, in the
testing environment.
"""
def __init__(self, error, exc_info):
Exception.__init__(self)
self.error = error
self.exception_type = exc_info[0]
self.exception_value = exc_info[1]
self.traceback = exc_info[2]
def __str__(self):
frame = traceback.extract_tb(self.traceback)[-1]
formatted = "{0!r} at {1}:{2}".format(
self.error,
frame[0],
frame[1],
)
return formatted
#
# fake socket for WSGI intercept stuff.
#
class wsgi_fake_socket:
    """
    Handle HTTP traffic and stuff into a WSGI application object instead.
    Note that this class assumes:
    1. 'makefile' is called (by the response class) only after all of the
    data has been sent to the socket by the request class;
    2. non-persistent (i.e. non-HTTP/1.1) connections.
    """
    def __init__(self, app, host, port, script_name, https=False):
        self.app = app                   # WSGI app object
        self.host = host
        self.port = port
        self.script_name = script_name   # SCRIPT_NAME (app mount point)
        self.inp = BytesIO()             # stuff written into this "socket"
        self.write_results = []          # results from the 'write_fn'
        self.results = None              # results from running the app
        self.output = BytesIO()          # all output from the app, incl headers
        self.https = https               # report 'https' as the url scheme
    def makefile(self, *args, **kwargs):
        """
        'makefile' is called by the HTTPResponse class once all of the
        data has been written. So, in this interceptor class, we need to:
        1. build a start_response function that grabs all the headers
        returned by the WSGI app;
        2. create a wsgi.input file object 'inp', containing all of the
        traffic;
        3. build an environment dict out of the traffic in inp;
        4. run the WSGI app & grab the result object;
        5. concatenate & return the result(s) read from the result object.
        @CTB: 'start_response' should return a function that writes
        directly to self.result, too.
        """
        # dynamically construct the start_response function for no good reason.
        def start_response(status, headers, exc_info=None):
            # construct the HTTP request.
            self.output.write(b"HTTP/1.0 " + status.encode('utf-8') + b"\n")
            # Headers may arrive as str or bytes; normalize each to bytes.
            for k, v in headers:
                try:
                    k = k.encode('utf-8')
                except AttributeError:
                    pass
                try:
                    v = v.encode('utf-8')
                except AttributeError:
                    pass
                self.output.write(k + b': ' + v + b"\n")
            self.output.write(b'\n')
            def write_fn(s):
                self.write_results.append(s)
            return write_fn
        # construct the wsgi.input file from everything that's been
        # written to this "socket".
        inp = BytesIO(self.inp.getvalue())
        # build the environ dictionary.
        environ = make_environ(inp, self.host, self.port, self.script_name)
        if self.https:
            environ['wsgi.url_scheme'] = 'https'
        # run the application.
        try:
            app_result = self.app(environ, start_response)
        except Exception as error:
            # Wrap app errors so they can't masquerade as client-library ones.
            raise WSGIAppError(error, sys.exc_info())
        self.result = iter(app_result)
        ###
        # read all of the results. the trick here is to get the *first*
        # bit of data from the app via the generator, *then* grab & return
        # the data passed back from the 'write' function, and then return
        # the generator data. this is because the 'write' fn doesn't
        # necessarily get called until the first result is requested from
        # the app function.
        try:
            generator_data = None
            try:
                generator_data = next(self.result)
            finally:
                # Flush anything the app pushed through the legacy write().
                for data in self.write_results:
                    self.output.write(data)
            if generator_data:
                try:
                    self.output.write(generator_data)
                except TypeError as exc:
                    # PEP 3333 requires bytes in the response iterable.
                    raise TypeError('bytes required in response: %s' % exc)
            while 1:
                data = next(self.result)
                self.output.write(data)
        except StopIteration:
            pass
        # Per the WSGI spec, close() must be called on the app's iterable.
        if hasattr(app_result, 'close'):
            app_result.close()
        if debuglevel >= 2:
            print("***", self.output.getvalue(), "***")
        # return the concatenated results.
        return BytesIO(self.output.getvalue())
    def sendall(self, content):
        """
        Save all the traffic to self.inp.
        """
        if debuglevel >= 2:
            print(">>>", content, ">>>")
        try:
            self.inp.write(content)
        except TypeError:
            # str was passed; store as UTF-8 bytes.
            self.inp.write(content.encode('utf-8'))
    def close(self):
        "Do nothing, for now."
        pass
#
# WSGI_HTTPConnection
#
class WSGI_HTTPConnection(HTTPConnection):
    """
    Intercept all traffic to certain hosts & redirect into a WSGI
    application object.
    """
    def get_app(self, host, port):
        """
        Return (app, script_name) for the given (host, port), or
        (None, None) when no intercept is registered there.
        """
        entry = _wsgi_intercept.get((host, int(port)))
        if entry is None:
            return None, None
        app_create_fn, script_name = entry
        return app_create_fn(), script_name

    def connect(self):
        """
        Override the connect() function to intercept calls to certain
        host/ports.
        If no app at host/port has been registered for interception then
        a normal HTTPConnection is made.
        """
        if debuglevel:
            sys.stderr.write('connect: %s, %s\n' % (self.host, self.port,))
        try:
            app, script_name = self.get_app(self.host, self.port)
            if not app:
                # No intercept registered: fall through to a real connection.
                HTTPConnection.connect(self)
                return
            if debuglevel:
                sys.stderr.write('INTERCEPTING call to %s:%s\n' %
                                 (self.host, self.port,))
            self.sock = wsgi_fake_socket(app, self.host, self.port,
                                         script_name)
        except Exception:
            if debuglevel:  # intercept & print out tracebacks
                traceback.print_exc()
            raise
#
# WSGI_HTTPSConnection
#
class WSGI_HTTPSConnection(HTTPSConnection, WSGI_HTTPConnection):
    """
    Intercept all traffic to certain hosts & redirect into a WSGI
    application object.
    """
    def get_app(self, host, port):
        """
        Return the app object for the given (host, port).
        """
        key = (host, int(port))
        app, script_name = None, None
        if key in _wsgi_intercept:
            (app_fn, script_name) = _wsgi_intercept[key]
            app = app_fn()
        return app, script_name
    def connect(self):
        """
        Override the connect() function to intercept calls to certain
        host/ports.
        If no app at host/port has been registered for interception then
        a normal HTTPSConnection is made.
        """
        if debuglevel:
            sys.stderr.write('connect: %s, %s\n' % (self.host, self.port,))
        try:
            (app, script_name) = self.get_app(self.host, self.port)
            if app:
                if debuglevel:
                    sys.stderr.write('INTERCEPTING call to %s:%s\n' %
                                     (self.host, self.port,))
                # Intercepted: substitute the fake socket, flagged as https.
                self.sock = wsgi_fake_socket(app, self.host, self.port,
                                             script_name, https=True)
            else:
                # NOTE(review): best-effort backfill of ssl attributes that
                # different Python versions' HTTPSConnection expect; any
                # ImportError/AttributeError is deliberately swallowed so a
                # plain connect can still be attempted.
                try:
                    import ssl
                    if not hasattr(self, 'key_file'):
                        self.key_file = None
                    if not hasattr(self, 'cert_file'):
                        self.cert_file = None
                    if not hasattr(self, '_context'):
                        try:
                            self._context = ssl.create_default_context()
                        except AttributeError:
                            # Older ssl module: build a permissive context.
                            self._context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                            self._context.options |= ssl.OP_NO_SSLv2
                    if not hasattr(self, 'check_hostname'):
                        self._check_hostname = (self._context.verify_mode
                                                != ssl.CERT_NONE)
                    else:
                        self._check_hostname = self.check_hostname
                except (ImportError, AttributeError):
                    pass
                HTTPSConnection.connect(self)
        except Exception:
            if debuglevel:  # intercept & print out tracebacks
                traceback.print_exc()
            raise
| {
"content_hash": "6ced28822c6b4b5b8cb10e838234dd47",
"timestamp": "",
"source": "github",
"line_count": 588,
"max_line_length": 109,
"avg_line_length": 31.906462585034014,
"alnum_prop": 0.5914929907787432,
"repo_name": "sileht/python3-wsgi-intercept",
"id": "37728427ccdfc7aa4e0502f307c83cec6d35459e",
"size": "18762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wsgi_intercept/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "650"
},
{
"name": "Python",
"bytes": "41603"
}
],
"symlink_target": ""
} |
from noodles import (schedule, quote, unquote)
def find_first(pred, lst):
    """Find the first result of a list of promises `lst` that satisfies a
    predicate `pred`.

    :param pred: a function of one argument returning `True` or `False`.
    :param lst: a list of promises or values.
    :return: a promise of a value or `None`.

    This wraps :func:`s_find_first`: the head of the list is passed as is,
    forcing its evaluation, while the tail is quoted and only unquoted if
    the predicate rejects the head's result.

    If the input list is empty, `None` is returned."""
    if not lst:
        return None
    head = lst[0]
    quoted_tail = [quote(item) for item in lst[1:]]
    return s_find_first(pred, head, quoted_tail)
@schedule
def s_find_first(pred, first, lst):
    """Evaluate `first`; if predicate `pred` succeeds on the result of
    `first`, return that result; otherwise recur on the head of `lst`.

    :param pred: a predicate.
    :param first: a promise.
    :param lst: a list of quoted promises.
    :return: the first element for which the predicate is true."""
    if pred(first):
        return first
    if not lst:
        return None
    head, tail = lst[0], lst[1:]
    return s_find_first(pred, unquote(head), tail)
| {
"content_hash": "f9c6a1ad9d2781c30cf9690fc2bc9b2a",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 79,
"avg_line_length": 33.810810810810814,
"alnum_prop": 0.657074340527578,
"repo_name": "NLeSC/noodles",
"id": "2ba7c0a1384277f6dc38963d55581ca4411638ba",
"size": "1251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "noodles/patterns/find_first.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "77"
},
{
"name": "Jupyter Notebook",
"bytes": "261943"
},
{
"name": "Lua",
"bytes": "4645"
},
{
"name": "Python",
"bytes": "244648"
},
{
"name": "Shell",
"bytes": "4923"
}
],
"symlink_target": ""
} |
"""
Monkey patches for changes to classes and functions in Eppy. These include
fixes which have not yet made it to the released version of Eppy. These will be
removed if/when they are added to Eppy.
"""
import copy
import warnings
from typing import Any, Dict, List, Optional, Tuple, Union # noqa
from eppy import bunchhelpers, iddgaps
from eppy.EPlusInterfaceFunctions import eplusdata, iddindex, parse_idd
from eppy.EPlusInterfaceFunctions.eplusdata import Eplusdata # noqa
from eppy.bunch_subclass import EpBunch as BaseBunch
from eppy.idf_msequence import Idf_MSequence
from eppy.idfreader import convertallfields, iddversiontuple
from eppy.modeleditor import IDF as BaseIDF
from eppy.modeleditor import IDDNotSetError, namebunch, newrawobject
from .geom.polygons import Polygon3D # noqa
from .geom.surfaces import set_coords
from .geom.vectors import Vector3D # noqa
# Py2-compatible stand-in for typing.TYPE_CHECKING: never executed at
# runtime, it only makes the IDF name resolvable for type comments and
# avoids a circular import with .idf.
if False:
    from .idf import IDF  # noqa
class EpBunch(BaseBunch):
    """Monkeypatched EpBunch that adds the ``setcoords`` method."""

    def setcoords(
        self,
        poly,  # type: Union[List[Vector3D], List[Tuple[float, float, float]], Polygon3D]
        ggr=None,  # type: Optional[Union[List, None, Idf_MSequence]]
    ):
        # type: (...) -> None
        """Set the coordinates of a surface.

        :param poly: Either a Polygon3D object or a list of (x,y,z) tuples.
        :param ggr: A GlobalGeometryRules IDF object. Defaults to None.
        :raises AttributeError: If this object is not a detailed surface type.
        """
        # Kept local (not a class attribute) so bunch attribute magic on
        # EpBunch instances is never involved in the lookup.
        detailed_surfaces = frozenset((
            "BUILDINGSURFACE:DETAILED",
            "WALL:DETAILED",
            "ROOFCEILING:DETAILED",
            "FLOOR:DETAILED",
            "FENESTRATIONSURFACE:DETAILED",
            "SHADING:SITE:DETAILED",
            "SHADING:BUILDING:DETAILED",
            "SHADING:ZONE:DETAILED",
        ))
        if self.key.upper() not in detailed_surfaces:
            raise AttributeError
        set_coords(self, poly, ggr)
def idfreader1(fname, iddfile, theidf, conv=True, commdct=None, block=None):
    # type: (str, str, IDF, Optional[bool], Optional[List[List[Dict[str, Any]]]], Optional[List]) -> Tuple[Dict[str, Any], Optional[List[Any]], Any, List[List[Dict[str, Any]]], Any, Any]
    """Read an IDF file and return its objects as bunches.

    :param fname: Name of the IDF file to read.
    :param iddfile: Name of the IDD file to use to interpret the IDF.
    :param theidf: The IDF object the bunches belong to.
    :param conv: If True, convert strings to floats and integers where marked in the IDD.
    :param commdct: Descriptions of IDF fields from the IDD, if already parsed.
    :param block: EnergyPlus field ID names of the IDF from the IDD, if already parsed.
    :returns: Tuple of (bunchdt, block, data, commdct, idd_index, versiontuple).
    """
    idd_version = iddversiontuple(iddfile)
    block, data, commdct, idd_index = readdatacommdct1(
        fname, iddfile=iddfile, commdct=commdct, block=block
    )
    if conv:
        convertallfields(data, commdct)
    # Fill gaps in the IDD; pre-8.0 IDDs need one object type skipped.
    skiplist = ["TABLE:MULTIVARIABLELOOKUP"] if idd_version < (8,) else None
    nofirstfields = iddgaps.missingkeys_standard(commdct, data.dtls, skiplist=skiplist)
    iddgaps.missingkeys_nonstandard(block, commdct, data.dtls, nofirstfields)
    bunchdt = makebunches(data, commdct, theidf)
    return bunchdt, block, data, commdct, idd_index, idd_version
def readdatacommdct1(idfname, iddfile="Energy+.idd", commdct=None, block=None):
    # type: (str, str, Optional[List[List[Dict[str, Any]]]], Optional[List]) -> Tuple[Optional[List[Any]], Any, List[List[Dict[str, Any]]], Any]
    """Read the idf file.

    Patched so that the IDD index is not lost when reading a new IDF without
    reloading the modeleditor module.

    :param idfname: Name of the IDF file to read.
    :param iddfile: Name of the IDD file to use to interpret the IDF.
    :param commdct: Descriptions of IDF fields from the IDD, if already parsed.
    :param block: EnergyPlus field ID names of the IDF from the IDD.
    :returns: Tuple of (block, data, commdct, idd_index).
    """
    if commdct:
        # The IDD was parsed before: rebuild the name/reference index from it.
        theidd = eplusdata.Idd(block, 2)
        name2refs = iddindex.makename2refdct(commdct)
        ref2namesdct = iddindex.makeref2namesdct(name2refs)
        idd_index = dict(name2refs=name2refs, ref2names=ref2namesdct)
        updated_commdct = iddindex.ref2names2commdct(ref2namesdct, commdct)
    else:
        # First read: parse the IDD from scratch.
        block, commlst, updated_commdct, idd_index = parse_idd.extractidddata(iddfile)
        theidd = eplusdata.Idd(block, 2)
    data = eplusdata.Eplusdata(theidd, idfname)
    return block, data, updated_commdct, idd_index
def addthisbunch(bunchdt, data, commdct, thisbunch, _idf):
    # type: (Dict[str, Idf_MSequence], Eplusdata, List[List[Dict[str, Any]]], EpBunch, IDF) -> EpBunch
    """Add an object to the IDF. Monkeypatched to return the object.

    ``thisbunch`` usually comes from another idf file, or it can be used to
    copy within the same idf file.

    :param bunchdt: Dict of lists of idf_MSequence objects in the IDF.
    :param data: Eplusdata object containing representions of IDF objects.
    :param commdct: Descriptions of IDF fields from the IDD.
    :param thisbunch: The object to add to the model.
    :param _idf: The IDF object. Unused here; kept for consistency with Eppy.
    :returns: The EpBunch object added.
    """
    target_key = thisbunch.key.upper()
    # Shallow-copy the raw field list so the source object is not shared.
    raw_obj = copy.copy(thisbunch.obj)
    new_bunch = obj2bunch(data, commdct, raw_obj)
    bunchdt[target_key].append(new_bunch)
    return new_bunch
def makebunches(
    data, commdct, theidf
):  # type: (Eplusdata, List[List[Dict[str, Any]]], IDF) -> Dict[str, Idf_MSequence]
    """Make bunches with data.

    :param data: Eplusdata object containing representions of IDF objects.
    :param commdct: Descriptions of IDF fields from the IDD.
    :param theidf: The IDF object.
    :returns: Dict of lists of idf_MSequence objects in the IDF.
    """
    bunchdt = {}
    raw_objects, key_order = data.dt, data.dtls
    for key_i, raw_key in enumerate(key_order):
        upper_key = raw_key.upper()
        objs = raw_objects[upper_key]
        bunches = [makeabunch(commdct, obj, key_i) for obj in objs]
        bunchdt[upper_key] = Idf_MSequence(bunches, objs, theidf)
    return bunchdt
def obj2bunch(
    data, commdct, obj
):  # type: (Eplusdata, List[List[Dict[str, Any]]], List[str]) -> EpBunch
    """Make a new bunch object using the data object.

    :param data: Eplusdata object containing representions of IDF objects.
    :param commdct: Descriptions of IDF fields from the IDD.
    :param obj: List of field values in an object.
    :returns: EpBunch object.
    """
    # The first field of an object is its key; its position in dtls gives
    # the index into commdct.
    key_i = data.dtls.index(obj[0].upper())
    return makeabunch(commdct, obj, key_i)
def makeabunch(
    commdct,  # type: List[List[Dict[str, Any]]]
    obj,  # type: Union[List[Union[float, str]], List[str]]
    obj_i,  # type: int
):
    # type: (...) -> EpBunch
    """Make a bunch from the object.

    :param commdct: Descriptions of IDF fields from the IDD.
    :param obj: List of field values in an object.
    :param obj_i: Index of the object in commdct.
    :returns: EpBunch object.
    """
    objidd = commdct[obj_i]
    # The first field is always the object key; the rest take their names
    # from the IDD field descriptions.
    raw_names = ["key"] + [comm.get("field")[0] for comm in objidd[1:]]
    obj_fields = [bunchhelpers.makefieldname(name) for name in raw_names]
    return EpBunch(obj, obj_fields, objidd)
class PatchedIDF(BaseIDF):
    """Monkey-patched IDF.

    Patched to add read (to add additional functionality) and to fix
    copyidfobject and newidfobject.
    """
    def read(self):
        """Read the IDF file and the IDD file.

        If the IDD file had already been read, it will not be read again.

        Populates the following data structures::

            - idfobjects : list
            - model : list
            - idd_info : list
            - idd_index : dict
        """
        if self.getiddname() is None:
            errortxt = "IDD file needed to read the idf file. Set it using IDF.setiddname(iddfile)"
            raise IDDNotSetError(errortxt)
        (
            self.idfobjects,
            block,
            self.model,
            idd_info,
            idd_index,
            versiontuple,
        ) = idfreader1(
            self.idfname, self.iddname, self, commdct=self.idd_info, block=self.block
        )
        # Cache the parsed IDD on the class so subsequent reads reuse it
        # instead of re-parsing the IDD file.
        self.__class__.setidd(idd_info, idd_index, block, versiontuple)
    def newidfobject(self, key, aname="", **kwargs):
        # type: (str, str, **Any) -> EpBunch
        """Add a new idfobject to the model.

        If you don't specify a value for a field, the default value will be set.

        For example ::

            newidfobject("CONSTRUCTION")
            newidfobject("CONSTRUCTION",
                Name='Interior Ceiling_class',
                Outside_Layer='LW Concrete',
                Layer_2='soundmat')

        :param key: The type of IDF object. This must be in ALL_CAPS.
        :param aname: Deprecated; kept only for backward compatibility.
        :param kwargs: Keyword arguments in the format `field=value` used to set fields in the EnergyPlus object.
        :returns: EpBunch object.
        """
        obj = newrawobject(self.model, self.idd_info, key)
        abunch = obj2bunch(self.model, self.idd_info, obj)
        # Using aname still works but is deprecated; warn and fall through.
        if aname:
            warnings.warn(
                "The aname parameter should no longer be used (%s)." % aname,
                UserWarning,
            )
            namebunch(abunch, aname)
        self.idfobjects[key].append(abunch)  # type: Dict[str, Idf_MSequence]
        # Apply field values after the object is registered in the model.
        for k, v in kwargs.items():
            abunch[k] = v
        return abunch
    def copyidfobject(self, idfobject):
        # type: (EpBunch) -> EpBunch
        """Add an IDF object to the IDF.

        This has been monkey-patched to add the return value.

        :param idfobject: The IDF object to copy. Usually from another IDF, or it can be used to copy within this IDF.
        :returns: EpBunch object.
        """
        return addthisbunch(self.idfobjects, self.model, self.idd_info, idfobject, self)
| {
"content_hash": "f99292a45e6277e6d949abd169b3cc90",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 118,
"avg_line_length": 36.737541528239205,
"alnum_prop": 0.6504792910110327,
"repo_name": "jamiebull1/geomeppy",
"id": "72253dd9c133caf866af3a8dc28d1cd748ec44c5",
"size": "11396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geomeppy/patches.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "381"
},
{
"name": "Python",
"bytes": "206395"
}
],
"symlink_target": ""
} |
from importlib import import_module
import sys
__version__ = '0.1'
class Subcommand:
    """Record describing one CLI subcommand.

    :param name: the name typed on the command line.
    :param entry_point: ``'module:function'`` string locating the handler.
    :param description: optional one-line help text.
    """
    def __init__(self, name, entry_point, description=None):
        self.name, self.entry_point = name, entry_point
        self.description = description
class Commander:
    """Dispatch ``argv`` to one of a set of subcommands.

    :param description: optional description of the whole program.
    :param subcmds: required list of :class:`Subcommand`-like objects
        (anything with ``name``, ``entry_point`` and ``description``).
    :param package: anchor package for relative entry-point imports.
    :raises ValueError: if ``subcmds`` is not supplied.
    """
    def __init__(self, description=None, subcmds=None, package=None):
        # Raise rather than assert: asserts are stripped under ``python -O``.
        if subcmds is None:
            raise ValueError('subcmds must be a list of subcommand objects')
        self.description = description
        self.subcmds = subcmds
        self.package = package

    def __call__(self, argv=None):
        """Run the subcommand named in ``argv[1]``.

        :param argv: full argument vector; defaults to ``sys.argv``.
        :returns: the subcommand's return value, ``0`` for ``--help``,
            or ``2`` for a missing/unknown subcommand.
        """
        if argv is None:
            argv = sys.argv
        if len(argv) < 2:
            print('No subcommand specified')
            print('Available subcommands:', *(s.name for s in self.subcmds))
            return 2
        subcmd = argv[1]
        if subcmd in {'--help', '-h'}:
            print('Batis - install and distribute desktop applications')
            print('Subcommands:')
            for sc in self.subcmds:
                print('  {:<12} - {}'.format(sc.name, sc.description))
            return 0
        for sc in self.subcmds:
            if subcmd == sc.name:
                sub_main = self._load(sc.entry_point)
                # Hand the remaining arguments to the subcommand's main().
                return sub_main(argv[2:])
        print('Unknown subcommand: {!r}'.format(subcmd))
        print('Available subcommands:', *(s.name for s in self.subcmds))
        return 2

    def _load(self, entry_point):
        """Import and return the callable named by ``'module:function'``."""
        modname, func = entry_point.split(':')
        mod = import_module(modname, package=self.package)
        return getattr(mod, func)
| {
"content_hash": "5790bedd3a8a1348bbfd5486c4c5fcf6",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 76,
"avg_line_length": 30.94,
"alnum_prop": 0.5669036845507434,
"repo_name": "takluyver/vclurk",
"id": "bb0cab906ca0fa2cfeae2b19cc5d015ede7e4543",
"size": "1547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vclurk/subcmd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14819"
}
],
"symlink_target": ""
} |
"""
Generic python script.
"""
__author__ = "Alex Drlica-Wagner"
import unittest
# Execute tests in order: https://stackoverflow.com/a/22317851/4075339
unittest.TestLoader.sortTestMethodsUsing = None
import numpy as np
import fitsio
import ugali.analysis.loglike
from ugali.utils.logger import logger
logger.setLevel(logger.WARN)
CONFIG='tests/config.yaml'
LON = RA = 53.92
LAT = DEC = -54.05
IDX = [1,2537,9000]
class TestLoglike(unittest.TestCase):
    """Test the loglikelihood"""
    def setUp(self):
        """Build a loglikelihood object for the fixed test target."""
        self.loglike = ugali.analysis.loglike.createLoglike(CONFIG,lon=LON,lat=LAT)
        self.source = self.loglike.source
        self.filename = 'test-membership.fits'
    def test_initial_config(self):
        """Check the default source configuration from the config file."""
        # Likelihood configuration.
        np.testing.assert_equal(self.source.isochrone.name,'Bressan2012')
        np.testing.assert_equal(self.source.kernel.name,'RadialPlummer')
        np.testing.assert_equal(self.source.richness,1000.)
    def test_initial_probability(self):
        """Spot-check probability arrays and derived statistics.

        The expected values are pinned regression numbers for this
        config/target; they will change if the test data changes.
        """
        # Probability calculations
        np.testing.assert_allclose(self.loglike.f,0.08595560,rtol=1e-6)
        np.testing.assert_allclose(self.loglike.u[IDX],
                                   [5.29605173e-03, 1.80040569e-03, 5.52283081e-09],
                                   rtol=1e-6)
        np.testing.assert_allclose(self.loglike.b[IDX],
                                   [4215.31143651, 9149.29106545, 1698.22182173],
                                   rtol=1e-6)
        np.testing.assert_allclose(self.loglike.p[IDX],
                                   [1.25480793e-03, 1.96742181e-04, 3.25212568e-09],
                                   rtol=1e-6)
        np.testing.assert_allclose(self.loglike(),3948.1559048)
        np.testing.assert_allclose(self.loglike.ts(),7896.31181)
        np.testing.assert_allclose(self.loglike.nobs,85.9556015)
    def test_fit_richness(self):
        """Fit the richness and check the interval and best-fit values."""
        # Fit the richness
        interval = self.loglike.richness_interval()
        np.testing.assert_allclose(interval,(31596.21551, 32918.707276))
        lnl,rich,para = self.loglike.fit_richness()
        np.testing.assert_allclose(lnl,8449.77225)
        np.testing.assert_allclose(rich,32252.807226)
        np.testing.assert_allclose(self.loglike.source.richness,rich)
    def test_write_membership(self):
        """Round-trip the membership file and verify data and metadata."""
        # Write membership
        self.loglike.write_membership(self.filename)
        # Read membership and metadata
        mem,hdr = fitsio.read(self.filename,header=True)
        np.testing.assert_allclose(self.loglike.p,mem['PROB'])
        np.testing.assert_allclose(self.loglike.catalog.ra,mem['RA'])
        np.testing.assert_allclose(self.loglike.catalog.dec,mem['DEC'])
        np.testing.assert_allclose(self.loglike.catalog.color,mem['COLOR'])
        # Testing output metadata
        print (self.loglike.ts(),hdr['TS'])
        np.testing.assert_allclose(self.loglike.ts(),hdr['TS'])
        np.testing.assert_allclose(self.source.richness,hdr['RICHNESS'])
        np.testing.assert_allclose(self.source.age,hdr['AGE'])
        np.testing.assert_allclose(self.source.z,hdr['METALLICITY'])
        np.testing.assert_allclose(self.source.distance_modulus,hdr['DISTANCE_MODULUS'])
        np.testing.assert_allclose(self.source.lon,hdr['LON'])
        np.testing.assert_allclose(self.source.lat,hdr['LAT'])
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "afa3f0be3552b8a323d4ef20fda3021c",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 88,
"avg_line_length": 39.906976744186046,
"alnum_prop": 0.6386946386946387,
"repo_name": "kadrlica/ugali",
"id": "1ab46b118d386da160916888a13d7b7b93cac5b0",
"size": "3454",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_loglike.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "355304"
},
{
"name": "Python",
"bytes": "952323"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os.path
from taggit.managers import TaggableManager
from django.db import models
from django.db.models.signals import pre_delete
from django.dispatch.dispatcher import receiver
from django.dispatch import Signal
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from wagtail.wagtailadmin.taggable import TagSearchable
from wagtail.wagtailadmin.utils import get_object_usage
from wagtail.wagtailsearch import index
from wagtail.wagtailsearch.queryset import SearchableQuerySetMixin
class DocumentQuerySet(SearchableQuerySetMixin, models.QuerySet):
    """QuerySet for documents, combining Django's QuerySet with Wagtail's
    SearchableQuerySetMixin."""
    pass
@python_2_unicode_compatible
class Document(models.Model, TagSearchable):
    """A file uploaded through the Wagtail admin, with tagging and search
    support via TagSearchable."""
    title = models.CharField(max_length=255, verbose_name=_('Title'))
    file = models.FileField(upload_to='documents', verbose_name=_('File'))
    created_at = models.DateTimeField(verbose_name=_('Created at'), auto_now_add=True)
    # Nullable so documents survive deletion of the uploading user.
    uploaded_by_user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('Uploaded by user'), null=True, blank=True, editable=False)
    tags = TaggableManager(help_text=None, blank=True, verbose_name=_('Tags'))
    objects = DocumentQuerySet.as_manager()
    # Extend TagSearchable's fields so results can be filtered by uploader.
    search_fields = TagSearchable.search_fields + (
        index.FilterField('uploaded_by_user'),
    )
    def __str__(self):
        return self.title
    @property
    def filename(self):
        """Base name of the stored file, e.g. ``'report.pdf'``."""
        return os.path.basename(self.file.name)
    @property
    def file_extension(self):
        """File extension without the leading dot, e.g. ``'pdf'``."""
        return os.path.splitext(self.filename)[1][1:]
    @property
    def url(self):
        """URL of the ``wagtaildocs_serve`` view for this document."""
        return reverse('wagtaildocs_serve', args=[self.id, self.filename])
    def get_usage(self):
        """Return objects that reference this document (via get_object_usage)."""
        return get_object_usage(self)
    @property
    def usage_url(self):
        """URL of the admin page listing this document's usage."""
        return reverse('wagtaildocs:document_usage',
                       args=(self.id,))
    def is_editable_by_user(self, user):
        """Return True if ``user`` may edit this document: either the global
        change permission, or the add permission plus being the uploader."""
        if user.has_perm('wagtaildocs.change_document'):
            # user has global permission to change documents
            return True
        elif user.has_perm('wagtaildocs.add_document') and self.uploaded_by_user == user:
            # user has document add permission, which also implicitly provides permission to edit their own documents
            return True
        else:
            return False
    class Meta:
        verbose_name = _('Document')
# Receive the pre_delete signal and delete the file associated with the model instance.
@receiver(pre_delete, sender=Document)
def document_delete(sender, instance, **kwargs):
    """Remove the stored file from storage when its Document row is deleted."""
    # Pass false so FileField doesn't save the model.
    instance.file.delete(False)
# Signal carrying the triggering request; name suggests it is sent when a
# document is downloaded/served — confirm against the serving view.
document_served = Signal(providing_args=['request'])
| {
"content_hash": "2c2e13acfdfd125d303b99adfe194513",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 141,
"avg_line_length": 33.01176470588236,
"alnum_prop": 0.7127583749109052,
"repo_name": "hanpama/wagtail",
"id": "56264b6937dd97f354264faa529a9e6733524eb7",
"size": "2806",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "wagtail/wagtaildocs/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "152699"
},
{
"name": "HTML",
"bytes": "251513"
},
{
"name": "JavaScript",
"bytes": "92646"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "1615982"
},
{
"name": "Shell",
"bytes": "7241"
}
],
"symlink_target": ""
} |
import sys, math, os
# Panda imports
from panda3d.core import *
from pandac.PandaModules import *
from direct.actor.Actor import Actor
from direct.interval.IntervalGlobal import *
from direct.task import Task
from direct.showbase.DirectObject import DirectObject
from panda3d.core import BitMask32
from panda3d.bullet import *
from direct.showbase.InputStateGlobal import inputState
# Game imports
from devconfig import *
from globals import *
from gui import *
#---------------------------------------------------------------------#
## Main Player Class.
class Player(object):
    """Base handler for all "Players" (Actors) in the game.

    Use :meth:`addEntity` to register a created entity in the global
    ENTITY dict.
    """
    def __init__(self):
        pass

    def addEntity(self, entityKey, entityObject):
        """Register an entity in the global ENTITY dict.

        :param entityKey: preferably the name of the entity.
        :param entityObject: preferably the created entity instance.
        """
        ENTITY[entityKey] = entityObject
## MakePlayer Class
# Will move this class under physics.py so that we have some order.
class MakePlayer(DirectObject):
    """
    MakePlayer Class:
    This class handles the creation of Players: movement state, input
    bindings, the Bullet rigid-body capsule, camera and first-person arm.
    Players will be stored in the Entity dict.
    """
    def __init__(self):
        """
        Build the player: movement state, keyboard bindings, a Bullet
        capsule rigid body attached to the PLAYER node, the camera/GUI,
        and the animated first-person arm model.
        """
        # Linear/angular movement state driven by the arrow-key handlers.
        self.direction = Vec3(0,0,0)
        self.angular_direction = Vec3(0,0,0)
        self.speed = 1
        self.angular_speed = 3
        # Setup Player inventory
        self.playerDataStorage = [] # May change
        ## ADD MOUSE LOOK TASK TO TASKMGR
        #taskMgr.add(self.look, 'camera')
        # Crouch Flag
        self.crouching = False
        # Mouse look
        self.omega = 0.0
        # Setup player input
        self.accept('space', self.jump)
        self.accept('c', self.crouch) # We need to fix the height
        self.accept( "escape",sys.exit )
        self.accept('arrow_up', self.up )
        self.accept('arrow_down', self.down )
        self.accept('arrow_left', self.left )
        self.accept('arrow_right', self.right)
        # Key-up events reverse the movement applied by the key-down ones.
        self.accept("arrow_up-up", self.idle, ["up"])
        self.accept("arrow_down-up", self.idle, ["down"])
        self.accept("arrow_left-up", self.idle, ["left"])
        self.accept("arrow_right-up", self.idle, ["right"])
        #inputState.watchWithModifiers('forward', 'w')
        #inputState.watchWithModifiers('left', 'a')
        #inputState.watchWithModifiers('reverse', 's')
        #inputState.watchWithModifiers('right', 'd')
        #inputState.watchWithModifiers('turnLeft', 'q')
        #inputState.watchWithModifiers('turnRight', 'e')
        #inputState.watchWithModifiers('turnRight', 'e')
        # Camera Setup for player
        # Get the screen size for the camera controller
        self.winXhalf = base.win.getXSize()/2
        self.winYhalf = base.win.getYSize()/2
        ## SETUP CHARACTER AND CHARACTER SHAPE
        # Setup Shape
        # units = meters
        # body height : 1.8 meters
        # eyes line : 1.8 - 0.11 meters = 1.69 meters
        # h is distance between the centers of the 2 spheres
        # w is radius of the spheres
        # 1.8 = 0.3 + 1.2 + 0.3
        # center : 1.8/2 = 0.9
        # camera height : 1.69-0.9 = 0.79
        h = 1.2
        w = 0.3
        # Player needs a different setup, same as the Bullet character
        # controller. At the moment force gets added onto the node,
        # making it behave like a ball.
        shape = BulletCapsuleShape(w, h , ZUp)
        node = BulletRigidBodyNode('Box')
        node.setMass(1.0)
        node.addShape(shape)
        self.node = node
        node.setAngularDamping(10)
        np = GAMEPLAY_NODES['PLAYER'].attachNewNode(node)
        np.setPos(0, 0, 1)
        self.arm = np.attachNewNode('arm')
        self.arm.setPos(0,0,0.2)
        self.np = np
        PHYSICS['WORLD'].attachRigidBody(node)
        #self.character = BulletCharacterControllerNode(shape, 1, 'Player')
        #-------------------------------------------------------------------#
        # PLAYER GRAVITY SETTINGS AND FALL SPEED                             #
        #self.character.setGravity(0.87)
        #self.character.setFallSpeed(0.3)
        #
        #-------------------------------------------------------------------#
        #self.characterNP = GAMEPLAY_NODES['PLAYER'].attachNewNode(self.character)
        #self.characterNP.setPos(0, 0, 2) # May need some tweaking
        #self.characterNP.setCollideMask(BitMask32.allOn())
        # Attach the character to the base _Physics
        #PHYSICS['WORLD'].attachCharacter(self.character)
        # Reparent the camera to the player
        #base.camera.reparentTo(self.np)
        #base.camera.setPos(0,0,0.79)
        #base.camLens.setNearFar(camNear,camFar)
        base.camLens.setFov(90)
        base.disableMouse()
        gui = Crosshair()
        # Load the first-person arm model and find its tagged screen part.
        self.arm = loader.loadModel('../assets/models/test.egg')
        screens = self.arm.findAllMatches('**')
        self.arm_screen = None
        rot = 0
        pos = 0
        for screen in screens :
            if screen.hasTag('screen'):
                self.arm_screen = screen
                rot = screen.getHpr()
                pos = screen.getPos()
                print("rotation"+str(rot))
        # Animated actor for the arm; looped immediately.
        self.actor = Actor('../assets/models/test.egg', {'anim1':'../assets/models/test-Anim0.egg'})
        self.actor.reparentTo(self.np)
        self.actor.loop('anim1')
        self.actor.setPos(.0,-0.1,0.4)
        self.actor.setH(180)
        # Never cull the actor: give it an infinite bound and mark it final.
        self.actor.node().setBounds(OmniBoundingVolume())
        self.actor.node().setFinal(True)
        #self.actor.setTwoSided(True)
        #self.actor.reparentTo(self.world.buffer_system.geom_cam)
        #self.actor.hide(self.world.buffer_system.light_mask)
        # attach smth to hand
        picker = self.actor.exposeJoint(None,"modelRoot","hand_picker")
        arm_bone = self.actor.exposeJoint(None,"modelRoot","screen_picker")
        self.arm_screen.reparentTo(arm_bone)
        self.arm_screen.setH(self.arm_screen.getH()+90)
        self.temp_animate = self.arm_screen
        self.picker = picker
        taskMgr.add(self.update,'update player position')
        # Player Debug:
        #print ""
        #print "Player Character controller settings: "
        #print ""
        #print "Character Gravity: ", self.character.getGravity()
        #print "Character Max Slope: ",self.character.getMaxSlope()
        #print ""
    def up(self):
        """Start moving forward (arrow_up pressed)."""
        self.direction += Vec3(0,1,0)
        self.angular_direction += Vec3(1,0,0)
    def down(self):
        """Start moving backward (arrow_down pressed)."""
        self.direction += Vec3(0,-1,0)
    def left(self):
        """Start strafing left (arrow_left pressed)."""
        self.direction += Vec3(-1,0,0)
    def right(self):
        """Start strafing right (arrow_right pressed)."""
        self.direction += Vec3(1,0,0)
    def idle(self, key):
        """Undo the movement contribution of a released arrow key."""
        if(key == "up"):
            self.direction -= Vec3(0,1,0)
            self.angular_direction -= Vec3(1,0,0)
        elif(key == "down"):
            self.direction -= Vec3(0,-1,0)
        elif(key == "left"):
            self.direction -= Vec3(-1,0,0)
        elif(key == "right"):
            self.direction -= Vec3(1,0,0)
    # Handle player jumping
    def jump(self):
        """Trigger a jump on the (currently disabled) character controller.

        NOTE(review): self.character is commented out in __init__, so this
        will raise AttributeError if invoked — confirm intended state.
        """
        self.character.setMaxJumpHeight(2.3)
        self.character.setJumpSpeed(4.5)
        self.character.doJump()
    # Handle player crouch. <Buged to shit>
    def crouch(self):
        """Toggle crouching by scaling the character node.

        NOTE(review): self.characterNP is commented out in __init__, so this
        will raise AttributeError if invoked — confirm intended state.
        """
        self.crouching = not self.crouching
        sz = self.crouching and 0.6 or 1.0
        #self.character.getShape().setLocalScale(Vec3(1, 1, sz))
        self.characterNP.setScale(Vec3(1, 1, sz) * 0.3048)
        #self.characterNP.setPos(0, 0, -1 * sz)
    # Handle player mouse
    def look(self, task):
        """Mouse-look task (not currently scheduled; see __init__)."""
        dt = globalClock.getDt()
        # Handle mouse
        md = base.win.getPointer(0)
        x = md.getX()
        y = md.getY()
        # Re-centre the pointer each frame and turn the offset into rotation.
        if base.win.movePointer(0, self.winXhalf, self.winYhalf):
            self.omega = (x - self.winXhalf)*-mouseSpeed
            base.camera.setP( (clampScalar(-90,90, base.camera.getP() - (y - self.winYhalf)*0.09)) )
        self.processInput(dt)
        return task.cont
    def update(self,task):
        """Per-frame task: apply movement, mouse-look and camera follow."""
        dt = globalClock.getDt()
        # Move the rigid-body node in its own coordinate space.
        self.np.setPos(self.np,self.direction * dt * self.speed)
        base.camera.setPos(self.np.getPos()+ Vec3(0,0,0.79))
        md = base.win.getPointer(0)
        x = md.getX()
        y = md.getY()
        # Pointer is re-centred every frame; the offset drives pitch/heading.
        if base.win.movePointer(0, self.winXhalf, self.winYhalf):
            base.camera.setP(base.camera.getP() - (y - self.winYhalf)*dt*self.angular_speed)
            self.np.setH(self.np.getH() - (x - self.winXhalf)*dt*self.angular_speed)
        base.camera.setH(self.np.getH())
        base.camera.setR(self.np.getR())
        # Kill any physics-induced spin so the capsule stays upright.
        self.node.setAngularFactor(0)
        self.node.setAngularVelocity(0)
        # Keep the reflection probe glued to the camera.
        BUFFER_SYSTEM['main'].reflection_cube.setPos(base.camera.getPos())
        BUFFER_SYSTEM['main'].reflection_cube.setHpr(base.camera.getHpr())
        return task.cont
    # Handle player input
    def processInput(self, dt):
        """Translate inputState flags into character-controller movement.

        NOTE(review): relies on the commented-out inputState watchers and
        self.character from __init__ — dead path as the code stands.
        """
        print(self.direction)
        speed = Vec3(0, 0, 0)
        #@param PCSpeed: Player move speed under devconfig.py
        if inputState.isSet('forward'): speed.setY( PCSpeed)
        if inputState.isSet('reverse'): speed.setY(-PCSpeed)
        if inputState.isSet('left'):    speed.setX(-PCSpeed)
        if inputState.isSet('right'):   speed.setX( PCSpeed)
        self.character.setAngularMovement(self.omega)
        self.character.setLinearMovement(speed, True)
| {
"content_hash": "9efbf59c3732d0a30485370dd876577e",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 101,
"avg_line_length": 32.90064102564103,
"alnum_prop": 0.5584023380418899,
"repo_name": "MJ-meo-dmt/Ecliptic",
"id": "2c9981cbe31994c120bf55b101dd6c478d083ce4",
"size": "10301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/player.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "60761"
}
],
"symlink_target": ""
} |
from xml.dom import minidom, Node
def disable_urllib3_warning():
    """
    Temporary workaround for urllib3's InsecurePlatformWarning.
    https://urllib3.readthedocs.org/en/latest/security.html#insecurerequestwarning
    """
    try:
        import requests.packages.urllib3
        requests.packages.urllib3.disable_warnings()
    except Exception:
        # Best effort: requests may be absent or too old; silently skip.
        pass
class XMLStore(object):
    """
    XML storage class that can conveniently convert a document to a dict.
    """
    def __init__(self, xmlstring):
        self._raw = xmlstring
        self._doc = minidom.parseString(xmlstring)

    @property
    def xml2dict(self):
        """
        Convert the parsed XML document to a dict.
        """
        root = self._doc.childNodes[0]
        self._remove_whitespace_nodes(root)
        return self._element2dict(root)

    def _element2dict(self, parent):
        """
        Convert the children of a single element to a dict. Repeated
        element-valued children accumulate into a list; text/CDATA-valued
        children map tag name to text.
        """
        result = {}
        for child in parent.childNodes:
            if not isinstance(child, minidom.Element):
                continue
            if not child.hasChildNodes():
                continue
            first = child.childNodes[0]
            if first.nodeType == minidom.Node.ELEMENT_NODE:
                result.setdefault(child.tagName, []).append(
                    self._element2dict(child))
            elif len(child.childNodes) == 1 and first.nodeType in (
                    minidom.Node.CDATA_SECTION_NODE, minidom.Node.TEXT_NODE):
                result[child.tagName] = first.data
        return result

    def _remove_whitespace_nodes(self, node, unlink=True):
        """
        Strip whitespace-only text nodes from the tree under ``node``.
        """
        doomed = []
        for child in node.childNodes:
            blank_text = (child.nodeType == Node.TEXT_NODE
                          and not child.data.strip())
            if blank_text:
                doomed.append(child)
            elif child.hasChildNodes():
                self._remove_whitespace_nodes(child, unlink)
        for blank in doomed:
            blank.parentNode.removeChild(blank)
            if unlink:
                blank.unlink()
def dict2xml(_dict):
    """Serialize a flat dict into a simple ``<xml>...</xml>`` string.

    Keys are emitted in sorted order. Values may be numbers, booleans,
    byte strings (must be valid UTF-8) or text strings.

    Fixed to work on both Python 2 and 3: the old code referenced the
    py2-only ``unicode`` name and raised NameError on Python 3.

    :raises ValueError: for unsupported value types.
    """
    xml_el_tpl = "<{tag}>{value}</{tag}>"
    el_list = []
    for key in sorted(_dict.keys()):
        value = _dict.get(key)
        if isinstance(value, (int, float, bool)):
            value = str(value)
        if isinstance(value, bytes):
            # Round-trip through UTF-8 to reject invalid byte strings early.
            value = value.decode('utf-8')
        if not isinstance(value, str):
            try:
                # Python 2: unicode text is encoded to a UTF-8 byte string,
                # preserving the original return type on that version.
                value = value.encode('utf-8')
            except AttributeError:
                raise ValueError("not support type: %s" % type(value))
        el_list.append(xml_el_tpl.format(tag=key, value=value))
    return "<xml>\n" + "\n".join(el_list) + "\n</xml>"
def xml2dict(xml_str):
    """Parse an XML string and return it as a dict via :class:`XMLStore`."""
    return XMLStore(xml_str).xml2dict
| {
"content_hash": "48b7fff94b8fcbf2d9ebe188ea96cdb5",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 136,
"avg_line_length": 28.103092783505154,
"alnum_prop": 0.5561261922230374,
"repo_name": "kaizengliu/wechat-python-sdk",
"id": "0b111f04921759623f63c5b8b93007cb23d3429b",
"size": "2829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wechat_sdk/lib.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "150682"
}
],
"symlink_target": ""
} |
import ddt
import mock
from manilaclient import api_versions
from manilaclient import extension
from manilaclient.tests.unit import utils
from manilaclient.tests.unit.v2 import fakes
from manilaclient.v2 import share_export_locations
# Register the share_export_locations extension on a fake client that
# records HTTP calls for the assertions in the tests below.
extensions = [
    extension.Extension('share_export_locations', share_export_locations),
]
cs = fakes.FakeClient(extensions=extensions)
@ddt.ddt
class ShareExportLocationsTest(utils.TestCase):
    """Tests for the v2 share export locations API bindings."""
    def _get_manager(self, microversion):
        """Build a ShareExportLocationManager pinned to ``microversion``."""
        version = api_versions.APIVersion(microversion)
        mock_microversion = mock.Mock(api_version=version)
        return (
            share_export_locations.ShareExportLocationManager(
                api=mock_microversion)
        )
    def test_list_of_export_locations(self):
        # Listing should issue GET /shares/<id>/export_locations.
        share_id = '1234'
        cs.share_export_locations.list(share_id)
        cs.assert_called(
            'GET', '/shares/%s/export_locations' % share_id)
    def test_get_single_export_location(self):
        # Fetching one location should issue GET .../export_locations/<uuid>.
        share_id = '1234'
        el_uuid = 'fake_el_uuid'
        cs.share_export_locations.get(share_id, el_uuid)
        cs.assert_called(
            'GET',
            ('/shares/%(share_id)s/export_locations/'
             '%(el_uuid)s') % {
                'share_id': share_id, 'el_uuid': el_uuid})
| {
"content_hash": "38e498ea88ad8040c81bf034c9533b4f",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 74,
"avg_line_length": 30.547619047619047,
"alnum_prop": 0.6508183943881528,
"repo_name": "sniperganso/python-manilaclient",
"id": "614d693ef41e9c0edd579d111d5d997a7ca7a2ab",
"size": "1912",
"binary": false,
"copies": "1",
"ref": "refs/heads/bp/data-service-migration-api",
"path": "manilaclient/tests/unit/v2/test_share_export_locations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "755723"
},
{
"name": "Shell",
"bytes": "11199"
}
],
"symlink_target": ""
} |
import Image
import ImageDraw
import time
from rgbmatrix import Adafruit_RGBmatrix
# Rows and chain length are both required parameters:
matrix = Adafruit_RGBmatrix(32, 2, 3)

# --- Drawing-primitives demo -------------------------------------------
# Render shapes into an off-screen 1-bit image, then scroll it diagonally
# across the panel.  Drawing has no immediate effect on the matrix.
canvas = Image.new("1", (32, 32))  # can be larger than the matrix if wanted
pen = ImageDraw.Draw(canvas)       # declare Draw instance before prims
pen.rectangle((0, 0, 31, 31), fill=0, outline=1)
pen.line((0, 0, 31, 31), fill=1)
pen.line((0, 31, 31, 0), fill=1)
for offset in range(-32, 60):  # start off top-left, move off bottom-right
    matrix.Clear()
    # IMPORTANT: SetImage() takes the low-level image ID, *not* the object.
    matrix.SetImage(canvas.im.id, offset, offset)
    time.sleep(0.05)

# --- Scrolling PNG demo ------------------------------------------------
banner = Image.open("VDuedC0.png")
banner.load()            # must load before SetImage() calls
matrix.Fill(0xffffff)    # fill screen to sky color
for x in range(96, -banner.size[0], -1):  # scroll right to left
    matrix.SetImage(banner.im.id, x, 0)
    time.sleep(0.025)

matrix.Clear()

# --- Sprite animation demo ---------------------------------------------
# Cycle the seven Mega Man frames forever on a white background.
sprite_files = (
    "megaman-sprites/1.png",
    "megaman-sprites/2.png",
    "megaman-sprites/3.png",
    "megaman-sprites/4.png",
    "megaman-sprites/5.png",
    "megaman-sprites/6.png",
    "megaman-sprites/7.png",
)
sprites = [Image.open(name) for name in sprite_files]
matrix.Fill(0xffffff)
for frame in sprites:
    frame.load()
while True:
    for frame in sprites:
        matrix.Fill(0xffffff)
        matrix.SetImage(frame.im.id, 20, 0)
        time.sleep(0.08)
| {
"content_hash": "5b28dfcea793567dc06abbc235d489f6",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 72,
"avg_line_length": 33.2962962962963,
"alnum_prop": 0.7235817575083426,
"repo_name": "atmega168/skeeball-pi",
"id": "2ed4e6d4893a9c1a1f4f155169c77c1542156f2f",
"size": "2427",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rasberry pi/matrixtest2.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "1490"
},
{
"name": "C++",
"bytes": "47926"
},
{
"name": "Makefile",
"bytes": "711"
},
{
"name": "Python",
"bytes": "15907"
}
],
"symlink_target": ""
} |
import six
import os
from st2actions.runners.pythonrunner import Action
from st2client.client import Client
from st2common.util import jinja as jinja_utils
class FormatResultAction(Action):
    """Render an execution's result through a Jinja template for ChatOps.

    Uses the alias attached to the execution (when present) to pick the
    template and any extra parameters; otherwise falls back to the bundled
    default template.
    """

    def __init__(self, config=None, action_service=None):
        super(FormatResultAction, self).__init__(config=config,
                                                 action_service=action_service)
        # API endpoint/credentials are injected via the action environment.
        self.client = Client(
            api_url=os.environ.get('ST2_ACTION_API_URL', None),
            token=os.environ.get('ST2_ACTION_AUTH_TOKEN', None))
        self.jinja = jinja_utils.get_jinja_environment(allow_undefined=True)
        self.jinja.tests['in'] = lambda item, list: item in list
        template_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'templates/default.j2')
        with open(template_file, 'r') as f:
            self.default_template = f.read()

    def run(self, execution_id):
        """Return {'message': rendered text[, 'extra': rendered params]}."""
        execution = self._get_execution(execution_id)
        context = {'six': six, 'execution': execution}
        template = self.default_template
        result = {}

        alias_id = execution['context'].get('action_alias_ref', {}).get('id', None)
        if alias_id:
            alias = self.client.managers['ActionAlias'].get_by_id(alias_id)
            context['alias'] = alias
            result_params = getattr(alias, 'result', None)
            if result_params:
                if not result_params.get('enabled', True):
                    raise Exception("Output of this template is disabled.")
                if 'format' in alias.result:
                    template = alias.result['format']
                if 'extra' in alias.result:
                    result['extra'] = jinja_utils.render_values(
                        alias.result['extra'], context)

        result['message'] = self.jinja.from_string(template).render(context)
        return result

    def _get_execution(self, execution_id):
        """Fetch an execution by id as a dict, or None when not found."""
        if not execution_id:
            raise ValueError('Invalid execution_id provided.')
        execution = self.client.liveactions.get_by_id(id=execution_id)
        if not execution:
            return None
        # Trim the bulky trigger/liveaction payloads before templating.
        return execution.to_dict(exclude_attributes=[
            "trigger", "trigger_type", "trigger_instance", "liveaction"])
| {
"content_hash": "d487c03f22b16f9b45bb98b0a7494c00",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 95,
"avg_line_length": 39.152542372881356,
"alnum_prop": 0.6060606060606061,
"repo_name": "emedvedev/st2",
"id": "26462d7687897ed85f625b05756c6b4b0472c91b",
"size": "2310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/chatops/actions/format_execution_result.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "41694"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "3717722"
},
{
"name": "Shell",
"bytes": "38637"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
"""
Music package
@author: Ronny Andersson (ronny@andersson.tk)
@copyright: (c) 2016 Ronny Andersson
@license: MIT
"""
# Local folders
from . import scales, spn
# Public submodules re-exported by ``zignal.music``.
__all__ = [
    'scales',
    'spn',
]
| {
"content_hash": "863408414523148492df70d929201920",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 45,
"avg_line_length": 13.8,
"alnum_prop": 0.6231884057971014,
"repo_name": "ronnyandersson/zignal",
"id": "98cfcd2eb9e41044b5b8ee16f9f955e488ce0310",
"size": "207",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "zignal/music/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "318"
},
{
"name": "Python",
"bytes": "181521"
}
],
"symlink_target": ""
} |
import roslib; roslib.load_manifest('hrl_pr2_lib')
import rospy
import kinematics_msgs.srv as ks
import hrl_lib.tf_utils as tfu
import tf
import numpy as np
import pdb
class PR2ArmKinematics:
def __init__(self, arm, listener):
self.tflistener = listener
rospy.loginfo('PR2ArmKinematics: waiting for services for %s arm ' % arm)
if arm == 'right':
self.tflistener.waitForTransform('r_gripper_tool_frame', 'r_wrist_roll_link', rospy.Time(), rospy.Duration(10))
else:
self.tflistener.waitForTransform('l_gripper_tool_frame', 'l_wrist_roll_link', rospy.Time(), rospy.Duration(10))
# Forward kinematics
rospy.wait_for_service('pr2_' + arm + '_arm_kinematics/get_fk_solver_info')
rospy.wait_for_service('pr2_' + arm + '_arm_kinematics/get_fk')
rospy.loginfo('PR2ArmKinematics: forward kinematics services online.')
self._fk_info = rospy.ServiceProxy('pr2_' + arm + '_arm_kinematics/get_fk_solver_info', ks.GetKinematicSolverInfo )
self._fk = rospy.ServiceProxy('pr2_' + arm + '_arm_kinematics/get_fk', ks.GetPositionFK, persistent=True)
self.fk_info_resp = self._fk_info()
self.joint_names = self.fk_info_resp.kinematic_solver_info.joint_names
print 'PR2ArmKinematics: number of joints', len(self.joint_names)
# Inverse kinematics
rospy.wait_for_service("pr2_" + arm + "_arm_kinematics/get_ik_solver_info")
rospy.wait_for_service("pr2_" + arm + "_arm_kinematics/get_ik")
rospy.loginfo('PR2ArmKinematics: inverse kinematics services online.')
self._ik_info = rospy.ServiceProxy('pr2_' + arm +'_arm_kinematics/get_ik_solver_info', ks.GetKinematicSolverInfo)
self._ik = rospy.ServiceProxy('pr2_' + arm +'_arm_kinematics/get_ik', ks.GetPositionIK, persistent=True)
self.ik_info_resp = self._ik_info()
self.ik_joint_names = self.ik_info_resp.kinematic_solver_info.joint_names
if arm == 'left':
self.ik_frame = 'l_wrist_roll_link'
self.tool_frame = 'l_gripper_tool_frame'
else:
self.ik_frame = 'r_wrist_roll_link'
self.tool_frame = 'r_gripper_tool_frame'
##
# Inverse kinematics
# @param cart_pose a 4x4 SE(3) pose
# @param frame_of_pose frame cart_pose is given in, if None we assume that self.tool_frame is being used
# @param seed starting solution for IK solver (list of floats or column np.matrix of floats)
def ik(self, cart_pose, frame_of_pose='torso_lift_link', seed=None):
#if frame_of_pose == self.tool_frame or frame_of_pose == None:
self.tflistener.waitForTransform(self.ik_frame, self.tool_frame, rospy.Time(), rospy.Duration(10))
#wr_T_toolframe = tfu.transform(sol_link, self.tool_frame, self.tflistener)
#solframe_T_wr * wr_T_toolframe
#print 'undoing'
toolframe_T_ikframe = tfu.transform(self.tool_frame, self.ik_frame, self.tflistener)
cart_pose = cart_pose * toolframe_T_ikframe
#frame_of_pose = self.tool_frame
trans, rot = tfu.matrix_as_tf(cart_pose)
ik_req = ks.GetPositionIKRequest()
ik_req.timeout = rospy.Duration(5.0)
ik_req.ik_request.ik_link_name = self.ik_frame
#set pose
ik_req.ik_request.pose_stamped.header.frame_id = frame_of_pose
ik_req.ik_request.pose_stamped.pose.position.x = trans[0]#cart_pose[0][0,0]
ik_req.ik_request.pose_stamped.pose.position.y = trans[1]#cart_pose[0][1,0]
ik_req.ik_request.pose_stamped.pose.position.z = trans[2]#cart_pose[0][2,0]
ik_req.ik_request.pose_stamped.pose.orientation.x = rot[0]#cart_pose[1][0,0];
ik_req.ik_request.pose_stamped.pose.orientation.y = rot[1]#cart_pose[1][1,0];
ik_req.ik_request.pose_stamped.pose.orientation.z = rot[2]#cart_pose[1][2,0];
ik_req.ik_request.pose_stamped.pose.orientation.w = rot[3]#cart_pose[1][3,0];
#seed solver
ik_req.ik_request.ik_seed_state.joint_state.name = self.ik_joint_names
if seed == None:
p = []
for i in range(len(self.ik_joint_names)):
minp = self.ik_info_resp.kinematic_solver_info.limits[i].min_position
maxp = self.ik_info_resp.kinematic_solver_info.limits[i].max_position
p.append((minp + maxp) / 2.0)
ik_req.ik_request.ik_seed_state.joint_state.position = p
else:
if seed.__class__ == np.matrix:
seed = seed.T.A1.tolist()
ik_req.ik_request.ik_seed_state.joint_state.position = seed
response = self._ik(ik_req)
if response.error_code.val == response.error_code.SUCCESS:
#print 'success'
return np.matrix(response.solution.joint_state.position).T
else:
#print 'fail', response.__class__, response
print response
return None
##
# Forward Kinematics
# @param joint_poses_mat nx1 matrix of joint positions
# @param frame frame to give solution in
# @param sol_link link to solve FK for
# @param use_tool_frame FK doesn't account for length of tool frame (PR2 gripper),
# if sol_link is the wrist link then will return the
# gripper's FK.
# @return a 4x4 SE(3) matrix
def fk(self, joint_poses_mat, frame='torso_lift_link', sol_link=None, use_tool_frame=True):
if sol_link == None:
sol_link = self.ik_frame
fk_req = ks.GetPositionFKRequest()
fk_req.header.frame_id = frame
fk_req.fk_link_names = [sol_link]
fk_req.robot_state.joint_state.name = self.fk_info_resp.kinematic_solver_info.joint_names
fk_req.robot_state.joint_state.position = joint_poses_mat.T.A1.tolist()
fk_resp = self._fk(fk_req)
solframe_T_wr = tfu.pose_as_matrix(fk_resp.pose_stamped[0].pose)
if not use_tool_frame:
return solframe_T_wr
else:
#print 'redoing'
self.tflistener.waitForTransform(self.tool_frame, sol_link, rospy.Time(), rospy.Duration(10))
wr_T_toolframe = tfu.transform(sol_link, self.tool_frame, self.tflistener)
return solframe_T_wr * wr_T_toolframe
class PR2Kinematics:
    """Convenience container holding kinematics helpers for both PR2 arms."""

    def __init__(self, tflistener=None):
        """
        @param tflistener optional shared tf.TransformListener; one is
               created when not supplied.
        """
        # init_node raises if a node already exists in this process; that
        # is fine -- we only need *some* node for the service proxies.
        # (Unused exception binding removed; the old `except Exc, e`
        # spelling was Python-2-only syntax.)
        try:
            rospy.init_node('kinematics', anonymous=True)
        except rospy.exceptions.ROSException:
            pass

        if tflistener is None:
            self.tflistener = tf.TransformListener()
        else:
            self.tflistener = tflistener
        self.left = PR2ArmKinematics('left', self.tflistener)
        self.right = PR2ArmKinematics('right', self.tflistener)
if __name__ == '__main__':
    # Smoke test: round-trip the current left-arm pose through FK and IK
    # and print both so the results can be compared by eye.
    import pr2
    import pdb
    robot = pr2.PR2()
    pose = robot.left.pose()
    k = PR2Kinematics(robot.tf_listener)
    cart = k.left.fk(pose)
    print 'cart pose\n', cart
    print 'real pose', pose.T
    # IK should recover (approximately) the original joint angles.
    ik_sol = k.left.ik(cart, 'torso_lift_link').T
    print 'ik pose', ik_sol
    print 'ik cart\n', k.left.fk(ik_sol)
| {
"content_hash": "74a7c04a452885efb3713f28977a49f7",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 130,
"avg_line_length": 45.24683544303797,
"alnum_prop": 0.6219051615610575,
"repo_name": "gt-ros-pkg/hrl-pr2",
"id": "b36f21e682dae7aae3def2af920e2976e68dad71",
"size": "7149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hrl_pr2_lib/src/hrl_pr2_lib/pr2_kinematics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "128659"
},
{
"name": "Shell",
"bytes": "472"
}
],
"symlink_target": ""
} |
"""
Helper functions to verify `JWT`_ (JSON Web Token) objects.
Some are specific to Mozilla Marketplace payments, others are more generic.
.. _`JWT`: http://openid.net/specs/draft-jones-json-web-token-07.html
"""
import calendar
from datetime import datetime
import json
import sys
import time
import jwt
from .exc import InvalidJWT, RequestExpired
def verify_jwt(signed_request, expected_aud, secret, validators=(),
               required_keys=('request.pricePoint',
                              'request.name',
                              'request.description',
                              'response.transactionID')):
    """
    Verifies a postback/chargeback JWT.

    Returns the trusted JSON data from the original request.

    When there's an error, an exception derived from
    :class:`moz_inapp_pay.exc.InvalidJWT`
    will be raised.

    This is an all-in-one function that does all verification you'd
    need. There are some low-level functions you can use to just
    verify certain parts of a JWT.

    Arguments:

    **signed_request**
        JWT byte string.

    **expected_aud**
        The expected value for the aud (audience) of the JWT.
        See :func:`moz_inapp_pay.verify.verify_audience`.

    **secret**
        A shared secret to validate the JWT with.
        See :func:`moz_inapp_pay.verify.verify_sig`.

    **validators**
        A sequence of extra callables. Each one is passed a JSON Python dict
        representing the JWT after it has passed all other checks.
        (The default was changed from a mutable ``[]`` to an immutable
        tuple to avoid the shared-mutable-default pitfall.)

    **required_keys**
        A sequence of JWT keys to validate. See
        :func:`moz_inapp_pay.verify.verify_keys`.
    """
    issuer = _get_issuer(signed_request=signed_request)
    # Order matters: verify the signature first (establishing trust in the
    # payload), then claims, audience, required keys, and finally any
    # caller-supplied validators.
    app_req = verify_sig(signed_request, secret, issuer=issuer)
    verify_claims(app_req, issuer=issuer)
    verify_audience(app_req, expected_aud, issuer=issuer)
    verify_keys(app_req, required_keys, issuer=issuer)
    for vl in validators:
        vl(app_req)
    return app_req
def verify_audience(app_req, expected_aud, issuer=None):
    """
    Verify JWT aud (audience).

    When aud is not found or doesn't match expected_aud,
    :class:`moz_inapp_pay.exc.InvalidJWT`
    is raised.  Otherwise the valid audience is returned.
    """
    if not issuer:
        issuer = _get_issuer(app_req=app_req)
    audience, = verify_keys(app_req, ['aud'])
    if audience == expected_aud:
        return audience
    raise InvalidJWT('JWT aud (audience) must be set to %r; '
                     'got: %r' % (expected_aud, audience),
                     issuer=issuer)
def verify_claims(app_req, issuer=None):
    """
    Verify JWT claims.
    All times must be UTC unix timestamps.
    These claims will be verified:
    - iat: issued at time. If JWT was issued more than an hour ago it is
    rejected.
    - exp: expiration time.
    - nbf: not before time. This is padded with 5 minutes for clock skew.
    This field is *optional*, leaving it out is not an error.
    All exceptions are derived from
    :class:`moz_inapp_pay.exc.InvalidJWT`.
    For expirations a
    :class:`moz_inapp_pay.exc.RequestExpired`
    exception will be raised.
    """
    if not issuer:
        issuer = _get_issuer(app_req=app_req)
    try:
        # float(str(...)) also turns a missing claim (None) into a
        # ValueError, so absent exp/iat are rejected here too.
        expires = float(str(app_req.get('exp')))
        issued = float(str(app_req.get('iat')))
    except ValueError:
        _re_raise_as(InvalidJWT,
                     'JWT had an invalid exp (%r) or iat (%r) '
                     % (app_req.get('exp'), app_req.get('iat')),
                     issuer=issuer)
    now = calendar.timegm(time.gmtime())
    if expires < now:
        raise RequestExpired('JWT expired: %s UTC < %s UTC '
                             '(issued at %s UTC)'
                             % (datetime.utcfromtimestamp(expires),
                                datetime.utcfromtimestamp(now),
                                datetime.utcfromtimestamp(issued)),
                             issuer=issuer)
    if issued < (now - 3600):  # issued more than an hour ago
        raise RequestExpired('JWT iat expired: %s UTC < %s UTC '
                             % (datetime.utcfromtimestamp(issued),
                                datetime.utcfromtimestamp(now)),
                             issuer=issuer)
    try:
        not_before = float(str(app_req.get('nbf')))
    except ValueError:
        # nbf is optional; normalize an absent/invalid value to None.
        app_req['nbf'] = None  # this field is optional
    else:
        about_now = now + 300  # pad 5 minutes for clock skew
        if not_before >= about_now:
            raise InvalidJWT('JWT cannot be processed before '
                             '%s UTC (nbf must be < %s UTC)'
                             % (datetime.utcfromtimestamp(not_before),
                                datetime.utcfromtimestamp(about_now)),
                             issuer=issuer)
def verify_keys(app_req, required_keys, issuer=None):
    """
    Verify all JWT object keys listed in required_keys.

    Each required key is specified as a dot-separated path.
    The key values are returned as a list ordered by how
    you specified them.

    Take this JWT for example::

        {
            "iss": "...",
            "aud": "...",
            "request": {
                "pricePoint": 1,
            }
        }

    You could verify the presence of all keys and retrieve
    their values like this::

        iss, aud, price = verify_keys(jwt_dict,
                                      ('iss',
                                       'aud',
                                       'request.pricePoint'))

    Note: a key whose value is falsy (0, "", False, None) is treated
    as missing and raises InvalidJWT.
    """
    if not issuer:
        issuer = _get_issuer(app_req=app_req)
    values = []
    for key_path in required_keys:
        node = app_req
        for part in key_path.split('.'):
            if not isinstance(node, dict):
                raise InvalidJWT('JWT is missing %r: %s is not a dict'
                                 % (key_path, part), issuer=issuer)
            node = node.get(part, None)
            if not node:
                raise InvalidJWT('JWT is missing %r: %s is not a valid key'
                                 % (key_path, part), issuer=issuer)
        values.append(node)  # last value along key_path
    return values
def verify_sig(signed_request, secret, issuer=None):
    """
    Verify the JWT signature.
    Given a raw JWT, this verifies it was signed with
    *secret*, decodes it, and returns the JSON dict.
    """
    if not issuer:
        issuer = _get_issuer(signed_request=signed_request)
    signed_request = _to_bytes(signed_request)
    # Decode the payload (unverified) first so it can be returned once the
    # signature check below succeeds.
    app_req = _get_json(signed_request)

    # Check signature.
    try:
        jwt.decode(signed_request, secret, verify=True)
    except jwt.DecodeError, exc:  # Python 2 except-comma syntax
        _re_raise_as(InvalidJWT,
                     'Signature verification failed: %s' % exc,
                     issuer=issuer)
    return app_req
def _get_json(signed_request):
    """Decode a JWT *without* signature verification; return its payload
    as a Python dict."""
    signed_request = _to_bytes(signed_request)
    try:
        app_req = jwt.decode(signed_request, verify=False)
    except jwt.DecodeError, exc:  # Python 2 except-comma syntax
        _re_raise_as(InvalidJWT, 'Invalid JWT: %s' % exc)
    # The decoded payload may still be a JSON string here (presumably an
    # older PyJWT behavior -- confirm); parse it ourselves in that case.
    if not isinstance(app_req, dict):
        try:
            app_req = json.loads(app_req)
        except ValueError, exc:
            _re_raise_as(InvalidJWT,
                         'Invalid JSON for JWT: %s' % exc)
    return app_req
def _get_issuer(signed_request=None, app_req=None):
if not app_req:
if not signed_request:
raise TypeError('need either signed_request or app_req')
app_req = _get_json(signed_request)
# Check JWT issuer.
issuer = app_req.get('iss', None)
if not issuer:
raise InvalidJWT('Payment JWT is missing iss (issuer)')
return issuer
def _to_bytes(signed_request):
    """Coerce the JWT to a byte string; re-raise non-ascii input as
    InvalidJWT (a JWT must be base64-encoded ascii)."""
    try:
        return str(signed_request)  # must be base64 encoded bytes
    except UnicodeEncodeError, exc:  # Python 2 except-comma syntax
        _re_raise_as(InvalidJWT,
                     'Non-ascii payment JWT: %s' % exc)
def _re_raise_as(NewExc, *args, **kw):
    """Raise a new exception using the preserved traceback of the last one."""
    # Python 2 three-expression raise: re-raise as NewExc while keeping the
    # original traceback so debugging points at the true failure site.
    etype, val, tb = sys.exc_info()
    raise NewExc(*args, **kw), None, tb
| {
"content_hash": "9ab1e40f2ec2376276b3be32da8a3dbc",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 78,
"avg_line_length": 32.49603174603175,
"alnum_prop": 0.5733300769324704,
"repo_name": "kumar303/moz_inapp_pay",
"id": "773b81d30fd8fdf83cf03b3200f7eaf1ebf888d7",
"size": "8189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moz_inapp_pay/verify.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "27325"
}
],
"symlink_target": ""
} |
"""Replay a log file with multiprocessing."""
from __future__ import with_statement
import logging
from multiprocessing.dummy import Pool
import threading
from replay import mapc_time
from apache_replay import generate, request
# Shared worker pool; multiprocessing.dummy backs Pool with threads.
POOL = Pool(10)

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    # Replay each parsed log entry asynchronously; mapc_time paces the
    # calls according to the timestamps in the log.
    mapc_time(lambda *args: POOL.apply_async(request, args),
              generate('apache-access.log'))
    POOL.close()
    POOL.join()
| {
"content_hash": "767044d7b8316908d0d6582a585ec440",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 60,
"avg_line_length": 24.210526315789473,
"alnum_prop": 0.7021739130434783,
"repo_name": "ieure/Replay",
"id": "40613ce627fc8ccdd3e100f94081cc7f7afc18b0",
"size": "560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/apache_multiprocess_replay.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4333"
}
],
"symlink_target": ""
} |
import logging
import os.path
from importlib.machinery import ModuleSpec
from importlib.util import module_from_spec
from urllib.parse import urlunparse
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.sitemaps.views import sitemap
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.core.files.storage import default_storage
from django.http import HttpResponseForbidden
from django.shortcuts import redirect, render
from django.template import RequestContext, TemplateDoesNotExist
from django.urls import include, path, reverse_lazy
from django.utils.deprecation import MiddlewareMixin
from guardian.conf import settings as guardian_settings
from mptt.utils import tree_item_iterator
from touchtechnology.common.default_settings import SITEMAP_HTTPS_OPTION, SITEMAP_ROOT
from touchtechnology.common.models import SitemapNode
from touchtechnology.common.sitemaps import NodeSitemap
from touchtechnology.common.sites import AccountsSite
from touchtechnology.common.views import login
from touchtechnology.content.models import Redirect
from touchtechnology.content.views import dispatch
# Cache key and TTL for the dehydrated urlpattern skeletons built in
# SitemapNodeMiddleware.process_request below.
DEHYDRATED_URLPATTERNS_KEY = "urlpatterns"
DEHYDRATED_URLPATTERNS_TIMEOUT = 600
logger = logging.getLogger(__name__)
# Hidden accounts site mounted at "p/" so protected pages can bounce
# users through a login flow.
protect = AccountsSite(name="protect")
class SitemapNodeMiddleware(MiddlewareMixin):
    """Build a per-request urlconf from the SitemapNode tree and enforce
    per-node access restrictions (group membership, require-HTTPS)."""

    def process_request(self, request):
        """Attach a dynamically generated ``request.urlconf`` module whose
        urlpatterns mirror the enabled SitemapNode tree (skeleton cached)."""
        # When working with multiple tenants, we want to shard the cache for
        # each of them. Use of the version is a nice way to do this as it will
        # prevent collisions while making the API consistent.
        v_kw = {}
        if hasattr(request, "tenant"):
            v_kw.setdefault("version", request.tenant.schema_name)

        # TODO write a cache backend that will do this automatically, and
        # contribute it back to django-tenant-schemas
        dehydrated = cache.get(DEHYDRATED_URLPATTERNS_KEY, [], **v_kw)

        if not dehydrated:
            logging.getLogger("newrelic.cache").debug("RECALCULATE URLCONF")
            # We need a secret set of account urls so we can bounce the user
            # here if the page is protected. As we haven't yet embedded our
            # url conf (we're in the middle of building it!) this will need
            # to be found using a reverse_lazy below.
            try:
                root = SitemapNode._tree_manager.root_nodes().first()
            except ObjectDoesNotExist:
                root = None
            dehydrated.append(
                {"route": "p/", "site": protect, "kwargs": {"node": root}}
            )

            enabled_nodes = SitemapNode._tree_manager.all()
            related_nodes = enabled_nodes.select_related("content_type")

            def has_disabled_ancestors(st):
                # True when any ancestor (from tree_item_iterator's
                # structure dict) is disabled; such nodes are omitted.
                for ancestor in st["ancestors"]:
                    if not ancestor.enabled:
                        return True
                return False

            def get_absolute_url(n, st):
                # Join ancestor slugs into a URL path, skipping the special
                # SITEMAP_ROOT slug when it heads the ancestor list.
                assert not n.is_root_node()
                offset = 1 if st["ancestors"][0].slug == SITEMAP_ROOT else 0
                paths = [ancestor.slug for ancestor in st["ancestors"][offset:]]
                if paths:
                    return os.path.join(os.path.join(*paths), n.slug)
                return n.slug

            for node, struct in tree_item_iterator(related_nodes, True, lambda x: x):
                # Skip over nodes that they themselves or have disabled ancestors.
                if not node.enabled:
                    logger.debug("%r is disabled, omit from urlconf", node)
                    continue
                if has_disabled_ancestors(struct):
                    logger.debug("%r has disabled ancestor, omit from urlconf", node)
                    continue

                if node.is_root_node() and node.slug == SITEMAP_ROOT:
                    part = ""
                elif node.is_root_node():
                    part = node.slug
                else:
                    part = get_absolute_url(node, struct)
                if part and settings.APPEND_SLASH:
                    part += "/"

                if (
                    node.content_type is not None
                    and node.content_type.model == "placeholder"
                ):
                    # Placeholder nodes mount a whole application site.
                    try:
                        app = node.object.site(node)
                    except (AttributeError, ImportError, ValueError):
                        logger.exception(
                            "Application is unavailable, disabling this node."
                        )
                        node.disable()
                    else:
                        pattern = {
                            "route": part,
                            "site": app,
                            "kwargs": dict(node=node, **node.kwargs),
                            "name": app.name,
                        }
                        # When nesting applications we need to ensure that any
                        # root url is not clobbered by the patterns of the
                        # parent application. In these cases, force them to the
                        # top of the map.
                        if (
                            node.parent
                            and node.parent.content_type
                            and node.parent.content_type.model == "placeholder"
                        ):
                            dehydrated.insert(0, pattern)
                        else:
                            dehydrated.append(pattern)
                elif node.object_id is None:
                    # Folder node: no attached object, dispatch by node only.
                    dehydrated.append(
                        {
                            "route": part,
                            "view": dispatch,
                            "kwargs": dict(node=node, url=part),
                            "name": f"folder_{node.pk}",
                        }
                    )
                else:
                    # Page node: dispatch with the attached object's id.
                    dehydrated.append(
                        {
                            "route": part,
                            "view": dispatch,
                            "kwargs": dict(page_id=node.object_id, node=node, url=part),
                            "name": f"page_{node.object_id if node.object_id else None}",
                        }
                    )

            cache.set(
                DEHYDRATED_URLPATTERNS_KEY,
                dehydrated,
                timeout=DEHYDRATED_URLPATTERNS_TIMEOUT,
                **v_kw,
            )

        # Always start with the project wide ROOT_URLCONF and add our sitemap.xml view
        urlpatterns = [
            path(
                "sitemap.xml",
                sitemap,
                {"sitemaps": {"nodes": NodeSitemap}},
                name="sitemap",
            ),
            path("", include(settings.ROOT_URLCONF)),
        ]

        # Construct the cache of url pattern definitions. We are not keeping
        # the actual patterns, because pickling is problematic for the .url
        # instancemethod - instead we keep the skeleton and build it on the
        # fly from cache... rehydrating it ;)
        for node in dehydrated:
            try:
                pattern = path(
                    node["route"],
                    node["view"],
                    node["kwargs"],
                    name=node.get("name"),
                )
            except KeyError:
                # No "view" key means this entry mounts a site's urls.
                pattern = path(
                    node["route"],
                    node["site"].urls,
                    node["kwargs"],
                    name=node["site"].name,
                )
            urlpatterns.append(pattern)

        # Create a new module on the fly and attach the rehydrated urlpatterns
        dynamic_urls = module_from_spec(ModuleSpec("dynamic_urls", None))
        dynamic_urls.urlpatterns = urlpatterns

        # Attach the module to the request
        request.urlconf = dynamic_urls

    def process_view(self, request, view_func, view_args, view_kwargs):
        """Enforce group restrictions and the require-HTTPS flag for the
        SitemapNode resolved for this view (if any)."""
        node = view_kwargs.get("node")

        if node:
            required = node.restrict_to_groups.all()
            if required:
                next = node.get_absolute_url()
                to = reverse_lazy("accounts:login")
                # An anonymous user will never be a member of a group, so make
                # them go off and be authenticated.
                # NOTE(review): is_anonymous is a property (not a method) in
                # Django >= 1.10 -- confirm the targeted Django version.
                if request.user.is_anonymous():
                    return login(request, to=to, next=next)
                # A user who is not a member of a suitable group should get a
                # 403 page.
                groups = request.user.groups.all()
                if set(required).difference(groups):
                    if guardian_settings.RENDER_403:
                        try:
                            response = render(
                                request,
                                guardian_settings.TEMPLATE_403,
                                {},
                                RequestContext(request),
                            )
                            response.status_code = 403
                            return response
                        except TemplateDoesNotExist as e:
                            if settings.DEBUG:
                                raise e
                    elif guardian_settings.RAISE_403:
                        raise PermissionDenied
                    return HttpResponseForbidden()

        # Optionally force HTTPS for nodes flagged require_https.
        if (
            SITEMAP_HTTPS_OPTION
            and node
            and node.require_https
            and not request.is_secure()
        ):
            host = request.META.get("HTTP_HOST")
            path = request.META.get("PATH_INFO")
            redirect_to = urlunparse(("https", host, path, "", "", ""))
            return redirect(redirect_to)
def redirect_middleware(get_response):
    """Middleware factory: serve configured Redirect rows.

    When the request path exactly matches a Redirect's source_url, issue
    the configured (possibly permanent) redirect; otherwise continue down
    the middleware chain.
    """
    def middleware(request):
        try:
            rule = Redirect.objects.get(source_url__exact=request.path)
        except ObjectDoesNotExist:
            return get_response(request)
        return redirect(rule.destination_url, rule.permanent)
    return middleware
| {
"content_hash": "f16ff888f90506963ee3e60af4207b55",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 89,
"avg_line_length": 40.17120622568093,
"alnum_prop": 0.519953506392871,
"repo_name": "goodtune/vitriolic",
"id": "5fe17665799aba3383fd47a83985a499813a474e",
"size": "10324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "touchtechnology/content/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "307509"
},
{
"name": "HTML",
"bytes": "273967"
},
{
"name": "JavaScript",
"bytes": "626908"
},
{
"name": "Less",
"bytes": "1373"
},
{
"name": "Makefile",
"bytes": "369"
},
{
"name": "Python",
"bytes": "962353"
},
{
"name": "Shell",
"bytes": "1490"
},
{
"name": "XSLT",
"bytes": "3510"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import collections
import logging
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from novaclient import api_versions
from novaclient import client as nova_client
from novaclient import exceptions as nova_exceptions
from novaclient.v2 import instance_action as nova_instance_action
from novaclient.v2 import list_extensions as nova_list_extensions
from novaclient.v2 import servers as nova_servers
from horizon import exceptions as horizon_exceptions
from horizon.utils import functions as utils
from horizon.utils.memoized import memoized
from horizon.utils.memoized import memoized_with_request
from openstack_dashboard.api import base
from openstack_dashboard.api import microversions
from openstack_dashboard.contrib.developer.profiler import api as profiler
LOG = logging.getLogger(__name__)
# Supported compute versions
VERSIONS = base.APIVersionManager("compute", preferred_version=2)
VERSIONS.load_supported_version(1.1, {"client": nova_client, "version": 1.1})
VERSIONS.load_supported_version(2, {"client": nova_client, "version": 2})

# API static values
INSTANCE_ACTIVE_STATE = 'ACTIVE'
VOLUME_STATE_AVAILABLE = "available"
DEFAULT_QUOTA_NAME = 'default'
# TLS verification knobs read from Django settings.
INSECURE = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
CACERT = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
def get_microversion(request, feature):
    """Return the novaclient APIVersion required for *feature*.

    Queries the server's supported microversion range and asks the
    microversions registry for a matching version (None when the
    deployment cannot serve the feature).
    """
    min_ver, max_ver = api_versions._get_server_version_range(
        novaclient(request))
    return microversions.get_microversion_for_feature(
        'nova', feature, api_versions.APIVersion, min_ver, max_ver)
def is_feature_available(request, feature):
    """True when the deployment supports the microversion *feature* needs."""
    microversion = get_microversion(request, feature)
    return bool(microversion)
class VNCConsole(base.APIDictWrapper):
    """Wrapper for the "console" dictionary.
    Returned by the novaclient.servers.get_vnc_console method.
    """
    # Keys surfaced from the underlying dict.
    _attrs = ['url', 'type']
class SPICEConsole(base.APIDictWrapper):
    """Wrapper for the "console" dictionary.
    Returned by the novaclient.servers.get_spice_console method.
    """
    # Keys surfaced from the underlying dict.
    _attrs = ['url', 'type']
class RDPConsole(base.APIDictWrapper):
    """Wrapper for the "console" dictionary.
    Returned by the novaclient.servers.get_rdp_console method.
    """
    # Keys surfaced from the underlying dict.
    _attrs = ['url', 'type']
class SerialConsole(base.APIDictWrapper):
    """Wrapper for the "console" dictionary.
    Returned by the novaclient.servers.get_serial_console method.
    """
    # Keys surfaced from the underlying dict.
    _attrs = ['url', 'type']
class Server(base.APIResourceWrapper):
    """Simple wrapper around novaclient.server.Server.
    Preserves the request info so image name can later be retrieved.
    """
    _attrs = ['addresses', 'attrs', 'id', 'image', 'links',
              'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid',
              'image_name', 'VirtualInterfaces', 'flavor', 'key_name', 'fault',
              'tenant_id', 'user_id', 'created', 'locked',
              'OS-EXT-STS:power_state', 'OS-EXT-STS:task_state',
              'OS-EXT-SRV-ATTR:instance_name', 'OS-EXT-SRV-ATTR:host',
              'OS-EXT-AZ:availability_zone', 'OS-DCF:diskConfig']

    def __init__(self, apiresource, request):
        super(Server, self).__init__(apiresource)
        # Keep the request around so image_name can call Glance later.
        self.request = request

    # TODO(gabriel): deprecate making a call to Glance as a fallback.
    @property
    def image_name(self):
        """Name of the server's image, or None.

        Tries the image object/dict first; falls back to a Glance lookup
        by id (result is memoized into self.image['name']).
        """
        # Imported lazily so this module does not hard-depend on Glance.
        import glanceclient.exc as glance_exceptions
        from openstack_dashboard.api import glance

        if not self.image:
            return None
        if hasattr(self.image, 'name'):
            return self.image.name
        if 'name' in self.image:
            return self.image['name']
        else:
            try:
                image = glance.image_get(self.request, self.image['id'])
                self.image['name'] = image.name
                return image.name
            except (glance_exceptions.ClientException,
                    horizon_exceptions.ServiceCatalogException):
                # Cache the failure so we do not re-query Glance each time.
                self.image['name'] = None
                return None

    @property
    def internal_name(self):
        # Hypervisor-level instance name (e.g. instance-0000...).
        return getattr(self, 'OS-EXT-SRV-ATTR:instance_name', "")

    @property
    def availability_zone(self):
        return getattr(self, 'OS-EXT-AZ:availability_zone', "")

    @property
    def host_server(self):
        # Compute host the instance runs on (admin-only field).
        return getattr(self, 'OS-EXT-SRV-ATTR:host', '')
class Hypervisor(base.APIDictWrapper):
    """Simple wrapper around novaclient.hypervisors.Hypervisor."""
    _attrs = ['manager', '_loaded', '_info', 'hypervisor_hostname', 'id',
              'servers']
    @property
    def servers(self):
        """Instances hosted on this hypervisor (empty list if none).

        The underlying API object only carries a ``servers`` attribute
        when the hypervisor actually hosts instances, so default to an
        empty list instead of raising.  Using ``getattr`` replaces the
        previous broad ``except Exception: pass``, which could also
        swallow unrelated errors.
        """
        return getattr(self._apidict, 'servers', [])
class NovaUsage(base.APIResourceWrapper):
    """Simple wrapper around contrib/simple_usage.py."""
    _attrs = ['start', 'server_usages', 'stop', 'tenant_id',
              'total_local_gb_usage', 'total_memory_mb_usage',
              'total_vcpus_usage', 'total_hours']

    def _active_usages(self):
        """Yield usage records of instances that are still running."""
        return (u for u in self.server_usages if u['ended_at'] is None)

    def get_summary(self):
        """Return a dict of current and cumulative resource usage."""
        return {
            'instances': self.total_active_instances,
            'memory_mb': self.memory_mb,
            'vcpus': self.vcpus,
            'vcpu_hours': self.vcpu_hours,
            'local_gb': self.local_gb,
            'disk_gb_hours': self.disk_gb_hours,
            'memory_mb_hours': self.memory_mb_hours,
        }

    @property
    def total_active_instances(self):
        return sum(1 for _ in self._active_usages())

    @property
    def vcpus(self):
        return sum(u['vcpus'] for u in self._active_usages())

    @property
    def vcpu_hours(self):
        return getattr(self, "total_vcpus_usage", 0)

    @property
    def local_gb(self):
        return sum(u['local_gb'] for u in self._active_usages())

    @property
    def memory_mb(self):
        return sum(u['memory_mb'] for u in self._active_usages())

    @property
    def disk_gb_hours(self):
        return getattr(self, "total_local_gb_usage", 0)

    @property
    def memory_mb_hours(self):
        return getattr(self, "total_memory_mb_usage", 0)
class FlavorExtraSpec(object):
    """Value object representing one extra-spec entry of a flavor.

    The spec key doubles as the object's ``id`` so these records can be
    consumed directly by Horizon tables.
    """

    def __init__(self, flavor_id, key, val):
        self.flavor_id, self.key, self.value = flavor_id, key, val
        self.id = key
def get_auth_params_from_request(request):
    """Extracts properties needed by novaclient call from the request object.

    These will be used to memoize the calls to novaclient.
    """
    # NOTE: order matters -- novaclient() unpacks this tuple positionally.
    return (
        request.user.username,
        request.user.token.id,
        request.user.tenant_id,
        request.user.token.project.get('domain_id'),
        base.url_for(request, 'compute'),
        base.url_for(request, 'identity')
    )
@memoized_with_request(get_auth_params_from_request)
def novaclient(request_auth_params, version=None):
    """Return a novaclient Client built from the request's auth data.

    Memoized per the tuple produced by get_auth_params_from_request, so
    callers pass a Django request object but this body receives the
    already-extracted tuple.
    """
    (
        username,
        token_id,
        project_id,
        project_domain_id,
        nova_url,
        auth_url
    ) = request_auth_params
    if version is None:
        # Fall back to the highest API version enabled for this dashboard.
        version = VERSIONS.get_active_version()['version']
    c = nova_client.Client(version,
                           username,
                           token_id,
                           project_id=project_id,
                           project_domain_id=project_domain_id,
                           auth_url=auth_url,
                           insecure=INSECURE,
                           cacert=CACERT,
                           http_log_debug=settings.DEBUG,
                           auth_token=token_id,
                           endpoint_override=nova_url)
    return c
def upgrade_api(request, client, version):
    """Upgrade the nova API to the specified version if possible.

    Returns a new client at *version* when the server supports it;
    otherwise returns the original *client* unchanged.
    """
    min_ver, max_ver = api_versions._get_server_version_range(client)
    if min_ver <= api_versions.APIVersion(version) <= max_ver:
        client = novaclient(request, version)
    return client
@profiler.trace
def server_vnc_console(request, instance_id, console_type='novnc'):
return VNCConsole(novaclient(request).servers.get_vnc_console(
instance_id, console_type)['console'])
@profiler.trace
def server_spice_console(request, instance_id, console_type='spice-html5'):
return SPICEConsole(novaclient(request).servers.get_spice_console(
instance_id, console_type)['console'])
@profiler.trace
def server_rdp_console(request, instance_id, console_type='rdp-html5'):
return RDPConsole(novaclient(request).servers.get_rdp_console(
instance_id, console_type)['console'])
@profiler.trace
def server_serial_console(request, instance_id, console_type='serial'):
return SerialConsole(novaclient(request).servers.get_serial_console(
instance_id, console_type)['console'])
@profiler.trace
def flavor_create(request, name, memory, vcpu, disk, flavorid='auto',
ephemeral=0, swap=0, metadata=None, is_public=True,
rxtx_factor=1):
flavor = novaclient(request).flavors.create(name, memory, vcpu, disk,
flavorid=flavorid,
ephemeral=ephemeral,
swap=swap, is_public=is_public,
rxtx_factor=rxtx_factor)
if (metadata):
flavor_extra_set(request, flavor.id, metadata)
return flavor
@profiler.trace
def flavor_delete(request, flavor_id):
novaclient(request).flavors.delete(flavor_id)
@profiler.trace
def flavor_get(request, flavor_id, get_extras=False):
flavor = novaclient(request).flavors.get(flavor_id)
if get_extras:
flavor.extras = flavor_get_extras(request, flavor.id, True, flavor)
return flavor
@profiler.trace
@memoized
def flavor_list(request, is_public=True, get_extras=False):
"""Get the list of available instance sizes (flavors)."""
flavors = novaclient(request).flavors.list(is_public=is_public)
if get_extras:
for flavor in flavors:
flavor.extras = flavor_get_extras(request, flavor.id, True, flavor)
return flavors
@profiler.trace
def update_pagination(entities, page_size, marker, sort_dir, sort_key,
                      reversed_order):
    """Trim a page-plus-one result list and compute paging flags.

    The caller requests ``page_size + 1`` rows; a surplus row proves a
    following page exists.  Returns ``(entities, has_more_data,
    has_prev_data)``.
    """
    has_more_data = False
    has_prev_data = False
    if len(entities) > page_size:
        # Drop the sentinel row; its presence means more pages follow.
        has_more_data = True
        del entities[-1]
        has_prev_data = marker is not None
    elif marker is not None:
        if reversed_order:
            # First page, reached by paging backwards.
            has_more_data = True
        else:
            # Last page of a forward listing.
            has_prev_data = True
    if reversed_order:
        # Restore the ordering the caller originally asked for.
        entities = sorted(
            entities,
            key=lambda item: (getattr(item, sort_key) or '').lower(),
            reverse=(sort_dir == 'asc'))
    return entities, has_more_data, has_prev_data
@profiler.trace
@memoized
def flavor_list_paged(request, is_public=True, get_extras=False, marker=None,
paginate=False, sort_key="name", sort_dir="desc",
reversed_order=False):
"""Get the list of available instance sizes (flavors)."""
has_more_data = False
has_prev_data = False
if paginate:
if reversed_order:
sort_dir = 'desc' if sort_dir == 'asc' else 'asc'
page_size = utils.get_page_size(request)
flavors = novaclient(request).flavors.list(is_public=is_public,
marker=marker,
limit=page_size + 1,
sort_key=sort_key,
sort_dir=sort_dir)
flavors, has_more_data, has_prev_data = update_pagination(
flavors, page_size, marker, sort_dir, sort_key, reversed_order)
else:
flavors = novaclient(request).flavors.list(is_public=is_public)
if get_extras:
for flavor in flavors:
flavor.extras = flavor_get_extras(request, flavor.id, True, flavor)
return (flavors, has_more_data, has_prev_data)
@profiler.trace
@memoized_with_request(novaclient)
def flavor_access_list(nova_api, flavor=None):
"""Get the list of access instance sizes (flavors)."""
return nova_api.flavor_access.list(flavor=flavor)
@profiler.trace
def add_tenant_to_flavor(request, flavor, tenant):
"""Add a tenant to the given flavor access list."""
return novaclient(request).flavor_access.add_tenant_access(
flavor=flavor, tenant=tenant)
@profiler.trace
def remove_tenant_from_flavor(request, flavor, tenant):
"""Remove a tenant from the given flavor access list."""
return novaclient(request).flavor_access.remove_tenant_access(
flavor=flavor, tenant=tenant)
@profiler.trace
def flavor_get_extras(request, flavor_id, raw=False, flavor=None):
"""Get flavor extra specs."""
if flavor is None:
flavor = novaclient(request).flavors.get(flavor_id)
extras = flavor.get_keys()
if raw:
return extras
return [FlavorExtraSpec(flavor_id, key, value) for
key, value in extras.items()]
@profiler.trace
def flavor_extra_delete(request, flavor_id, keys):
"""Unset the flavor extra spec keys."""
flavor = novaclient(request).flavors.get(flavor_id)
return flavor.unset_keys(keys)
@profiler.trace
def flavor_extra_set(request, flavor_id, metadata):
"""Set the flavor extra spec keys."""
flavor = novaclient(request).flavors.get(flavor_id)
if (not metadata): # not a way to delete keys
return None
return flavor.set_keys(metadata)
@profiler.trace
def snapshot_create(request, instance_id, name):
return novaclient(request).servers.create_image(instance_id, name)
@profiler.trace
def keypair_create(request, name):
return novaclient(request).keypairs.create(name)
@profiler.trace
def keypair_import(request, name, public_key):
return novaclient(request).keypairs.create(name, public_key)
@profiler.trace
def keypair_delete(request, keypair_id):
novaclient(request).keypairs.delete(keypair_id)
@profiler.trace
def keypair_list(request):
return novaclient(request).keypairs.list()
@profiler.trace
def keypair_get(request, keypair_id):
return novaclient(request).keypairs.get(keypair_id)
@profiler.trace
def server_create(request, name, image, flavor, key_name, user_data,
security_groups, block_device_mapping=None,
block_device_mapping_v2=None, nics=None,
availability_zone=None, instance_count=1, admin_pass=None,
disk_config=None, config_drive=None, meta=None,
scheduler_hints=None):
return Server(novaclient(request).servers.create(
name.strip(), image, flavor, userdata=user_data,
security_groups=security_groups,
key_name=key_name, block_device_mapping=block_device_mapping,
block_device_mapping_v2=block_device_mapping_v2,
nics=nics, availability_zone=availability_zone,
min_count=instance_count, admin_pass=admin_pass,
disk_config=disk_config, config_drive=config_drive,
meta=meta, scheduler_hints=scheduler_hints), request)
@profiler.trace
def server_delete(request, instance_id):
novaclient(request).servers.delete(instance_id)
def get_novaclient_with_locked_status(request):
microversion = get_microversion(request, "locked_attribute")
return novaclient(request, version=microversion)
@profiler.trace
def server_get(request, instance_id):
return Server(get_novaclient_with_locked_status(request).servers.get(
instance_id), request)
@profiler.trace
def server_list(request, search_opts=None, detailed=True):
nova_client = get_novaclient_with_locked_status(request)
page_size = utils.get_page_size(request)
paginate = False
if search_opts is None:
search_opts = {}
elif 'paginate' in search_opts:
paginate = search_opts.pop('paginate')
if paginate:
search_opts['limit'] = page_size + 1
all_tenants = search_opts.get('all_tenants', False)
if all_tenants:
search_opts['all_tenants'] = True
else:
search_opts['project_id'] = request.user.tenant_id
servers = [Server(s, request)
for s in nova_client.servers.list(detailed, search_opts)]
has_more_data = False
if paginate and len(servers) > page_size:
servers.pop(-1)
has_more_data = True
elif paginate and len(servers) == getattr(settings, 'API_RESULT_LIMIT',
1000):
has_more_data = True
return (servers, has_more_data)
@profiler.trace
def server_console_output(request, instance_id, tail_length=None):
"""Gets console output of an instance."""
return novaclient(request).servers.get_console_output(instance_id,
length=tail_length)
@profiler.trace
def server_pause(request, instance_id):
novaclient(request).servers.pause(instance_id)
@profiler.trace
def server_unpause(request, instance_id):
novaclient(request).servers.unpause(instance_id)
@profiler.trace
def server_suspend(request, instance_id):
novaclient(request).servers.suspend(instance_id)
@profiler.trace
def server_resume(request, instance_id):
novaclient(request).servers.resume(instance_id)
@profiler.trace
def server_shelve(request, instance_id):
novaclient(request).servers.shelve(instance_id)
@profiler.trace
def server_unshelve(request, instance_id):
novaclient(request).servers.unshelve(instance_id)
@profiler.trace
def server_reboot(request, instance_id, soft_reboot=False):
hardness = nova_servers.REBOOT_HARD
if soft_reboot:
hardness = nova_servers.REBOOT_SOFT
novaclient(request).servers.reboot(instance_id, hardness)
@profiler.trace
def server_rebuild(request, instance_id, image_id, password=None,
disk_config=None):
return novaclient(request).servers.rebuild(instance_id, image_id,
password, disk_config)
@profiler.trace
def server_update(request, instance_id, name):
return novaclient(request).servers.update(instance_id, name=name.strip())
@profiler.trace
def server_migrate(request, instance_id):
novaclient(request).servers.migrate(instance_id)
@profiler.trace
def server_live_migrate(request, instance_id, host, block_migration=False,
disk_over_commit=False):
novaclient(request).servers.live_migrate(instance_id, host,
block_migration,
disk_over_commit)
@profiler.trace
def server_resize(request, instance_id, flavor, disk_config=None, **kwargs):
novaclient(request).servers.resize(instance_id, flavor,
disk_config, **kwargs)
@profiler.trace
def server_confirm_resize(request, instance_id):
novaclient(request).servers.confirm_resize(instance_id)
@profiler.trace
def server_revert_resize(request, instance_id):
novaclient(request).servers.revert_resize(instance_id)
@profiler.trace
def server_start(request, instance_id):
novaclient(request).servers.start(instance_id)
@profiler.trace
def server_stop(request, instance_id):
novaclient(request).servers.stop(instance_id)
@profiler.trace
def server_lock(request, instance_id):
microversion = get_microversion(request, "locked_attribute")
novaclient(request, version=microversion).servers.lock(instance_id)
@profiler.trace
def server_unlock(request, instance_id):
microversion = get_microversion(request, "locked_attribute")
novaclient(request, version=microversion).servers.unlock(instance_id)
@profiler.trace
def server_metadata_update(request, instance_id, metadata):
novaclient(request).servers.set_meta(instance_id, metadata)
@profiler.trace
def server_metadata_delete(request, instance_id, keys):
novaclient(request).servers.delete_meta(instance_id, keys)
@profiler.trace
def tenant_quota_get(request, tenant_id):
return base.QuotaSet(novaclient(request).quotas.get(tenant_id))
@profiler.trace
def tenant_quota_update(request, tenant_id, **kwargs):
if kwargs:
novaclient(request).quotas.update(tenant_id, **kwargs)
@profiler.trace
def default_quota_get(request, tenant_id):
return base.QuotaSet(novaclient(request).quotas.defaults(tenant_id))
@profiler.trace
def default_quota_update(request, **kwargs):
novaclient(request).quota_classes.update(DEFAULT_QUOTA_NAME, **kwargs)
def _get_usage_marker(usage):
marker = None
if hasattr(usage, 'server_usages') and usage.server_usages:
marker = usage.server_usages[-1].get('instance_id')
return marker
def _get_usage_list_marker(usage_list):
    """Pagination marker for a usage list: taken from its final entry."""
    if not usage_list:
        return None
    return _get_usage_marker(usage_list[-1])
def _merge_usage(usage, next_usage):
usage.server_usages.extend(next_usage.server_usages)
usage.total_hours += next_usage.total_hours
usage.total_memory_mb_usage += next_usage.total_memory_mb_usage
usage.total_vcpus_usage += next_usage.total_vcpus_usage
usage.total_local_gb_usage += next_usage.total_local_gb_usage
def _merge_usage_list(usages, next_usage_list):
    """Merge a page of per-tenant usages into the *usages* dict in place.

    *usages* is keyed by tenant_id; continuation entries for tenants
    already present are folded into the existing record.
    """
    for usage in next_usage_list:
        existing = usages.get(usage.tenant_id)
        if existing is None:
            usages[usage.tenant_id] = usage
        else:
            _merge_usage(existing, usage)
@profiler.trace
def usage_get(request, tenant_id, start, end):
client = upgrade_api(request, novaclient(request), '2.40')
usage = client.usage.get(tenant_id, start, end)
if client.api_version >= api_versions.APIVersion('2.40'):
# If the number of instances used to calculate the usage is greater
# than max_limit, the usage will be split across multiple requests
# and the responses will need to be merged back together.
marker = _get_usage_marker(usage)
while marker:
next_usage = client.usage.get(tenant_id, start, end, marker=marker)
marker = _get_usage_marker(next_usage)
if marker:
_merge_usage(usage, next_usage)
return NovaUsage(usage)
@profiler.trace
def usage_list(request, start, end):
client = upgrade_api(request, novaclient(request), '2.40')
usage_list = client.usage.list(start, end, True)
if client.api_version >= api_versions.APIVersion('2.40'):
# If the number of instances used to calculate the usage is greater
# than max_limit, the usage will be split across multiple requests
# and the responses will need to be merged back together.
usages = collections.OrderedDict()
_merge_usage_list(usages, usage_list)
marker = _get_usage_list_marker(usage_list)
while marker:
next_usage_list = client.usage.list(start, end, True,
marker=marker)
marker = _get_usage_list_marker(next_usage_list)
if marker:
_merge_usage_list(usages, next_usage_list)
usage_list = usages.values()
return [NovaUsage(u) for u in usage_list]
@profiler.trace
def virtual_interfaces_list(request, instance_id):
return novaclient(request).virtual_interfaces.list(instance_id)
@profiler.trace
def get_x509_credentials(request):
return novaclient(request).certs.create()
@profiler.trace
def get_x509_root_certificate(request):
return novaclient(request).certs.get()
@profiler.trace
def get_password(request, instance_id, private_key=None):
return novaclient(request).servers.get_password(instance_id, private_key)
@profiler.trace
def instance_volume_attach(request, volume_id, instance_id, device):
return novaclient(request).volumes.create_server_volume(instance_id,
volume_id,
device)
@profiler.trace
def instance_volume_detach(request, instance_id, att_id):
return novaclient(request).volumes.delete_server_volume(instance_id,
att_id)
@profiler.trace
def instance_volumes_list(request, instance_id):
from openstack_dashboard.api import cinder
volumes = novaclient(request).volumes.get_server_volumes(instance_id)
for volume in volumes:
volume_data = cinder.cinderclient(request).volumes.get(volume.id)
volume.name = cinder.Volume(volume_data).name
return volumes
@profiler.trace
def hypervisor_list(request):
return novaclient(request).hypervisors.list()
@profiler.trace
def hypervisor_stats(request):
return novaclient(request).hypervisors.statistics()
@profiler.trace
def hypervisor_search(request, query, servers=True):
return novaclient(request).hypervisors.search(query, servers)
@profiler.trace
def evacuate_host(request, host, target=None, on_shared_storage=False):
    """Evacuate every instance on *host*, optionally onto *target*.

    Returns True when all evacuations succeed; raises ClientException
    (with the last error code) listing the instances that failed.
    """
    # TODO(jmolle) This should be change for nova atomic api host_evacuate
    hypervisors = novaclient(request).hypervisors.search(host, True)
    response = []
    err_code = None
    for hypervisor in hypervisors:
        hyper = Hypervisor(hypervisor)
        # if hypervisor doesn't have servers, the attribute is not present
        for server in hyper.servers:
            try:
                novaclient(request).servers.evacuate(server['uuid'],
                                                     target,
                                                     on_shared_storage)
            except nova_exceptions.ClientException as err:
                err_code = err.code
                # BUG FIX: only record instances that actually failed.
                # Previously these lines sat outside the except block, so
                # every server -- including successfully evacuated ones --
                # was listed in the failure message.  Now mirrors
                # migrate_host.
                msg = _("Name: %(name)s ID: %(uuid)s")
                msg = msg % {'name': server['name'], 'uuid': server['uuid']}
                response.append(msg)
    if err_code:
        msg = _('Failed to evacuate instances: %s') % ', '.join(response)
        raise nova_exceptions.ClientException(err_code, msg)
    return True
@profiler.trace
def migrate_host(request, host, live_migrate=False, disk_over_commit=False,
block_migration=False):
hypervisors = novaclient(request).hypervisors.search(host, True)
response = []
err_code = None
for hyper in hypervisors:
for server in getattr(hyper, "servers", []):
try:
if live_migrate:
instance = server_get(request, server['uuid'])
# Checking that instance can be live-migrated
if instance.status in ["ACTIVE", "PAUSED"]:
novaclient(request).servers.live_migrate(
server['uuid'],
None,
block_migration,
disk_over_commit
)
else:
novaclient(request).servers.migrate(server['uuid'])
else:
novaclient(request).servers.migrate(server['uuid'])
except nova_exceptions.ClientException as err:
err_code = err.code
msg = _("Name: %(name)s ID: %(uuid)s")
msg = msg % {'name': server['name'], 'uuid': server['uuid']}
response.append(msg)
if err_code:
msg = _('Failed to migrate instances: %s') % ', '.join(response)
raise nova_exceptions.ClientException(err_code, msg)
return True
@profiler.trace
def tenant_absolute_limits(request, reserved=False):
    """Return the project's absolute Nova limits as a name -> value dict.

    Negative API values are normalized: ``total*Used`` counters become 0
    (workaround for nova bug 1370867) and other negatives (-1 quotas)
    become ``float('inf')`` to represent "unlimited".
    """
    absolute = novaclient(request).limits.get(reserved=reserved).absolute
    results = {}
    for limit in absolute:
        value = limit.value
        if value < 0:
            if limit.name.startswith('total') and limit.name.endswith('Used'):
                # nova bug 1370867: total.*Used may come back negative
                # instead of 0.
                value = 0
            else:
                # -1 represents an unlimited quota.
                value = float("inf")
        results[limit.name] = value
    return results
@profiler.trace
def availability_zone_list(request, detailed=False):
return novaclient(request).availability_zones.list(detailed=detailed)
@profiler.trace
def server_group_list(request):
return novaclient(request).server_groups.list()
@profiler.trace
def service_list(request, binary=None):
return novaclient(request).services.list(binary=binary)
@profiler.trace
def service_enable(request, host, binary):
return novaclient(request).services.enable(host, binary)
@profiler.trace
def service_disable(request, host, binary, reason=None):
if reason:
return novaclient(request).services.disable_log_reason(host,
binary, reason)
else:
return novaclient(request).services.disable(host, binary)
@profiler.trace
def aggregate_details_list(request):
result = []
c = novaclient(request)
for aggregate in c.aggregates.list():
result.append(c.aggregates.get_details(aggregate.id))
return result
@profiler.trace
def aggregate_create(request, name, availability_zone=None):
return novaclient(request).aggregates.create(name, availability_zone)
@profiler.trace
def aggregate_delete(request, aggregate_id):
return novaclient(request).aggregates.delete(aggregate_id)
@profiler.trace
def aggregate_get(request, aggregate_id):
return novaclient(request).aggregates.get(aggregate_id)
@profiler.trace
def aggregate_update(request, aggregate_id, values):
return novaclient(request).aggregates.update(aggregate_id, values)
@profiler.trace
def aggregate_set_metadata(request, aggregate_id, metadata):
return novaclient(request).aggregates.set_metadata(aggregate_id, metadata)
@profiler.trace
def host_list(request):
return novaclient(request).hosts.list()
@profiler.trace
def add_host_to_aggregate(request, aggregate_id, host):
return novaclient(request).aggregates.add_host(aggregate_id, host)
@profiler.trace
def remove_host_from_aggregate(request, aggregate_id, host):
return novaclient(request).aggregates.remove_host(aggregate_id, host)
@profiler.trace
def interface_attach(request,
server, port_id=None, net_id=None, fixed_ip=None):
return novaclient(request).servers.interface_attach(server,
port_id,
net_id,
fixed_ip)
@profiler.trace
def interface_detach(request, server, port_id):
return novaclient(request).servers.interface_detach(server, port_id)
@profiler.trace
@memoized_with_request(novaclient)
def list_extensions(nova_api):
"""List all nova extensions, except the ones in the blacklist."""
blacklist = set(getattr(settings,
'OPENSTACK_NOVA_EXTENSIONS_BLACKLIST', []))
return tuple(
extension for extension in
nova_list_extensions.ListExtManager(nova_api).show_all()
if extension.name not in blacklist
)
@profiler.trace
@memoized_with_request(list_extensions, 1)
def extension_supported(extension_name, extensions):
"""Determine if nova supports a given extension name.
Example values for the extension_name include AdminActions, ConsoleOutput,
etc.
"""
for extension in extensions:
if extension.name == extension_name:
return True
return False
@profiler.trace
def can_set_server_password():
features = getattr(settings, 'OPENSTACK_HYPERVISOR_FEATURES', {})
return features.get('can_set_password', False)
@profiler.trace
def instance_action_list(request, instance_id):
return nova_instance_action.InstanceActionManager(
novaclient(request)).list(instance_id)
@profiler.trace
def can_set_mount_point():
"""Return the Hypervisor's capability of setting mount points."""
hypervisor_features = getattr(
settings, "OPENSTACK_HYPERVISOR_FEATURES", {})
return hypervisor_features.get("can_set_mount_point", False)
@profiler.trace
def requires_keypair():
features = getattr(settings, 'OPENSTACK_HYPERVISOR_FEATURES', {})
return features.get('requires_keypair', False)
def can_set_quotas():
features = getattr(settings, 'OPENSTACK_HYPERVISOR_FEATURES', {})
return features.get('enable_quotas', True)
| {
"content_hash": "32ccc06d4ccfebdf3e617a1bddecd9c0",
"timestamp": "",
"source": "github",
"line_count": 1018,
"max_line_length": 79,
"avg_line_length": 32.445972495088405,
"alnum_prop": 0.6448985770511656,
"repo_name": "yeming233/horizon",
"id": "d6658e21a51884d162c3e5f3b23dec612240c5a8",
"size": "33894",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/api/nova.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "105527"
},
{
"name": "HTML",
"bytes": "517093"
},
{
"name": "JavaScript",
"bytes": "953373"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4845896"
},
{
"name": "Shell",
"bytes": "18658"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages

# reST-formatted long description (shown on PyPI); documents CLI usage.
long_desc = """
Usage
-----
::
rst2db <filename> [-e root_element] [-o output_file]
Only the filename to process is required. All other settings are optional.
Settings:
-e root_element set the root element of the resulting docbook file. If this
is not specified, then 'section' will be used.
-o output_file set the output filename to write. If this is not specified,
then output will be sent to stdout.
"""
setup(name='msrst2db',
      description="""
      A reStructuredText to DocBook converter using Python's docutils.""",
      # BUG FIX: long_desc was defined above but never passed to setup(),
      # so the package page had no long description.
      long_description=long_desc,
      version='1.4',
      install_requires=['docutils>=0.12', 'lxml>=2.3'],
      extras_require={'dev': ['check-manifest',
                              'ipdb',
                              'twine',
                              'wheel']},
      packages=find_packages(),
      entry_points={
          'console_scripts': ['rst2db = abstrys.cmd_rst2db:run'],
      },
      author='Aleksei Badyaev',
      author_email='a.badyaev@mousesoft.tk',
      url='https://github.com/Aleksei-Badyaev/rst2db',
      )
| {
"content_hash": "e78418d4a3d4dac7858956d4ef96c944",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 78,
"avg_line_length": 30.07894736842105,
"alnum_prop": 0.5783027121609798,
"repo_name": "Aleksei-Badyaev/rst2db",
"id": "9166a4aa6b4411c677012ee1e8077e7d1505e6dc",
"size": "1165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "26121"
}
],
"symlink_target": ""
} |
import configparser
Config = configparser.ConfigParser()
Config.read("config.ini")
print(Config.sections())
def ConfigSectionMap(section):
    """Return all options of *section* from the module-level Config
    as a plain dict.

    Options that cannot be read are stored as None (and reported) so
    callers can distinguish "unreadable" from "absent".
    """
    options = {}
    for option in Config.options(section):
        try:
            options[option] = Config.get(section, option)
        # BUG FIX: was a bare `except:` that also referenced an undefined
        # DebugPrint() and a dead `== -1` check (Config.get returns str).
        # Catch only configparser errors instead.
        except configparser.Error:
            print("exception on %s!" % option)
            options[option] = None
    return options
Name = ConfigSectionMap("SectionOne")['name']
Age = ConfigSectionMap("SectionOne")['age']
print ("Hello %s. You are %s years old." % (Name, Age))
port = Config.get("Settings", "Port")
print (port) | {
"content_hash": "bb95e7dbae3c67148096bfc5d38ef7a7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 55,
"avg_line_length": 22.77777777777778,
"alnum_prop": 0.6845528455284553,
"repo_name": "enorenio/test",
"id": "69f44bfcd45807a0257a81f8c5dfba6576ea47cb",
"size": "615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dsm/cfgtest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "562"
},
{
"name": "Python",
"bytes": "47805"
}
],
"symlink_target": ""
} |
import socket
import sys
import uuid
import mock
import unittest2 as unittest
from quantum.agent.common import config
from quantum.agent.linux import interface
from quantum.agent.linux import utils
from quantum.common import exceptions
from quantum.debug import commands
from quantum.debug.debug_agent import DEVICE_OWNER_PROBE, QuantumDebugAgent
from quantum.openstack.common import cfg
class MyApp(object):
    """Minimal stand-in for a cliff application object.

    The commands under test only need an object exposing ``stdout``
    (and, in these tests, a ``debug_agent`` attribute set by setUp).
    """
    def __init__(self, _stdout):
        self.stdout = _stdout
class TestDebugCommands(unittest.TestCase):
def setUp(self):
cfg.CONF.register_opts(interface.OPTS)
cfg.CONF.register_opts(QuantumDebugAgent.OPTS)
cfg.CONF(args=['quantum-debug'], project='quantum')
cfg.CONF.set_override('use_namespaces', True)
cfg.CONF.root_helper = 'sudo'
self.addCleanup(mock.patch.stopall)
device_exists_p = mock.patch(
'quantum.agent.linux.ip_lib.device_exists', return_value=False)
device_exists_p.start()
namespace_p = mock.patch(
'quantum.agent.linux.ip_lib.IpNetnsCommand')
namespace_p.start()
ensure_namespace_p = mock.patch(
'quantum.agent.linux.ip_lib.IPWrapper.ensure_namespace')
ensure_namespace_p.start()
dvr_cls_p = mock.patch('quantum.agent.linux.interface.NullDriver')
driver_cls = dvr_cls_p.start()
mock_driver = mock.MagicMock()
mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
mock_driver.get_device_name.return_value = 'tap12345678-12'
driver_cls.return_value = mock_driver
self.driver = mock_driver
client_cls_p = mock.patch('quantumclient.v2_0.client.Client')
client_cls = client_cls_p.start()
client_inst = mock.Mock()
client_cls.return_value = client_inst
fake_network = {'network': {'id': 'fake_net',
'tenant_id': 'fake_tenant',
'subnets': ['fake_subnet']}}
fake_port = {'port':
{'id': 'fake_port',
'device_owner': 'fake_device',
'mac_address': 'aa:bb:cc:dd:ee:ffa',
'network_id': 'fake_net',
'fixed_ips':
[{'subnet_id': 'fake_subnet', 'ip_address':'10.0.0.3'}]
}}
fake_ports = {'ports': [fake_port['port']]}
self.fake_ports = fake_ports
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.254'}]
fake_subnet_v4 = {'subnet': {'name': 'fake_subnet_v4',
'id': 'fake_subnet',
'network_id': 'fake_net',
'gateway_ip': '10.0.0.1',
'dns_nameservers': ['10.0.0.2'],
'host_routes': [],
'cidr': '10.0.0.0/24',
'allocation_pools': allocation_pools,
'enable_dhcp': True,
'ip_version': 4}}
client_inst.list_ports.return_value = fake_ports
client_inst.create_port.return_value = fake_port
client_inst.show_port.return_value = fake_port
client_inst.show_network.return_value = fake_network
client_inst.show_subnet.return_value = fake_subnet_v4
self.client = client_inst
mock_std = mock.Mock()
self.app = MyApp(mock_std)
self.app.debug_agent = QuantumDebugAgent(cfg.CONF,
client_inst,
mock_driver)
def test_create_probe(self):
cmd = commands.CreateProbe(self.app, None)
cmd_parser = cmd.get_parser('create_probe')
args = ['fake_net']
parsed_args = cmd_parser.parse_args(args)
cmd.run(parsed_args)
fake_port = {'port':
{'device_owner': DEVICE_OWNER_PROBE,
'admin_state_up': True,
'network_id': 'fake_net',
'tenant_id': 'fake_tenant',
'fixed_ips': [{'subnet_id': 'fake_subnet'}],
'device_id': socket.gethostname()}}
namespace = 'qprobe-fake_port'
self.client.assert_has_calls([mock.call.show_network('fake_net'),
mock.call.show_subnet('fake_subnet'),
mock.call.create_port(fake_port),
mock.call.show_subnet('fake_subnet')])
self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
mock.call.plug('fake_net',
'fake_port',
'tap12345678-12',
'aa:bb:cc:dd:ee:ffa',
bridge=None,
namespace=namespace),
mock.call.init_l3('tap12345678-12',
['10.0.0.3/24'],
namespace=namespace
)])
    def test_create_probe_external(self):
        """CreateProbe on a router:external network must plug the probe into
        the external bridge ('br-ex') instead of no bridge."""
        # Override the fixture network with an external one.
        fake_network = {'network': {'id': 'fake_net',
                                    'tenant_id': 'fake_tenant',
                                    'router:external': True,
                                    'subnets': ['fake_subnet']}}
        self.client.show_network.return_value = fake_network
        cmd = commands.CreateProbe(self.app, None)
        cmd_parser = cmd.get_parser('create_probe')
        args = ['fake_net']
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        fake_port = {'port':
                     {'device_owner': DEVICE_OWNER_PROBE,
                      'admin_state_up': True,
                      'network_id': 'fake_net',
                      'tenant_id': 'fake_tenant',
                      'fixed_ips': [{'subnet_id': 'fake_subnet'}],
                      'device_id': socket.gethostname()}}
        namespace = 'qprobe-fake_port'
        self.client.assert_has_calls([mock.call.show_network('fake_net'),
                                      mock.call.show_subnet('fake_subnet'),
                                      mock.call.create_port(fake_port),
                                      mock.call.show_subnet('fake_subnet')])
        # Same flow as the non-external case, but bridge='br-ex'.
        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
                                      mock.call.plug('fake_net',
                                                     'fake_port',
                                                     'tap12345678-12',
                                                     'aa:bb:cc:dd:ee:ffa',
                                                     bridge='br-ex',
                                                     namespace=namespace),
                                      mock.call.init_l3('tap12345678-12',
                                                        ['10.0.0.3/24'],
                                                        namespace=namespace
                                                        )])
    def test_delete_probe(self):
        """DeleteProbe unplugs the device from its namespace (no bridge)
        and deletes the probe port."""
        cmd = commands.DeleteProbe(self.app, None)
        cmd_parser = cmd.get_parser('delete_probe')
        args = ['fake_port']
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        namespace = 'qprobe-fake_port'
        self.client.assert_has_calls([mock.call.show_port('fake_port'),
                                      mock.call.show_network('fake_net'),
                                      mock.call.show_subnet('fake_subnet'),
                                      mock.call.delete_port('fake_port')])
        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
                                      mock.call.unplug('tap12345678-12',
                                                       namespace=namespace,
                                                       bridge=None)])
    def test_delete_probe_external(self):
        """DeleteProbe on a router:external network unplugs from the
        external bridge 'br-ex'."""
        fake_network = {'network': {'id': 'fake_net',
                                    'tenant_id': 'fake_tenant',
                                    'router:external': True,
                                    'subnets': ['fake_subnet']}}
        self.client.show_network.return_value = fake_network
        cmd = commands.DeleteProbe(self.app, None)
        cmd_parser = cmd.get_parser('delete_probe')
        args = ['fake_port']
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        namespace = 'qprobe-fake_port'
        self.client.assert_has_calls([mock.call.show_port('fake_port'),
                                      mock.call.show_network('fake_net'),
                                      mock.call.show_subnet('fake_subnet'),
                                      mock.call.delete_port('fake_port')])
        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
                                      mock.call.unplug('tap12345678-12',
                                                       namespace=namespace,
                                                       bridge='br-ex')])
    def test_delete_probe_without_namespace(self):
        """With use_namespaces disabled, unplug is called without a
        namespace keyword."""
        cfg.CONF.set_override('use_namespaces', False)
        cmd = commands.DeleteProbe(self.app, None)
        cmd_parser = cmd.get_parser('delete_probe')
        args = ['fake_port']
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        self.client.assert_has_calls([mock.call.show_port('fake_port'),
                                      mock.call.show_network('fake_net'),
                                      mock.call.show_subnet('fake_subnet'),
                                      mock.call.delete_port('fake_port')])
        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
                                      mock.call.unplug('tap12345678-12',
                                                       bridge=None)])
    def test_list_probe(self):
        """ListProbe queries ports filtered by the probe device owner."""
        cmd = commands.ListProbe(self.app, None)
        cmd_parser = cmd.get_parser('list_probe')
        args = []
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        self.client.assert_has_calls(
            [mock.call.list_ports(device_owner=DEVICE_OWNER_PROBE)])
    def test_exec_command(self):
        """ExecProbe runs the given command inside the probe's network
        namespace (via IpNetnsCommand)."""
        cmd = commands.ExecProbe(self.app, None)
        cmd_parser = cmd.get_parser('exec_command')
        args = ['fake_port', 'fake_command']
        parsed_args = cmd_parser.parse_args(args)
        with mock.patch('quantum.agent.linux.ip_lib.IpNetnsCommand') as ns:
            cmd.run(parsed_args)
            ns.assert_has_calls([mock.call.execute(mock.ANY)])
        self.client.assert_has_calls([mock.call.show_port('fake_port')])
    def test_exec_command_without_namespace(self):
        """With use_namespaces disabled, ExecProbe falls back to a plain
        utils.execute call instead of entering a namespace."""
        cfg.CONF.set_override('use_namespaces', False)
        cmd = commands.ExecProbe(self.app, None)
        cmd_parser = cmd.get_parser('exec_command')
        args = ['fake_port', 'fake_command']
        parsed_args = cmd_parser.parse_args(args)
        with mock.patch('quantum.agent.linux.utils.execute') as exe:
            cmd.run(parsed_args)
            exe.assert_has_calls([mock.call.execute(mock.ANY)])
        self.client.assert_has_calls([mock.call.show_port('fake_port')])
    def test_clear_probe(self):
        """ClearProbe deletes every probe port owned by this host: it lists
        ports by (device_id=hostname, device_owner=probe) and then runs the
        DeleteProbe flow on each."""
        cmd = commands.ClearProbe(self.app, None)
        cmd_parser = cmd.get_parser('clear_probe')
        args = []
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        namespace = 'qprobe-fake_port'
        self.client.assert_has_calls([mock.call.list_ports(
            device_id=socket.gethostname(),
            device_owner=DEVICE_OWNER_PROBE),
            mock.call.show_port('fake_port'),
            mock.call.show_network('fake_net'),
            mock.call.show_subnet('fake_subnet'),
            mock.call.delete_port('fake_port')])
        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
                                      mock.call.unplug('tap12345678-12',
                                                       namespace=namespace,
                                                       bridge=None)])
    def test_ping_all_with_ensure_port(self):
        """PingAll creates a probe on the fly (ensure_port) when the target
        network has no existing probe port."""
        fake_ports = self.fake_ports

        def fake_port_list(network_id=None, device_owner=None, device_id=None):
            # Filtered query (per-network probe lookup) returns no ports so
            # that the command has to create one; the unfiltered query still
            # returns the fixture ports to ping.
            if network_id:
                # In order to test ensure_port, return []
                return {'ports': []}
            return fake_ports
        self.client.list_ports.side_effect = fake_port_list
        cmd = commands.PingAll(self.app, None)
        cmd_parser = cmd.get_parser('ping_all')
        args = []
        parsed_args = cmd_parser.parse_args(args)
        namespace = 'qprobe-fake_port'
        with mock.patch('quantum.agent.linux.ip_lib.IpNetnsCommand') as ns:
            cmd.run(parsed_args)
            ns.assert_has_calls([mock.call.execute(mock.ANY)])
        # The probe creation triggered by ensure_port mirrors CreateProbe.
        fake_port = {'port':
                     {'device_owner': DEVICE_OWNER_PROBE,
                      'admin_state_up': True,
                      'network_id': 'fake_net',
                      'tenant_id': 'fake_tenant',
                      'fixed_ips': [{'subnet_id': 'fake_subnet'}],
                      'device_id': socket.gethostname()}}
        expected = [mock.call.show_network('fake_net'),
                    mock.call.show_subnet('fake_subnet'),
                    mock.call.create_port(fake_port),
                    mock.call.show_subnet('fake_subnet')]
        self.client.assert_has_calls(expected)
        self.driver.assert_has_calls([mock.call.init_l3('tap12345678-12',
                                                        ['10.0.0.3/24'],
                                                        namespace=namespace
                                                        )])
    def test_ping_all(self):
        """PingAll with existing probe ports reuses them: it lists ports,
        looks up this host's probe on the network, and pings from it."""
        cmd = commands.PingAll(self.app, None)
        cmd_parser = cmd.get_parser('ping_all')
        args = []
        parsed_args = cmd_parser.parse_args(args)
        with mock.patch('quantum.agent.linux.ip_lib.IpNetnsCommand') as ns:
            cmd.run(parsed_args)
            ns.assert_has_calls([mock.call.execute(mock.ANY)])
        # Built but intentionally unused below: with a probe already present,
        # no create_port call is expected.
        fake_port = {'port':
                     {'device_owner': DEVICE_OWNER_PROBE,
                      'admin_state_up': True,
                      'network_id': 'fake_net',
                      'tenant_id': 'fake_tenant',
                      'fixed_ips': [{'subnet_id': 'fake_subnet'}],
                      'device_id': socket.gethostname()}}
        expected = [mock.call.list_ports(),
                    mock.call.list_ports(network_id='fake_net',
                                         device_owner=DEVICE_OWNER_PROBE,
                                         device_id=socket.gethostname()),
                    mock.call.show_subnet('fake_subnet'),
                    mock.call.show_port('fake_port')]
        self.client.assert_has_calls(expected)
    def test_ping_all_v6(self):
        """PingAll against an IPv6 subnet still lists ports and completes
        (the v6 path uses a different ping tool inside the namespace)."""
        fake_subnet_v6 = {'subnet': {'name': 'fake_v6',
                                     'ip_version': 6}}
        self.client.show_subnet.return_value = fake_subnet_v6
        cmd = commands.PingAll(self.app, None)
        cmd_parser = cmd.get_parser('ping_all')
        args = []
        parsed_args = cmd_parser.parse_args(args)
        with mock.patch('quantum.agent.linux.ip_lib.IpNetnsCommand') as ns:
            cmd.run(parsed_args)
            ns.assert_has_calls([mock.call.execute(mock.ANY)])
        self.client.assert_has_calls([mock.call.list_ports()])
| {
"content_hash": "e437a94d2184d5bee3b1c653f70e5830",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 79,
"avg_line_length": 49.18098159509202,
"alnum_prop": 0.4826919478575438,
"repo_name": "aristanetworks/arista-ovs-quantum",
"id": "1a8fe9295e1f76bf031ba7d2b369a005151d46f0",
"size": "16721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantum/tests/unit/test_debug_commands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "235"
},
{
"name": "Python",
"bytes": "2568389"
},
{
"name": "Scala",
"bytes": "4525"
},
{
"name": "Shell",
"bytes": "7843"
},
{
"name": "XML",
"bytes": "50907"
}
],
"symlink_target": ""
} |
"""
rot.progress
~~~~~~~~~~~~
Progress indicators.
:Copyright: © 2013, Kwpolska.
:License: BSD (see /LICENSE).
"""
import sys
import time
from rot import throbber
__all__ = ['FrontProgress', 'FrontProgressThrobber']
class FrontProgress(object):
    """A static progress indicator with numbers.

    Writes ``(current/total) message`` to stdout, rewriting the same
    terminal line in place with carriage returns.

    Usage::

        fp = FrontProgress(2, 'Doing step 1...')
        step1()
        fp.step('Doing step 2...')
        step2()
        fp.step()

    Or (with static message)::

        fp = FrontProgress(2, 'Performing an action...')
        step1()
        fp.step()
        step2()
        fp.step()
    """
    # Class-level defaults, rebound per instance by __init__/step().
    current = -1  # last completed step; -1 before the first step() call
    total = 1     # total number of steps
    _pml = 0      # previous message length, used to blank the old line
    def __init__(self, total, msg=u'Working...', end_with_newline=True):
        """Initialize the indicator and immediately print step 0.

        :param total: number of steps expected.
        :param msg: initial progress message.
        :param end_with_newline: whether to emit a final newline when
                                 ``current`` reaches ``total``.
        """
        self.total = total
        self.msg = msg
        self.end_with_newline = end_with_newline
        self.step(msg)
    def step(self, msg=None, newline=False):
        """Advance the counter and rewrite the progress line.

        :param msg: new message; ``None`` keeps the previous one.
        :param newline: force a newline after this update.
        """
        if msg is None:
            msg = self.msg
        else:
            self.msg = msg
        self.current += 1
        ln = len(str(self.total))
        # Blank out the previous line: counter width (ln*2 + 4 for the
        # "(/) " decoration) plus the previous message length.
        sys.stdout.write(u'\r' + ((ln * 2 + 4 + self._pml) * u' '))
        self._pml = len(msg)
        sys.stdout.write(u'\r')
        sys.stdout.flush()
        sys.stdout.write((u'({0:>' + str(ln) + u'}/{1}) ').format(self.current,
                                                                  self.total))
        sys.stdout.write(msg)
        sys.stdout.write(u'\r')
        sys.stdout.flush()
        if newline:
            sys.stdout.write(u'\n')
            sys.stdout.flush()
        if self.current == self.total:
            # Finished: optionally terminate the line, then reset counters.
            if self.end_with_newline:
                sys.stdout.write(u'\n')
                sys.stdout.flush()
            self.total = 0
            self.current = 0
class FrontProgressThrobber(FrontProgress, throbber.Throbber):
    """An animated progress throbber.

    Similar to FrontProgress, but the separator between current and total
    is animated while work is in progress.

    Usage::

        with FrontProgressThrobber(2, 'Doing stuff...') as pt:
            dostuff()
            pt.step('Cleaning up...')
            cleanup()
    """
    printback = True  # emit a trailing newline when the throbber stops
    def __init__(self, total, msg=u'Working...', finalthrob=u'/',
                 end_with_newline=True):
        """Initialize the throbber and print the first progress line.

        :param finalthrob: character shown once the animation stops.

        NOTE(review): ``end_with_newline`` is accepted but never stored, and
        this class overrides step() without the end-of-run logic -- confirm
        whether the parameter is intentionally ignored here.
        """
        self.total = total
        self.msg = msg
        self.finalthrob = finalthrob
        self.ln = len(str(self.total))
        self.step(msg)
    def _throb(self):
        """Animate the separator until ``self.throb`` is cleared.

        Presumably driven from a background thread by throbber.Throbber
        (which also supplies ``self.states``) -- see that base class.
        """
        self.throb = True
        i = 0
        while self.throb:
            sys.stdout.write((u'\r({0:>' + str(self.ln) +
                              u'}{1}{2}) {3}').format(self.current,
                                                      self.states[i],
                                                      self.total, self.msg))
            sys.stdout.write(u'\r')
            sys.stdout.flush()
            time.sleep(0.1)
            i += 1
            if i == len(self.states):
                i = 0
        # Animation stopped: draw the final, static separator.
        sys.stdout.write(u'\r({0}{1}{2}) {3}'.format(self.current,
                                                     self.finalthrob,
                                                     self.total, self.msg))
        sys.stdout.flush()
        time.sleep(0.1)
        if self.printback:
            sys.stdout.write(u'\n')
            sys.stdout.flush()
    def step(self, msg=None):
        """Advance the counter; the throbber thread repaints the line."""
        if msg is None:
            msg = self.msg
        # _pml comes from the FrontProgress class attribute (0) on first use.
        sys.stdout.write(u'\r' + ((self.ln * 2 + 4 + self._pml) * u' '))
        self._pml = len(msg)
        self.current += 1
        self.msg = msg
| {
"content_hash": "47e7a1c2d0a1138da651fbdd62d85ba6",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 79,
"avg_line_length": 27.7,
"alnum_prop": 0.4645931685642877,
"repo_name": "Kwpolska/rot",
"id": "850578dff3373ac6c728cd2d2a13bed3be85a130",
"size": "3748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rot/progress.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "2811"
},
{
"name": "Python",
"bytes": "28282"
},
{
"name": "Shell",
"bytes": "12968"
}
],
"symlink_target": ""
} |
'''
Created on Apr 9, 2014
@author: Simon
'''
import matplotlib
from utils import logging
# !important: tell matplotlib not to try rendering to a window
matplotlib.use('Agg')
import io
import struct
from skimage import io as skio
import base64
class NImageInputProtocol(object):
    '''
    MrJob protocol for records carrying several PNG images.

    Wire format (after a tab-separated key): Base64 of repeated
    | image length, big-endian 4-byte int | image bytes | ...

    read() decodes one such record into (key, [numpy arrays]);
    write() encodes (key, [numpy arrays]) back into the same format.
    '''
    def read(self, data):
        """Decode a tab-separated ``key\\tbase64(payload)`` record.

        :param data: one raw input line.
        :returns: tuple of (key, list of image arrays).
        """
        key, enc_value = data.split('\t', 1)
        value = base64.b64decode(enc_value)
        pos = 0
        image_arrs = []
        logging.info('decoded number of bytes: ' + str(len(value)))
        while pos < len(value):
            # 4-byte big-endian length prefix, then the image bytes.
            image_len = struct.unpack('>i', value[pos:pos + 4])[0]
            pos += 4
            logging.info('reading image of length: ' + str(image_len) + '\n')
            image_arr = skio.imread(io.BytesIO(value[pos:pos + image_len]))
            logging.info('done reading')
            image_arrs.append(image_arr)
            pos += image_len
        logging.info('Got ' + str(len(image_arrs)) + ' images')
        return key, image_arrs
    def write(self, key, img_list):
        """Encode (key, list of image arrays) as ``key\\tbase64(payload)``.

        :param key: record key.
        :param img_list: list of image arrays to serialize as PNG.
        :returns: the encoded output line.
        """
        logging.info('Writing ' + str(len(img_list)) + ' images')
        byte_stream = io.BytesIO()
        for img in img_list:
            # Serialize the image, then prefix it with its byte length.
            temp_stream = io.BytesIO()
            skio.imsave(temp_stream, img)
            img_bytes = temp_stream.getvalue()
            temp_stream.close()
            img_len = len(img_bytes)
            logging.info('Writing image of length ' + str(img_len))
            # Fix: write the packed length bytes directly. The original did
            # byte_stream.write(str(bytearray(struct.pack(...)))), which only
            # works on Python 2 -- on Python 3, str(bytearray) produces the
            # textual repr and is not bytes at all.
            byte_stream.write(struct.pack('>i', img_len))
            byte_stream.write(img_bytes)
        final_bytes = byte_stream.getvalue()
        byte_stream.close()
        encoded = base64.b64encode(final_bytes)
        logging.info('Done writing. Final number of bytes: ' + str(len(final_bytes)))
        return '%s\t%s' % (key, encoded)
| {
"content_hash": "e2a71215b1776aeed3aa93a8f4ab026b",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 85,
"avg_line_length": 35.39393939393939,
"alnum_prop": 0.5856164383561644,
"repo_name": "xapharius/HadoopML",
"id": "4d76b502d61da1457a2bee004272be0c7f1b7402",
"size": "2336",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Engine/src/protocol/n_image_input_protocol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "527"
},
{
"name": "Java",
"bytes": "20163"
},
{
"name": "Python",
"bytes": "250490"
}
],
"symlink_target": ""
} |
"""Example of ConfigDict usage of lock.
This example shows the roles and scopes of ConfigDict's lock().
"""
from absl import app
from ml_collections import config_dict
def main(_):
    """Demonstrate ConfigDict.lock(): locked dicts reject new fields but
    allow changing existing values; unlocked() lifts the lock temporarily.

    Note: the 'intagar_field' misspelling below is deliberate -- it triggers
    the AttributeError whose did-you-mean suggestion this example shows.
    """
    cfg = config_dict.ConfigDict()
    cfg.integer_field = 123

    # Locking prohibits the addition and deletion of new fields but allows
    # modification of existing values. Locking happens automatically during
    # loading through flags.
    cfg.lock()
    try:
        cfg.intagar_field = 124  # Raises AttributeError and suggests valid field.
    except AttributeError as e:
        print(e)

    cfg.integer_field = -123  # Works fine.

    with cfg.unlocked():
        cfg.intagar_field = 1555  # Works fine too.

    print(cfg)


if __name__ == '__main__':
    app.run()
| {
"content_hash": "49347021d12133e642a150aa7f3a0d1a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 78,
"avg_line_length": 23.548387096774192,
"alnum_prop": 0.7,
"repo_name": "google/ml_collections",
"id": "fe411932cd92f2400c660021e373f9172b35944e",
"size": "1322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ml_collections/config_dict/examples/config_dict_lock.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "293642"
},
{
"name": "Shell",
"bytes": "1406"
},
{
"name": "Starlark",
"bytes": "8118"
}
],
"symlink_target": ""
} |
import time
import atexit
import heapq
from subprocess import Popen
from threading import Thread
from plumbum.lib import IS_WIN32, six
try:
from queue import Queue, Empty as QueueEmpty
except ImportError:
from Queue import Queue, Empty as QueueEmpty # type: ignore
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO # type: ignore
#===================================================================================================
# utility functions
#===================================================================================================
def _check_process(proc, retcode, timeout, stdout, stderr):
    """Validate a finished process and return its result triple.

    Delegates to ``proc.verify``, which raises on a retcode mismatch or
    timeout; on success returns ``(returncode, stdout, stderr)``.
    """
    proc.verify(retcode, timeout, stdout, stderr)
    return proc.returncode, stdout, stderr
def _iter_lines(proc, decode, linesize, line_timeout=None):
    """Yield ``(fd_index, decoded_line)`` pairs from a running process.

    ``fd_index`` is 0 for stdout and 1 for stderr. Multiplexes both pipes
    (via ``selectors`` when available, plain ``select`` otherwise), raising
    ProcessLineTimedOut when no line arrives within ``line_timeout``.
    After the process exits, drains whatever remains in both pipes.
    """
    try:
        from selectors import DefaultSelector, EVENT_READ
    except ImportError:
        # Pre Python 3.4 implementation
        from select import select

        def selector():
            while True:
                rlist, _, _ = select([proc.stdout, proc.stderr], [], [], line_timeout)
                # select() returning nothing means the timeout expired.
                if not rlist and line_timeout:
                    raise ProcessLineTimedOut("popen line timeout expired", getattr(proc, "argv", None), getattr(proc, "machine", None))
                for stream in rlist:
                    yield (stream is proc.stderr), decode(
                        stream.readline(linesize))
    else:
        # Python 3.4 implementation
        def selector():
            sel = DefaultSelector()
            # The registered data (0/1) doubles as the stream index.
            sel.register(proc.stdout, EVENT_READ, 0)
            sel.register(proc.stderr, EVENT_READ, 1)
            while True:
                ready = sel.select(line_timeout)
                if not ready and line_timeout:
                    raise ProcessLineTimedOut("popen line timeout expired", getattr(proc, "argv", None), getattr(proc, "machine", None))
                for key, mask in ready:
                    yield key.data, decode(key.fileobj.readline(linesize))
    for ret in selector():
        yield ret
        # Stop multiplexing once the process has terminated.
        if proc.poll() is not None:
            break
    # Drain any buffered output left after exit.
    for line in proc.stdout:
        yield 0, decode(line)
    for line in proc.stderr:
        yield 1, decode(line)
#===================================================================================================
# Exceptions
#===================================================================================================
class ProcessExecutionError(EnvironmentError):
    """Represents the failure of a process. When the exit code of a terminated process does not
    match the expected result, this exception is raised by :func:`run_proc
    <plumbum.commands.run_proc>`. It contains the process' return code, stdout, and stderr, as
    well as the command line used to create the process (``argv``)
    """
    def __init__(self, argv, retcode, stdout, stderr):
        Exception.__init__(self, argv, retcode, stdout, stderr)
        self.argv = argv
        self.retcode = retcode
        # On Python 3, coerce bytes output to an ASCII-escaped str so the
        # exception always renders cleanly.
        if six.PY3 and isinstance(stdout, six.bytes):
            stdout = six.ascii(stdout)
        if six.PY3 and isinstance(stderr, six.bytes):
            stderr = six.ascii(stderr)
        self.stdout = stdout
        self.stderr = stderr
    def __str__(self):
        # Imported here (not at module level) to avoid a circular import
        # with plumbum.commands.base.
        from plumbum.commands.base import shquote_list
        stdout = "\n | ".join(str(self.stdout).splitlines())
        stderr = "\n | ".join(str(self.stderr).splitlines())
        cmd = " ".join(shquote_list(self.argv))
        lines = ["Unexpected exit code: ", str(self.retcode)]
        cmd = "\n | ".join(cmd.splitlines())
        lines += ["\nCommand line: | ", cmd]
        if stdout:
            lines += ["\nStdout: | ", stdout]
        if stderr:
            lines += ["\nStderr: | ", stderr]
        return "".join(lines)
class ProcessTimedOut(Exception):
    """Signals that a process outlived the ``timeout`` given to
    :func:`run_proc <plumbum.commands.run_proc>` and was forcefully
    terminated before finishing."""

    def __init__(self, msg, argv):
        super(ProcessTimedOut, self).__init__(msg, argv)
        self.argv = argv
class ProcessLineTimedOut(Exception):
    """Signals that ``line_timeout`` elapsed in :func:`iter_lines
    <plumbum.commands.iter_lines>` before the process produced another
    line of output."""

    def __init__(self, msg, argv, machine):
        super(ProcessLineTimedOut, self).__init__(msg, argv, machine)
        self.argv = argv
        self.machine = machine
class CommandNotFound(AttributeError):
    """Raised by :func:`local.which <plumbum.machines.local.LocalMachine.which>` and
    :func:`RemoteMachine.which <plumbum.machines.remote.RemoteMachine.which>` when a
    command cannot be located on the system's ``PATH``."""

    def __init__(self, program, path):
        super(CommandNotFound, self).__init__(program, path)
        self.program = program
        self.path = path
#===================================================================================================
# Timeout thread
#===================================================================================================
class MinHeap(object):
    """Minimal min-heap wrapper around :mod:`heapq`.

    Used by the timeout watchdog thread to keep ``(kill_time, proc)``
    pairs ordered by their deadline.
    """
    def __init__(self, items=()):
        self._items = list(items)
        heapq.heapify(self._items)
    def __len__(self):
        return len(self._items)
    def push(self, item):
        """Insert *item*, preserving the heap invariant."""
        heapq.heappush(self._items, item)
    def pop(self):
        """Remove and return the smallest item.

        Bug fix: the original discarded ``heapq.heappop``'s return value
        and implicitly returned ``None``.
        """
        return heapq.heappop(self._items)
    def peek(self):
        """Return the smallest item without removing it.

        Raises IndexError on an empty heap.
        """
        return self._items[0]
# Queue of (proc, absolute_kill_time) pairs consumed by the watchdog thread;
# (SystemExit, 0) is pushed as a poison pill at interpreter shutdown.
_timeout_queue = Queue()
# Set by _shutdown_bg_threads() to tell the watchdog thread to exit quietly.
_shutting_down = False
def _timeout_thread_func():
    """Body of the background watchdog thread.

    Receives (proc, kill_time) pairs from ``_timeout_queue``, keeps them in
    a min-heap ordered by deadline, and kills any process still running
    past its deadline (marking it with ``_timed_out = True``). Exits when
    the (SystemExit, 0) poison pill arrives.
    """
    waiting = MinHeap()
    try:
        while not _shutting_down:
            # Block on the queue only until the earliest pending deadline.
            if waiting:
                ttk, _ = waiting.peek()
                timeout = max(0, ttk - time.time())
            else:
                timeout = None
            try:
                proc, time_to_kill = _timeout_queue.get(timeout=timeout)
                if proc is SystemExit:
                    # terminate
                    return
                waiting.push((time_to_kill, proc))
            except QueueEmpty:
                pass
            # Kill every process whose deadline has already passed.
            now = time.time()
            while waiting:
                ttk, proc = waiting.peek()
                if ttk > now:
                    break
                waiting.pop()
                try:
                    if proc.poll() is None:
                        proc.kill()
                        proc._timed_out = True
                except EnvironmentError:
                    # Process died between poll() and kill(); nothing to do.
                    pass
    except Exception:
        if _shutting_down:
            # to prevent all sorts of exceptions during interpreter shutdown
            pass
        else:
            raise
# Start the watchdog as a daemon so it never blocks interpreter exit.
# setDaemon() is the legacy spelling kept for Python 2 compatibility
# (this module still imports six / the Py2 Queue fallback).
bgthd = Thread(target=_timeout_thread_func, name="PlumbumTimeoutThread")
bgthd.setDaemon(True)
bgthd.start()
def _register_proc_timeout(proc, timeout):
    """Schedule *proc* to be killed ``timeout`` seconds from now via the
    watchdog thread; a ``None`` timeout is a no-op."""
    if timeout is None:
        return
    deadline = time.time() + timeout
    _timeout_queue.put((proc, deadline))
def _shutdown_bg_threads():
    """atexit hook: signal the watchdog thread to stop and briefly wait.

    Sets ``_shutting_down`` so the thread suppresses shutdown-time
    exceptions, then pushes the (SystemExit, 0) poison pill.
    """
    global _shutting_down
    _shutting_down = True
    # Make sure this still exists (don't throw error in atexit!)
    if _timeout_queue:
        _timeout_queue.put((SystemExit, 0))
        # grace period
        bgthd.join(0.1)


atexit.register(_shutdown_bg_threads)
#===================================================================================================
# run_proc
#===================================================================================================
def run_proc(proc, retcode, timeout=None):
    """Wait for *proc* to terminate and validate its exit status.

    :param proc: a running Popen-like object, with all the expected methods.
    :param retcode: the expected return (exit) code; 0 by convention means
                    success. ``None`` disables the check; any object
                    supporting ``__contains__`` (e.g. a tuple) may list
                    several acceptable codes.
    :param timeout: seconds (``float``) the process may run before being
                    forcefully killed; ``None`` imposes no limit. On expiry
                    :class:`ProcessTimedOut <plumbum.cli.ProcessTimedOut>`
                    is raised.
    :returns: a ``(return code, stdout, stderr)`` tuple.
    """
    _register_proc_timeout(proc, timeout)
    stdout, stderr = proc.communicate()
    proc._end_time = time.time()
    # Normalize empty/None output to empty byte strings.
    stdout = stdout or six.b("")
    stderr = stderr or six.b("")
    encoding = getattr(proc, "custom_encoding", None)
    if encoding:
        stdout = stdout.decode(encoding, "ignore")
        stderr = stderr.decode(encoding, "ignore")
    return _check_process(proc, retcode, timeout, stdout, stderr)
#===================================================================================================
# iter_lines
#===================================================================================================
# Sentinel modes for iter_lines(): BY_POSITION yields (out, err) tuples with
# one slot filled; BY_TYPE yields (1|2, line) where 1=stdout, 2=stderr.
BY_POSITION = object()
BY_TYPE = object()
DEFAULT_ITER_LINES_MODE = BY_POSITION
def iter_lines(proc,
               retcode=0,
               timeout=None,
               linesize=-1,
               line_timeout=None,
               mode=None,
               _iter_lines=_iter_lines,
               ):
    """Runs the given process (equivalent to run_proc()) and yields a tuples of (out, err) line pairs.
    If the exit code of the process does not match the expected one, :class:`ProcessExecutionError
    <plumbum.commands.ProcessExecutionError>` is raised.

    :param retcode: The expected return code of this process (defaults to 0).
                    In order to disable exit-code validation, pass ``None``. It may also
                    be a tuple (or any iterable) of expected exit codes.

    :param timeout: The maximal amount of time (in seconds) to allow the process to run.
                    ``None`` means no timeout is imposed; otherwise, if the process hasn't
                    terminated after that many seconds, the process will be forcefully
                    terminated an exception will be raised

    :param linesize: Maximum number of characters to read from stdout/stderr at each iteration.
                    ``-1`` (default) reads until a b'\\n' is encountered.

    :param line_timeout: The maximal amount of time (in seconds) to allow between consecutive lines in either stream.
                    Raise an :class:`ProcessLineTimedOut <plumbum.commands.ProcessLineTimedOut>` if the timeout has
                    been reached. ``None`` means no timeout is imposed.

    :param mode: ``BY_POSITION`` (default) or ``BY_TYPE``; controls the shape
                    of the yielded tuples (see the module-level sentinels).

    :returns: An iterator of (out, err) line tuples.
    """
    if mode is None:
        mode = DEFAULT_ITER_LINES_MODE
    assert mode in (BY_POSITION, BY_TYPE)
    encoding = getattr(proc, "custom_encoding", None)
    if encoding:
        decode = lambda s: s.decode(encoding).rstrip()
    else:
        decode = lambda s: s
    _register_proc_timeout(proc, timeout)
    # Accumulate full stdout/stderr so the final retcode check can report them.
    buffers = [StringIO(), StringIO()]
    for t, line in _iter_lines(proc, decode, linesize, line_timeout):
        # verify that the proc hasn't timed out yet
        proc.verify(timeout=timeout, retcode=None, stdout=None, stderr=None)
        buffers[t].write(line + "\n")
        if mode is BY_POSITION:
            ret = [None, None]
            ret[t] = line
            yield tuple(ret)
        elif mode is BY_TYPE:
            yield (t + 1), line  # 1=stdout, 2=stderr
    # this will take care of checking return code and timeouts
    _check_process(proc, retcode, timeout, *(s.getvalue() for s in buffers))
| {
"content_hash": "1fe96f48466e18ec260147ef8ab2ec88",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 136,
"avg_line_length": 36.9559748427673,
"alnum_prop": 0.5445881552076243,
"repo_name": "AndydeCleyre/plumbum",
"id": "c5ab2573c8f37cc0eb60701c5d3e123e32258c5d",
"size": "11752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plumbum/commands/processes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "52"
},
{
"name": "Python",
"bytes": "440514"
},
{
"name": "Shell",
"bytes": "810"
}
],
"symlink_target": ""
} |
DOCUMENTATION = '''This module will add user principals to kerberos if it doesn't exists. It wont change password if user already exist in kerberos.'''
EXAMPLES = '''
- name: add user
kadduser: name='root' password='kerberos_password'
'''
from ansible.module_utils.basic import *
from subprocess import PIPE, Popen
# arguments that the module gets in various actions
MODULE_ARGUMENTS = {
    'name': {'type': 'str', 'required': True},  # principal name to ensure
    'password': {'type': 'str'},                # only applied at creation time
    'params': {'type': 'str'}                   # extra flags for 'addprinc'
}
def execute(cmd, scnd_command=None):
    """Run a ``kadmin.local`` query, optionally piping its output onward.

    :param cmd: the kadmin.local query string (e.g. ``list_principals``).
    :param scnd_command: optional shell command to pipe stdout through
                         (e.g. ``grep "foo@"``); ``None`` disables the pipe.
    :returns: ``(stdout, stderr)`` of the spawned shell.
    """
    # NOTE(security): cmd and scnd_command are interpolated into a shell
    # string (shell=True); callers must never pass untrusted input here.
    full_cmd = 'kadmin.local -q "{0}" '.format(cmd)
    if scnd_command is not None:  # was: scnd_command != None
        full_cmd += ' | {0}'.format(scnd_command)
    proc = Popen(full_cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    # communicate() already waited; wait() just collects the final status.
    proc.wait()
    return out, err
def main():
    """Ansible entry point: ensure the requested Kerberos principal exists.

    Creates the principal with ``kadmin.local addprinc`` when missing; an
    existing principal is left untouched (its password is NOT changed --
    delete the principal first to change it). Exits through
    ``module.exit_json`` / ``module.fail_json``.
    """
    module = AnsibleModule(argument_spec=MODULE_ARGUMENTS)
    name_a = module.params.get('name', None)
    password_a = module.params.get('password', None)
    params_a = module.params.get('params', '')

    std_o, err_o = execute('list_principals', ' grep "{0}@"'.format(name_a))
    if err_o:  # was: err_o != '' and err_o != None
        module.fail_json(msg='Kerberos error {0}'.format(err_o))

    changed = False
    # Empty grep output means the principal does not exist yet.
    if not std_o:
        cmd_a = 'addprinc '
        if password_a:  # was: password_a != None and password_a != ''
            cmd_a += '-pw {1} '
        elif '-nokey' not in params_a:
            cmd_a += '-randkey '
        cmd_a += '{2} {0}'
        std_o, err_o = execute(cmd_a.format(name_a, password_a, params_a))
        # kadmin warnings start with 'W'; only hard errors fail the task.
        # NOTE(review): on Python 3, execute() returns bytes, so err_o[0] is
        # an int -- this comparison (kept from the original) assumes the
        # Python 2 str behaviour; confirm target interpreter.
        if err_o and err_o[0] != 'W':
            module.fail_json(msg='Kerberos error {0}'.format(err_o))
        changed = True

    module.exit_json(changed=changed, msg='Everything is done')


main()
| {
"content_hash": "ee1daf9426b2209267a5f1ca498f2d76",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 151,
"avg_line_length": 33.836363636363636,
"alnum_prop": 0.6415905427189683,
"repo_name": "trustedanalytics/platform-ansible",
"id": "c570847043df51f5876fc87c4d0576683490ceff",
"size": "2667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/kadduser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23338"
},
{
"name": "Nginx",
"bytes": "1331"
},
{
"name": "PHP",
"bytes": "566"
},
{
"name": "Python",
"bytes": "69357"
},
{
"name": "Shell",
"bytes": "51660"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import copy
import matplotlib.cm as cm
# Make sure that caffe is on the python path:
caffe_root = '../' # this file is expected to be in {caffe_root}/examples
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
# Set the right path to your model definition file, pretrained model weights,
# and the image you would like to classify.
MODEL_FILE = '../models/bvlc_reference_caffenet/deploy_fc8.prototxt'
PRETRAINED = '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'
IMAGE_FILE = 'images/cat.jpg'
caffe.set_mode_cpu()
# Classifier wraps the net with ImageNet-style preprocessing: mean
# subtraction, RGB->BGR channel swap, scaling to [0, 255], and 256x256
# resizing ahead of the 227x227 network input.
net = caffe.Classifier(MODEL_FILE, PRETRAINED,
                       mean=np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1),
                       channel_swap=(2,1,0),
                       raw_scale=255,
                       image_dims=(256, 256))
input_image = caffe.io.load_image(IMAGE_FILE)
input_image = input_image  # NOTE(review): redundant self-assignment, kept as-is
n_iterations = 10000  # NOTE(review): unused below -- presumably a leftover knob
label_index = 281  # Index for cat class
caffe_data = np.random.random((1,3,227,227))  # NOTE(review): unused below
# One-hot target vector at the cat class, used to seed the backward pass.
caffeLabel = np.zeros((1,1000,1,1))
caffeLabel[0,label_index,0,0] = 1;
def visSquare(data1, padsize=1, padval=0):
    """Tile a stack of filters/images into one square grid and display it.

    :param data1: array of shape (n, height, width) or (n, height, width, 3);
                  left unmodified (a deep copy is normalized instead).
    :param padsize: pixels of padding between tiles.
    :param padval: value for the padding pixels (applied after normalization,
                   so 0 renders black and 1 renders white).
    :returns: the tiled 2-D (or height x width x 3) array that was displayed.
    """
    data = copy.deepcopy(data1)
    # Normalize to [0, 1] for display.
    data -= data.min()
    peak = data.max()
    if peak > 0:  # fix: constant input previously produced 0/0 -> NaN image
        data /= peak
    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
    # tile the filters into an image: (n, n, h, w, ...) -> (n*h, n*w, ...)
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    plt.imshow(data)
    plt.show(block=False)
    return data
#Perform a forward pass with the data as the input image
pred = net.predict([input_image])
#Perform a backward pass for the cat class (281)
bw = net.backward(**{net.outputs[0]: caffeLabel})#
diff = bw['data']
# Find the saliency map as described in the paper. Normalize the map and assign it to variable "saliency"
diff -= diff.min()
diff /= diff.max()
diff_sq = np.squeeze(diff)
# Per-pixel saliency: max over the (normalized) channel gradients.
saliency = np.amax(diff_sq,axis=0)
#display the saliency map alongside the (deprocessed) network input
plt.subplot(1,2,1)
plt.imshow(saliency, cmap=cm.gray_r)
plt.subplot(1,2,2)
plt.imshow(net.transformer.deprocess('data', net.blobs['data'].data[0]))
#plt.show()
plt.savefig('ps3part2.png')
| {
"content_hash": "b0554f2679c4268652e9fd72abc955ae",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 111,
"avg_line_length": 33.171052631578945,
"alnum_prop": 0.6648155493851646,
"repo_name": "smajida/Deep_Inside_Convolutional_Networks",
"id": "fd8e90272a392f3d121e94f6d501b29f8995fb8b",
"size": "2521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "class_saliency_extraction.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9060"
}
],
"symlink_target": ""
} |
"""
Filter the event file and the exposure map, divide by CCD, then run xtdac on each CCD
"""
import argparse
import glob
import os
import sys
import astropy.io.fits as pyfits
from chandra_suli import find_files
from chandra_suli import logging_system
from chandra_suli.data_package import DataPackage
from chandra_suli.run_command import CommandRunner
def filter_exposure_map(exposure_map, regions_file, eventfile, new_exposure_map, resample_factor=1):
    """
    Mask the given regions out of an exposure map by running xtcheesemask.

    :param exposure_map: path of the input exposure map
    :param regions_file: either a FITS region file (with a SRCREG extension)
                         or a ds9 region file (file name containing ".reg")
    :param eventfile: event file matching the exposure map; its WCS and
                      hardware-unit keywords are needed by xtcheesemask
    :param new_exposure_map: path of the filtered exposure map to create
    :param resample_factor: resampling factor passed through to xtcheesemask
    """
    # xtcheesemask needs a file carrying both the region table and the
    # WCS/ccd keywords. If we got a FITS region file, build such a scratch
    # file; a ds9 .reg file can be passed to xtcheesemask as-is.
    made_temp_file = regions_file.find(".reg") < 0
    if made_temp_file:
        # Generate an almost empty event file which will be used by xtcheesemask
        # to extract the WCS and the characteristics of the hardware unit
        # (i.e., of the ccd)
        with pyfits.open(eventfile) as f:
            small_data = f['EVENTS'].data[:2]
            header = f['EVENTS'].header
        new_hdu = pyfits.BinTableHDU(data=small_data, header=header)
        # Now append the region table
        with pyfits.open(regions_file) as f:
            region_hdu = f['SRCREG']
            hdu_list = pyfits.HDUList([pyfits.PrimaryHDU(), new_hdu, region_hdu])
            temp_file = '___2_events.fits'
            hdu_list.writeto(temp_file, clobber=True)
    else:
        temp_file = regions_file
    cmd_line = "xtcheesemask.py -i %s -r %s -o %s -s %s --no-reverse" \
               % (exposure_map, temp_file, new_exposure_map, resample_factor)
    runner.run(cmd_line)
    # BUGFIX: only delete the scratch file we created ourselves. The previous
    # version removed temp_file unconditionally, which destroyed the caller's
    # .reg file (e.g. the streak region file that is reused for every CCD).
    if made_temp_file:
        os.remove(temp_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run Bayesian Block algorithm')
    parser.add_argument("-d", "--datarepository", help="Path to the data repository, where all the packages for all "
                                                       "observations are", type=str, required=True)
    parser.add_argument("-o", "--obsid", help="Observation ID Numbers", type=int, required=True, nargs="+")
    parser.add_argument('-r', '--region_repo', help="Path to the repository of region files",
                        type=str, required=True)
    parser.add_argument('-a', "--adj_factor",
                        help="If region files need to be adjusted, what factor to increase axes of ellipses by",
                        type=float, required=True)
    parser.add_argument("-e1", "--emin", help="Minimum energy (eV)", type=int, required=True)
    parser.add_argument("-e2", "--emax", help="Maximum energy (eV)", type=int, required=True)
    parser.add_argument("-c", "--ncpus", help="Number of CPUs to use (default=1)",
                        type=int, default=1, required=False)
    parser.add_argument("-p", "--typeIerror",
                        help="Type I error probability for the Bayesian Blocks algorithm.",
                        type=float,
                        default=1e-5,
                        required=False)
    parser.add_argument("-s", "--sigmaThreshold",
                        help="Threshold for the final significance. All intervals found "
                             "by the bayesian blocks "
                             "algorithm which does not surpass this threshold will not be saved in the "
                             "final file.",
                        type=float,
                        default=5.0,
                        required=False)
    parser.add_argument("-m", "--multiplicity", help="Control the overlap of the regions."
                                                     " A multiplicity of 2 means the centers of the regions are"
                                                     " shifted by 1/2 of the region size (they overlap by 50 percent),"
                                                     " a multiplicity of 4 means they are shifted by 1/4 of "
                                                     " their size (they overlap by 75 percent), and so on.",
                        required=False, default=2.0, type=float)
    parser.add_argument("-v", "--verbosity", help="Info or debug", type=str, required=False, default='info',
                        choices=['info', 'debug'])
    # Get the logger
    logger = logging_system.get_logger(os.path.basename(sys.argv[0]))
    # Get the command runner (used to shell out to all the pipeline scripts)
    runner = CommandRunner(logger)
    args = parser.parse_args()
    # Process each requested observation independently
    for this_obsid in args.obsid:
        # Get the data package for the input data
        data_package = DataPackage(os.path.join(args.datarepository, str(this_obsid)))
        # NOTE: .get() will copy the file here (the return values are kept
        # only for that side effect; the names themselves are unused below)
        evtfile = data_package.get('evt3')
        tsvfile = data_package.get('tsv')
        expfile = data_package.get('exp3')
        # Prepare output package
        out_package = DataPackage(str(this_obsid), create=True)
        # Make sure it is empty, otherwise empty it
        out_package.clear()
        #######################################
        # Filtering
        #######################################
        # Figure out the path for the regions files for this obsid
        region_dir = os.path.join(os.path.expandvars(os.path.expanduser(args.region_repo)), '%s' % this_obsid)
        cmd_line = "filter_event_file.py --region_dir %s --in_package %s --out_package %s --emin %d --emax %d " \
                   "--adj_factor %s --randomize_time" \
                   % (region_dir, data_package.location, out_package.location,
                      args.emin, args.emax, args.adj_factor)
        runner.run(cmd_line)
        # Products are: filtered_evt3, all_regions and (if any) streak_regions_ds9
        ###### Remove hot pixels
        events_no_hot_pixels = '%s_filtered_nohot.fits' % this_obsid
        cmd_line = "prefilter_hot_pixels.py --evtfile %s --outfile %s" % (out_package.get('filtered_evt3').filename,
                                                                          events_no_hot_pixels)
        runner.run(cmd_line)
        out_package.store('filtered_nohot', events_no_hot_pixels,
                          "Filtered event file (evt3) with events in hot pixels removed")
        #######################################
        # Separate CCDs
        #######################################
        cmd_line = "separate_CCD.py --evtfile %s" % out_package.get('filtered_nohot').filename
        runner.run(cmd_line)
        ccd_files = find_files.find_files('.', 'ccd*%s*fits' % this_obsid)
        #######################################
        # Run Bayesian Block on each CCD
        #######################################
        for ccd_file in ccd_files:
            # Get the root of the ccd filename and the ccd number (will be used to name the files)
            ccd_root = os.path.splitext(os.path.basename(ccd_file))[0]
            ccd_number = os.path.basename(ccd_file).split("_")[1]
            logger.info("########################################")
            logger.info("Processing CCD %s..." % ccd_number)
            logger.info("########################################")
            # First filter the exposure map
            filtered_expomap = 'ccd_%s_filtered_expomap.fits' % ccd_number
            # xtcheesemask, used by filter_exposure_map, cannot overwrite files, so delete the file
            # if existing
            try:
                os.remove(filtered_expomap)
            except:
                # best-effort cleanup: the file usually doesn't exist yet
                pass
            # NOTE: use only a resample factor of 1, or the destreaking will fail
            filter_exposure_map(data_package.get('exp3').filename, out_package.get('all_regions').filename,
                                ccd_file, filtered_expomap, resample_factor=1)
            if out_package.has('streak_regions_ds9'):
                # Filter also for the streaks
                temp_file = '__expomap_temp.fits'
                filter_exposure_map(filtered_expomap, out_package.get('streak_regions_ds9').filename,
                                    ccd_file, temp_file, resample_factor=1)
                os.remove(filtered_expomap)
                os.rename(temp_file, filtered_expomap)
            # Register the filtered expomap
            out_package.store('ccd_%s_filtered_expomap' % ccd_number, filtered_expomap,
                              "Expomap for CCD %s, filtered for all the regions which have been "
                              "used for the event file" % ccd_number)
            ###### XTDAC #########
            cmd_line = "xtdac.py -e %s -x %s -w yes -c %s -p %s -s %s -m %s -v %s --max_duration 50000 " \
                       "--transient_pos" \
                       % (ccd_file, filtered_expomap, args.ncpus, args.typeIerror,
                          args.sigmaThreshold, args.multiplicity, args.verbosity)
            runner.run(cmd_line)
            #####################
            # Now register the output in the output data package
            raw_candidate_list_file = "ccd_%s_%s_filtered_nohot_res.txt" % (ccd_number, this_obsid)
            out_package.store("ccd_%s_raw_list" % ccd_number, raw_candidate_list_file,
                              "Unfiltered list of candidates for CCD %s (output of xtdac)" % ccd_number)
            out_package.store("ccd_%s_xtdac_html" % ccd_number, "ccd_%s_%s_filtered_nohot_res.html" % (ccd_number,
                                                                                                       this_obsid),
                              "HTML file produced by xtdac, containing the unfiltered list of candidates "
                              "for ccd %s" % ccd_number)
            # Register every per-candidate ds9 region file produced by xtdac
            output_files = glob.glob("ccd_%s_%s_*candidate*.reg" % (ccd_number, this_obsid))
            for i, output in enumerate(output_files):
                reg_id = output.split("_")[-1].split(".reg")[0]
                out_package.store("ccd_%s_candidate_reg%s" % (ccd_number, reg_id), output,
                                  "Ds9 region file for candidate %s" % reg_id)
            #######################################
            # Filter candidate list
            #######################################
            # Hot pixels
            check_hp_file = "check_hp_%s_%s.txt" % (ccd_number, this_obsid)
            cmd_line = "check_hot_pixel_revised.py --obsid %s --evtfile %s --bbfile %s --outfile %s --debug no" \
                       % (this_obsid, ccd_file, raw_candidate_list_file, check_hp_file)
            runner.run(cmd_line)
            # Register output
            out_package.store("ccd_%s_check_hp" % ccd_number, check_hp_file,
                              "List of candidates for CCD %s with hot pixels flagged" % ccd_number)
            # Variable sources
            check_var_file = "check_var_%s_%s.txt" % (ccd_number, this_obsid)
            cmd_line = "check_variable_revised.py --bbfile %s --outfile %s --eventfile %s" \
                       % (check_hp_file, check_var_file, ccd_file)
            runner.run(cmd_line)
            # Register output
            out_package.store("ccd_%s_check_var" % ccd_number, check_var_file,
                              "List of candidates for CCD %s with hot pixels and variable sources flagged" % ccd_number)
        # Now add candidates to master list (one list for this obsid)
        candidate_file = "%s_all_candidates.txt" % this_obsid
        cmd_line = "add_to_masterlist.py --package %s --masterfile %s" % (out_package.location, candidate_file)
        runner.run(cmd_line)
        # Reopen the file and write the command line which generated this analysis as a comment
        with open(candidate_file, "a") as f:
            f.write("\n# command line:\n# %s\n" % " ".join(sys.argv))
        out_package.store("candidates", candidate_file, "List of all candidates found in all CCDs")
| {
"content_hash": "fe4244a188dcfc64b6cf7455056f43f8",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 120,
"avg_line_length": 39.245791245791246,
"alnum_prop": 0.5307995881949211,
"repo_name": "nitikayad96/chandra_suli",
"id": "20dc7d3c726892985429751dedb0b50ffa0b9518",
"size": "11679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chandra_suli/farm_step2.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "139543"
}
],
"symlink_target": ""
} |
from typing import List
from test_framework import generic_test
def buy_and_sell_stock_once(prices: List[float]) -> float:
    """
    Return the maximum profit obtainable by buying the stock once and
    selling it on a later day.

    Single pass: track the cheapest price seen so far and the best profit
    achievable by selling at the current price.  O(n) time, O(1) space.
    Returns 0.0 for an empty list or when no profitable trade exists
    (prices monotonically non-increasing).
    """
    max_profit = 0.0
    min_price_so_far = float('inf')
    for price in prices:
        # Profit if we sold today, having bought at the cheapest prior price.
        max_profit = max(max_profit, price - min_price_so_far)
        min_price_so_far = min(min_price_so_far, price)
    return max_profit
if __name__ == '__main__':
    # Delegate to the EPI judge harness: run the function above against the
    # expected results stored in the companion .tsv data file.
    exit(
        generic_test.generic_test_main('buy_and_sell_stock.py',
                                       'buy_and_sell_stock.tsv',
                                       buy_and_sell_stock_once))
| {
"content_hash": "0bdaeab1af0b6d33e50951b6325054a1",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 64,
"avg_line_length": 27,
"alnum_prop": 0.5308641975308642,
"repo_name": "shobhitmishra/CodingProblems",
"id": "9f093fd01a931416d94b4649aab753669e224603",
"size": "405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "epi_judge_python/buy_and_sell_stock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "854"
},
{
"name": "Makefile",
"bytes": "31844"
},
{
"name": "Python",
"bytes": "437556"
}
],
"symlink_target": ""
} |
import getpass
import sys
# Read a password: hidden prompt on a real terminal, plain line otherwise.
if sys.stdin.isatty():
    # Interactive terminal: prompt without echoing the typed password.
    p = getpass.getpass('Using getpass:')
else:
    # stdin is redirected (pipe/file): fall back to reading one line and
    # stripping the trailing newline (input will not be hidden).
    print('Using readline')
    p = sys.stdin.readline().rstrip()
print('Read', p) | {
"content_hash": "a8108d90213a89e6d2eb193fc8bfd8ac",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 41,
"avg_line_length": 22.5,
"alnum_prop": 0.6666666666666666,
"repo_name": "gaufung/PythonStandardLibrary",
"id": "84a97cff1808af227e90924ecda1f78bc4376b1b",
"size": "180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ApplicationBuildingBlock/getpass/getpass_noterminal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3175176"
},
{
"name": "Python",
"bytes": "70796"
}
],
"symlink_target": ""
} |
import unittest
from contextlib import contextmanager
from llvmlite import ir
from numba.core import types, typing, callconv, cpu, cgutils
from numba.core.registry import cpu_target
class TestCompileCache(unittest.TestCase):
    '''
    Tests that the caching in BaseContext.compile_internal() works correctly by
    checking the state of the cache when it is used by the CPUContext.
    '''
    @contextmanager
    def _context_builder_sig_args(self):
        # Set up a throwaway code library, LLVM module and declared function
        # so the tests can call context.compile_internal() with a real IR
        # builder. Yields (target context, IR builder, int32->int32
        # signature, function arguments).
        typing_context = cpu_target.typing_context  # NOTE(review): unused
        context = cpu_target.target_context
        lib = context.codegen().create_library('testing')
        with context.push_code_library(lib):
            module = ir.Module("test_module")
            sig = typing.signature(types.int32, types.int32)
            llvm_fnty = context.call_conv.get_function_type(sig.return_type,
                                                            sig.args)
            function = cgutils.get_or_insert_function(module, llvm_fnty,
                                                      'test_fn')
            args = context.call_conv.get_arguments(function)
            assert function.is_declaration
            entry_block = function.append_basic_block('entry')
            builder = ir.IRBuilder(entry_block)
            yield context, builder, sig, args
    def test_cache(self):
        """
        Compiling the same function twice must reuse the cache entry;
        a different function or a different signature must add a new one.
        """
        def times2(i):
            return 2*i
        def times3(i):
            return i*3
        with self._context_builder_sig_args() as (
            context, builder, sig, args,
        ):
            initial_cache_size = len(context.cached_internal_func)
            # Ensure the cache is empty to begin with
            self.assertEqual(initial_cache_size + 0,
                             len(context.cached_internal_func))
            # After one compile, it should contain one entry
            context.compile_internal(builder, times2, sig, args)
            self.assertEqual(initial_cache_size + 1,
                             len(context.cached_internal_func))
            # After a second compilation of the same thing, it should still contain
            # one entry
            context.compile_internal(builder, times2, sig, args)
            self.assertEqual(initial_cache_size + 1,
                             len(context.cached_internal_func))
            # After compilation of another function, the cache should have grown by
            # one more.
            context.compile_internal(builder, times3, sig, args)
            self.assertEqual(initial_cache_size + 2,
                             len(context.cached_internal_func))
            # Build a second function with a float64->float64 signature to
            # verify that the signature participates in the cache key.
            sig2 = typing.signature(types.float64, types.float64)
            llvm_fnty2 = context.call_conv.get_function_type(sig2.return_type,
                                                             sig2.args)
            function2 = cgutils.get_or_insert_function(builder.module,
                                                       llvm_fnty2, 'test_fn_2')
            args2 = context.call_conv.get_arguments(function2)
            assert function2.is_declaration
            entry_block2 = function2.append_basic_block('entry')
            builder2 = ir.IRBuilder(entry_block2)
            # Ensure that the same function with a different signature does not
            # reuse an entry from the cache in error
            context.compile_internal(builder2, times3, sig2, args2)
            self.assertEqual(initial_cache_size + 3,
                             len(context.cached_internal_func))
    def test_closures(self):
        """
        Caching must not mix up closures reusing the same code object.
        """
        def make_closure(x, y):
            def f(z):
                return y + z
            return f
        with self._context_builder_sig_args() as (
            context, builder, sig, args,
        ):
            # Closures with distinct cell contents must each be compiled.
            # (the res* return values themselves are not inspected; only the
            # cache size matters here)
            clo11 = make_closure(1, 1)
            clo12 = make_closure(1, 2)
            clo22 = make_closure(2, 2)
            initial_cache_size = len(context.cached_internal_func)
            res1 = context.compile_internal(builder, clo11, sig, args)
            self.assertEqual(initial_cache_size + 1,
                             len(context.cached_internal_func))
            res2 = context.compile_internal(builder, clo12, sig, args)
            self.assertEqual(initial_cache_size + 2,
                             len(context.cached_internal_func))
            # Same cell contents as above (first parameter isn't captured)
            res3 = context.compile_internal(builder, clo22, sig, args)
            self.assertEqual(initial_cache_size + 2,
                             len(context.cached_internal_func))
    def test_error_model(self):
        """
        Caching must not mix up different error models.
        """
        def inv(x):
            return 1.0 / x
        inv_sig = typing.signature(types.float64, types.float64)
        def compile_inv(context):
            return context.compile_subroutine(builder, inv, inv_sig)
        with self._context_builder_sig_args() as (
            context, builder, sig, args,
        ):
            py_error_model = callconv.create_error_model('python', context)
            np_error_model = callconv.create_error_model('numpy', context)
            py_context1 = context.subtarget(error_model=py_error_model)
            py_context2 = context.subtarget(error_model=py_error_model)
            np_context = context.subtarget(error_model=np_error_model)
            initial_cache_size = len(context.cached_internal_func)
            # Note the parent context's cache is shared by subtargets
            self.assertEqual(initial_cache_size + 0,
                             len(context.cached_internal_func))
            # Compiling with the same error model reuses the same cache slot
            compile_inv(py_context1)
            self.assertEqual(initial_cache_size + 1,
                             len(context.cached_internal_func))
            compile_inv(py_context2)
            self.assertEqual(initial_cache_size + 1,
                             len(context.cached_internal_func))
            # Compiling with another error model creates a new cache slot
            compile_inv(np_context)
            self.assertEqual(initial_cache_size + 2,
                             len(context.cached_internal_func))
if __name__ == '__main__':
    # Run the test case above when the module is executed directly.
    unittest.main()
| {
"content_hash": "814351d08dc5438d37ab46c4f41cfa49",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 83,
"avg_line_length": 42.19607843137255,
"alnum_prop": 0.5707868649318464,
"repo_name": "IntelLabs/numba",
"id": "edeab592d3e69b190fd40515b0aaf5167c2fd4a7",
"size": "6456",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "numba/tests/test_compile_cache.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6984"
},
{
"name": "C",
"bytes": "639446"
},
{
"name": "C++",
"bytes": "93702"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "8764393"
},
{
"name": "Shell",
"bytes": "13542"
}
],
"symlink_target": ""
} |
from datetime import datetime
from HTMLParser import HTMLParser
import argparse
import json
import os
import sys
import urllib2
###############################################################################
# Options
###############################################################################
MAGIC_URL = 'http://magic.gae-init.appspot.com'
PARSER = argparse.ArgumentParser(description='Visit %s for more.' % MAGIC_URL)
PARSER.add_argument(
'-p', '--project', dest='project_id', action='store',
help='project ID of the project that you want to sync',
)
PARSER.add_argument(
'-r', '--remote', dest='remote_url', action='store', default=MAGIC_URL,
help="set the remote URL if it's not http://magic.gae-init.appspot.com",
)
ARGS = PARSER.parse_args()
###############################################################################
# Constants
###############################################################################
DIR_MAIN = 'main'
DIR_CONTROL = os.path.join(DIR_MAIN, 'control')
FILE_CONTROL_INIT = os.path.join(DIR_CONTROL, '__init__.py')
DIR_MODEL = os.path.join(DIR_MAIN, 'model')
FILE_MODEL_INIT = os.path.join(DIR_MODEL, '__init__.py')
DIR_API = os.path.join(DIR_MAIN, 'api', 'v1')
FILE_API_INIT = os.path.join(DIR_API, '__init__.py')
DIR_TEMPLATES = os.path.join(DIR_MAIN, 'templates')
FILE_HEADER = os.path.join(DIR_TEMPLATES, 'bit', 'header.html')
FILE_ADMIN = os.path.join(DIR_TEMPLATES, 'admin', 'admin.html')
###############################################################################
# Helpers
###############################################################################
def print_out(script, filename=''):
    """Print one timestamped, column-aligned log line:
    '[HH:MM:SS] <script> <filename>'."""
    timestamp = datetime.now().strftime('%H:%M:%S')
    if not filename:
        # No filename given: pad both columns with dashes so output aligns.
        filename = '-' * 46
        script = script.rjust(12, '-')
    print '[%s] %12s %s' % (timestamp, script, filename)
def make_dirs(directory):
    """Ensure the parent directory of *directory* exists, creating all
    intermediate directories when missing."""
    parent = os.path.dirname(directory)
    if not os.path.exists(parent):
        os.makedirs(parent)
def append_to(project_url, destination):
    """Download magic/<destination> from the remote and append its content
    to the local file *destination*, unless it is already present there."""
    # The remote exposes generated snippets under <project_url>magic/<path>
    url = ('%smagic/%s' % (project_url, destination)).replace('\\', '/')
    response = urllib2.urlopen(url)
    if response.getcode() == 200:
        with open(destination, 'r') as dest:
            lines = ''.join(dest.readlines())
        content = response.read()
        if content in lines:
            # Already synced: don't append duplicate content.
            print_out('IGNORED', destination)
            return
        with open(destination, 'a') as dest:
            dest.write(content)
        print_out('APPEND', destination)
def insert_to(project_url, destination, find_what, indent=0):
    """Download magic/<destination> and insert its (HTML-unescaped) content
    into the local file right after every line matching *find_what*,
    re-indented to that line's indentation plus *indent* spaces.
    Skips the insert if the content (ignoring spaces) is already present."""
    url = ('%smagic/%s' % (project_url, destination)).replace('\\', '/')
    response = urllib2.urlopen(url)
    if response.getcode() == 200:
        with open(destination, 'r') as dest:
            dest_contents = dest.readlines()
            lines = ''.join(dest_contents)
        content = HTMLParser().unescape(response.read())
        # Space-insensitive containment check to tolerate reformatted files
        if content.replace(' ', '') in lines.replace(' ', ''):
            print_out('IGNORED', destination)
            return
        generated = []
        for line in dest_contents:
            generated.append(line)
            # Case-insensitive anchor match; insert right below the anchor
            if line.lower().find(find_what.lower()) >= 0:
                spaces = len(line) - len(line.lstrip())
                for l in content.split('\n'):
                    if l:
                        generated.append('%s%s\n' % (' ' * (spaces + indent), l))
        with open(destination, 'w') as dest:
            for line in generated:
                dest.write(line)
        print_out('INSERT', destination)
def create_file(project_url, destination):
    """Download magic/<destination> and write it to the local path
    *destination*, creating parent directories and overwriting any
    existing file."""
    make_dirs(destination)
    url = ('%smagic/%s' % (project_url, destination)).replace('\\', '/')
    response = urllib2.urlopen(url)
    if response.getcode() == 200:
        with open(destination, 'w') as dest:
            dest.write('%s\n' % HTMLParser().unescape(response.read()))
        print_out('CREATE', destination)
def get_project_db():
    """Fetch the requested project's metadata from the remote magic API.

    Returns the decoded project dict with its API URL stored under
    'project_url', or None when the remote does not answer with HTTP 200.
    """
    project_id = ARGS.project_id.split('/')[0]
    url = '%s/api/v1/project/%s/' % (ARGS.remote_url, project_id)
    response = urllib2.urlopen(url)
    if response.getcode() != 200:
        return None
    project_db = json.loads(response.read())['result']
    project_db['project_url'] = url
    return project_db
def sync_from_magic(project_db):
    """Pull all generated scaffolding for *project_db* from the remote:
    fetch the model list, patch the shared init/template files, then create
    the per-model control/api/template files."""
    model_dbs = {}
    project_url = project_db['project_url']
    model_url = '%smodel/' % project_url
    response = urllib2.urlopen(model_url)
    if response.getcode() == 200:
        models_body = response.read()
        model_dbs = json.loads(models_body)['result']
    print_out('UPDATING')
    # Shared files: append generated code / insert menu and admin entries
    append_to(project_url, FILE_MODEL_INIT)
    append_to(project_url, FILE_CONTROL_INIT)
    append_to(project_url, FILE_API_INIT)
    insert_to(project_url, FILE_HEADER, '<ul class="nav navbar-nav">', 2)
    insert_to(project_url, FILE_ADMIN, "url_for('user_list'")
    # Per-model files: model, controller, API module and all templates
    for index, model_db in enumerate(model_dbs):
        print_out('%d of %d' % (index + 1, project_db['model_count']))
        name = model_db['variable_name']
        create_file(project_url, os.path.join(DIR_MODEL, '%s.py' % name))
        create_file(project_url, os.path.join(DIR_CONTROL, '%s.py' % name))
        create_file(project_url, os.path.join(DIR_API, '%s.py' % name))
        root = os.path.join(DIR_TEMPLATES, name)
        create_file(project_url, os.path.join(root, '%s_update.html' % name))
        create_file(project_url, os.path.join(root, '%s_view.html' % name))
        create_file(project_url, os.path.join(root, '%s_list.html' % name))
        create_file(project_url, os.path.join(root, 'admin_%s_update.html' % name))
        create_file(project_url, os.path.join(root, 'admin_%s_list.html' % name))
###############################################################################
# Main
###############################################################################
def magic():
    """Command-line entry point: validate arguments, confirm with the user,
    then sync the selected project from the remote."""
    if len(sys.argv) == 1:
        # No arguments at all: show usage and bail out.
        PARSER.print_help()
        sys.exit(1)
    # Work relative to this script's directory so the main/... paths resolve.
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    if ARGS.project_id:
        project_db = get_project_db()
        answer = raw_input(
            'Are you sure you want to sync "%(name)s" with %(model_count)d '
            'model(s) that was modified on %(modified)s? (Y/n): '
            % {
                'name': project_db['name'],
                'model_count': project_db['model_count'],
                'modified': project_db['modified'][:16].replace('T', ' at '),
            }
        )
        # Empty answer defaults to "yes"
        if not answer or answer.lower() == 'y':
            sync_from_magic(project_db)
    else:
        print 'Project ID is not provided.'
        PARSER.print_help()
if __name__ == '__main__':
    # Run the CLI when executed directly.
    magic()
| {
"content_hash": "87910dea3b7305f4c3f20c4e48765dec",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 82,
"avg_line_length": 33.579787234042556,
"alnum_prop": 0.5705686678282909,
"repo_name": "dhstack/gae-init",
"id": "4e23b04e4d928e987e8556c26b1ac870ef33de44",
"size": "6352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magic.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15187"
},
{
"name": "CoffeeScript",
"bytes": "16124"
},
{
"name": "HTML",
"bytes": "67399"
},
{
"name": "JavaScript",
"bytes": "43082"
},
{
"name": "Python",
"bytes": "187401"
}
],
"symlink_target": ""
} |
"""
Simple Connect Four game.
Author: Chris Lyon
Contact: chris@cplyon.ca
Date: 2015-09-19
"""
import sys
from dataclasses import dataclass
from enum import IntEnum
class Colour(IntEnum):
    """
    Enum of possible cell values on the game board.
    """
    # NONE marks an empty cell; the other members identify the owning player.
    NONE = 0
    PLAYER_1 = 1
    PLAYER_2 = 2
@dataclass
class Player:
    """
    Encapsulates properties of a player.
    """
    # Display-name constants (plain class attributes, not dataclass fields,
    # because they carry no type annotation).
    PLAYER_1 = "Player 1"
    PLAYER_2 = "Player 2"
    # Dataclass fields: the player's display name and board colour.
    name: str
    colour: Colour
class Board:
    """
    The game board, with customizable size.

    Cells are stored row-major in a 2D list of Colour values.  Row 0 is the
    TOP of the board, so "gravity" pulls dropped pieces toward higher row
    indices.  ``goal`` is the streak length required to win (4 in classic
    Connect Four).
    """
    def __init__(self, rows: int, columns: int, goal: int):
        self.rows = rows
        self.columns = columns
        # Every cell starts empty.
        self._board = [
            [Colour.NONE for x in range(self.columns)] for x in range(self.rows)
        ]
        # Running count of occupied cells; lets is_full() avoid a full scan.
        self.filled_cells = 0
        self.goal = goal
    def __getitem__(self, key: int) -> Colour:
        """
        Allow indexing into the board like a 2D array: board[row][col]
        yields a Colour (this returns the row list).
        """
        return self._board[key]
    def __str__(self) -> str:
        """
        Return a string representation of the board, one line per row.
        """
        printable_board = ""
        for row in range(self.rows):
            for col in range(self.columns):
                printable_board += f"{self._board[row][col]} "
            printable_board += "\n"
        return printable_board
    def is_full(self) -> bool:
        """
        Return True if all cells on the board have been filled,
        False otherwise.
        """
        return self.filled_cells == (self.rows * self.columns)
    def is_column_full(self, column: int) -> bool:
        """
        Return True if the top-most cell in column is occupied,
        False otherwise.
        """
        return self._board[0][column] is not Colour.NONE
    def drop_piece(self, colour: Colour, column: int) -> int:
        """
        Set the lowest empty cell in column to colour.
        Return the row that was coloured.
        Return None if column is full.
        """
        for row in reversed(range(self.rows)):
            if self._board[row][column] is Colour.NONE:
                # we found the lowest empty cell
                self._board[row][column] = colour
                self.filled_cells += 1
                return row
        return None
    def check_top_left_to_bottom_right(
        self, colour: Colour, row: int, column: int
    ) -> bool:
        """
        Return True if the "\\" diagonal through (row, column) contains a
        streak of at least ``goal`` cells of ``colour``.
        Return False otherwise.
        """
        # need to check the entire diagonal, since a piece could have been
        # added in the middle of a streak
        streak_length = 0
        # rewind to the top-left end of the diagonal, then scan down-right
        r = row - min(row, column)
        c = column - min(row, column)
        while r < self.rows and c < self.columns:
            if self._board[r][c] is colour:
                streak_length += 1
                if streak_length >= self.goal:
                    return True
            else:
                streak_length = 0
            r += 1
            c += 1
        return False
    def check_top_right_to_bottom_left(
        self, colour: Colour, row: int, column: int
    ) -> bool:
        """
        Return True if the "/" diagonal through (row, column) contains a
        streak of at least ``goal`` cells of ``colour``.
        Return False otherwise.
        """
        # need to check the entire diagonal, since a piece could have been
        # added in the middle of a streak
        streak_length = 0
        # rewind to the top-right end of the diagonal, then scan down-left
        r = row - min(row, self.columns - column - 1)
        c = column + min(row, self.columns - column - 1)
        while r < self.rows and c >= 0:
            if self._board[r][c] is colour:
                streak_length += 1
                # CONSISTENCY FIX: was "==" while the mirror check used ">=";
                # behaviour is unchanged (the counter grows by 1), but ">="
                # is uniform and defensive.
                if streak_length >= self.goal:
                    return True
            else:
                streak_length = 0
            r += 1
            c -= 1
        return False
    def check_horizontal(self, colour: Colour, row: int) -> bool:
        """
        Return True if row contains a horizontal streak of at least ``goal``
        cells of ``colour``.
        Return False otherwise.
        """
        streak_length = 0
        # need to check the entire row, since a piece could have been added
        # in the middle of a streak
        for col in range(self.columns):
            if self._board[row][col] is colour:
                streak_length += 1
                # CONSISTENCY FIX: ">=" instead of "==" (behaviour unchanged)
                if streak_length >= self.goal:
                    return True
            else:
                streak_length = 0
        return False
    def check_vertical(self, colour: Colour, row: int, column: int) -> bool:
        """
        Return True if column contains a vertical streak of at least ``goal``
        cells of ``colour`` starting at row.
        Return False otherwise.
        """
        streak_length = 1
        # if the piece is less than goal cells away from the bottom, we know
        # there's no winner yet
        if self.rows - row < self.goal:
            return False
        # only need to check from this row down, since this piece is
        # guaranteed to be on top because of gravity
        for r in range(row + 1, self.rows):
            if self._board[r][column] is colour:
                streak_length += 1
                # CONSISTENCY FIX: ">=" instead of "==" (behaviour unchanged)
                if streak_length >= self.goal:
                    return True
            else:
                # we can bail as soon as we find a cell not the target colour
                break
        return False
class Game:
    """
    Contains turn logic and determines winner.
    """
    # Sentinel string stored in self.winner for a drawn game.
    TIE = "TIE GAME!"
    DEFAULT_COLUMNS = 7
    DEFAULT_ROWS = 6
    # Streak length required to win.
    GOAL = 4
    def __init__(self):
        # Two fixed players; player 1 always moves first.
        self.player1 = Player(Player.PLAYER_1, Colour.PLAYER_1)
        self.player2 = Player(Player.PLAYER_2, Colour.PLAYER_2)
        self.board = Board(self.DEFAULT_ROWS, self.DEFAULT_COLUMNS, self.GOAL)
        # A display string (player name or Game.TIE) once the game ends;
        # None while the game is still running.
        self.winner = None
        self.turn = self.player1
    def play(self, column: "int | str") -> bool:
        """
        Main game logic. Place a piece on the board, determine a winner
        and end the current player's turn.
        column is the desired index + 1 (1-based index from user input)
        Return True if piece was successfully placed into an empty cell.
        Return False if invalid or illegal column.
        """
        try:
            column = int(column) - 1
        except ValueError:
            # not a number
            return False
        if column < 0 or column >= self.board.columns:
            # out of bounds
            return False
        if self.board.is_column_full(column):
            # column is full
            return False
        # drop current player's piece into column
        row = self.board.drop_piece(self.turn.colour, column)
        # check for winner. There can be only one!
        # (must happen BEFORE the turn toggle below: determine_winner reads
        # self.turn for the winning player's name)
        if not self.winner:
            self.winner = self.determine_winner(self.turn.colour, row, column)
        # end turn
        self.turn = self.player2 if self.turn is self.player1 else self.player1
        return True
    def determine_winner(self, colour: Colour, row: int, column: int) -> "str | None":
        """
        Return the player name who has created a streak of four or more either
        horizontally, vertically or diagonally.
        Return Game.TIE if all board cells have been filled without a winner.
        Return None if game is not over.
        """
        if (
            self.board.check_horizontal(colour, row)
            or self.board.check_vertical(colour, row, column)
            or self.board.check_top_left_to_bottom_right(colour, row, column)
            or self.board.check_top_right_to_bottom_left(colour, row, column)
        ):
            return self.turn.name
        if self.board.is_full():
            # no available spaces, and no winner found. Game over!
            return Game.TIE
        # no winner yet
        return None
def main():
    """
    Entry point for the game.
    Runs an interactive read-play loop until someone wins, the board fills,
    or the user enters 'q'.
    """
    game = Game()
    # game loop
    while True:
        print()
        print(game.board)
        print(f"{game.turn.name} choose a column:")
        user_input = sys.stdin.readline().strip()
        # 'q' (any case) quits immediately
        if str(user_input).lower() == "q":
            sys.exit(0)
        if not game.play(user_input):
            print("Invalid column. Try again")
        if game.winner:
            break
    # show the final position and the result
    print()
    print(game.board)
    if game.winner == Game.TIE:
        print(Game.TIE)
    else:
        print(f"Winner is {game.winner}!")
    print()
if __name__ == "__main__":
    # main() returns None, so the process exits with status 0.
    sys.exit(main())
| {
"content_hash": "56d058467b09ffaa1b271e13a4a7c3fd",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 80,
"avg_line_length": 29.825783972125436,
"alnum_prop": 0.5503504672897196,
"repo_name": "cplyon/c4",
"id": "fc946d632fdbad14142eaede35717af6e938bcee",
"size": "8584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "c4/c4.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20091"
}
],
"symlink_target": ""
} |
"""
Implements a directory-backed addressbook hierarchy.
"""
__all__ = [
"DirectoryAddressBookHomeProvisioningResource",
"DirectoryAddressBookHomeTypeProvisioningResource",
"DirectoryAddressBookHomeUIDProvisioningResource",
"DirectoryAddressBookHomeResource",
]
from twext.python.log import Logger
from txweb2 import responsecode
from txweb2.dav.util import joinURL
from txweb2.http import HTTPError
from txweb2.http_headers import ETag, MimeType
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twistedcaldav.config import config
from twistedcaldav.directory.common import CommonUIDProvisioningResource,\
uidsResourceName, CommonHomeTypeProvisioningResource
from twistedcaldav.extensions import ReadOnlyResourceMixIn, DAVResource,\
DAVResourceWithChildrenMixin
from twistedcaldav.resource import AddressBookHomeResource
from uuid import uuid4
log = Logger()
# FIXME: copied from resource.py to avoid circular dependency
class CalDAVComplianceMixIn(object):
    """Mixin that extends the inherited DAV compliance classes with the
    CalDAV compliance classes configured for this server."""
    def davComplianceClasses(self):
        # Start from whatever the superclass advertises, then append the
        # configured CalDAV compliance classes.
        inherited = super(CalDAVComplianceMixIn, self).davComplianceClasses()
        return tuple(inherited) + config.CalDAVComplianceClasses
class DirectoryAddressBookProvisioningResource(
    ReadOnlyResourceMixIn,
    CalDAVComplianceMixIn,
    DAVResourceWithChildrenMixin,
    DAVResource,
):
    """Common base for read-only, directory-backed address book
    provisioning resources."""
    def defaultAccessControlList(self):
        # All provisioning resources share the site-wide provisioning ACL.
        return succeed(config.ProvisioningResourceACL)
    def etag(self):
        # These resources are synthesized on the fly, so hand out a brand
        # new opaque ETag on every request.
        opaque_token = str(uuid4())
        return succeed(ETag(opaque_token))
    def contentType(self):
        # Presented as a directory-like collection.
        return MimeType("httpd", "unix-directory")
class DirectoryAddressBookHomeProvisioningResource(
    DirectoryAddressBookProvisioningResource
):
    """
    Resource which provisions address book home collections as needed.
    """
    def __init__(self, directory, url, store):
        """
        @param directory: an L{IDirectoryService} to provision address books from.
        @param url: the canonical URL for the resource.
        """
        assert directory is not None
        assert url.endswith("/"), "Collection URL must end in '/'"
        super(DirectoryAddressBookHomeProvisioningResource, self).__init__()
        # MOVE2WHO
        self.directory = directory # IDirectoryService(directory)
        self._url = url
        self._newStore = store
        #
        # Create children
        #
        # ...just users, locations, and resources though. If we iterate all of
        # the directory's recordTypes, we also get the proxy sub principal types
        # and other things which don't have addressbooks.
        self.supportedChildTypes = (
            self.directory.recordType.user,
            self.directory.recordType.location,
            self.directory.recordType.resource,
        )
        # One child per record type (named with the old-style type name),
        # plus the special UID-addressed child.
        for recordType, recordTypeName in [
            (r, self.directory.recordTypeToOldName(r)) for r in
            self.supportedChildTypes
        ]:
            self.putChild(
                recordTypeName,
                DirectoryAddressBookHomeTypeProvisioningResource(
                    self, recordTypeName, recordType
                )
            )
        self.putChild(uidsResourceName, DirectoryAddressBookHomeUIDProvisioningResource(self))
    def url(self):
        # Canonical URL given at construction time.
        return self._url
    def listChildren(self):
        # Old-style names of the provisioned record types (see __init__).
        return [
            self.directory.recordTypeToOldName(r) for r in
            self.supportedChildTypes
        ]
    def principalCollections(self):
        # FIXME: directory.principalCollection smells like a hack
        # See DirectoryPrincipalProvisioningResource.__init__()
        return self.directory.principalCollection.principalCollections()
    def principalForRecord(self, record):
        # FIXME: directory.principalCollection smells like a hack
        # See DirectoryPrincipalProvisioningResource.__init__()
        return self.directory.principalCollection.principalForRecord(record)
    @inlineCallbacks
    def homeForDirectoryRecord(self, record, request):
        # Look up the home via the UID-addressed child; yields None when the
        # UID provisioning child is missing.
        uidResource = yield self.getChild(uidsResourceName)
        if uidResource is None:
            returnValue(None)
        else:
            returnValue((yield uidResource.homeResourceForRecord(record, request)))
    ##
    # DAV
    ##
    def isCollection(self):
        return True
    def displayName(self):
        return "addressbooks"
class DirectoryAddressBookHomeTypeProvisioningResource(
    CommonHomeTypeProvisioningResource,
    DirectoryAddressBookProvisioningResource
):
    """
    Resource which provisions address book home collections of a specific
    record type as needed.
    """

    def __init__(self, parent, name, recordType):
        """
        @param parent: the parent of this resource
        @param name: the old-style (short) name for the record type
        @param recordType: the directory record type to provision.
        """
        assert parent is not None
        assert name is not None
        assert recordType is not None

        super(DirectoryAddressBookHomeTypeProvisioningResource, self).__init__()

        self.directory = parent.directory
        self.name = name
        self.recordType = recordType
        self._parent = parent

    def url(self):
        # This collection lives directly under its parent.
        return joinURL(self._parent.url(), self.name)

    @inlineCallbacks
    def listChildren(self):
        if not config.EnablePrincipalListings:
            # Not a listable collection
            raise HTTPError(responsecode.FORBIDDEN)

        records = yield self.directory.recordsWithRecordType(self.recordType)
        names = []
        for record in records:
            # Only records that can actually hold contacts are listed.
            if getattr(record, "hasContacts", False):
                names.extend(record.shortNames)
        returnValue(names)

    def makeChild(self, name):
        # Children are provisioned through the by-UID sub-tree, not here.
        return None

    ##
    # DAV
    ##

    def isCollection(self):
        return True

    def displayName(self):
        return self.directory.recordTypeToOldName(self.recordType)

    ##
    # ACL
    ##

    def principalCollections(self):
        return self._parent.principalCollections()

    def principalForRecord(self, record):
        return self._parent.principalForRecord(record)
class DirectoryAddressBookHomeUIDProvisioningResource(
    CommonUIDProvisioningResource,
    DirectoryAddressBookProvisioningResource
):
    """Provisions address book homes under the by-UID sub-tree."""

    # Name of the home sub-tree this resource provisions.
    homeResourceTypeName = 'addressbooks'
    # Directory-record attribute that gates whether a home is provisioned.
    enabledAttribute = 'hasContacts'

    def homeResourceCreator(self, record, transaction):
        # Delegate construction of the concrete home resource.
        creator = DirectoryAddressBookHomeResource.createHomeResource
        return creator(self, record, transaction)
class DirectoryAddressBookHomeResource(AddressBookHomeResource):
    """
    Address book home collection resource.
    """

    @classmethod
    @inlineCallbacks
    def createHomeResource(cls, parent, record, transaction):
        # Build the generic home resource keyed by the record's UID, then
        # remember the directory record on the new instance.
        home = yield super(DirectoryAddressBookHomeResource, cls).createHomeResource(
            parent, record.uid, transaction)
        home.record = record
        returnValue(home)

    def principalForRecord(self):
        return self.parent.principalForRecord(self.record)
| {
"content_hash": "e3e3bd7157b7709076a4996c2dee67be",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 94,
"avg_line_length": 27.106870229007633,
"alnum_prop": 0.6775556181357364,
"repo_name": "red-hood/calendarserver",
"id": "5bddcf64e2fa1a9ec23d3a62cf481b018e1a11c1",
"size": "7709",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "twistedcaldav/directory/addressbook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1482"
},
{
"name": "CSS",
"bytes": "4214"
},
{
"name": "DIGITAL Command Language",
"bytes": "1234"
},
{
"name": "DTrace",
"bytes": "13143"
},
{
"name": "HTML",
"bytes": "36120"
},
{
"name": "JavaScript",
"bytes": "80248"
},
{
"name": "Makefile",
"bytes": "14429"
},
{
"name": "PLSQL",
"bytes": "12719"
},
{
"name": "PLpgSQL",
"bytes": "291431"
},
{
"name": "Python",
"bytes": "10537612"
},
{
"name": "R",
"bytes": "1091"
},
{
"name": "SQLPL",
"bytes": "6430"
},
{
"name": "Shell",
"bytes": "96975"
}
],
"symlink_target": ""
} |
import logging
import os
import re
import subprocess
from telemetry.core.platform import device
from telemetry.core.platform.profiler import monsoon
from telemetry.core import util
util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'build', 'android')
from pylib import constants # pylint: disable=import-error
from pylib.device import device_errors # pylint: disable=import-error
from pylib.device import device_utils # pylint: disable=import-error
class AndroidDevice(device.Device):
  """Connection information for one Android device.

  Attributes:
    device_id: the device's serial string created by adb to uniquely
      identify an emulator/device instance. This string can be found by
      running the 'adb devices' command.
    enable_performance_mode: when this is set to True, android platform will
      be set to high performance mode after browser is started.
  """
  def __init__(self, device_id, enable_performance_mode=True):
    super(AndroidDevice, self).__init__(
        name='Android device %s' % device_id, guid=device_id)
    self._device_id = device_id
    self._enable_performance_mode = enable_performance_mode

  @classmethod
  def GetAllConnectedDevices(cls):
    # One AndroidDevice per healthy, online serial.
    return [cls(serial) for serial in GetDeviceSerials()]

  @property
  def device_id(self):
    return self._device_id

  @property
  def enable_performance_mode(self):
    return self._enable_performance_mode
def _ListSerialsOfHealthyOnlineDevices():
  """Returns serials of all healthy devices that are currently online."""
  serials = []
  for d in device_utils.DeviceUtils.HealthyDevices():
    if d.IsOnline():
      serials.append(d.adb.GetDeviceSerial())
  return serials
def GetDeviceSerials():
  """Return the list of device serials of healthy devices.

  If a preferred device has been set with ANDROID_SERIAL, it will be first in
  the returned list.

  Returns:
    A list of serial strings; empty if no device could be found.
  """
  device_serials = _ListSerialsOfHealthyOnlineDevices()

  # The monsoon provides power for the device, so for devices with no
  # real battery, we need to turn them on after the monsoon enables voltage
  # output to the device.
  if not device_serials:
    try:
      m = monsoon.Monsoon(wait=False)
      m.SetUsbPassthrough(1)
      m.SetVoltage(3.8)
      m.SetMaxCurrent(8)
      logging.warn("""
Monsoon power monitor detected, but no Android devices.
The Monsoon's power output has been enabled. Please now ensure that:
1. The Monsoon's front and back USB are connected to the host.
2. The device is connected to the Monsoon's main and USB channels.
3. The device is turned on.
Waiting for device...
""")
      # BUG FIX: pass the callable itself, not the result of calling it once.
      # util.WaitFor polls its condition function until it returns a truthy
      # value; handing it an (empty) list would never detect a device that
      # comes online after power-up.
      util.WaitFor(_ListSerialsOfHealthyOnlineDevices, 600)
      device_serials = _ListSerialsOfHealthyOnlineDevices()
    except IOError:
      # No Monsoon attached (or it is unreachable); give up gracefully.
      return []

  preferred_device = os.environ.get('ANDROID_SERIAL')
  if preferred_device in device_serials:
    # Fixed missing space between the two concatenated message fragments and
    # switched to logging's lazy %-argument form.
    logging.warn(
        'ANDROID_SERIAL is defined. Put %s in the first of the '
        'discovered devices list.', preferred_device)
    device_serials.remove(preferred_device)
    device_serials.insert(0, preferred_device)
  return device_serials
def GetDevice(finder_options):
  """Return a Platform instance for the device specified by |finder_options|."""
  if not CanDiscoverDevices():
    logging.info(
        'No adb command found. Will not try searching for Android browsers.')
    return None

  # An explicitly requested device wins, if it is actually attached.
  if finder_options.device and finder_options.device in GetDeviceSerials():
    return AndroidDevice(
        finder_options.device,
        enable_performance_mode=not finder_options.no_performance_mode)

  devices = AndroidDevice.GetAllConnectedDevices()
  if not devices:
    logging.info('No android devices found.')
    return None
  if len(devices) > 1:
    choices = '\n'.join('  --device=%s' % d.device_id for d in devices)
    logging.warn(
        'Multiple devices attached. Please specify one of the following:\n'
        + choices)
    return None
  return devices[0]
def CanDiscoverDevices():
  """Returns true if devices are discoverable via adb."""
  adb_path = constants.GetAdbPath()
  if os.path.isabs(adb_path) and not os.path.exists(adb_path):
    return False

  # First try the plain `adb` binary from PATH.
  try:
    with open(os.devnull, 'w') as devnull:
      proc = subprocess.Popen(
          ['adb', 'devices'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
          stdin=devnull)
      out = proc.communicate()[0]
    if re.search(re.escape('????????????\tno permissions'), out) is not None:
      logging.warn('adb devices gave a permissions error. '
                   'Consider running adb as root:')
      logging.warn('  adb kill-server')
      logging.warn('  sudo `which adb` devices\n\n')
    return True
  except OSError:
    pass

  # Fall back to the configured adb location, prepended to PATH.
  try:
    os.environ['PATH'] = os.pathsep.join(
        [os.path.dirname(adb_path), os.environ['PATH']])
    device_utils.DeviceUtils.HealthyDevices()
    return True
  except (device_errors.CommandFailedError, device_errors.CommandTimeoutError,
          OSError):
    return False
def FindAllAvailableDevices(_):
  """Returns a list of available devices (empty when adb is unusable)."""
  if CanDiscoverDevices():
    return AndroidDevice.GetAllConnectedDevices()
  return []
| {
"content_hash": "3910200ca67467f14899d490a294976c",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 80,
"avg_line_length": 33.193548387096776,
"alnum_prop": 0.7049562682215743,
"repo_name": "chuan9/chromium-crosswalk",
"id": "105700f48cfe7be5d2f995603de8dc0041d75f97",
"size": "5307",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/core/platform/android_device.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "37073"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9417055"
},
{
"name": "C++",
"bytes": "240920124"
},
{
"name": "CSS",
"bytes": "938860"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27258381"
},
{
"name": "Java",
"bytes": "14580273"
},
{
"name": "JavaScript",
"bytes": "20507007"
},
{
"name": "Makefile",
"bytes": "70992"
},
{
"name": "Objective-C",
"bytes": "1742904"
},
{
"name": "Objective-C++",
"bytes": "9967587"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "178732"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "480579"
},
{
"name": "Python",
"bytes": "8519074"
},
{
"name": "Shell",
"bytes": "482077"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
from ataddrcompare.ataddrcompare import main
# Script entry point: delegate to the package's main() when run directly.
if __name__ == '__main__':
    main()
| {
"content_hash": "38f5ff2c5fb0afcd853f8b093b4fe928",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 44,
"avg_line_length": 21,
"alnum_prop": 0.6428571428571429,
"repo_name": "gmgeo/at-address-compare",
"id": "7ce236700818ff82f70f91b2bab63ba69bbb9e7a",
"size": "107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ataddrcompare.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2069"
},
{
"name": "Python",
"bytes": "8295"
}
],
"symlink_target": ""
} |
import argparse
import json
import struct
import socket
import os
import sys
# Make the project root (two directory levels up) importable so that the
# sibling packages used below (util, pprs) resolve when this script is run
# directly.
np = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
print np
if np not in sys.path:
    sys.path.append(np)
from time import sleep, time, strptime, mktime
from threading import Thread
from Queue import Queue, Empty
from multiprocessing.connection import Client
import util.log
from pprs.crypto_util import AESCipher
from pprs.route import Route
import pickle
import pprs.port_config as port_config
from copy import deepcopy
# Duration limit for one replay iteration.
# NOTE(review): the name says "minutes", but the sender loops compare this
# value against elapsed trace-time expressed in (speed-up-scaled) seconds,
# so 300 is 5 minutes of scaled trace time, not the "half an hour" the
# original comment claimed -- verify intent.
update_minutes = 300
# AES key length in bytes for the per-route encryption in six-pack mode.
KEY_LENGTH = 16
# Accepted values of the `rs` argument: which route-server flavor to feed.
SIX_PACK_RS = 0
SGX_RS = 1
ORIGIN_RS = 2
class ExaBGPEmulator(object):
    """Replays a textual BGP update trace towards a route server.

    A file-processor thread parses ``input_file`` into update dicts and
    pushes them onto an internal queue; a sender thread drains the queue
    using one of three pacing modes (timestamp-paced, fixed-rate, or as
    fast as possible).  Depending on ``rs`` the updates are delivered to
    the two six-pack route servers (pickled, over multiprocessing
    connections) or to a single SGX/original route server
    (length-prefixed JSON over TCP).
    """

    def __init__(self, rs, address, port, input_file, speed_up, rate, mode, seperate_prefix):
        """
        :param rs: SIX_PACK_RS, SGX_RS or ORIGIN_RS
        :param address: route-server IP address (SGX/origin modes only)
        :param port: route-server TCP port (SGX/origin modes only)
        :param input_file: path of the BGP update trace to replay
        :param speed_up: factor by which trace timestamps are compressed
        :param rate: updates per second for the rate-based sender (mode 1)
        :param mode: 0 = timestamp-paced, 1 = rate-based, 2 = fastest
        :param seperate_prefix: if true, split every update into one route
            per announced prefix before queueing
        """
        self.logger = util.log.getLogger('xbgp')
        self.logger.debug('init')
        # Monotonically increasing id assigned to each queued route.
        self.route_id_counter = 0
        # Wall-clock anchor and trace-time anchor used for pacing.
        self.real_start_time = time()
        self.simulation_start_time = 0
        self.input_file = input_file
        self.speed_up = speed_up
        self.rs = rs
        self.send_rate = int(rate)
        self.mode = int(mode)
        self.seperate_prefix = seperate_prefix
        # Cleared to ask both worker threads to stop.
        self.run = True
        self.fp_thread = None
        self.us_thread = None
        # Hand-off queue between the file processor and the sender thread.
        self.update_queue = Queue()
        if self.rs == SIX_PACK_RS:
            # Six-pack mode: one pickled-message connection per route server.
            self.logger.debug('connecting to RS1')
            self.conn_rs1 = Client((port_config.process_assignement["rs1"], port_config.ports_assignment["rs1_receive_bgp_messages"]), authkey=None)
            self.logger.debug('connected to RS1')
            self.logger.debug('connecting to RS2')
            self.conn_rs2 = Client((port_config.process_assignement["rs2"], port_config.ports_assignment["rs2_receive_bgp_messages"]), authkey=None)
            self.logger.debug('connected to RS2')
        elif self.rs == SGX_RS or self.rs == ORIGIN_RS:
            # SGX/original mode: a single raw TCP connection.
            self.logger.info('connecting to RS')
            self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.conn.connect((address, port))
            self.logger.info('connected to RS')

    def file_processor(self):
        """Parse the trace file and enqueue one message per BGP update.

        Small state machine driven by ``flag``:
          0 - waiting for the next TIME header,
          1 - inside an update's attribute section,
          2 - reading announced prefixes (one per line),
          3 - reading withdrawn prefixes (one per line).
        """
        with open(self.input_file) as infile:
            tmp = {}
            next_hop = ""
            flag = 0
            for line in infile:
                if line.startswith("TIME"):
                    # Start of a new update: reset state, scale its timestamp.
                    flag = 1
                    tmp = {"exabgp": "3.4.8", "type": "update"}
                    next_hop = ""
                    x = line.split("\n")[0].split(": ")[1]
                    # NOTE: this local `time` shadows the imported time()
                    # inside this function; it is only used on the next line.
                    time = mktime(strptime(x, "%m/%d/%y %H:%M:%S"))
                    tmp["time"] = int(time/self.speed_up)
                elif flag == 1:
                    if 'Keepalive' in line or line.startswith("\n"):
                        # Only process Update Messages
                        flag = 0
                    else:
                        x = line.split("\n")[0].split(": ")
                        # NOTE(review): because this is an if/elif chain, the
                        # line that first creates tmp["neighbor"] is not also
                        # parsed; if FROM is the first attribute line after
                        # TIME, its peer ip/asn would be dropped -- verify
                        # against the actual trace format.
                        if "neighbor" not in tmp:
                            tmp["neighbor"] = {"address": {}, "asn": {}, "message": {"update": {}}}
                        elif line.startswith("FROM"):
                            # Peer (sender) address and ASN; ASN is "ASnnn".
                            x = x[1].split(" ")
                            tmp["neighbor"]["ip"] = x[0]
                            tmp["neighbor"]["address"]["peer"] = x[0]
                            tmp["neighbor"]["asn"]["peer"] = x[1][2:]
                        elif line.startswith("TO"):
                            # Local (receiver) address and ASN.
                            x = x[1].split(" ")
                            tmp["neighbor"]["address"]["local"] = x[0]
                            tmp["neighbor"]["asn"]["local"] = x[1][2:]
                        elif line.startswith("ORIGIN"):
                            if "attribute" not in tmp["neighbor"]["message"]["update"]:
                                tmp["neighbor"]["message"]["update"]["attribute"] = {}
                            tmp["neighbor"]["message"]["update"]["attribute"]["origin"] = x[1].lower()
                        elif line.startswith("ASPATH"):
                            # AS path; elements in braces are AS sets and are
                            # flattened into the path.
                            if "attribute" not in tmp["neighbor"]["message"]["update"]:
                                tmp["neighbor"]["message"]["update"]["attribute"] = {}
                            tmp["neighbor"]["message"]["update"]["attribute"]["as-path"] = []
                            for asn in x[1].split(' '):
                                if asn[0] == '{':
                                    for i in asn[1:-1].split(','):
                                        tmp["neighbor"]["message"]["update"]["attribute"]["as-path"].append(int(i))
                                else:
                                    tmp["neighbor"]["message"]["update"]["attribute"]["as-path"].append(int(asn))
                        elif line.startswith("MULTI_EXIT_DISC"):
                            if "attribute" not in tmp["neighbor"]["message"]["update"]:
                                tmp["neighbor"]["message"]["update"]["attribute"] = {}
                            tmp["neighbor"]["message"]["update"]["attribute"]["med"] = x[1]
                        elif line.startswith("NEXT_HOP"):
                            # Remember the next hop: announced prefixes that
                            # follow are stored under it.
                            if "announce" not in tmp["neighbor"]["message"]["update"]:
                                tmp["neighbor"]["message"]["update"]["announce"] = {}
                            tmp["neighbor"]["message"]["update"]["announce"]["ipv4 unicast"] = {x[1]: {}}
                            next_hop = x[1]
                        elif line.startswith("COMMUNITY"):
                            if "attribute" not in tmp["neighbor"]["message"]["update"]:
                                tmp["neighbor"]["message"]["update"]["attribute"] = {}
                            tmp["neighbor"]["message"]["update"]["attribute"]["community"] = x[1]
                        elif line.startswith("ANNOUNCE"):
                            # Switch to prefix-list parsing (announcements).
                            if "announce" not in tmp["neighbor"]["message"]["update"]:
                                tmp["neighbor"]["message"]["update"]["announce"] = {"ipv4 unicast": {}}
                            flag = 2
                        elif line.startswith("WITHDRAW"):
                            # Switch to prefix-list parsing (withdrawals).
                            tmp["neighbor"]["message"]["update"]["withdraw"] = {"ipv4 unicast": {}}
                            flag = 3
                elif flag >= 2:
                    if line.startswith("\n"):
                        # Blank line terminates the update: enqueue it.
                        if not self.run:
                            break
                        if self.seperate_prefix:
                            # Fan the update out into one route per prefix.
                            if self.rs == SIX_PACK_RS:
                                routes = self.create_routes_to_be_sent(tmp)
                            elif self.rs == SGX_RS or self.rs == ORIGIN_RS:
                                routes = self.create_routes_per_prefix(tmp)
                            for route in routes:
                                route["route_id"] = self.route_id_counter
                                self.route_id_counter += 1
                                self.update_queue.put({'route': route, "time": tmp["time"]})
                        else:
                            # NOTE: process announcements only for testing
                            if "announce" in tmp["neighbor"]["message"]["update"]:
                                tmp["route_id"] = self.route_id_counter
                                self.route_id_counter += 1
                                self.logger.debug(str(tmp))
                                self.update_queue.put({'route': tmp, "time": tmp["time"]})
                        #self.logger.info("update_queue.qsize:" + str(self.update_queue.qsize()) + "route_id:%d" % tmp["route_id"])
                        # Simple back-pressure: pause parsing while the
                        # sender is more than 32000 messages behind.
                        while self.update_queue.qsize() > 32000:
                            self.logger.info('queue is full - taking a break')
                            sleep(self.sleep_time(tmp["time"])/2 + 0.001)
                            if not self.run:
                                break
                        flag = 0
                    else:
                        if line.startswith("ANNOUNCE"):
                            if "announce" not in tmp["neighbor"]["message"]["update"]:
                                tmp["neighbor"]["message"]["update"]["announce"] = {"ipv4 unicast": {}}
                            flag = 2
                        elif line.startswith("WITHDRAW"):
                            tmp["neighbor"]["message"]["update"]["withdraw"] = {"ipv4 unicast": {}}
                            flag = 3
                        else:
                            # A bare prefix line; file under announce (under
                            # the current next hop) or withdraw per `flag`.
                            x = line.split("\n")[0].split()[0]
                            if flag == 2:
                                tmp["neighbor"]["message"]["update"]["announce"]["ipv4 unicast"][next_hop][x] = {}
                            else:
                                tmp["neighbor"]["message"]["update"]["withdraw"]["ipv4 unicast"][x] = {}
        # End of trace: tell the sender thread to drain and stop.
        self.run = False
        print "file processor done"

    def create_routes_per_prefix(self, bgp_update):
        """Return one copy of *bgp_update* per announced prefix.

        Withdraw-only updates yield an empty list.  The announce dict of
        *bgp_update* is overwritten in place while iterating, and each
        single-prefix variant is deep-copied into the result.
        """
        routes = []
        if "announce" not in bgp_update["neighbor"]["message"]["update"]:
            return routes
        nh_dict = bgp_update["neighbor"]["message"]["update"]["announce"]["ipv4 unicast"]
        for next_hop in nh_dict:
            for prefix in nh_dict[next_hop]:
                bgp_update["neighbor"]["message"]["update"]["announce"]["ipv4 unicast"] = {next_hop: {prefix: {}}}
                routes.append(deepcopy(bgp_update))
        return routes

    def create_routes_to_be_sent(self, bgp_update):
        """Build encrypted per-prefix route messages for six-pack mode.

        A fresh AES key is generated per *bgp_update*; each announced
        prefix becomes one dict carrying the pickled, encrypted Route plus
        the hex-encoded key.
        """
        # 1. generate key for the incoming route
        key = os.urandom(KEY_LENGTH)
        keystr = key.encode("hex")
        self.cipher = AESCipher(key)
        routes = []
        # for each IP prefix destination add a route in the queue
        if "announce" in bgp_update["neighbor"]["message"]["update"]:
            # GENERATE ANNOUNCEMENTS
            for next_hop in bgp_update["neighbor"]["message"]["update"]["announce"]["ipv4 unicast"]:
                for prefix in bgp_update["neighbor"]["message"]["update"]["announce"]["ipv4 unicast"][next_hop]:
                    route = Route()
                    route.neighbor = bgp_update["neighbor"]["ip"]
                    route.prefix = prefix
                    route.time = bgp_update["time"]
                    route.id = self.route_id_counter
                    self.route_id_counter += 1
                    route.as_path = bgp_update["neighbor"]["message"]["update"]["attribute"]["as-path"]
                    route.next_hop = next_hop
                    if "community" in bgp_update["neighbor"]["message"]["update"]["attribute"]:
                        route.communities = bgp_update["neighbor"]["message"]["update"]["attribute"]["community"]
                    route.type = "announce"
                    encrypted_route = self.cipher.encrypt(pickle.dumps(route)) #encrypt serialized route object
                    routes.append({"prefix" : prefix, "asn" : bgp_update["neighbor"]["asn"]["peer"], "route-in-clear" : None, "route_id" : route.id, "encrypted_route" : encrypted_route, "key" : keystr, "type" : route.type , "announcement_id" : route.id})
        return routes

    def bgp_update_sender(self):
        """Mode 0: drain the queue, pacing sends by trace timestamps."""
        while self.run or not self.update_queue.empty():
            try:
                # get msg. type: {"route", "time"}
                msg = self.update_queue.get(True, 1)
            except Empty:
                continue
            # Anchor real time to the first message's trace time.
            if self.simulation_start_time == 0:
                self.real_start_time = time()
                self.simulation_start_time = msg["time"]
            current_bgp_update = msg["time"]
            elapsed = current_bgp_update - self.simulation_start_time
            # Stop after the configured amount of scaled trace time.
            if elapsed > update_minutes:
                print "start: current", self.simulation_start_time, current_bgp_update
                break
            sleep_time = self.sleep_time(msg["time"])
            if sleep_time != 0:
                print "current_bgp_update:", current_bgp_update, ", elapsed:", elapsed, ", sleep_time:", sleep_time
                sleep(sleep_time)
            #self.logger.info("route_id:%d " % msg["route"]["route_id"] + "Peer asn:%s " % msg["route"]["neighbor"]["asn"]["peer"] + "time(s):" + str(msg["time"]) + "sleep_time(s):" + str(sleep_time))
            if self.rs == SIX_PACK_RS:
                self.send_update_rs1(msg["route"])
                self.send_update_rs2(msg["route"])
            elif self.rs == SGX_RS or self.rs == ORIGIN_RS:
                self.send_update_sgx(msg["route"])
        # NOTE(review): stop() joins us_thread; when this breaks out early
        # with self.run still True, that join targets the current thread --
        # verify shutdown path.
        self.stop()

    def bgp_update_rate_sender(self):
        """Mode 1: drain the queue at a fixed rate of send_rate msgs/sec."""
        current_count = 0
        count = 0
        while not self.update_queue.empty() or self.run:
            try:
                msg = self.update_queue.get(True, 1)
            except Empty:
                continue
            if self.simulation_start_time == 0:
                self.simulation_start_time = msg["time"]
            current_bgp_update = msg["time"]
            elapsed = current_bgp_update - self.simulation_start_time
            # Here `count` (seconds spent sending) bounds the run length.
            if count > update_minutes:
                print "start, current_msg_time, current_time", self.simulation_start_time, current_bgp_update, count
                break
            if current_count == self.send_rate:
                # One batch of send_rate messages done; sleep out the second.
                current_count = 0
                count += 1
                print "elapsed:", count
                sleep(1)
            current_count += 1
            if self.rs == SIX_PACK_RS:
                self.send_update_rs1(msg["route"])
                self.send_update_rs2(msg["route"])
            elif self.rs == SGX_RS or self.rs == ORIGIN_RS:
                self.send_update_sgx(msg["route"])
        self.stop()

    def bgp_update_fast_sender(self):
        """Mode 2: drain the queue as fast as possible, no pacing."""
        count = 0
        while not self.update_queue.empty() or self.run:
            try:
                msg = self.update_queue.get(True, 1)
            except Empty:
                continue
            count += 1
            if self.rs == SIX_PACK_RS:
                self.send_update_rs1(msg["route"])
                self.send_update_rs2(msg["route"])
            elif self.rs == SGX_RS or self.rs == ORIGIN_RS:
                self.send_update_sgx(msg["route"])
        print "total sent announcements: " + str(count)
        self.stop()

    def sleep_time(self, update_time):
        """Seconds to wait so wall-clock progress matches trace progress."""
        time_diff = update_time - self.simulation_start_time
        wake_up_time = self.real_start_time + time_diff
        sleep_time = wake_up_time - time()
        if sleep_time < 0:
            sleep_time = 0
        return sleep_time

    def send_update(self, update):
        # Plain JSON without a length prefix (not used by the senders above).
        self.conn.send(json.dumps(update))

    def send_update_sgx(self, update):
        # Length-prefixed JSON: 2-byte little-endian total length (including
        # the prefix itself), then the JSON payload.
        s = json.dumps(update)
        self.conn.send(struct.pack("H", len(s) + 2) + s)

    def send_update_rs1(self, update):
        self.conn_rs1.send(pickle.dumps(update))

    def send_update_rs2(self, update):
        self.conn_rs2.send(pickle.dumps(update))

    def start(self):
        """Spawn the file-processor thread and the sender for `mode`."""
        self.logger.debug('start file processor')
        self.fp_thread = Thread(target=self.file_processor)
        self.fp_thread.start()
        self.logger.debug('start update sender')
        if self.mode == 0:
            self.us_thread = Thread(target=self.bgp_update_sender)
            self.us_thread.start()
        if self.mode == 1:
            self.us_thread = Thread(target=self.bgp_update_rate_sender)
            self.us_thread.start()
        if self.mode == 2:
            self.us_thread = Thread(target=self.bgp_update_fast_sender)
            self.us_thread.start()

    def stop(self):
        """Signal the route server(s) to stop, join workers, close sockets."""
        self.logger.debug('terminate')
        print "send stop signal"
        if self.rs == SIX_PACK_RS:
            self.send_update_rs1({"stop": 1})
            self.send_update_rs2({"stop": 1})
        elif self.rs == SGX_RS or self.rs == ORIGIN_RS:
            self.send_update_sgx({"stop": 1})
        if self.run == True:
            self.run = False
            # NOTE(review): when stop() is invoked from the sender thread
            # itself while run is still True, this joins the current thread
            # -- verify the early-break shutdown path.
            self.us_thread.join()
            self.logger.debug('bgp update sender terminated')
        self.fp_thread.join()
        self.logger.debug('file processor terminated')
        if self.rs == SIX_PACK_RS:
            self.conn_rs1.close()
            self.conn_rs2.close()
        elif self.rs == SGX_RS or self.rs == ORIGIN_RS:
            self.conn.close()
def main(args):
    """Build the emulator from CLI args, start it, and idle until done."""
    speedup = args.speedup or 1
    emulator = ExaBGPEmulator(
        args.rs, args.ip, args.port, args.input, speedup,
        args.rate, args.mode, args.seperate_prefix)
    emulator.start()
    # Keep the main thread alive so Ctrl-C can trigger a clean shutdown.
    while emulator.run:
        try:
            sleep(0.5)
        except KeyboardInterrupt:
            emulator.stop()
if __name__ == '__main__':
    # Command-line interface; positional argument order matches the
    # ExaBGPEmulator constructor.
    parser = argparse.ArgumentParser()
    parser.add_argument('rs', help='0: six-pack rs, 1: sgx rs, 2: orignl rs', type=int)
    parser.add_argument('ip', help='ip address of the xrs')
    parser.add_argument('port', help='port of the xrs', type=int)
    parser.add_argument('input', help='bgp input file')
    parser.add_argument('rate', help='bgp updates rate/second')
    parser.add_argument('mode', help='xbgp mode 0: bgp update time based 1: bgp update rate based 2: as fast as possible')
    parser.add_argument('--seperate_prefix', help='whether seperate prefix', action='store_true')
    parser.add_argument('--speedup', help='speed up of replay', type=float)
    args = parser.parse_args()
    main(args)
| {
"content_hash": "7ae3d776ac822171d77e5b6b4d9c7b3e",
"timestamp": "",
"source": "github",
"line_count": 397,
"max_line_length": 254,
"avg_line_length": 43.80856423173803,
"alnum_prop": 0.4972401103955842,
"repo_name": "huxh10/iSDX",
"id": "e768ea7085241073fc4e779c863b07b207e9b8b1",
"size": "17457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xbgp/xbgp.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "520"
},
{
"name": "Assembly",
"bytes": "4397822"
},
{
"name": "Batchfile",
"bytes": "30015"
},
{
"name": "C",
"bytes": "2013593"
},
{
"name": "C++",
"bytes": "2439608"
},
{
"name": "CSS",
"bytes": "134978"
},
{
"name": "HTML",
"bytes": "17269"
},
{
"name": "JavaScript",
"bytes": "367378"
},
{
"name": "Jolie",
"bytes": "412"
},
{
"name": "Makefile",
"bytes": "22430"
},
{
"name": "Objective-C",
"bytes": "2238"
},
{
"name": "PHP",
"bytes": "14432"
},
{
"name": "PLpgSQL",
"bytes": "61829"
},
{
"name": "Perl",
"bytes": "9352"
},
{
"name": "Python",
"bytes": "590454"
},
{
"name": "Roff",
"bytes": "1438620"
},
{
"name": "Ruby",
"bytes": "4820"
},
{
"name": "Shell",
"bytes": "25019"
},
{
"name": "Smarty",
"bytes": "53250"
},
{
"name": "Vim script",
"bytes": "10738"
}
],
"symlink_target": ""
} |
import subprocess
import sys
import netaddr
from oslo_config import cfg
import six
from rally.common.i18n import _
from rally.common import logging
from rally.common import sshutils
from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils
from rally.plugins.openstack.scenarios.nova import utils as nova_utils
from rally.plugins.openstack.wrappers import network as network_wrapper
from rally.task import atomic
from rally.task import utils
from rally.task import validation
# Module-level logger for this file.
LOG = logging.getLogger(__name__)

# Tunables controlling how long/often a VM is polled for ICMP reachability.
VM_BENCHMARK_OPTS = [
    cfg.FloatOpt("vm_ping_poll_interval", default=1.0,
                 help="Interval between checks when waiting for a VM to "
                      "become pingable"),
    cfg.FloatOpt("vm_ping_timeout", default=120.0,
                 help="Time to wait for a VM to become pingable")]

CONF = cfg.CONF
# Register the options under the "benchmark" group so they are reachable
# as CONF.benchmark.vm_ping_*.
benchmark_group = cfg.OptGroup(name="benchmark", title="benchmark options")
CONF.register_opts(VM_BENCHMARK_OPTS, group=benchmark_group)
class Host(object):
    """A host tracked by its ICMP (ping) reachability status."""

    ICMP_UP_STATUS = "ICMP UP"
    ICMP_DOWN_STATUS = "ICMP DOWN"

    name = "ip"

    def __init__(self, ip):
        self.ip = netaddr.IPAddress(ip)
        self.status = self.ICMP_DOWN_STATUS

    @property
    def id(self):
        return self.ip.format()

    @classmethod
    def update_status(cls, server):
        """Ping the host once and record whether it answered."""
        ping_cmd = "ping" if server.ip.version == 4 else "ping6"
        if sys.platform.startswith("linux"):
            # Linux ping supports -w (deadline); cap the wait at one second.
            cmd = [ping_cmd, "-c1", "-w1", server.ip.format()]
        else:
            cmd = [ping_cmd, "-c1", server.ip.format()]
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        proc.wait()
        LOG.debug("Host %s is ICMP %s"
                  % (server.ip.format(), "down" if proc.returncode else "up"))
        server.status = (cls.ICMP_UP_STATUS if proc.returncode == 0
                         else cls.ICMP_DOWN_STATUS)
        return server

    def __eq__(self, other):
        if not isinstance(other, Host):
            # NOTE(review): Host.__class__.__name__ evaluates to "type" (the
            # metaclass name), not "Host"; preserved as-is so the message
            # stays unchanged.
            raise TypeError("%s should be an instance of %s" % (
                other, Host.__class__.__name__))
        return self.ip == other.ip and self.status == other.status
class VMScenario(nova_utils.NovaScenario, cinder_utils.CinderScenario):
"""Base class for VM scenarios with basic atomic actions.
VM scenarios are scenarios executed inside some launched VM instance.
"""
USER_RWX_OTHERS_RX_ACCESS_MODE = 0o755
RESOURCE_NAME_PREFIX = "rally_vm_"
@atomic.action_timer("vm.run_command_over_ssh")
def _run_command_over_ssh(self, ssh, command):
"""Run command inside an instance.
This is a separate function so that only script execution is timed.
:param ssh: A SSHClient instance.
:param command: Dictionary specifying command to execute.
See `rally info find VMTasks.boot_runcommand_delete' parameter
`command' docstring for explanation.
:returns: tuple (exit_status, stdout, stderr)
"""
validation.check_command_dict(command)
# NOTE(pboldin): Here we `get' the values and not check for the keys
# due to template-driven configuration generation that can leave keys
# defined but values empty.
if command.get("script_file") or command.get("script_inline"):
cmd = command["interpreter"]
if command.get("script_file"):
stdin = open(command["script_file"], "rb")
elif command.get("script_inline"):
stdin = six.moves.StringIO(command["script_inline"])
elif command.get("remote_path"):
cmd = command["remote_path"]
stdin = None
if command.get("local_path"):
remote_path = cmd[-1] if isinstance(cmd, (tuple, list)) else cmd
ssh.put_file(command["local_path"], remote_path,
mode=self.USER_RWX_OTHERS_RX_ACCESS_MODE)
if command.get("command_args"):
if not isinstance(cmd, (list, tuple)):
cmd = [cmd]
# NOTE(pboldin): `ssh.execute' accepts either a string interpreted
# as a command name or the list of strings that are converted into
# single-line command with arguments.
cmd = cmd + list(command["command_args"])
return ssh.execute(cmd, stdin=stdin)
def _boot_server_with_fip(self, image, flavor, use_floating_ip=True,
floating_network=None, **kwargs):
"""Boot server prepared for SSH actions."""
kwargs["auto_assign_nic"] = True
server = self._boot_server(image, flavor, **kwargs)
if not server.networks:
raise RuntimeError(
"Server `%s' is not connected to any network. "
"Use network context for auto-assigning networks "
"or provide `nics' argument with specific net-id." %
server.name)
if use_floating_ip:
fip = self._attach_floating_ip(server, floating_network)
else:
internal_network = list(server.networks)[0]
fip = {"ip": server.addresses[internal_network][0]["addr"]}
return server, {"ip": fip.get("ip"),
"id": fip.get("id"),
"is_floating": use_floating_ip}
@atomic.action_timer("vm.attach_floating_ip")
def _attach_floating_ip(self, server, floating_network):
internal_network = list(server.networks)[0]
fixed_ip = server.addresses[internal_network][0]["addr"]
fip = network_wrapper.wrap(self.clients, self).create_floating_ip(
ext_network=floating_network,
tenant_id=server.tenant_id, fixed_ip=fixed_ip)
self._associate_floating_ip(server, fip["ip"], fixed_address=fixed_ip,
atomic_action=False)
return fip
@atomic.action_timer("vm.delete_floating_ip")
def _delete_floating_ip(self, server, fip):
with logging.ExceptionLogger(
LOG, _("Unable to delete IP: %s") % fip["ip"]):
if self.check_ip_address(fip["ip"])(server):
self._dissociate_floating_ip(server, fip["ip"],
atomic_action=False)
network_wrapper.wrap(self.clients, self).delete_floating_ip(
fip["id"], wait=True)
def _delete_server_with_fip(self, server, fip, force_delete=False):
if fip["is_floating"]:
self._delete_floating_ip(server, fip)
return self._delete_server(server, force=force_delete)
@atomic.action_timer("vm.wait_for_ssh")
def _wait_for_ssh(self, ssh):
ssh.wait()
@atomic.action_timer("vm.wait_for_ping")
def _wait_for_ping(self, server_ip):
server = Host(server_ip)
utils.wait_for_status(
server,
ready_statuses=[Host.ICMP_UP_STATUS],
update_resource=Host.update_status,
timeout=CONF.benchmark.vm_ping_timeout,
check_interval=CONF.benchmark.vm_ping_poll_interval
)
    def _run_command(self, server_ip, port, username, password, command,
                     pkey=None):
        """Run command via SSH on server.
        Create SSH connection for server, wait for server to become available
        (there is a delay between server being set to ACTIVE and sshd being
        available). Then call run_command_over_ssh to actually execute the
        command.
        :param server_ip: server ip address
        :param port: ssh port for SSH connection
        :param username: str. ssh username for server
        :param password: Password for SSH authentication
        :param command: Dictionary specifying command to execute.
            See `rally info find VMTasks.boot_runcommand_delete' parameter
            `command' docstring for explanation.
        :param pkey: key for SSH authentication; defaults to the private key
            of the keypair generated for the user by the context
        :returns: tuple (exit_status, stdout, stderr)
        """
        # Fall back to the context-provided keypair when no key was given.
        pkey = pkey if pkey else self.context["user"]["keypair"]["private"]
        ssh = sshutils.SSH(username, server_ip, port=port,
                           pkey=pkey, password=password)
        self._wait_for_ssh(ssh)
        return self._run_command_over_ssh(ssh, command)
| {
"content_hash": "921328235ac971dd7eb5a4b501cf9824",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 78,
"avg_line_length": 38.529680365296805,
"alnum_prop": 0.6038160701588055,
"repo_name": "eayunstack/rally",
"id": "f6e0c43ad4af6f38ecc0f53c535aa448a317df56",
"size": "9068",
"binary": false,
"copies": "2",
"ref": "refs/heads/product",
"path": "rally/plugins/openstack/scenarios/vm/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "36716"
},
{
"name": "Mako",
"bytes": "17389"
},
{
"name": "Python",
"bytes": "2988245"
},
{
"name": "Shell",
"bytes": "41128"
}
],
"symlink_target": ""
} |
"""
The script operates on such directories and files
|-- core
| `-- deps
| |-- emscripten
| `-- wasi-sdk
| `-- src
| |-- llvm-project
| `-- wasi-libc
`-- test-tools
|-- build-wasi-sdk
| |-- build_wasi_sdk.py
| |-- include
| `-- patches
`-- wasi-sdk
|-- bin
|-- lib
`-- share
`-- wasi-sysroot
"""
import hashlib
import logging
import os
import pathlib
import shlex
import shutil
import subprocess
import sys
import tarfile
import tempfile
import urllib
import urllib.request
logger = logging.getLogger("build_wasi_sdk")
# Pinned external source trees.  For each repository: the expected SHA-256
# of its release tarball, the checkout location relative to the WAMR root,
# the tarball's single top-level directory (stripped on unpack), and the
# download URL.
external_repos = {
    "config": {
        "sha256": "302e5e7f3c4996976c58efde8b2f28f71d51357e784330eeed738e129300dc33",
        "store_dir": "core/deps/wasi-sdk/src/config",
        "strip_prefix": "config-191bcb948f7191c36eefe634336f5fc5c0c4c2be",
        "url": "https://git.savannah.gnu.org/cgit/config.git/snapshot/config-191bcb948f7191c36eefe634336f5fc5c0c4c2be.tar.gz",
    },
    "emscripten": {
        "sha256": "0904a65379aea3ea94087b8c12985b2fee48599b473e3bef914fec2e3941532d",
        "store_dir": "core/deps/emscripten",
        "strip_prefix": "emscripten-2.0.28",
        "url": "https://github.com/emscripten-core/emscripten/archive/refs/tags/2.0.28.tar.gz",
    },
    "llvm-project": {
        "sha256": "dc5169e51919f2817d06615285e9da6a804f0f881dc55d6247baa25aed3cc143",
        "store_dir": "core/deps/wasi-sdk/src/llvm-project",
        "strip_prefix": "llvm-project-34ff6a75f58377f32a5046a29f55c4c0e58bee9e",
        "url": "https://github.com/llvm/llvm-project/archive/34ff6a75f58377f32a5046a29f55c4c0e58bee9e.tar.gz",
    },
    "wasi-sdk": {
        "sha256": "fc4fdb0e97b915241f32209492a7d0fab42c24216f87c1d5d75f46f7c70a553d",
        "store_dir": "core/deps/wasi-sdk",
        "strip_prefix": "wasi-sdk-1a953299860bbcc198ad8c12a21d1b2e2f738355",
        "url": "https://github.com/WebAssembly/wasi-sdk/archive/1a953299860bbcc198ad8c12a21d1b2e2f738355.tar.gz",
    },
    "wasi-libc": {
        "sha256": "f6316ca9479d3463eb1c4f6a1d1f659bf15f67cb3c1e2e83d9d11f188dccd864",
        "store_dir": "core/deps/wasi-sdk/src/wasi-libc",
        "strip_prefix": "wasi-libc-a78cd329aec717f149934d7362f57050c9401f60",
        "url": "https://github.com/WebAssembly/wasi-libc/archive/a78cd329aec717f149934d7362f57050c9401f60.tar.gz",
    },
}
# TODO: can we use headers from wasi-libc and clang directly?
# (source path inside the emscripten checkout, destination path relative to
# the installed wasi-sysroot include directory)
emscripten_headers_src_dst = [
    ("include/compat/emmintrin.h", "sse/emmintrin.h"),
    ("include/compat/immintrin.h", "sse/immintrin.h"),
    ("include/compat/smmintrin.h", "sse/smmintrin.h"),
    ("include/compat/xmmintrin.h", "sse/xmmintrin.h"),
    ("lib/libc/musl/include/pthread.h", "libc/musl/pthread.h"),
    ("lib/libc/musl/include/signal.h", "libc/musl/signal.h"),
    ("lib/libc/musl/include/netdb.h", "libc/musl/netdb.h"),
    ("lib/libc/musl/include/sys/wait.h", "libc/musl/sys/wait.h"),
    ("lib/libc/musl/include/sys/socket.h", "libc/musl/sys/socket.h"),
    ("lib/libc/musl/include/setjmp.h", "libc/musl/setjmp.h"),
    ("lib/libc/musl/arch/emscripten/bits/setjmp.h", "libc/musl/bits/setjmp.h"),
]
def checksum(name, local_file):
    """Return True when *local_file*'s SHA-256 matches the recorded digest.

    :param name: key into the module-level ``external_repos`` table whose
        "sha256" entry holds the expected digest
    :param local_file: path of the downloaded tarball to verify
    """
    digest = hashlib.sha256()
    with open(local_file, "rb") as f:
        # Hash in 4 KiB chunks; iter()'s sentinel form stops at EOF (b"").
        for chunk in iter(lambda: f.read(4096), b""):
            digest.update(chunk)
    return digest.hexdigest() == external_repos[name]["sha256"]
def download(url, local_file):
    """Download *url* into *local_file* (a pathlib.Path).

    :return: True when the file exists after the transfer
    """
    logger.debug(f"download from {url}")
    urllib.request.urlretrieve(url, local_file)
    return local_file.exists()
def unpack(tar_file, strip_prefix, dest_dir):
    """Extract *tar_file* into *dest_dir*, dropping the *strip_prefix* dir.

    The archive is first extracted into a temporary directory (with a path
    traversal check on every member), then everything under *strip_prefix*
    is moved into *dest_dir*.

    :param tar_file: pathlib.Path of the .tar.gz archive
    :param strip_prefix: name of the single top-level directory inside the
        archive (e.g. "wasi-sdk-<sha>") that is stripped from the layout
    :param dest_dir: pathlib.Path of the final destination
    :return: True on success, False when *strip_prefix* was not found in
        the archive
    """
    # extract .tar.gz to /tmp, then move back without the stripped prefix
    with tempfile.TemporaryDirectory() as tmp:
        with tarfile.open(tar_file) as tar:
            logger.debug(f"extract to {tmp}")

            def is_within_directory(directory, target):
                # Compare whole path components.  The previous
                # os.path.commonprefix check was character based and would
                # accept "/tmp/foo-evil" as being inside "/tmp/foo".
                abs_directory = os.path.abspath(directory)
                abs_target = os.path.abspath(target)
                try:
                    common = os.path.commonpath([abs_directory, abs_target])
                except ValueError:
                    # e.g. paths on different drives (Windows)
                    return False
                return common == abs_directory

            def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
                # Reject archives whose members would escape *path*
                # (directory traversal such as "../../etc/passwd").
                for member in tar.getmembers():
                    member_path = os.path.join(path, member.name)
                    if not is_within_directory(path, member_path):
                        raise Exception("Attempted Path Traversal in Tar File")
                tar.extractall(path, members, numeric_owner=numeric_owner)

            safe_extract(tar, tmp)

        strip_prefix_dir = (
            pathlib.Path(tmp).joinpath(strip_prefix + os.path.sep).resolve()
        )
        if not strip_prefix_dir.exists():
            logger.error(f"extract {tar_file.name} failed")
            return False

        # mv /tmp/${strip_prefix}/* into dest_dir
        logger.debug(f"move {strip_prefix_dir} to {dest_dir}")
        shutil.copytree(
            str(strip_prefix_dir),
            str(dest_dir),
            copy_function=shutil.move,
            dirs_exist_ok=True,
        )
    return True
def download_repo(name, root):
    """Ensure external repository *name* is downloaded and unpacked.

    The tarball is cached under /tmp/build_wasi_sdk/ and its SHA-256 is
    verified before unpacking into the repository's configured store_dir
    under *root*.  A DOWNLOADED flag file marks a completed checkout and
    makes re-runs a no-op.

    :param name: key into the module-level ``external_repos`` table
    :param root: pathlib.Path of the WAMR repository root
    :return: True when the repository is available locally, else False
    """
    if name not in external_repos:
        logger.error(f"{name} is not a known repository")
        return False
    store_dir = root.joinpath(f'{external_repos[name]["store_dir"]}').resolve()
    download_flag = store_dir.joinpath("DOWNLOADED")
    if store_dir.exists() and download_flag.exists():
        logger.info(
            f"keep using '{store_dir.relative_to(root)}'. Or to remove it and try again"
        )
        return True
    # download only when the target is neither existed nor broken
    download_dir = pathlib.Path("/tmp/build_wasi_sdk/")
    # parents=True so a missing parent directory is not a fatal error
    download_dir.mkdir(parents=True, exist_ok=True)
    tar_name = pathlib.Path(external_repos[name]["url"]).name
    tar_file = download_dir.joinpath(tar_name)
    if tar_file.exists():
        if checksum(name, tar_file):
            logger.debug(f"use pre-downloaded {tar_file}")
        else:
            logger.debug(f"{tar_file} is broken, remove it")
            tar_file.unlink()
    if not tar_file.exists():
        if not download(external_repos[name]["url"], tar_file) or not checksum(
            name, tar_file
        ):
            logger.error(f"download {name} failed")
            return False
    # unpack and removing *strip_prefix*
    if not unpack(tar_file, external_repos[name]["strip_prefix"], store_dir):
        return False
    # leave a FLAG
    download_flag.touch()
    # leave download files in /tmp
    return True
def run_patch(patch_file, cwd):
    """Apply *patch_file* with `patch -f -p1' inside directory *cwd*.

    A dry run is attempted first; the real patch only runs when the dry
    run succeeds.

    :param patch_file: pathlib.Path of the unified diff to apply
    :param cwd: directory the patch is applied in
    :return: True when the patch applied cleanly, else False
    """
    if not patch_file.exists():
        logger.error(f"{patch_file} not found")
        return False
    with open(patch_file, "r") as f:
        try:
            PATCH_DRY_RUN_CMD = "patch -f -p1 --dry-run"
            # NOTE(review): check_call raises CalledProcessError on non-zero
            # exit, so these truthy checks are defensive; failures normally
            # take the except path below.
            if subprocess.check_call(shlex.split(PATCH_DRY_RUN_CMD), stdin=f, cwd=cwd):
                logger.error(f"patch dry-run {cwd} failed")
                return False
            PATCH_CMD = "patch -f -p1"
            # rewind so the real run reads the diff from the start again
            f.seek(0)
            if subprocess.check_call(shlex.split(PATCH_CMD), stdin=f, cwd=cwd):
                logger.error(f"patch {cwd} failed")
                return False
        except subprocess.CalledProcessError:
            logger.error(f"patch {cwd} failed")
            return False
    return True
def build_and_install_wasi_sdk(root):
    """Patch, build, and install wasi-sdk into test-tools/wasi-sdk.

    Each phase leaves a flag file (PATCHED, BUILDED, INSTALLED) in the
    wasi-sdk store directory so interrupted runs resume where they left
    off and completed phases are skipped.

    :param root: pathlib.Path of the WAMR repository root
    :return: True on success, False on any failure
    """
    store_dir = root.joinpath(f'{external_repos["wasi-sdk"]["store_dir"]}').resolve()
    if not store_dir.exists():
        logger.error(f"{store_dir} was not found")
        return False
    # patch wasi-libc and wasi-sdk
    patch_flag = store_dir.joinpath("PATCHED")
    if not patch_flag.exists():
        if not run_patch(
            root.joinpath("test-tools/build-wasi-sdk/patches/wasi_libc.patch"),
            store_dir.joinpath("src/wasi-libc"),
        ):
            return False
        if not run_patch(
            root.joinpath("test-tools/build-wasi-sdk/patches/wasi_sdk.patch"), store_dir
        ):
            return False
        patch_flag.touch()
    else:
        logger.info("bypass the patch phase")
    # build
    build_flag = store_dir.joinpath("BUILDED")
    if not build_flag.exists():
        BUILD_CMD = "make build"
        # check_call raises on non-zero exit; the truthy check is a second
        # line of defense in case a zero-raising variant is substituted.
        if subprocess.check_call(shlex.split(BUILD_CMD), cwd=store_dir):
            logger.error("build wasi-sdk failed")
            return False
        build_flag.touch()
    else:
        logger.info("bypass the build phase")
    # install
    install_flag = store_dir.joinpath("INSTALLED")
    binary_path = root.joinpath("test-tools").resolve()
    if not install_flag.exists():
        shutil.copytree(
            str(store_dir.joinpath("build/install/opt").resolve()),
            str(binary_path),
            dirs_exist_ok=True,
        )
        # install extra headers taken from the emscripten checkout
        emscripten_headers = (
            root.joinpath(external_repos["emscripten"]["store_dir"])
            .joinpath("system")
            .resolve()
        )
        wasi_sysroot_headers = binary_path.joinpath(
            "wasi-sdk/share/wasi-sysroot/include"
        ).resolve()
        for src, dst in emscripten_headers_src_dst:
            src = emscripten_headers.joinpath(src)
            dst = wasi_sysroot_headers.joinpath(dst)
            dst.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy(src, dst)
        install_flag.touch()
    else:
        logger.info("bypass the install phase")
    return True
def main():
    """Entry point: locate the WAMR root, fetch all external repositories,
    then build and install wasi-sdk.

    :return: True on success, False on the first failure
    """
    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter("%(asctime)s - %(message)s"))
    logger.setLevel(logging.INFO)
    logger.addHandler(console)
    logger.propagate = False
    # locate the root of WAMR from this script's location
    current_file = pathlib.Path(__file__)
    if current_file.is_symlink():
        # NOTE(review): os.readlink may return a path relative to the link's
        # own directory; Path.resolve() would be more robust — confirm
        # before changing the behavior.
        current_file = pathlib.Path(os.readlink(current_file))
    root = current_file.parent.joinpath("../..").resolve()
    logger.info(f"The root of WAMR is {root}")
    # download repos (iterating the dict yields its keys directly)
    for repo in external_repos:
        if not download_repo(repo, root):
            return False
    # build wasi_sdk and install
    if not build_and_install_wasi_sdk(root):
        return False
    # TODO install headers from emscripten
    return True
if __name__ == "__main__":
    # Exit status 0 on success, 1 on any failure, so CI can gate on it.
    sys.exit(0 if main() else 1)
| {
"content_hash": "7194c346b2d05e4ca96bafdd073e1e4e",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 126,
"avg_line_length": 33.53525641025641,
"alnum_prop": 0.6069960814298002,
"repo_name": "bytecodealliance/wasm-micro-runtime",
"id": "a87cdef69e1a6432131644593ec332ea91257952",
"size": "10611",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test-tools/build-wasi-sdk/build_wasi_sdk.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "34215"
},
{
"name": "Batchfile",
"bytes": "3434"
},
{
"name": "C",
"bytes": "4916090"
},
{
"name": "C++",
"bytes": "336440"
},
{
"name": "CMake",
"bytes": "126550"
},
{
"name": "CSS",
"bytes": "16329"
},
{
"name": "Dockerfile",
"bytes": "9272"
},
{
"name": "Go",
"bytes": "32942"
},
{
"name": "HTML",
"bytes": "27470"
},
{
"name": "JavaScript",
"bytes": "14671"
},
{
"name": "Makefile",
"bytes": "26883"
},
{
"name": "Python",
"bytes": "368061"
},
{
"name": "Shell",
"bytes": "72640"
},
{
"name": "TypeScript",
"bytes": "90504"
},
{
"name": "WebAssembly",
"bytes": "21074"
}
],
"symlink_target": ""
} |
__author__ = 'lauft'
import os
from stat import *
class CheckPath(object):
    """
    Check whether a filesystem path meets certain requirements.

    Every predicate returns a bool; relative paths are resolved against
    the current working directory by the os.path helpers at call time.
    """
    def __init__(self, path):
        # Stored as given (str); not normalized or resolved here.
        self.path = path
    def does_exist(self):
        """Return True when the path exists (file, directory, or link target).

        :rtype : bool
        """
        return os.path.exists(self.path)
    def does_not_exist(self):
        return not self.does_exist()
    def is_a_file(self):
        return os.path.isfile(self.path)
    def is_not_a_file(self):
        return not self.is_a_file()
    def is_a_directory(self):
        return os.path.isdir(self.path)
    def is_not_a_directory(self):
        return not self.is_a_directory()
    def is_an_executable(self):
        # Only regular files can be executable.
        if self.is_not_a_file():
            return False
        # bool() so callers get a proper boolean rather than raw mode bits.
        return bool(os.stat(self.path).st_mode & S_IXUSR)
    def has_permissions(self, mode):
        """Return True when the path's permission bits equal *mode*.

        0o777 replaces the Python-2-only octal literal 0777, keeping the
        same value while remaining valid on Python 3 (and Python >= 2.6).
        """
        file_mode = S_IMODE(os.stat(self.path).st_mode) & 0o777
        return file_mode == mode
    def does_contain(self, item):
        # True when *item* exists directly under this path.
        path = os.path.join(self.path, item)
        return os.path.exists(path)
def does_not_contain(self, item):
return not self.does_contain(item) | {
"content_hash": "4408a0de80c4b75441245069252438da",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 62,
"avg_line_length": 22.49019607843137,
"alnum_prop": 0.5797733217088056,
"repo_name": "lauft/pyCheck",
"id": "23803eb70bb9628c3ab0479254c5ebfa3d06b61c",
"size": "1147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycheck/checkpath.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14313"
}
],
"symlink_target": ""
} |
import socketserver
class TCPHandler(socketserver.StreamRequestHandler):
    """Serve simple file downloads: the client sends a file name and the
    server streams back the content of BASE_PATH/<name> in 1 KiB chunks."""

    # Directory the requested file names are resolved against.
    BASE_PATH = "/Users/aria/temp/tcp/"

    def handle(self):
        # The first (up to) 1 KiB received is treated as the file name.
        data = self.request.recv(1024).strip()
        file_name = data.decode("utf-8")
        # NOTE(review): file_name arrives from the network unvalidated — a
        # name containing "../" escapes BASE_PATH; consider sanitizing with
        # os.path.basename() before opening.
        print("file_name: %s" % file_name)
        print("{} wrote:".format(self.client_address[0]))
        with open(self.BASE_PATH + file_name, "rb") as f:
            # Loop until EOF.  The previous version read a single chunk, so
            # only the first 1024 bytes of a file were ever sent, and the
            # success message was printed only for empty files.
            while True:
                b = f.read(1024)
                if not b:
                    break
                self.wfile.write(b)
        print("发送成功")
if __name__ == "__main__":
    HOST, PORT = "localhost", 9999
    # TCPServer as a context manager closes the listening socket on exit;
    # serve_forever() blocks until interrupted.
    with socketserver.TCPServer((HOST, PORT), TCPHandler) as server:
        server.serve_forever()
| {
"content_hash": "d085adc358949a570c9e8477ce418516",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 68,
"avg_line_length": 26.26923076923077,
"alnum_prop": 0.5592972181551976,
"repo_name": "AriaLyy/Aria",
"id": "41bb5549d6f65b85ab462898a87a61cd5c092c18",
"size": "691",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py/TcpDownload.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "2567146"
},
{
"name": "Kotlin",
"bytes": "14810"
},
{
"name": "Python",
"bytes": "3348"
}
],
"symlink_target": ""
} |
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class SnapshotsOperations(object):
"""SnapshotsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2017-03-30".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Service API version pinned for every operation in this class.
        self.api_version = "2017-03-30"
        self.config = config
    def _create_or_update_initial(
            self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, **operation_config):
        """Send the initial PUT of the create_or_update long-running
        operation and deserialize its immediate response; polling until
        completion is handled by the public create_or_update."""
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(snapshot, 'Snapshot')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        # Both 200 (synchronous completion) and 202 (accepted for async
        # processing) carry a Snapshot payload.
        if response.status_code == 200:
            deserialized = self._deserialize('Snapshot', response)
        if response.status_code == 202:
            deserialized = self._deserialize('Snapshot', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a snapshot (long-running operation).

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot within the given
         subscription and resource group.
        :type snapshot_name: str
        :param snapshot: Snapshot object supplied in the body of the Put disk
         operation.
        :type snapshot: ~azure.mgmt.compute.v2017_03_30.models.Snapshot
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: An instance of AzureOperationPoller that returns Snapshot or
         ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_03_30.models.Snapshot]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            snapshot=snapshot,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            # Caller wants the immediate response; skip long-running polling.
            return raw_result
        # Construct and send request
        def long_running_send():
            # The initial PUT was already sent above; hand its response to
            # the poller unchanged.
            return raw_result.response
        def get_long_running_status(status_link, headers=None):
            # Poll the service-provided status link, propagating the original
            # client request id for server-side correlation.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)
        def get_long_running_output(response):
            # Map failures to CloudError and deserialize the final Snapshot.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = self._deserialize('Snapshot', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def _update_initial(
            self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, **operation_config):
        """Send the initial PATCH of the update long-running operation and
        deserialize its immediate response; polling until completion is
        handled by the public update."""
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body; PATCH takes the partial SnapshotUpdate model.
        body_content = self._serialize.body(snapshot, 'SnapshotUpdate')
        # Construct and send request
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Snapshot', response)
        if response.status_code == 202:
            deserialized = self._deserialize('Snapshot', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def update(
            self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, **operation_config):
        """Updates (patches) a snapshot (long-running operation).

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot within the given
         subscription and resource group.
        :type snapshot_name: str
        :param snapshot: Snapshot object supplied in the body of the Patch
         snapshot operation.
        :type snapshot: ~azure.mgmt.compute.v2017_03_30.models.SnapshotUpdate
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: An instance of AzureOperationPoller that returns Snapshot or
         ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_03_30.models.Snapshot]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._update_initial(
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            snapshot=snapshot,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            # Caller wants the immediate response; skip long-running polling.
            return raw_result
        # Construct and send request
        def long_running_send():
            # The initial PATCH was already sent above.
            return raw_result.response
        def get_long_running_status(status_link, headers=None):
            # Poll the service-provided status link, propagating the original
            # client request id for server-side correlation.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)
        def get_long_running_output(response):
            # Map failures to CloudError and deserialize the final Snapshot.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = self._deserialize('Snapshot', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def get(
            self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config):
        """Gets information about a snapshot.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot within the given
         subscription and resource group.
        :type snapshot_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Snapshot or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.compute.v2017_03_30.models.Snapshot or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request (synchronous GET, no long-running poll)
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Snapshot', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def _delete_initial(
            self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config):
        """Send the initial DELETE of the delete long-running operation and
        deserialize its immediate response; polling until completion is
        handled by the public delete."""
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        # Only a 200 carries a body; 202/204 responses leave this as None.
        if response.status_code == 200:
            deserialized = self._deserialize('OperationStatusResponse', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def delete(
            self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config):
        """Deletes a snapshot (long-running operation).

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot within the given
         subscription and resource group.
        :type snapshot_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: An instance of AzureOperationPoller that returns
         OperationStatusResponse or ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            # Caller wants the immediate response; skip long-running polling.
            return raw_result
        # Construct and send request
        def long_running_send():
            # The initial DELETE was already sent above.
            return raw_result.response
        def get_long_running_status(status_link, headers=None):
            # Poll the service-provided status link, propagating the original
            # client request id for server-side correlation.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)
        def get_long_running_output(response):
            # Map failures to CloudError and deserialize the final status.
            if response.status_code not in [200, 202, 204]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = self._deserialize('OperationStatusResponse', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def list_by_resource_group(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Lists snapshots under a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Snapshot
        :rtype:
         ~azure.mgmt.compute.v2017_03_30.models.SnapshotPaged[~azure.mgmt.compute.v2017_03_30.models.Snapshot]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Page fetcher invoked lazily by SnapshotPaged: the first call
            # builds the list URL, later calls follow the server-supplied
            # next_link verbatim.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.SnapshotPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.SnapshotPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
def list(
        self, custom_headers=None, raw=False, **operation_config):
    """Lists snapshots under a subscription.
    (AutoRest-style generated operation; prefer regenerating over
    hand-editing.)
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of Snapshot
    :rtype:
     ~azure.mgmt.compute.v2017_03_30.models.SnapshotPaged[~azure.mgmt.compute.v2017_03_30.models.Snapshot]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    def internal_paging(next_link=None, raw=False):
        # Paging callback invoked by SnapshotPaged: first page builds the
        # collection URL; later pages reuse the service-supplied nextLink.
        if not next_link:
            # Construct URL
            url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots'
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        else:
            url = next_link
            query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)
        # Only 200 is a valid page response; anything else is surfaced
        # as a CloudError carrying the service request id.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        return response
    # Deserialize response
    deserialized = models.SnapshotPaged(internal_paging, self._deserialize.dependencies)
    if raw:
        header_dict = {}
        client_raw_response = models.SnapshotPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response
    return deserialized
def _grant_access_initial(
        self, resource_group_name, snapshot_name, access, duration_in_seconds, custom_headers=None, raw=False, **operation_config):
    """Initial (non-polling) leg of the grant-access long-running
    operation: POSTs a GrantAccessData body to .../beginGetAccess and
    returns the deserialized AccessUri on HTTP 200, or None on HTTP 202
    (operation accepted, still in progress).
    """
    grant_access_data = models.GrantAccessData(access=access, duration_in_seconds=duration_in_seconds)
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess'
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct body
    body_content = self._serialize.body(grant_access_data, 'GrantAccessData')
    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)
    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    # 202 has no body yet; only 200 carries the AccessUri payload.
    if response.status_code == 200:
        deserialized = self._deserialize('AccessUri', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def grant_access(
        self, resource_group_name, snapshot_name, access, duration_in_seconds, custom_headers=None, raw=False, **operation_config):
    """Grants access to a snapshot.
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param snapshot_name: The name of the snapshot within the given
     subscription and resource group.
    :type snapshot_name: str
    :param access: Possible values include: 'None', 'Read'
    :type access: str or
     ~azure.mgmt.compute.v2017_03_30.models.AccessLevel
    :param duration_in_seconds: Time duration in seconds until the SAS
     access expires.
    :type duration_in_seconds: int
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :return: An instance of AzureOperationPoller that returns AccessUri or
     ClientRawResponse if raw=true
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_03_30.models.AccessUri]
    or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    raw_result = self._grant_access_initial(
        resource_group_name=resource_group_name,
        snapshot_name=snapshot_name,
        access=access,
        duration_in_seconds=duration_in_seconds,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )
    if raw:
        return raw_result
    # Construct and send request
    def long_running_send():
        # The initial POST already happened above; the poller only needs
        # its raw response to bootstrap polling.
        return raw_result.response
    def get_long_running_status(status_link, headers=None):
        # Poll the Azure-Async/Location URL, reusing the original
        # client request id so the whole operation correlates in logs.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        header_parameters = {}
        header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
        return self._client.send(
            request, header_parameters, stream=False, **operation_config)
    def get_long_running_output(response):
        # Translate the final polling response into AccessUri (or raise).
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = self._deserialize('AccessUri', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def _revoke_access_initial(
        self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config):
    """Initial (non-polling) leg of the revoke-access long-running
    operation: POSTs to .../endGetAccess and returns the deserialized
    OperationStatusResponse on HTTP 200, or None on HTTP 202.
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess'
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request (no body: revoke takes no parameters)
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)
    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('OperationStatusResponse', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def revoke_access(
        self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config):
    """Revokes access to a snapshot.
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param snapshot_name: The name of the snapshot within the given
     subscription and resource group.
    :type snapshot_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :return: An instance of AzureOperationPoller that returns
     OperationStatusResponse or ClientRawResponse if raw=true
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
    or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    raw_result = self._revoke_access_initial(
        resource_group_name=resource_group_name,
        snapshot_name=snapshot_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )
    if raw:
        return raw_result
    # Construct and send request
    def long_running_send():
        # The initial POST already happened; hand its response to the poller.
        return raw_result.response
    def get_long_running_status(status_link, headers=None):
        # Poll the operation-status URL with the original request id.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        header_parameters = {}
        header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
        return self._client.send(
            request, header_parameters, stream=False, **operation_config)
    def get_long_running_output(response):
        # Translate the final polling response into the result (or raise).
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = self._deserialize('OperationStatusResponse', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
| {
"content_hash": "2de406b4fd76da53a860cdccc45229dc",
"timestamp": "",
"source": "github",
"line_count": 811,
"max_line_length": 150,
"avg_line_length": 43.57829839704069,
"alnum_prop": 0.6344575858751627,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "ad0291d164ff626d5f5ac6f3af6cc67a85d53235",
"size": "35816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/operations/snapshots_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
from neutron.api.v2 import attributes as attr
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import db_base_plugin_v2
from neutron.db import portsecurity_db
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import portsecurity as psec
from neutron import manager
from neutron.tests.unit import test_db_plugin
DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_allowedaddresspairs.'
'AllowedAddressPairTestPlugin')
class AllowedAddressPairTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
    """Base test case for the allowed-address-pairs extension.

    Records whether the loaded core plugin supports the port-security
    extension so dependent tests can skip themselves.
    """

    def setUp(self, plugin=None, ext_mgr=None):
        super(AllowedAddressPairTestCase, self).setUp(plugin)
        # Tests exercising port-security interactions skip themselves when
        # the core plugin does not advertise that extension alias.
        core_plugin = manager.NeutronManager.get_plugin()
        aliases = core_plugin.supported_extension_aliases
        self._skip_port_security = 'port-security' not in aliases
class AllowedAddressPairTestPlugin(portsecurity_db.PortSecurityDbMixin,
                                   db_base_plugin_v2.NeutronDbPluginV2,
                                   addr_pair_db.AllowedAddressPairsMixin):
    """Test plugin that implements necessary calls on create/delete port for
    associating ports with port security and allowed address pairs.
    """
    supported_extension_aliases = ["allowed-address-pairs"]
    def create_port(self, context, port):
        # Create the port and, inside the same DB transaction, persist any
        # allowed-address-pairs supplied in the request.
        p = port['port']
        with context.session.begin(subtransactions=True):
            neutron_db = super(AllowedAddressPairTestPlugin, self).create_port(
                context, port)
            p.update(neutron_db)
            if attr.is_attr_set(p.get(addr_pair.ADDRESS_PAIRS)):
                self._process_create_allowed_address_pairs(
                    context, p,
                    p[addr_pair.ADDRESS_PAIRS])
            else:
                # Normalize: the response dict always carries the key,
                # even when no pairs were requested.
                p[addr_pair.ADDRESS_PAIRS] = None
        return port['port']
    def update_port(self, context, id, port):
        # Determine up front whether this update clears or replaces the
        # port's allowed-address-pairs.
        delete_addr_pairs = self._check_update_deletes_allowed_address_pairs(
            port)
        has_addr_pairs = self._check_update_has_allowed_address_pairs(port)
        with context.session.begin(subtransactions=True):
            ret_port = super(AllowedAddressPairTestPlugin, self).update_port(
                context, id, port)
            # copy values over - but not fixed_ips
            port['port'].pop('fixed_ips', None)
            ret_port.update(port['port'])
            if (delete_addr_pairs or has_addr_pairs):
                # delete address pairs and re-add them
                self._delete_allowed_address_pairs(context, id)
                self._process_create_allowed_address_pairs(
                    context, ret_port,
                    ret_port[addr_pair.ADDRESS_PAIRS])
        return ret_port
class AllowedAddressPairDBTestCase(AllowedAddressPairTestCase):
    """Variant of the base test case that defaults to the in-module plugin."""

    def setUp(self, plugin=None, ext_mgr=None):
        # Fall back to the local test plugin unless a subclass supplied one.
        if not plugin:
            plugin = DB_PLUGIN_KLASS
        super(AllowedAddressPairDBTestCase, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr)
class TestAllowedAddressPairs(AllowedAddressPairDBTestCase):
    """REST-level tests for allowed-address-pairs create/update/validation
    behavior, including its interaction with the port-security extension.
    """
    def test_create_port_allowed_address_pairs(self):
        with self.network() as net:
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=(addr_pair.ADDRESS_PAIRS,),
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)
            self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
                             address_pairs)
            self._delete('ports', port['port']['id'])
    def test_create_port_security_true_allowed_address_pairs(self):
        if self._skip_port_security:
            self.skipTest("Plugin does not implement port-security extension")
        with self.network() as net:
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=('port_security_enabled',
                                              addr_pair.ADDRESS_PAIRS,),
                                    port_security_enabled=True,
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)
            self.assertEqual(port['port'][psec.PORTSECURITY], True)
            self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
                             address_pairs)
            self._delete('ports', port['port']['id'])
    def test_create_port_security_false_allowed_address_pairs(self):
        # Address pairs require port security: expect 409 Conflict when
        # port_security_enabled=False.
        if self._skip_port_security:
            self.skipTest("Plugin does not implement port-security extension")
        with self.network() as net:
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=('port_security_enabled',
                                              addr_pair.ADDRESS_PAIRS,),
                                    port_security_enabled=False,
                                    allowed_address_pairs=address_pairs)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, 409)
    def test_create_port_bad_mac(self):
        address_pairs = [{'mac_address': 'invalid_mac',
                          'ip_address': '10.0.0.1'}]
        self._create_port_with_address_pairs(address_pairs, 400)
    def test_create_port_bad_ip(self):
        address_pairs = [{'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1222'}]
        self._create_port_with_address_pairs(address_pairs, 400)
    def test_create_missing_ip_field(self):
        address_pairs = [{'mac_address': '00:00:00:00:00:01'}]
        self._create_port_with_address_pairs(address_pairs, 400)
    def test_create_duplicate_mac_ip(self):
        address_pairs = [{'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1'},
                         {'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1'}]
        self._create_port_with_address_pairs(address_pairs, 400)
    def test_create_port_extra_args(self):
        address_pairs = [{'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1',
                          'icbb': 'agreed'}]
        self._create_port_with_address_pairs(address_pairs, 400)
    def _create_port_with_address_pairs(self, address_pairs, ret_code):
        # Helper: POST a port with the given pairs and assert the expected
        # HTTP status code (used by the validation tests above).
        with self.network() as net:
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=(addr_pair.ADDRESS_PAIRS,),
                                    allowed_address_pairs=address_pairs)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, ret_code)
    def test_update_add_address_pairs(self):
        with self.network() as net:
            res = self._create_port(self.fmt, net['network']['id'])
            port = self.deserialize(self.fmt, res)
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            update_port = {'port': {addr_pair.ADDRESS_PAIRS:
                                    address_pairs}}
            req = self.new_update_request('ports', update_port,
                                          port['port']['id'])
            port = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
                             address_pairs)
            self._delete('ports', port['port']['id'])
    def test_create_address_gets_port_mac(self):
        # A pair without a mac_address defaults to the port's own MAC.
        with self.network() as net:
            address_pairs = [{'ip_address': '23.23.23.23'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=('port_security_enabled',
                                              addr_pair.ADDRESS_PAIRS,),
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)['port']
            port_addr_mac = port[addr_pair.ADDRESS_PAIRS][0]['mac_address']
            self.assertEqual(port_addr_mac,
                             port['mac_address'])
            self._delete('ports', port['id'])
    def test_update_port_security_off_address_pairs(self):
        # Disabling port security while pairs are present must be rejected
        # with 409 Conflict.
        if self._skip_port_security:
            self.skipTest("Plugin does not implement port-security extension")
        with self.network() as net:
            with self.subnet(network=net):
                address_pairs = [{'mac_address': '00:00:00:00:00:01',
                                  'ip_address': '10.0.0.1'}]
                res = self._create_port(self.fmt, net['network']['id'],
                                        arg_list=('port_security_enabled',
                                                  addr_pair.ADDRESS_PAIRS,),
                                        port_security_enabled=True,
                                        allowed_address_pairs=address_pairs)
                port = self.deserialize(self.fmt, res)
                update_port = {'port': {psec.PORTSECURITY: False}}
                # If plugin implements security groups we also need to remove
                # the security group on port.
                plugin_obj = manager.NeutronManager.get_plugin()
                if 'security-groups' in plugin_obj.supported_extension_aliases:
                    update_port['port']['security_groups'] = []
                req = self.new_update_request('ports', update_port,
                                              port['port']['id'])
                res = req.get_response(self.api)
                self.assertEqual(res.status_int, 409)
                self._delete('ports', port['port']['id'])
    def test_create_port_remove_allowed_address_pairs(self):
        with self.network() as net:
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=(addr_pair.ADDRESS_PAIRS,),
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)
            update_port = {'port': {addr_pair.ADDRESS_PAIRS: []}}
            req = self.new_update_request('ports', update_port,
                                          port['port']['id'])
            port = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], [])
            self._delete('ports', port['port']['id'])
class TestAllowedAddressPairsXML(TestAllowedAddressPairs):
    # Re-run the entire suite above using the XML wire format.
    fmt = 'xml'
| {
"content_hash": "2bb6ee397979279908e0aed07f968687",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 79,
"avg_line_length": 48.60434782608696,
"alnum_prop": 0.5414616692011808,
"repo_name": "SnabbCo/neutron",
"id": "850ebc43a5256edb88f8df4f065bafe829391825",
"size": "11771",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/test_extension_allowedaddresspairs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""Unit test for eventmgr - processing Zookeeper events.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
import io
import os
import shutil
import tempfile
import unittest
import kazoo
import mock
import treadmill
from treadmill import context
from treadmill import eventmgr
from treadmill import yamlwrapper as yaml
from treadmill.tests.testutils import mockzk
class MockEventObject:
    """Minimal stand-in for a ``threading.Event``-style object.

    Tracks a single boolean flag through the same ``set()`` / ``clear()`` /
    ``is_set()`` interface, without any real synchronization.
    """

    def __init__(self):
        # The flag starts out unset, mirroring a fresh threading.Event().
        self._flag = False

    def clear(self):
        """Reset the flag to False."""
        self._flag = False

    def set(self):
        """Raise the flag to True."""
        self._flag = True

    def is_set(self):
        """Return the current flag value."""
        return self._flag
def mock_event_object():
    """Return mock event object."""
    # Factory (rather than a shared instance) so it can be wired up as a
    # mock side_effect, yielding a fresh, independent event per call.
    return MockEventObject()
class EventMgrTest(mockzk.MockZookeeperTestCase):
    """Mock test for treadmill.eventmgr.EventMgr.

    Each test wires a fully mocked Zookeeper client/context around an
    EventMgr instance rooted in a throwaway temp directory.

    Fix in this revision: two tests assigned ``mock_zkclient.exits`` (a
    typo) which silently created a stray Mock attribute instead of
    stubbing ``exists()``; both now stub ``exists`` as intended.
    """
    @mock.patch('treadmill.appenv.AppEnvironment', mock.Mock(autospec=True))
    @mock.patch('treadmill.watchdog.Watchdog', mock.Mock(autospec=True))
    def setUp(self):
        # Fresh root + cache dir per test; removed in tearDown.
        self.root = tempfile.mkdtemp()
        self.cache = os.path.join(self.root, 'cache')
        os.mkdir(self.cache)
        context.GLOBAL.cell = 'test'
        context.GLOBAL.zk.url = 'zookeeper://xxx@yyy:123'
        self.evmgr = eventmgr.EventMgr(root=self.root)
        self.evmgr.tm_env.root = self.root
        self.evmgr.tm_env.cache_dir = self.cache
    def tearDown(self):
        if self.root and os.path.isdir(self.root):
            shutil.rmtree(self.root)
    @mock.patch('time.sleep', mock.Mock())
    @mock.patch('treadmill.context.GLOBAL.zk', mock.Mock())
    def test_run(self):
        """Test EventMgr run method.
        """
        mock_zkclient = mock.Mock()
        context.GLOBAL.zk.conn = mock_zkclient
        mock_data_watch = mock.Mock(
            side_effect=lambda func: func({'valid_until': 123.0}, None, None)
        )
        mock_zkclient.get.return_value = (b'{}', mock.Mock(ctime=1000))
        # BUGFIX: was ``mock_zkclient.exits`` (typo) -- stub exists().
        mock_zkclient.exists.return_value = mock.Mock()
        # Decorator style watch
        mock_zkclient.DataWatch.return_value = mock_data_watch
        # Function style watch
        mock_zkclient.ChildrenWatch.side_effect = lambda _path, func: func(
            ['foo.bar#0']
        )
        mock_zkclient.handler.event_object.side_effect = mock_event_object
        self.evmgr.run(once=True)
        self.assertTrue(os.path.exists(os.path.join(self.cache, '.ready')))
        self.assertTrue(os.path.exists(os.path.join(self.cache, 'foo.bar#0')))
        mock_watchdog = self.evmgr.tm_env.watchdogs
        mock_watchdog.create.assert_called_with(
            content=mock.ANY,
            name='svc-EventMgr',
            timeout='120s'
        )
        mock_watchdog_lease = mock_watchdog.create.return_value
        mock_watchdog_lease.heartbeat.assert_called_with()
        # The main loop terminates immediately
        mock_watchdog_lease.remove.assert_called_with()
    @mock.patch('time.sleep', mock.Mock())
    @mock.patch('treadmill.context.GLOBAL.zk', mock.Mock())
    def test_run_presence_not_ready(self):
        """Test EventMgr run method - no server presence.
        """
        mock_zkclient = mock.Mock()
        context.GLOBAL.zk.conn = mock_zkclient
        # DataWatch fires with no data: server presence node is missing.
        mock_data_watch = mock.Mock(
            side_effect=lambda func: func(None, None, None)
        )
        mock_zkclient.get.return_value = (b'{}', mock.Mock(ctime=1000))
        # BUGFIX: was ``mock_zkclient.exits`` (typo) -- stub exists().
        mock_zkclient.exists.return_value = mock.Mock()
        # Decorator style watch
        mock_zkclient.DataWatch.return_value = mock_data_watch
        # Function style watch
        mock_zkclient.ChildrenWatch.side_effect = lambda _path, func: func(
            ['foo.bar#0']
        )
        mock_zkclient.handler.event_object.side_effect = mock_event_object
        self.evmgr.run(once=True)
        self.assertFalse(os.path.exists(os.path.join(self.cache, '.ready')))
        self.assertTrue(os.path.exists(os.path.join(self.cache, 'foo.bar#0')))
    @mock.patch('time.sleep', mock.Mock())
    @mock.patch('treadmill.context.GLOBAL.zk', mock.Mock())
    def test_run_placement_not_ready(self):
        """Test EventMgr run method - no placement.
        """
        mock_zkclient = mock.Mock()
        context.GLOBAL.zk.conn = mock_zkclient
        mock_data_watch = mock.Mock(
            side_effect=lambda func: func({'valid_until': 123.0}, None, None)
        )
        mock_zkclient.exists.return_value = None
        mock_zkclient.DataWatch.return_value = mock_data_watch
        mock_zkclient.handler.event_object.side_effect = mock_event_object
        self.evmgr.run(once=True)
        self.assertFalse(os.path.exists(os.path.join(self.cache, '.ready')))
    @mock.patch('treadmill.zkutils.get', mock.Mock())
    @mock.patch('treadmill.zkutils.get_with_metadata', mock.Mock())
    def test__cache(self):
        """Test application cache event.
        """
        # Access to a protected member _cache of a client class
        # pylint: disable=W0212
        treadmill.zkutils.get.return_value = {}
        treadmill.zkutils.get_with_metadata.return_value = (
            {}, mock.Mock(ctime=1000)
        )
        zkclient = kazoo.client.KazooClient()
        self.evmgr._cache(zkclient, 'foo#001')
        appcache = os.path.join(self.cache, 'foo#001')
        self.assertTrue(os.path.exists(appcache))
    @mock.patch('treadmill.zkutils.get', mock.Mock())
    @mock.patch('treadmill.zkutils.get_with_metadata', mock.Mock())
    def test__cache_placement_notfound(self):
        """Test application cache event when placement is not found.
        """
        # Access to a protected member _cache of a client class
        # pylint: disable=W0212
        treadmill.zkutils.get.return_value = {}
        treadmill.zkutils.get_with_metadata.side_effect = \
            kazoo.exceptions.NoNodeError
        zkclient = kazoo.client.KazooClient()
        self.evmgr._cache(zkclient, 'foo#001')
        appcache = os.path.join(self.cache, 'foo#001')
        self.assertFalse(os.path.exists(appcache))
    @mock.patch('treadmill.zkutils.get', mock.Mock())
    @mock.patch('treadmill.zkutils.get_with_metadata', mock.Mock())
    def test__cache_app_notfound(self):
        """Test application cache event when app is not found.
        """
        # Access to a protected member _cache of a client class
        # pylint: disable=W0212
        treadmill.zkutils.get.side_effect = \
            kazoo.exceptions.NoNodeError
        treadmill.zkutils.get_with_metadata.return_value = (
            {}, mock.Mock(ctime=1000)
        )
        zkclient = kazoo.client.KazooClient()
        self.evmgr._cache(zkclient, 'foo#001')
        appcache = os.path.join(self.cache, 'foo#001')
        self.assertFalse(os.path.exists(appcache))
    @mock.patch('treadmill.zkutils.get', mock.Mock())
    @mock.patch('treadmill.zkutils.get_with_metadata', mock.Mock())
    @mock.patch('treadmill.fs.write_safe', mock.Mock())
    @mock.patch('os.stat', mock.Mock())
    def test__cache_check_existing(self):
        """Test checking if the file already exists in cache and is up to date.
        """
        # Access to a protected member _cache of a client class
        # pylint: disable=W0212
        treadmill.zkutils.get.return_value = {}
        treadmill.zkutils.get_with_metadata.return_value = (
            {}, mock.Mock(ctime=1000)
        )
        zkclient = kazoo.client.KazooClient()
        # File doesn't exist.
        os.stat.side_effect = FileNotFoundError
        self.evmgr._cache(zkclient, 'foo#001', check_existing=True)
        treadmill.fs.write_safe.assert_called()
        # File is up to date (cache ctime newer than placement ctime).
        treadmill.fs.write_safe.reset_mock()
        os.stat.side_effect = None
        os.stat.return_value = mock.Mock(st_ctime=2)
        self.evmgr._cache(zkclient, 'foo#001', check_existing=True)
        treadmill.fs.write_safe.assert_not_called()
        # File is out of date.
        treadmill.fs.write_safe.reset_mock()
        os.stat.return_value = mock.Mock(st_ctime=0)
        self.evmgr._cache(zkclient, 'foo#001', check_existing=True)
        treadmill.fs.write_safe.assert_called()
    @mock.patch('glob.glob', mock.Mock())
    @mock.patch('treadmill.eventmgr.EventMgr._cache', mock.Mock())
    def test__synchronize(self):
        """Check that app events are synchronized properly."""
        # Access to a protected member _synchronize of a client class
        # pylint: disable=W0212
        existing_apps = []
        glob.glob.return_value = (app for app in existing_apps)
        zkclient = kazoo.client.KazooClient()
        self.evmgr._synchronize(zkclient, ['foo#001'])
        # cache should have been called with 'foo' app
        treadmill.eventmgr.EventMgr._cache.assert_called_with(
            zkclient, 'foo#001')
    @mock.patch('glob.glob', mock.Mock())
    @mock.patch('os.unlink', mock.Mock())
    @mock.patch('treadmill.eventmgr.EventMgr._cache', mock.Mock())
    def test__synchronize_empty(self):
        """Check synchronized properly remove extra apps."""
        # Access to a protected member _synchronize of a client class
        # pylint: disable=W0212
        existing_apps = ['proid.app#0', 'proid.app#1', 'proid.app#2']
        glob.glob.return_value = (app for app in existing_apps)
        zkclient = kazoo.client.KazooClient()
        self.evmgr._synchronize(zkclient, [])
        # Every stale cache entry is unlinked, and nothing is re-cached.
        os.unlink.assert_has_calls(
            [
                mock.call(os.path.join(self.cache, app))
                for app in existing_apps
            ],
            any_order=True
        )
        self.assertFalse(treadmill.eventmgr.EventMgr._cache.called)
    @mock.patch('kazoo.client.KazooClient.get', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.exists', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.get_children', mock.Mock())
    def test_cache_placement_data(self):
        """Test sync of placement data.
        """
        # Access to a protected member _synchronize of a client class
        # pylint: disable=W0212
        zk_content = {
            'placement': {
                'test.xx.com': {
                    '.data': """
                        state: up
                        since: 100
                    """,
                    'xxx.app1#1234': {
                        '.data': '{identity: 1}\n',
                    },
                }
            },
            'scheduled': {
                'xxx.app1#1234': {
                    'affinity': 'app1',
                    'memory': '1G',
                    'disk': '1G',
                    'cpu': '100%',
                    'identity_group': 'xxx.app1',
                },
            }
        }
        self.make_mock_zk(zk_content)
        zkclient = kazoo.client.KazooClient()
        self.evmgr._hostname = 'test.xx.com'
        self.evmgr._cache(zkclient, 'xxx.app1#1234')
        # Placement data (identity) must be merged into the cached manifest.
        appcache = os.path.join(self.cache, 'xxx.app1#1234')
        self.assertTrue(os.path.exists(appcache))
        with io.open(appcache) as f:
            data = yaml.load(stream=f)
            self.assertEqual(data['identity'], 1)
    def test__cache_notify(self):
        """Test sending a cache status notification event."""
        # Access to a protected member _cache_notify of a client class
        # pylint: disable=W0212
        ready_file = os.path.join(self.cache, '.ready')
        self.evmgr._cache_notify(True)
        self.assertTrue(os.path.exists(ready_file))
        self.evmgr._cache_notify(False)
        self.assertFalse(os.path.exists(ready_file))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "6359e03314f34a65f869e218cbf847f4",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 79,
"avg_line_length": 34.01714285714286,
"alnum_prop": 0.604821098605745,
"repo_name": "ceache/treadmill",
"id": "62ffd18ffd4f8cf5d0a82cb3960cc9dca2dd63f0",
"size": "11906",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python/treadmill/tests/eventmgr_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3750"
},
{
"name": "Python",
"bytes": "3362298"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "51646"
}
],
"symlink_target": ""
} |
import distutils.spawn
import traceback
import os
import shutil
import subprocess
import re
import sys
from ansible import errors
from ansible.callbacks import vvv
import ansible.constants as C
from ansible.runner.connection_plugins.ssh import Connection as SSHConn
class Connection(object):
''' jail-over-ssh based connections '''
def match_jail(self):
if self.jid == None:
code, _, stdout, stderr = self._exec_command("jls -q jid name host.hostname path")
if code != 0:
vvv("JLS stdout: %s" % stdout)
raise errors.AnsibleError("jls returned non-zero!")
lines = stdout.strip().split('\n')
found = False
for line in lines:
if line.strip() == '':
break
jid, name, hostname, path = line.strip().split()
if name == self.jailspec or hostname == self.jailspec:
self.jid = jid
self.jname = name
self.jhost = hostname
self.jpath = path
found = True
break
if not found:
raise errors.AnsibleError("failed to find a jail with name or hostname of '%s'" % self.jailspec)
    def get_jail_path(self):
        # Root filesystem path of the jail on the jailhost (resolved lazily).
        self.match_jail()
        return self.jpath
    def get_jail_id(self):
        # Numeric jail id as reported by jls (resolved lazily).
        self.match_jail()
        return self.jid
def get_tmp_file(self):
code, _, stdout, stderr = self._exec_command('mktemp', '', None)
return stdout.strip().split('\n')[-1]
    def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
        '''Set up a jail connection; host is "<jail>@<jailhost>".

        port/user/password/private_key_file apply to the SSH hop to the
        jailhost, not to the jail itself.
        '''
        # my-jail@my.jailhost => my-jail is jail name/hostname, my.jailhost is jailhost hostname
        self.host = host
        self.jailspec, self.jailhost = host.split('@',1)
        # piggyback off of the standard SSH connection
        self.runner = runner
        self.has_pipelining = False
        self.ssh = SSHConn(runner, self.jailhost, port, user, password, private_key_file, *args)
        # jail information loaded on first use by match_jail
        self.jid = None
        self.jname = None
        self.jhost = None
        self.jpath = None
def connect(self, port=None):
self.ssh.connect();
return self
    # runs a command on the jailhost, rather than inside the jail
    def _exec_command(self, cmd, tmp_path='', become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
        # Used for jls, mktemp and the temp-file shuffling in put/fetch.
        return self.ssh.exec_command(cmd, tmp_path, become_user, sudoable, executable, in_data)
def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command in the jail '''
if executable:
cmd = ' '.join([executable, '-c', '"%s"' % cmd])
local_cmd = 'which -s jailme && jailme %s %s || jexec %s %s' % (
self.get_jail_id(), cmd,
self.get_jail_id(), cmd
)
vvv("JAIL (%s) %s" % (become_user, local_cmd), host=self.host)
return self._exec_command(local_cmd, tmp_path, become_user, True, executable, in_data)
def _normalize_path(self, path, prefix):
if not path.startswith(os.path.sep):
path = os.path.join(os.path.sep, path)
normpath = os.path.normpath(path)
return os.path.join(prefix, normpath[1:])
def put_file(self, in_path, out_path):
''' transfer a file from local to remote jail '''
tmp = self.get_tmp_file()
self.ssh.put_file(in_path, tmp)
out_path = self._normalize_path(out_path, self.get_jail_path())
code, _, stdout, stderr = self._exec_command(' '.join(['chmod 0644',tmp]))
if code != 0:
raise errors.AnsibleError("failed to make temp file %s world readable:\n%s\n%s" % (tmp, stdout, stderr))
code, _, stdout, stderr = self._exec_command(' '.join(['cp',tmp,out_path]), '', self.runner.become_user, True)
if code != 0:
raise errors.AnsibleError("failed to move file from %s to %s:\n%s\n%s" % (tmp, out_path, stdout, stderr))
code, _, stdout, stderr = self._exec_command(' '.join(['rm',tmp]))
if code != 0:
raise errors.AnsibleError("failed to remove temp file %s:\n%s\n%s" % (tmp, stdout, stderr))
def fetch_file(self, in_path, out_path):
''' fetch a file from remote jail to local '''
tmp = self.get_tmp_file()
in_path = self._normalize_path(in_path, self.get_jail_path())
self._exec_command(' '.join(['mv',in_path,tmp]), '', self.juser, True)
self.ssh.fetch_file(tmp, out_path)
def close(self):
''' terminate the connection; nothing to do here '''
pass
| {
"content_hash": "198c3f84c34513bc2a877e3d4047bea6",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 118,
"avg_line_length": 37.88095238095238,
"alnum_prop": 0.57950974230044,
"repo_name": "nkiraly/ansible-sshjail",
"id": "81b1c1470993da7fdb41a7d751a978995823d82f",
"size": "4773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sshjail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4773"
}
],
"symlink_target": ""
} |
import copy
import random
from core import actor, bullet, particle
from core.actor import *
from ui import text, infobubble
from utils import utility
def load_data():
    """Populate Player's class-level animation and sound banks.

    Must run once, after the asset subsystem is ready and before any
    Player instance is created; the Player class attributes below are
    shared by all instances.
    """
    animation_frames = (
        ('Idle', ['kuunIdle']),
        ('Fire', ['kuunShoot']),
        ('HurtIdle', ['kuunIdle', 'blank']),
        ('HurtFire', ['kuunShoot', 'blank']),
        ('Die', ['kuunDie']),
    )
    for animation_name, frames in animation_frames:
        Player.master_animation_list.build_animation(animation_name, frames)

    Player.NUM_OW_SOUNDS = 2  # zero-based index of last "ow" sound (3 total)
    for index in (1, 2, 3):
        Player.lose_life_sound.append(utility.load_sound('ow' + str(index)))

    Player.NUM_FIRE_SOUNDS = 2  # zero-based index of last shot sound (3 total)
    for index in (1, 2, 3):
        Player.fire_sound.append(utility.load_sound('shot' + str(index)))

    for index in (1, 2, 3):
        Player.death_sound.append(utility.load_sound('playerDeath' + str(index)))

    Player.extraLifeSound = utility.load_sound('extraLife')
class Player(actor.Actor):
    """The player-controlled character sprite.

    Class attributes below are shared asset banks populated once by
    ``load_data()``; they must be filled before the first instance is
    created.
    """

    death_sound = []
    fire_sound = []
    lose_life_sound = []
    master_animation_list = animation.Animation()

    def __init__(self, bullet_group, effects_group, life_board, score_board):
        """Create the player.

        bullet_group / effects_group: sprite groups that receive the
        player's bullets, info bubbles and particles.
        life_board / score_board: HUD text widgets kept in sync with
        the player's lives and score.
        """
        actor.Actor.__init__(self)
        # COMMON VARIABLES
        self.actor_type = ACTOR_PLAYER
        self.animation_list = copy.copy(self.master_animation_list)
        self.animation_list.set_parent(self)
        self.animation_list.play('Idle')
        self.rect = self.image.get_rect()
        self.bound_style = BOUND_STYLE_REFLECT
        self.bounds = 0 + 46, 0 + 60, SCREEN_WIDTH - 46, SCREEN_HEIGHT - 32
        self.can_collide = True
        self.hitrect = pygame.Rect(0, 0, 80, 90)
        self.position = vector.Vector2d((SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4))
        self.velocity = vector.Vector2d.zero
        # UNIQUE VARIABLES
        self.bullet_speed = BULLET_SPEED
        self.default_fire_timer = 2
        self.reset_fire_timer = self.default_fire_timer
        self.fire_timer = self.reset_fire_timer
        self.max_speed = 54
        self.hitrect_offset_y = -15
        self.score = 0
        self.lives = 3
        self.stun_timer = 0
        self.life_board = life_board
        self.score_board = score_board
        self.life_board.set_text('x' + str(self.lives))
        self.score_board.set_text(self.score)
        self.next_bonus = 50000  # score at which the next extra life is awarded
        self.dying = 0
        self.dead = False
        # BONUS VARIABLES (frame-countdown timers; 0 means inactive)
        self.damage_bonus = 0
        self.bullet_bonus = 0
        self.reflect_bonus = 0
        self.dual_shot = 0
        self.fast_shot = 0
        self.point_bonus = 0
        self.combo_bonus = 0
        self.combo_kills = 0
        # BULLET VARIABLES
        self.bullet_damage = 1
        self.bullet_bound_style = BOUND_STYLE_KILL
        self.bullet_collide_style = COLLIDE_STYLE_HURT
        self.bullet_group = bullet_group
        self.effects_group = effects_group
        # SOUND VARIABLES
        self.current_sound = 0

    def actor_update(self):
        """Per-frame update: death handling, bonus timers, combo payout,
        velocity damping and idle animation state."""
        if self.lives <= 0:
            self.active = False
            # drift downward while the death sequence plays
            # (subtracting a negative y adds to it)
            self.velocity -= vector.Vector2d(0.0, -0.3)
            self.die()
            return
        # revert expired bonuses to their defaults
        if not self.damage_bonus:
            self.bullet_damage = 1
        if not self.reflect_bonus:
            self.bullet_bound_style = BOUND_STYLE_KILL
            self.bullet_collide_style = COLLIDE_STYLE_HURT
        if not self.fast_shot:
            self.reset_fire_timer = self.default_fire_timer
        # tick down all active bonus/stun timers
        if self.point_bonus:
            self.point_bonus -= 1
        if self.damage_bonus:
            self.damage_bonus -= 1
        if self.reflect_bonus:
            self.reflect_bonus -= 1
        if self.dual_shot:
            self.dual_shot -= 1
        if self.stun_timer:
            self.stun_timer -= 1
        if self.fast_shot:
            self.fast_shot -= 1
        if self.combo_bonus:
            self.combo_bonus -= 1
            if not self.combo_bonus:
                # combo window just expired: pay out escalating bonus points
                combo_counter = 0
                bonus_points = 0
                while combo_counter <= self.combo_kills:
                    combo_counter += 1
                    bonus_points += combo_counter * 25
                self.increment_score_no_text(bonus_points)
                temp_image = text.TextSurface(FONT_PATH, 30, FONT_COLOR, 'Combo Points:' + str(bonus_points) + '!').image
                help_bubble = infobubble.InfoBubble(temp_image, self, 1.5 * FRAMES_PER_SECOND)
                help_bubble.offset = vector.Vector2d(0.0, -100.0)
                self.bullet_group.add(help_bubble)
                self.combo_kills = 0
        self.fire_timer -= 1
        self.velocity *= .95  # friction
        if not self.active:
            self.active = True
        if not self.fire_timer:
            self.animation_list.stop('Idle', self.animation_list.current_frame)
            if self.stun_timer:
                self.animation_list.play('HurtIdle', self.animation_list.current_frame)

    def die(self):
        """Run the death sequence: one random death bark and bubble on the
        first call, then smoke puffs while drifting off-screen."""
        if self.dying == 0:
            death_type = int(random.random() * 3)
            if death_type == 0:
                temp_image = text.TextSurface(FONT_PATH, 30, FONT_COLOR, 'Blast!').image
                utility.play_sound(self.death_sound[0], OW_CHANNEL)
            elif death_type == 1:
                temp_image = text.TextSurface(FONT_PATH, 30, FONT_COLOR, 'Oh No!').image
                utility.play_sound(self.death_sound[1], OW_CHANNEL)
            elif death_type == 2:
                temp_image = text.TextSurface(FONT_PATH, 30, FONT_COLOR, 'Bother!').image
                utility.play_sound(self.death_sound[2], OW_CHANNEL)
            self.animation_list.play('Die')
            # widen the bounds so the body can leave the screen; custom_bounds
            # fires when it finally crosses them
            self.bounds = -1000, -1000, SCREEN_WIDTH + 1000, SCREEN_HEIGHT + 32
            self.bound_style = BOUND_STYLE_CUSTOM
            help_bubble = infobubble.InfoBubble(temp_image, self, 5 * FRAMES_PER_SECOND)
            help_bubble.offset = vector.Vector2d(0.0, -100.0)
            self.bullet_group.add(help_bubble)
        self.dying += 1
        if settings_list[PARTICLES] and not self.dying % 2:
            puffs_to_create = 4
            while puffs_to_create:
                puffs_to_create -= 1
                temp_puff = particle.SmokeParticle(self.position, (1, 0))
                temp_puff.velocity.set_angle(359 * random.random())
                self.effects_group.add(temp_puff)

    def custom_bounds(self):
        """Called when the dying player leaves the widened bounds."""
        self.dead = True

    def hurt(self, value):
        """Take *value* damage unless invulnerable (stun timer running)."""
        if self.stun_timer <= 0:
            self.animation_list.play('HurtIdle', self.animation_list.current_frame)
            self.lives -= value
            sound_to_play = random.randint(0, 2)
            if self.lives != 0:
                utility.play_sound(self.lose_life_sound[sound_to_play], OW_CHANNEL)
            self.life_board.set_text('x' + str(self.lives))
            self.stun_timer = 1.5 * FRAMES_PER_SECOND  # invulnerability window

    def increment_score_no_text(self, value):
        """Add *value* to the score with no floating score text,
        awarding an extra life at each 50000-point threshold."""
        self.score += value
        self.score_board.set_text(self.score)
        # BUGFIX: was '>', which skipped the extra life when the score
        # landed exactly on the threshold; increment_score() uses '>='.
        if self.score >= self.next_bonus:
            utility.play_sound(self.extraLifeSound, OW_CHANNEL)
            temp_image = text.TextSurface(FONT_PATH, 30, FONT_COLOR, 'Extra Life!').image
            help_bubble = infobubble.InfoBubble(temp_image, self, 1.5 * FRAMES_PER_SECOND)
            help_bubble.offset = vector.Vector2d(0.0, -100.0)
            self.effects_group.add(help_bubble)
            self.lives += 1
            self.life_board.set_text('x' + str(self.lives))
            self.next_bonus += 50000

    def increment_score(self, value, textPosition, text_group):
        """Add *value* to the score, showing floating score text at
        *textPosition*; extends an active combo for small kills and
        awards extra lives at 50000-point thresholds."""
        if self.combo_bonus and value <= 250:
            self.combo_bonus += int(.2 * FRAMES_PER_SECOND)
            self.combo_kills += 1
            # NOTE(review): other bubbles use text.TextSurface(...).image;
            # confirm text.Text here is intentional and has an .image attr.
            temp_image = text.Text(FONT_PATH, 30, FONT_COLOR, 'x' + str(self.combo_kills) + '!').image
            help_bubble = infobubble.InfoBubble(temp_image, self, 0.5 * FRAMES_PER_SECOND)
            help_bubble.offset = vector.Vector2d(0.0, -100.0)
            self.bullet_group.add(help_bubble)
        if self.point_bonus:
            value *= 2
        temp_text = text.Text(FONT_PATH, 36, FONT_COLOR, str(value), 15)
        temp_text.set_alignment(CENTER_MIDDLE)
        temp_text.position = vector.Vector2d(textPosition)
        text_group.add(temp_text)
        self.score += value
        self.score_board.set_text(self.score)
        if self.score >= self.next_bonus:
            utility.play_sound(self.extraLifeSound, OW_CHANNEL)
            temp_image = text.TextSurface(FONT_PATH, 30, FONT_COLOR, 'Extra Life!').image
            help_bubble = infobubble.InfoBubble(temp_image, self, 1.5 * FRAMES_PER_SECOND)
            help_bubble.offset = vector.Vector2d(0.0, -100.0)
            text_group.add(help_bubble)
            self.lives += 1
            self.life_board.set_text('x' + str(self.lives))
            self.next_bonus += 50000

    def fire(self):
        """Fire a bullet along the current velocity direction (plus a
        rear bullet while the dual-shot bonus is active), rate-limited
        by fire_timer."""
        if self.stun_timer:
            self.animation_list.play('HurtFire', self.animation_list.current_frame)
        else:
            self.animation_list.play('Fire')
        if self.fire_timer <= 0:
            utility.play_sound(self.fire_sound[random.randint(0, 2)], PLAYER_CHANNEL)
            # only fire while moving: the bullet inherits the travel direction
            if self.velocity[:] != vector.Vector2d.zero:
                bullet_velocity = vector.Vector2d(self.velocity)
                bullet_velocity.set_magnitude(self.bullet_speed)
                new_bullet = bullet.Bullet(self.position,
                                           bullet_velocity,
                                           self.effects_group,
                                           self.bullet_damage,
                                           self.bullet_bound_style,
                                           self.bullet_collide_style)
                new_bullet.set_owner(self)
                if self.reflect_bonus and self.damage_bonus:
                    new_bullet.animation_list.play('DamageReflect')
                elif self.bullet_collide_style == COLLIDE_STYLE_REFLECT:
                    new_bullet.animation_list.play('Reflect')
                elif self.bullet_damage > 1:
                    new_bullet.animation_list.play('Damage')
                self.bullet_group.add(new_bullet)
                self.fire_timer = self.reset_fire_timer
            if self.dual_shot:
                # NOTE(review): this branch tests `self.velocity` truthiness
                # while the one above compares slices to Vector2d.zero;
                # confirm both checks are equivalent for Vector2d.
                if self.velocity:
                    bullet_velocity = vector.Vector2d(self.velocity * -1)
                    bullet_velocity.set_magnitude(self.bullet_speed)
                    new_bullet = bullet.Bullet((self.position),
                                               (bullet_velocity),
                                               self.effects_group,
                                               self.bullet_damage,
                                               self.bullet_bound_style,
                                               self.bullet_collide_style)
                    new_bullet.set_owner(self)
                    if self.reflect_bonus and self.damage_bonus:
                        new_bullet.animation_list.play('DamageReflect')
                    elif self.bullet_collide_style == COLLIDE_STYLE_REFLECT:
                        new_bullet.animation_list.play('Reflect')
                    elif self.bullet_damage > 1:
                        new_bullet.animation_list.play('Damage')
                    self.bullet_group.add(new_bullet)

    def set_velocity(self, new_velocity):
        """Set the velocity, clamped to max_speed."""
        self.velocity = new_velocity
        if new_velocity.get_magnitude() > self.max_speed:
            self.velocity.set_magnitude(self.max_speed)
| {
"content_hash": "45a4965a4536df643be5afbdc2d82639",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 121,
"avg_line_length": 38.46984126984127,
"alnum_prop": 0.5593332233041756,
"repo_name": "JSkelly/TroubleInCloudLand",
"id": "4a0bb7e9be01bbacf756d56caa45d02afb03c8ea",
"size": "12118",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "core/player.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "269961"
},
{
"name": "Shell",
"bytes": "309"
}
],
"symlink_target": ""
} |
import sys
from maya import OpenMaya, OpenMayaMPx
from murl import MURL
_CACHE = {}
class MUrlResolver(OpenMayaMPx.MPxFileResolver):
    """Maya file resolver for "astips" URIs, backed by MURL lookups
    with a module-level cache keyed by the URI string."""

    fileResolverName = "astipsFileResolver"
    uriSchemeName = "astips"

    def decode(self, uriValue):
        """Translate *uriValue* to a real filesystem path, caching the
        result so each distinct URI string is resolved only once."""
        key = uriValue.asString()
        try:
            return _CACHE[key]
        except KeyError:
            resolved = MURL(uriValue).real_path
            _CACHE[key] = resolved
            return resolved

    def resolveURI(self, uriValue, mode, ReturnStatus=None):
        return self.decode(uriValue)

    def resolveURIWithContext(self, uriValue, mode, contextNodeFullName, ReturnStatus=None):
        return self.decode(uriValue)

    @classmethod
    def className(cls):
        return cls.__name__

    def resolverName(self):
        return self.fileResolverName

    def uriScheme(self):
        return self.uriSchemeName

    @classmethod
    def resolverCreator(cls):
        return cls()
def initializePlugin(mobject):
    """Maya plug-in entry point: register the astips URI file resolver."""
    plugin = OpenMayaMPx.MFnPlugin(mobject, "astips", "1.0")
    try:
        plugin.registerURIFileResolver(
            MUrlResolver.fileResolverName,
            MUrlResolver.uriSchemeName,
            MUrlResolver.resolverCreator,
        )
    except:
        sys.stderr.write("Error loading")
        raise
def uninitializePlugin(mobject):
    """Maya plug-in exit point: deregister the astips URI file resolver."""
    plugin = OpenMayaMPx.MFnPlugin(mobject)
    try:
        plugin.deregisterURIFileResolver(MUrlResolver.fileResolverName)
    except:
        sys.stderr.write("Error removing")
        raise
| {
"content_hash": "a8b7fe04db9b8fbfce5d2f0c39b85f22",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 92,
"avg_line_length": 26.864406779661017,
"alnum_prop": 0.632807570977918,
"repo_name": "astips/tk-astips-app-url-resolver",
"id": "9e7f400a6a49811287b548f975cd9d5aecd08365",
"size": "1936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "studio/maya/plugins/murlResolver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "34474"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import ujson
from typing import Any, Dict, List
from six import string_types
from zerver.lib.test_helpers import tornado_redirected_to_list, get_display_recipient
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import get_user_profile_by_email
class ReactionEmojiTest(ZulipTestCase):
    """Validation of the `emoji` parameter on POST /api/v1/reactions."""

    def test_missing_emoji(self):
        # type: () -> None
        """Omitting the emoji argument entirely is rejected."""
        auth = self.api_auth('hamlet@zulip.com')
        response = self.client_post('/api/v1/reactions', {'message_id': 1}, **auth)
        self.assert_json_error(response, "Missing 'emoji' argument")

    def test_empty_emoji(self):
        # type: () -> None
        """An empty emoji name is rejected."""
        auth = self.api_auth('hamlet@zulip.com')
        response = self.client_post('/api/v1/reactions',
                                    {'message_id': 1, 'emoji': ''}, **auth)
        self.assert_json_error(response, "Emoji '' does not exist")

    def test_invalid_emoji(self):
        # type: () -> None
        """An unknown emoji name is rejected."""
        auth = self.api_auth('hamlet@zulip.com')
        response = self.client_post('/api/v1/reactions',
                                    {'message_id': 1, 'emoji': 'foo'}, **auth)
        self.assert_json_error(response, "Emoji 'foo' does not exist")

    def test_valid_emoji(self):
        # type: () -> None
        """Reacting with a standard emoji succeeds."""
        auth = self.api_auth('hamlet@zulip.com')
        response = self.client_post('/api/v1/reactions',
                                    {'message_id': 1, 'emoji': 'smile'}, **auth)
        self.assert_json_success(response)
        self.assertEqual(200, response.status_code)

    def test_valid_realm_emoji(self):
        # type: () -> None
        """A realm emoji registered on the fly can be used in a reaction."""
        auth = self.api_auth('hamlet@zulip.com')
        emoji_name = 'my_emoji'

        # register a new realm emoji
        response = self.client_put('/json/realm/emoji',
                                   info={'name': emoji_name,
                                         'url': 'https://example.com/my_emoji'},
                                   **auth)
        self.assert_json_success(response)
        self.assertEqual(200, response.status_code)

        # confirm it shows up in the realm emoji listing
        response = self.client_get("/json/realm/emoji", **auth)
        content = ujson.loads(response.content)
        self.assert_json_success(response)
        self.assertTrue(emoji_name in content["emoji"])

        # and that it is accepted as a reaction
        response = self.client_post('/api/v1/reactions',
                                    {'message_id': 1, 'emoji': emoji_name},
                                    **auth)
        self.assert_json_success(response)
class ReactionMessageIDTest(ZulipTestCase):
    """Validation of the `message_id` parameter on POST /api/v1/reactions."""

    def test_missing_message_id(self):
        # type: () -> None
        """Omitting message_id entirely is rejected."""
        auth = self.api_auth('hamlet@zulip.com')
        response = self.client_post('/api/v1/reactions', {'emoji': 'smile'}, **auth)
        self.assert_json_error(response, "Missing 'message_id' argument")

    def test_invalid_message_id(self):
        # type: () -> None
        """A negative message id is rejected."""
        auth = self.api_auth('hamlet@zulip.com')
        message_id = -1
        response = self.client_post('/api/v1/reactions',
                                    {'message_id': message_id, 'emoji': 'smile'},
                                    **auth)
        self.assert_json_error(response, "Bad value for 'message_id': " + str(message_id))

    def test_inaccessible_message_id(self):
        # type: () -> None
        """Reacting to a message the user cannot read (e.g. someone
        else's private message) is rejected."""
        pm_sender = 'hamlet@zulip.com'
        pm_recipient = 'othello@zulip.com'
        reaction_sender = 'iago@zulip.com'

        # set up a private message the reaction sender is not party to
        response = self.client_post("/api/v1/messages",
                                    {"type": "private",
                                     "content": "Test message",
                                     "to": pm_recipient},
                                    **self.api_auth(pm_sender))
        self.assert_json_success(response)
        pm_id = ujson.loads(response.content)['id']

        response = self.client_post('/api/v1/reactions',
                                    {'message_id': pm_id, 'emoji': 'smile'},
                                    **self.api_auth(reaction_sender))
        self.assert_json_error(response, "Invalid message(s)")
class ReactionTest(ZulipTestCase):
    """Duplicate handling when adding and removing reactions."""
    def test_add_existing_reaction(self):
        # type: () -> None
        """
        Creating the same reaction twice fails
        """
        pm_sender = 'hamlet@zulip.com'
        pm_recipient = 'othello@zulip.com'
        reaction_sender = pm_recipient
        # Set up a private message to react to.
        pm = self.client_post("/api/v1/messages", {"type": "private",
                                                   "content": "Test message",
                                                   "to": pm_recipient},
                              **self.api_auth(pm_sender))
        self.assert_json_success(pm)
        content = ujson.loads(pm.content)
        pm_id = content['id']
        # First reaction succeeds ...
        first = self.client_post('/api/v1/reactions', {'message_id': pm_id,
                                                       'emoji': 'smile'},
                                 **self.api_auth(reaction_sender))
        self.assert_json_success(first)
        # ... an identical second one is rejected.
        second = self.client_post('/api/v1/reactions', {'message_id': pm_id,
                                                        'emoji': 'smile'},
                                  **self.api_auth(reaction_sender))
        self.assert_json_error(second, "Reaction already exists")
    def test_remove_nonexisting_reaction(self):
        # type: () -> None
        """
        Removing a reaction twice fails
        """
        pm_sender = 'hamlet@zulip.com'
        pm_recipient = 'othello@zulip.com'
        reaction_sender = pm_recipient
        # Set up a private message and add one reaction to it.
        pm = self.client_post("/api/v1/messages", {"type": "private",
                                                   "content": "Test message",
                                                   "to": pm_recipient},
                              **self.api_auth(pm_sender))
        self.assert_json_success(pm)
        content = ujson.loads(pm.content)
        pm_id = content['id']
        add = self.client_post('/api/v1/reactions', {'message_id': pm_id,
                                                     'emoji': 'smile'},
                               **self.api_auth(reaction_sender))
        self.assert_json_success(add)
        # First removal succeeds ...
        first = self.client_delete('/api/v1/reactions', {'message_id': pm_id,
                                                         'emoji': 'smile'},
                                   **self.api_auth(reaction_sender))
        self.assert_json_success(first)
        # ... removing it again is rejected.
        second = self.client_delete('/api/v1/reactions', {'message_id': pm_id,
                                                          'emoji': 'smile'},
                                    **self.api_auth(reaction_sender))
        self.assert_json_error(second, "Reaction does not exist")
class ReactionEventTest(ZulipTestCase):
    """Events delivered to message recipients when reactions change."""
    def test_add_event(self):
        # type: () -> None
        """
        Recipients of the message receive the reaction event
        and event contains relevant data
        """
        pm_sender = 'hamlet@zulip.com'
        pm_recipient = 'othello@zulip.com'
        reaction_sender = pm_recipient
        # Set up a private message to react to.
        result = self.client_post("/api/v1/messages", {"type": "private",
                                                       "content": "Test message",
                                                       "to": pm_recipient},
                                  **self.api_auth(pm_sender))
        self.assert_json_success(result)
        content = ujson.loads(result.content)
        pm_id = content['id']
        # Both PM parties should be notified of the reaction.
        expected_recipient_emails = set([pm_sender, pm_recipient])
        expected_recipient_ids = set([get_user_profile_by_email(email).id for email in expected_recipient_emails])
        events = []  # type: List[Dict[str, Any]]
        # Capture the events that would have been sent through Tornado.
        with tornado_redirected_to_list(events):
            result = self.client_post('/api/v1/reactions', {'message_id': pm_id,
                                                            'emoji': 'smile'},
                                      **self.api_auth(reaction_sender))
            self.assert_json_success(result)
        self.assertEqual(len(events), 1)
        event = events[0]['event']
        event_user_ids = set(events[0]['users'])
        # Verify audience and payload of the 'add' event.
        self.assertEqual(expected_recipient_ids, event_user_ids)
        self.assertEqual(event['user']['email'], reaction_sender)
        self.assertEqual(event['type'], 'reaction')
        self.assertEqual(event['op'], 'add')
        self.assertEqual(event['emoji_name'], 'smile')
        self.assertEqual(event['message_id'], pm_id)
    def test_remove_event(self):
        # type: () -> None
        """
        Recipients of the message receive the reaction event
        and event contains relevant data
        """
        pm_sender = 'hamlet@zulip.com'
        pm_recipient = 'othello@zulip.com'
        reaction_sender = pm_recipient
        # Set up a private message with an existing reaction.
        result = self.client_post("/api/v1/messages", {"type": "private",
                                                       "content": "Test message",
                                                       "to": pm_recipient},
                                  **self.api_auth(pm_sender))
        self.assert_json_success(result)
        content = ujson.loads(result.content)
        pm_id = content['id']
        # Both PM parties should be notified of the removal.
        expected_recipient_emails = set([pm_sender, pm_recipient])
        expected_recipient_ids = set([get_user_profile_by_email(email).id for email in expected_recipient_emails])
        add = self.client_post('/api/v1/reactions', {'message_id': pm_id,
                                                     'emoji': 'smile'},
                               **self.api_auth(reaction_sender))
        self.assert_json_success(add)
        events = []  # type: List[Dict[str, Any]]
        # Capture the events that would have been sent through Tornado.
        with tornado_redirected_to_list(events):
            result = self.client_delete('/api/v1/reactions', {'message_id': pm_id,
                                                              'emoji': 'smile'},
                                        **self.api_auth(reaction_sender))
            self.assert_json_success(result)
        self.assertEqual(len(events), 1)
        event = events[0]['event']
        event_user_ids = set(events[0]['users'])
        # Verify audience and payload of the 'remove' event.
        self.assertEqual(expected_recipient_ids, event_user_ids)
        self.assertEqual(event['user']['email'], reaction_sender)
        self.assertEqual(event['type'], 'reaction')
        self.assertEqual(event['op'], 'remove')
        self.assertEqual(event['emoji_name'], 'smile')
        self.assertEqual(event['message_id'], pm_id)
| {
"content_hash": "f9f02645e153d730eb7008a3e87ce13a",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 114,
"avg_line_length": 42.96498054474708,
"alnum_prop": 0.5134939322586488,
"repo_name": "peguin40/zulip",
"id": "9213ede384ff04c79d23f89c2b9286bc84e64288",
"size": "11066",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/tests/test_reactions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "242820"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "457813"
},
{
"name": "JavaScript",
"bytes": "1393501"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "82466"
},
{
"name": "Python",
"bytes": "2991159"
},
{
"name": "Ruby",
"bytes": "249748"
},
{
"name": "Shell",
"bytes": "37195"
}
],
"symlink_target": ""
} |
from flask import Blueprint

# Blueprint that all API view modules below attach their routes to.
api = Blueprint('api', __name__)

###################################
# Add api views here:
###################################
# NOTE(review): these imports are for their side effects only -- each module
# presumably registers its routes on the `api` blueprint above (verify).
# They use Python 2 implicit relative imports; under Python 3 they would
# need to be `from . import auth`, etc.
import auth
import users
import organizations
import jobs
import timesheets
import export
"content_hash": "558039625ebc5176b65282ccb2df27e6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 35,
"avg_line_length": 16.533333333333335,
"alnum_prop": 0.5483870967741935,
"repo_name": "krrg/gnomon",
"id": "fe67c761bfb9e109bc6f8458c78350a38bf53321",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views/api/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3175"
},
{
"name": "JavaScript",
"bytes": "17605"
},
{
"name": "Python",
"bytes": "44037"
}
],
"symlink_target": ""
} |
from ..api import narrow_buttons, wide_buttons
from django import template

# Module-level registry Django requires for `{% load simplesocial %}`
# to expose the tags defined in this module.
register = template.Library()
@register.simple_tag
def wide_social_buttons(request, title, url):
    """Template tag: delegate to ``wide_buttons`` from ``..api`` for the
    wide social-sharing button markup for *url* titled *title*."""
    return wide_buttons(request, title, url)
@register.simple_tag
def narrow_social_buttons(request, title, url):
    """Template tag: delegate to ``narrow_buttons`` from ``..api`` for the
    narrow social-sharing button markup for *url* titled *title*."""
    return narrow_buttons(request, title, url)
| {
"content_hash": "eef2fcfe9a8f2d43f03e2b55f67c8ca6",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 47,
"avg_line_length": 28,
"alnum_prop": 0.7619047619047619,
"repo_name": "umitproject/tease-o-matic",
"id": "b2bcfc50256a8feb211b2ef19b04acdb07816829",
"size": "336",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "simplesocial/templatetags/simplesocial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "107814"
},
{
"name": "Python",
"bytes": "6962452"
},
{
"name": "Ruby",
"bytes": "1987"
}
],
"symlink_target": ""
} |
from neuralyzer.io.loader import LoaderTemplate
FILE_EXTENSIONS = ('txt', )
class Loader(LoaderTemplate):
    """Loader plugin for plain-text (.txt) data files."""

    @staticmethod
    def get_data(filepath):
        """Load and return the data contained in *filepath*.

        Any exception from the underlying reader propagates to the
        caller.  (The previous ``try: ... except: raise`` wrapper was a
        no-op and has been removed.)
        """
        return get_txt_data(filepath)
# reading files
# -----------------------------------------------------------------------------
def get_txt_data(filepath, splitter='\r\n', dtype=float):
    """Read *filepath* and return its *splitter*-separated values as a
    numpy array of *dtype*; empty fields are skipped."""
    import numpy as np
    with open(filepath) as fid:
        raw = fid.read()
    values = [dtype(field) for field in raw.split(splitter) if field]
    return np.array(values)
| {
"content_hash": "569f79c4728a121184869f934d0dad10",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 79,
"avg_line_length": 25.636363636363637,
"alnum_prop": 0.5425531914893617,
"repo_name": "michigraber/neuralyzer",
"id": "a98d36a55c43117f95197c9e426be9f293b8c420",
"size": "565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neuralyzer/io/plugins/txt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88683"
},
{
"name": "Shell",
"bytes": "1210"
}
],
"symlink_target": ""
} |
import logging
_logger = logging.getLogger(__name__)
class VulnerabilitiesRiskAssessor(object):
    """Decides whether a set of vulnerabilities is an acceptable risk,
    given a whitelist of known-acceptable CVE ids and a severity cutoff."""

    def __init__(self, whitelist, vulnerabilities):
        object.__init__(self)
        self.whitelist = whitelist
        self.vulnerabilities = vulnerabilities

    def assess(self):
        """Returns ```True``` if the risk is deemed acceptable
        otherwise returns ```False```.
        """
        _logger.info('Assessment starts')
        for vuln in self.vulnerabilities:
            if not self._assess_vulnerability(vuln):
                _logger.info('Assessment ends - fail')
                return False
        _logger.info('Assessment ends - pass')
        return True

    def _assess_vulnerability(self, vulnerability):
        """Returns ```True``` if the risk is deemed acceptable for
        ```vulnerability``` otherwise returns ```False```.
        """
        _logger.info('Assessing vulnerability %s - start', vulnerability)
        if vulnerability.cve_id in self.whitelist.vulnerabilities_by_cve_id:
            # explicitly whitelisted CVEs are always acceptable
            _logger.info('Vulnerability %s in whitelist - pass', vulnerability)
            verdict = True
        elif self.whitelist.ignore_severities_at_or_below < vulnerability.severity:
            _logger.info(
                'Vulnerability %s @ severity %s greater than whitelist severity @ %s - fail',
                vulnerability,
                vulnerability.severity,
                self.whitelist.ignore_severities_at_or_below)
            verdict = False
        else:
            _logger.info(
                'Vulnerability %s @ severity %s less than or equal to whitelist severity @ %s - pass',
                vulnerability,
                vulnerability.severity,
                self.whitelist.ignore_severities_at_or_below)
            verdict = True
        _logger.info('Assessing vulnerability %s - finish', vulnerability)
        return verdict
| {
"content_hash": "d8663cc4501195a92ea3a96acb72ea2f",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 102,
"avg_line_length": 36,
"alnum_prop": 0.6013888888888889,
"repo_name": "simonsdave/clair-cicd",
"id": "cb8fdfdd12fe08f7c2bcda50248a45c2382073ea",
"size": "2160",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "clair_cicd/assessor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1236"
},
{
"name": "Python",
"bytes": "29720"
},
{
"name": "Ruby",
"bytes": "67"
},
{
"name": "Shell",
"bytes": "45896"
}
],
"symlink_target": ""
} |
"""Script for verifying Syscoin Core release binaries
This script attempts to download the signature file SHA256SUMS.asc from
syscoincore.org and syscoin.org and compares them.
It first checks if the signature passes, and then downloads the files
specified in the file, and checks if the hashes of these files match those
that are specified in the signature file.
The script returns 0 if everything passes the checks. It returns 1 if either
the signature check or the hash check doesn't pass. If an error occurs the
return value is >= 2.
"""
from hashlib import sha256
import os
import subprocess
import sys
from textwrap import indent
WORKINGDIR = "/tmp/syscoin_verify_binaries"
HASHFILE = "hashes.tmp"
HOST1="https://syscoincore.org"
HOST2="https://syscoin.org"
VERSIONPREFIX = "syscoin-core-"
SIGNATUREFILENAME = "SHA256SUMS.asc"
def parse_version_string(version_str):
    """Split a version argument into (base, rc, platform) components.

    Accepted forms, with or without the "syscoin-core-" prefix:
    "<version>", "<version>-rcN", "<version>-<platform>",
    "<version>-rcN-<platform>".  Missing components come back as "".
    """
    if version_str.startswith(VERSIONPREFIX):  # remove version prefix
        version_str = version_str[len(VERSIONPREFIX):]

    version_base, _, remainder = version_str.partition('-')
    version_rc = ""
    version_os = ""
    if remainder:
        pieces = remainder.split('-')
        if len(pieces) == 1:  # "<version>-rcN" or "<version>-platform"
            if "rc" in pieces[0]:
                version_rc = pieces[0]
            else:
                version_os = pieces[0]
        elif len(pieces) == 2:  # "<version>-rcN-platform"
            version_rc, version_os = pieces
    return version_base, version_rc, version_os
def download_with_wget(remote_file, local_file=None):
    """Fetch *remote_file* with wget.

    When *local_file* is given, save under that name (-O); otherwise rely
    on wget's timestamping (-N) to skip an up-to-date local copy.
    Returns (success, combined stdout+stderr output).
    """
    if not local_file:
        # use timestamping mechanism if local filename is not explicitly set
        wget_args = ['wget', '-N', remote_file]
    else:
        wget_args = ['wget', '-O', local_file, remote_file]
    proc = subprocess.run(wget_args,
                          stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    return proc.returncode == 0, proc.stdout.decode().rstrip()
def files_are_equal(filename1, filename2):
    """Return True iff the two files have byte-identical contents."""
    with open(filename1, 'rb') as first, open(filename2, 'rb') as second:
        return first.read() == second.read()
def verify_with_gpg(signature_filename, output_filename):
    """Check the clearsigned file's signature with gpg, writing the signed
    payload to *output_filename*.

    Returns (gpg return code, combined stdout+stderr output); a return
    code of 0 means the signature verified.
    """
    gpg_args = ['gpg', '--yes', '--decrypt', '--output',
                output_filename, signature_filename]
    proc = subprocess.run(gpg_args,
                          stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    return proc.returncode, proc.stdout.decode().rstrip()
def remove_files(filenames):
    """Delete each named file; OSError propagates if one is missing."""
    for path in filenames:
        os.remove(path)
def main(args):
    """Verify a Syscoin Core release.

    Downloads the signed SHA256 manifest from two independent hosts,
    requires them to be identical, GPG-verifies the signature, then
    downloads the listed binaries and checks their SHA256 hashes.

    args[0] is the version string; passing any second argument deletes
    the downloaded binaries after successful verification.

    Return codes: 0 success; 1 bad signature or hash mismatch; 3 missing
    version argument; 4/5 signature download failure; 6 signature files
    differ between hosts; 7 no file matched the platform filter.
    """
    # sanity check
    if len(args) < 1:
        print("Error: need to specify a version on the command line")
        return 3
    # determine remote dir dependent on provided version string
    version_base, version_rc, os_filter = parse_version_string(args[0])
    remote_dir = f"/bin/{VERSIONPREFIX}{version_base}/"
    if version_rc:
        # Release candidates are published under a "test.rcN" subdirectory.
        remote_dir += f"test.{version_rc}/"
    remote_sigfile = remote_dir + SIGNATUREFILENAME
    # create working directory
    os.makedirs(WORKINGDIR, exist_ok=True)
    os.chdir(WORKINGDIR)
    # fetch first signature file
    sigfile1 = SIGNATUREFILENAME
    success, output = download_with_wget(HOST1 + remote_sigfile, sigfile1)
    if not success:
        print("Error: couldn't fetch signature file. "
              "Have you specified the version number in the following format?")
        print(f"[{VERSIONPREFIX}]<version>[-rc[0-9]][-platform] "
              f"(example: {VERSIONPREFIX}0.21.0-rc3-osx)")
        print("wget output:")
        print(indent(output, '\t'))
        return 4
    # fetch second signature file
    sigfile2 = SIGNATUREFILENAME + ".2"
    success, output = download_with_wget(HOST2 + remote_sigfile, sigfile2)
    if not success:
        print("syscoin.org failed to provide signature file, "
              "but syscoincore.org did?")
        print("wget output:")
        print(indent(output, '\t'))
        remove_files([sigfile1])
        return 5
    # ensure that both signature files are equal
    if not files_are_equal(sigfile1, sigfile2):
        print("syscoin.org and syscoincore.org signature files were not equal?")
        print(f"See files {WORKINGDIR}/{sigfile1} and {WORKINGDIR}/{sigfile2}")
        return 6
    # check signature and extract data into file
    retval, output = verify_with_gpg(sigfile1, HASHFILE)
    if retval != 0:
        # gpg exit code 1 = bad signature, 2 = other error (e.g. missing key).
        if retval == 1:
            print("Bad signature.")
        elif retval == 2:
            print("gpg error. Do you have the Syscoin Core binary release "
                  "signing key installed?")
        print("gpg output:")
        print(indent(output, '\t'))
        remove_files([sigfile1, sigfile2, HASHFILE])
        return 1
    # extract hashes/filenames of binaries to verify from hash file;
    # each line has the following format: "<hash> <binary_filename>"
    with open(HASHFILE, 'r', encoding='utf8') as hash_file:
        hashes_to_verify = [
            line.split()[:2] for line in hash_file if os_filter in line]
    remove_files([HASHFILE])
    if not hashes_to_verify:
        print("error: no files matched the platform specified")
        return 7
    # download binaries
    for _, binary_filename in hashes_to_verify:
        print(f"Downloading {binary_filename}")
        download_with_wget(HOST1 + remote_dir + binary_filename)
    # verify hashes
    offending_files = []
    for hash_expected, binary_filename in hashes_to_verify:
        with open(binary_filename, 'rb') as binary_file:
            hash_calculated = sha256(binary_file.read()).hexdigest()
        if hash_calculated != hash_expected:
            offending_files.append(binary_filename)
    if offending_files:
        print("Hashes don't match.")
        print("Offending files:")
        print('\n'.join(offending_files))
        return 1
    verified_binaries = [entry[1] for entry in hashes_to_verify]
    # clean up files if desired
    if len(args) >= 2:
        print("Clean up the binaries")
        remove_files([sigfile1, sigfile2] + verified_binaries)
    else:
        print(f"Keep the binaries in {WORKINGDIR}")
        print("Verified hashes of")
        print('\n'.join(verified_binaries))
    return 0
if __name__ == '__main__':
    # Exit with the status code produced by main() (0 = verified OK).
    sys.exit(main(sys.argv[1:]))
| {
"content_hash": "c3ddc56fc729b12f1939c289ff37248d",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 80,
"avg_line_length": 35.14525139664804,
"alnum_prop": 0.6442536957558417,
"repo_name": "syscoin/syscoin",
"id": "9e24f337abbd12ae203b13ac93c70ed761f3184b",
"size": "6500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/verifybinaries/verify.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1285088"
},
{
"name": "C++",
"bytes": "12653307"
},
{
"name": "CMake",
"bytes": "50978"
},
{
"name": "Cap'n Proto",
"bytes": "1256"
},
{
"name": "Dockerfile",
"bytes": "1721"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "30986"
},
{
"name": "JavaScript",
"bytes": "31802"
},
{
"name": "M4",
"bytes": "260893"
},
{
"name": "Makefile",
"bytes": "146223"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "2965506"
},
{
"name": "QMake",
"bytes": "438"
},
{
"name": "Sage",
"bytes": "56850"
},
{
"name": "Scheme",
"bytes": "25953"
},
{
"name": "Shell",
"bytes": "212830"
},
{
"name": "TypeScript",
"bytes": "10706"
}
],
"symlink_target": ""
} |
"""Tests for the Device Registry."""
import asyncio
from unittest.mock import patch
import asynctest
import pytest
from homeassistant.core import callback
from homeassistant.helpers import device_registry
from tests.common import flush_store, mock_device_registry
@pytest.fixture
def registry(hass):
    """Return an empty, loaded, registry."""
    # mock_device_registry injects a fresh DeviceRegistry into hass
    # (helper from tests.common) so each test starts with no devices.
    return mock_device_registry(hass)
@pytest.fixture
def update_events(hass):
    """Capture update events."""
    events = []

    @callback
    def async_capture(event):
        # Record only the payload; tests assert on action/device_id keys.
        events.append(event.data)

    hass.bus.async_listen(device_registry.EVENT_DEVICE_REGISTRY_UPDATED, async_capture)
    return events
async def test_get_or_create_returns_same_entry(hass, registry, update_events):
    """Make sure we do not duplicate entries."""
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        sw_version="sw-version",
        name="name",
        manufacturer="manufacturer",
        model="model",
    )
    # Same identifiers, different MAC: must merge into the existing device.
    entry2 = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "11:22:33:66:77:88")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    # A matching connection alone is also enough to resolve the same device.
    entry3 = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    assert len(registry.devices) == 1
    assert entry.id == entry2.id
    assert entry.id == entry3.id
    assert entry.identifiers == {("bridgeid", "0123")}
    # Attributes set by earlier calls survive later minimal calls.
    assert entry3.manufacturer == "manufacturer"
    assert entry3.model == "model"
    assert entry3.name == "name"
    assert entry3.sw_version == "sw-version"
    await hass.async_block_till_done()
    # Only 2 update events. The third entry did not generate any changes.
    assert len(update_events) == 2
    assert update_events[0]["action"] == "create"
    assert update_events[0]["device_id"] == entry.id
    assert update_events[1]["action"] == "update"
    assert update_events[1]["device_id"] == entry.id
async def test_requirement_for_identifier_or_connection(registry):
    """Make sure we do require some descriptor of device."""
    # Connections only: accepted.
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers=set(),
        manufacturer="manufacturer",
        model="model",
    )
    # Identifiers only: accepted.
    entry2 = registry.async_get_or_create(
        config_entry_id="1234",
        connections=set(),
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    # Neither identifiers nor connections: rejected (returns None).
    entry3 = registry.async_get_or_create(
        config_entry_id="1234",
        connections=set(),
        identifiers=set(),
        manufacturer="manufacturer",
        model="model",
    )
    assert len(registry.devices) == 2
    assert entry
    assert entry2
    assert entry3 is None
async def test_multiple_config_entries(registry):
    """Make sure we do not get duplicate entries."""
    entry = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    # A second config entry for the same physical device attaches to it
    # instead of creating a new registry entry.
    entry2 = registry.async_get_or_create(
        config_entry_id="456",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    entry3 = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    assert len(registry.devices) == 1
    assert entry.id == entry2.id
    assert entry.id == entry3.id
    # Both config entries are recorded on the single device.
    assert entry2.config_entries == {"123", "456"}
async def test_loading_from_storage(hass, hass_storage):
    """Test loading stored devices on start."""
    # Pre-seed the mock storage with one persisted device record.
    hass_storage[device_registry.STORAGE_KEY] = {
        "version": device_registry.STORAGE_VERSION,
        "data": {
            "devices": [
                {
                    "config_entries": ["1234"],
                    "connections": [["Zigbee", "01.23.45.67.89"]],
                    "id": "abcdefghijklm",
                    "identifiers": [["serial", "12:34:56:AB:CD:EF"]],
                    "manufacturer": "manufacturer",
                    "model": "model",
                    "name": "name",
                    "sw_version": "version",
                    "area_id": "12345A",
                    "name_by_user": "Test Friendly Name",
                }
            ]
        },
    }
    registry = await device_registry.async_get_registry(hass)
    # get_or_create with matching descriptors must resolve to the stored
    # entry instead of minting a new one.
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        connections={("Zigbee", "01.23.45.67.89")},
        identifiers={("serial", "12:34:56:AB:CD:EF")},
        manufacturer="manufacturer",
        model="model",
    )
    assert entry.id == "abcdefghijklm"
    # User-editable attributes survive the load/merge round trip.
    assert entry.area_id == "12345A"
    assert entry.name_by_user == "Test Friendly Name"
    # Stored JSON lists are deserialized back into sets.
    assert isinstance(entry.config_entries, set)
async def test_removing_config_entries(hass, registry, update_events):
    """Make sure we do not get duplicate entries."""
    entry = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    # Second config entry on the same device.
    entry2 = registry.async_get_or_create(
        config_entry_id="456",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    # A distinct device owned solely by config entry "123".
    entry3 = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "34:56:78:CD:EF:12")},
        identifiers={("bridgeid", "4567")},
        manufacturer="manufacturer",
        model="model",
    )
    assert len(registry.devices) == 2
    assert entry.id == entry2.id
    assert entry.id != entry3.id
    assert entry2.config_entries == {"123", "456"}
    # Clearing "123": first device keeps entry "456"; the second device
    # has no config entries left and is removed entirely.
    registry.async_clear_config_entry("123")
    entry = registry.async_get_device({("bridgeid", "0123")}, set())
    entry3_removed = registry.async_get_device({("bridgeid", "4567")}, set())
    assert entry.config_entries == {"456"}
    assert entry3_removed is None
    await hass.async_block_till_done()
    # 5 events: create, update (2nd config entry), create, update (clear), remove.
    assert len(update_events) == 5
    assert update_events[0]["action"] == "create"
    assert update_events[0]["device_id"] == entry.id
    assert update_events[1]["action"] == "update"
    assert update_events[1]["device_id"] == entry2.id
    assert update_events[2]["action"] == "create"
    assert update_events[2]["device_id"] == entry3.id
    assert update_events[3]["action"] == "update"
    assert update_events[3]["device_id"] == entry.id
    assert update_events[4]["action"] == "remove"
    assert update_events[4]["device_id"] == entry3.id
async def test_removing_area_id(registry):
    """Make sure we can clear area id."""
    device = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    # Assign an area, then wipe every device in that area.
    device_with_area = registry.async_update_device(device.id, area_id="12345A")
    registry.async_clear_area_id("12345A")
    device_without_area = registry.async_get_device({("bridgeid", "0123")}, set())
    # The area assignment is gone and a new (distinct) entry was produced.
    assert not device_without_area.area_id
    assert device_with_area != device_without_area
async def test_specifying_via_device_create(registry):
    """Test specifying a via_device and updating."""
    # Register the hub first so the dependent device can resolve it.
    hub = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("hue", "0123")},
        manufacturer="manufacturer",
        model="via",
    )
    bulb = registry.async_get_or_create(
        config_entry_id="456",
        connections=set(),
        identifiers={("hue", "456")},
        manufacturer="manufacturer",
        model="light",
        via_device=("hue", "0123"),
    )
    # via_device identifiers were resolved to the hub's registry id.
    assert bulb.via_device_id == hub.id
async def test_specifying_via_device_update(registry):
    """Test specifying a via_device and updating."""
    light = registry.async_get_or_create(
        config_entry_id="456",
        connections=set(),
        identifiers={("hue", "456")},
        manufacturer="manufacturer",
        model="light",
        via_device=("hue", "0123"),
    )
    # The referenced hub is not registered yet, so the link is unresolved.
    assert light.via_device_id is None
    via = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("hue", "0123")},
        manufacturer="manufacturer",
        model="via",
    )
    # Re-registering the light after the hub exists resolves the link.
    light = registry.async_get_or_create(
        config_entry_id="456",
        connections=set(),
        identifiers={("hue", "456")},
        manufacturer="manufacturer",
        model="light",
        via_device=("hue", "0123"),
    )
    assert light.via_device_id == via.id
async def test_loading_saving_data(hass, registry):
    """Test that we load/save data correctly."""
    orig_via = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("hue", "0123")},
        manufacturer="manufacturer",
        model="via",
    )
    orig_light = registry.async_get_or_create(
        config_entry_id="456",
        connections=set(),
        identifiers={("hue", "456")},
        manufacturer="manufacturer",
        model="light",
        via_device=("hue", "0123"),
    )
    assert len(registry.devices) == 2
    # Now load written data in new registry
    registry2 = device_registry.DeviceRegistry(hass)
    # Flush the pending (debounced) save so registry2 sees the data.
    await flush_store(registry._store)
    await registry2.async_load()
    # Ensure same order
    assert list(registry.devices) == list(registry2.devices)
    new_via = registry2.async_get_device({("hue", "0123")}, set())
    new_light = registry2.async_get_device({("hue", "456")}, set())
    # Entries must survive the save/load round trip unchanged.
    assert orig_via == new_via
    assert orig_light == new_light
async def test_no_unnecessary_changes(registry):
    """Make sure we do not consider devices changes."""
    original = registry.async_get_or_create(
        config_entry_id="1234",
        connections={("ethernet", "12:34:56:78:90:AB:CD:EF")},
        identifiers={("hue", "456"), ("bla", "123")},
    )
    # Re-registering with a subset of identifiers must be a no-op:
    # same device returned, and no save scheduled.
    with patch(
        "homeassistant.helpers.device_registry.DeviceRegistry.async_schedule_save"
    ) as mock_save:
        duplicate = registry.async_get_or_create(
            config_entry_id="1234", identifiers={("hue", "456")}
        )
    assert original.id == duplicate.id
    assert len(mock_save.mock_calls) == 0
async def test_format_mac(registry):
    """Make sure we normalize mac addresses."""
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    # Every common MAC notation must normalize to the same device.
    for mac in ["123456ABCDEF", "123456abcdef", "12:34:56:ab:cd:ef", "1234.56ab.cdef"]:
        test_entry = registry.async_get_or_create(
            config_entry_id="1234",
            connections={(device_registry.CONNECTION_NETWORK_MAC, mac)},
        )
        assert test_entry.id == entry.id, mac
        # Canonical form is lowercase colon-separated.
        assert test_entry.connections == {
            (device_registry.CONNECTION_NETWORK_MAC, "12:34:56:ab:cd:ef")
        }
    # This should not raise
    for invalid in [
        "invalid_mac",
        "123456ABCDEFG",  # 1 extra char
        "12:34:56:ab:cdef",  # not enough :
        "12:34:56:ab:cd:e:f",  # too many :
        "1234.56abcdef",  # not enough .
        "123.456.abc.def",  # too many .
    ]:
        invalid_mac_entry = registry.async_get_or_create(
            config_entry_id="1234",
            connections={(device_registry.CONNECTION_NETWORK_MAC, invalid)},
        )
        # Unparseable values are stored verbatim rather than rejected.
        assert list(invalid_mac_entry.connections)[0][1] == invalid
async def test_update(registry):
    """Verify that we can update some attributes of a device."""
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("hue", "456"), ("bla", "123")},
    )
    new_identifiers = {("hue", "654"), ("bla", "321")}
    assert not entry.area_id
    assert not entry.name_by_user
    # Patch the save hook so we can assert exactly one persist is scheduled.
    with patch.object(registry, "async_schedule_save") as mock_save:
        updated_entry = registry.async_update_device(
            entry.id,
            area_id="12345A",
            name_by_user="Test Friendly Name",
            new_identifiers=new_identifiers,
            via_device_id="98765B",
        )
    assert mock_save.call_count == 1
    # Entries are immutable: the update produced a new object.
    assert updated_entry != entry
    assert updated_entry.area_id == "12345A"
    assert updated_entry.name_by_user == "Test Friendly Name"
    assert updated_entry.identifiers == new_identifiers
    assert updated_entry.via_device_id == "98765B"
async def test_update_remove_config_entries(hass, registry, update_events):
    """Make sure we do not get duplicate entries."""
    entry = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    # Second config entry on the same device.
    entry2 = registry.async_get_or_create(
        config_entry_id="456",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    # A distinct device owned solely by config entry "123".
    entry3 = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "34:56:78:CD:EF:12")},
        identifiers={("bridgeid", "4567")},
        manufacturer="manufacturer",
        model="model",
    )
    assert len(registry.devices) == 2
    assert entry.id == entry2.id
    assert entry.id != entry3.id
    assert entry2.config_entries == {"123", "456"}
    # Removing "123" from the shared device keeps it; removing the only
    # config entry from the second device deletes it (returns None).
    updated_entry = registry.async_update_device(
        entry2.id, remove_config_entry_id="123"
    )
    removed_entry = registry.async_update_device(
        entry3.id, remove_config_entry_id="123"
    )
    assert updated_entry.config_entries == {"456"}
    assert removed_entry is None
    removed_entry = registry.async_get_device({("bridgeid", "4567")}, set())
    assert removed_entry is None
    await hass.async_block_till_done()
    # 5 events: create, update (2nd config entry), create, update, remove.
    assert len(update_events) == 5
    assert update_events[0]["action"] == "create"
    assert update_events[0]["device_id"] == entry.id
    assert update_events[1]["action"] == "update"
    assert update_events[1]["device_id"] == entry2.id
    assert update_events[2]["action"] == "create"
    assert update_events[2]["device_id"] == entry3.id
    assert update_events[3]["action"] == "update"
    assert update_events[3]["device_id"] == entry.id
    assert update_events[4]["action"] == "remove"
    assert update_events[4]["device_id"] == entry3.id
async def test_loading_race_condition(hass):
    """Test only one storage load called when concurrent loading occurred ."""
    with asynctest.patch(
        "homeassistant.helpers.device_registry.DeviceRegistry.async_load"
    ) as mock_load:
        # Two concurrent getters must share a single load and instance.
        results = await asyncio.gather(
            device_registry.async_get_registry(hass),
            device_registry.async_get_registry(hass),
        )
        mock_load.assert_called_once_with()
        assert results[0] == results[1]
| {
"content_hash": "8f3f1cf68e0bb5787e2ddeee8f21e0cd",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 87,
"avg_line_length": 33.69917012448133,
"alnum_prop": 0.6102320999815305,
"repo_name": "Teagan42/home-assistant",
"id": "7f31c32cde3dee3d91ab5107c4769b5b53ec9629",
"size": "16243",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/helpers/test_device_registry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
from typing import Dict, List, Optional
import demistomock as demisto # noqa: F401
import urllib3
from CommonServerPython import * # noqa: F401
# disable insecure warnings
urllib3.disable_warnings()
class Client(BaseClient):
    """Client class to interact with the service API.

    Implements the raw HTTP calls only and contains no Demisto logic.
    Inherits from BaseClient (CommonServerPython), whose _http_request()
    handles proxy and SSL verification; redirects are allowed because the
    snort IP blocklist endpoint redirects.
    """

    def build_iterator(self) -> List:
        """Retrieve all entries from the feed.

        Returns:
            A list of dicts, one per detected indicator.
        """
        response_text = self._http_request('GET',
                                           url_suffix='',
                                           full_url=self._base_url,
                                           resp_type='text',
                                           allow_redirects=True
                                           )
        collected = []
        # The feed is plain text: one candidate indicator per line.
        try:
            for candidate in response_text.splitlines():
                # auto_detect_indicator_type (CommonServerPython) infers the
                # XSOAR indicator type; lines it cannot classify are skipped.
                detected_type = auto_detect_indicator_type(candidate)
                if detected_type:
                    collected.append({
                        'value': candidate,
                        'type': detected_type,
                        'FeedURL': self._base_url
                    })
        except ValueError as err:
            demisto.debug(str(err))
            raise ValueError(f'Could not parse returned data as indicator. \n\nError massage: {err}')
        return collected
def test_module(client: Client) -> str:
    """Builds the iterator to check that the feed is accessible.
    Args:
        client: Client object.
    Returns:
        Outputs.
    """
    # A single-indicator fetch is enough to prove connectivity; any
    # request/parse failure propagates and fails the test button.
    fetch_indicators(client=client, limit=1)
    return 'ok'
def fetch_indicators(client: 'Client', tlp_color: Optional[str] = None,
                     feed_tags: Optional[List] = None, limit: int = -1) -> List[Dict]:
    """Retrieves indicators from the feed.

    Args:
        client (Client): Client object with request.
        tlp_color (str): Traffic Light Protocol color stamped on each indicator.
        feed_tags (list): Tags to assign to fetched indicators.  Defaults to
            None (previously a mutable ``[]`` default — a Python anti-pattern).
        limit (int): Maximum number of indicators to return; a non-positive
            value (the default) returns everything.

    Returns:
        List of indicator objects in the structure Cortex XSOAR expects
        (value/type/service/fields/rawJSON).
    """
    iterator = client.build_iterator()
    indicators = []
    if limit > 0:
        iterator = iterator[:limit]
    for item in iterator:
        value_ = item.get('value')
        type_ = item.get('type')
        # rawJSON keeps every feed-supplied key; 'value'/'type' are set
        # explicitly so they are present even when missing from the item.
        raw_data = dict(item)
        raw_data['value'] = value_
        raw_data['type'] = type_
        indicator_obj = {
            # The indicator value.
            'value': value_,
            # The indicator type as defined in Cortex XSOAR.
            'type': type_,
            # The name of the service supplying this feed.
            'service': 'Snort IP Blocklist',
            # Maps values to existing indicator fields defined in Cortex XSOAR.
            'fields': {},
            # The raw data returned from the feed source about the indicator.
            'rawJSON': raw_data
        }
        if feed_tags:
            indicator_obj['fields']['tags'] = feed_tags
        if tlp_color:
            indicator_obj['fields']['trafficlightprotocol'] = tlp_color
        indicators.append(indicator_obj)
    return indicators
def get_indicators_command(client: Client,
                           params: Dict[str, str],
                           args: Dict[str, str]
                           ) -> CommandResults:
    """Wrapper for retrieving indicators from the feed to the war-room.

    Args:
        client: Client object with request.
        params: demisto.params()
        args: demisto.args()
    Returns:
        Outputs.
    """
    limit = arg_to_number(args.get('limit', '10'), arg_name='limit') or 10
    feed_tags = argToList(params.get('feedTags', ''))
    tlp_color = params.get('tlp_color')
    indicators = fetch_indicators(client, tlp_color, feed_tags, limit)
    # Render only value/type columns in the war-room table.
    table = tableToMarkdown(
        f'Indicators from Snort IP Blocklist Feed: (first {limit} indicators)',
        indicators,
        headers=['value', 'type'],
        headerTransform=string_to_table_header,
        removeNull=True,
    )
    return CommandResults(
        readable_output=table,
        outputs_prefix='',
        outputs_key_field='',
        raw_response=indicators,
        outputs={},
    )
def fetch_indicators_command(client: Client, params: Dict[str, str]) -> List[Dict]:
    """Wrapper for fetching indicators from the feed to the Indicators tab.

    Args:
        client: Client object with request.
        params: demisto.params()
    Returns:
        Indicators.
    """
    tlp_color = params.get('tlp_color')
    feed_tags = argToList(params.get('feedTags', ''))
    return fetch_indicators(client, tlp_color, feed_tags)
def main():
    """
    main function, parses params and runs command functions
    """
    params = demisto.params()
    # Get the service API url
    base_url = params.get('url')
    # NOTE(review): the 'insecure' checkbox is inverted here to produce the
    # SSL-verify flag passed as Client(verify=...) — confirm param semantics.
    insecure = not params.get('insecure', False)
    proxy = params.get('proxy', False)
    command = demisto.command()
    args = demisto.args()
    demisto.debug(f'Command being called is {command}')
    try:
        client = Client(
            base_url=base_url,
            verify=insecure,
            proxy=proxy,
        )
        if command == 'test-module':
            # This is the call made when pressing the integration Test button.
            return_results(test_module(client))
        elif command == 'snort-get-ip-blocklist-indicators':
            # This is the command that fetches a limited number of indicators from the feed source
            # and displays them in the war room.
            return_results(get_indicators_command(client, params, args))
        elif command == 'fetch-indicators':
            # This is the command that initiates a request to the feed endpoint and create new indicators objects from
            # the data fetched. If the integration instance is configured to fetch indicators, then this is the command
            # that will be executed at the specified feed fetch interval.
            indicators = fetch_indicators_command(client, params)
            # Indicators are submitted in batches to keep payloads bounded.
            for iter_ in batch(indicators, batch_size=2000):
                demisto.createIndicators(iter_)
        else:
            raise NotImplementedError(f'Command {command} is not implemented.')
    # Log exceptions and return errors
    except Exception as e:
        return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
# 'builtin'/'builtins' covers execution inside the Demisto/XSOAR runtime,
# which execs integration code under those module names.
if __name__ in ['__main__', 'builtin', 'builtins']:
    main()
| {
"content_hash": "75cb205c0bb7e21f86618757523a44f9",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 124,
"avg_line_length": 35.22119815668203,
"alnum_prop": 0.598586942300144,
"repo_name": "VirusTotal/content",
"id": "307716e62085e8c2379ee8ff8dc20dce0a87fc44",
"size": "7643",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/SnortIPBlocklist/Integrations/FeedSnortIPBlocklist/FeedSnortIPBlocklist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from rest_framework.documentation import include_docs_urls
from apps.common.index import index
# Core URL routes: landing page, admin, versioned API and API docs.
urlpatterns = [
    url(r'^$', index),
    url(r'^admin/', admin.site.urls),
    url(r'^api/v1/', include('instalike.urls_v1', namespace='api_v1')),
    url(r'^docs/', include_docs_urls(title='InstaLike')),
]

if settings.DEBUG:
    # Development-only routes: browsable-API auth plus locally served
    # media and static files.
    urlpatterns += [url(r'^rest-framework/', include('rest_framework.urls', namespace='rest_framework'))]
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| {
"content_hash": "2f3a4a4b299929ae0e062a4ccbd45377",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 108,
"avg_line_length": 39.3,
"alnum_prop": 0.7175572519083969,
"repo_name": "kamilgregorczyk/instalike",
"id": "532d738a184a79499c0a97f7bb23dc776c96e665",
"size": "786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "instalike/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "286"
},
{
"name": "Python",
"bytes": "63452"
},
{
"name": "Shell",
"bytes": "876"
}
],
"symlink_target": ""
} |
from setuptools import setup
from setuptools import find_packages
# Trove classifiers describing the package for PyPI.
classifiers = [
    'Development Status :: 3 - Alpha',
    'Environment :: Web Environment',
    'Intended Audience :: Developers',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.7',
    'License :: OSI Approved :: MIT License',
    'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    'Topic :: Software Development :: Libraries :: Python Modules',
]

# Author doubles as maintainer; email kept split to deter scrapers.
_AUTHOR = 'Park Hyunwoo'
_AUTHOR_EMAIL = 'ez.amiryo' '@' 'gmail.com'

setup(
    name='Flasky',
    version='0.1.0',
    packages=find_packages(),
    author=_AUTHOR,
    author_email=_AUTHOR_EMAIL,
    maintainer=_AUTHOR,
    maintainer_email=_AUTHOR_EMAIL,
    url='http://github.com/lqez/flasky',
    description='Lazy man\'s Flask Application',
    classifiers=classifiers,
    install_requires=[
        "Flask >= 0.9",
    ],
    test_suite='flasky.tests.flasky_test',
)
| {
"content_hash": "48364a78efebe67db218c8b7281f9233",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 67,
"avg_line_length": 29.59375,
"alnum_prop": 0.6399155227032735,
"repo_name": "lqez/flasky",
"id": "9a0c14f3cef06cb443428a957b3c787ecabb88a8",
"size": "969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4256"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import daemon
import logging
import netifaces
import time
import redislite
LOG = logging.getLogger('cloudmanager_server')
# Default redislite database file backing the server.
RDB_FILE = '/var/tmp/cloudmanager.rdb'
# Redis key used as a heartbeat/status flag; setting it to "quit" stops the server.
STATUS_KEY = 'cloudmanager_server:status'
def get_service_addresses():
    """Return the host's IPv4 addresses suitable for serving on.

    Skips the docker0 interface, loopback addresses and point-to-point
    ('peer') entries.
    """
    results = []
    for iface in netifaces.interfaces():
        if iface in ['docker0']:
            continue
        ipv4_entries = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])
        # Netifaces is returning funny values for ipv6 addresses, disabling for now
        # ipv4_entries += netifaces.ifaddresses(iface).get(netifaces.AF_INET6, [])
        for entry in ipv4_entries:
            if 'peer' in entry.keys():
                continue
            if entry['addr'] in ['::1', '127.0.0.1']:
                continue
            results.append(entry['addr'])
    return results
def run_server(port, rdb_file=None, daemonize=True):
    """
    Run the cloudmanager server on the local system

    Parameters
    ==========
    port : int, optional
        The port number to listen on, default: 18266

    rdb_file : str, optional
        The redis rdb file to use, default None

    daemonize : bool, optional
        Detach into a daemon before entering the monitor loop, default True
    """
    if not rdb_file:
        rdb_file = RDB_FILE
    listen_addresses = get_service_addresses()
    if listen_addresses:
        print('Cloudmanager service is listening on:', ','.join([addr+':'+str(port) for addr in listen_addresses]))
        # NOTE(review): although all addresses are printed, only the first one
        # is actually bound below — confirm whether multi-bind was intended.
        # The connection object starts (and keeps alive) the embedded
        # redislite server; it is intentionally held, not otherwise used.
        connection = redislite.StrictRedis(dbfilename=rdb_file, serverconfig={'port': str(port), 'bind': listen_addresses[0]})
    else:
        # No usable external address: fall back to loopback only.
        connection = redislite.StrictRedis(dbfilename=rdb_file, serverconfig={'port': str(port), 'bind': '127.0.0.1'})
    if daemonize:
        with daemon.DaemonContext():
            monitor_server(rdb_file)
    else:
        monitor_server(rdb_file)
def monitor_server(rdb_file, ttl=10):
    """Run the heartbeat loop until the status key is set to 'quit'.

    Refreshes STATUS_KEY (with the given ttl) roughly once a second so
    external observers can tell the server is alive; when another process
    sets the key to 'quit', deletes the key and shuts the server down.

    Parameters
    ----------
    rdb_file : str
        The redis rdb file backing the server.

    ttl : int, optional
        Seconds before STATUS_KEY expires if not refreshed, default 10.
    """
    connection = redislite.StrictRedis(dbfilename=rdb_file)
    status = 'Running'
    connection.setex(STATUS_KEY, ttl, status)
    while status != 'quit':
        raw_status = connection.get(STATUS_KEY)
        # The key may have expired between iterations; guard against None
        # before decoding (previously this raised AttributeError, since the
        # "if not status" check only ran after the decode).
        status = raw_status.decode() if raw_status is not None else ''
        # Lazy %-style logging also restores py2 compatibility implied by
        # the module's print_function import (an f-string was used before).
        LOG.debug('Status: %r', status)
        if not status or connection.ttl(STATUS_KEY) < 2:
            connection.setex(STATUS_KEY, ttl, 'Running')
        time.sleep(1)
    connection.delete(STATUS_KEY)
    connection.shutdown()
    return
def quit(rdb_file=None):
    """
    Send the quit command to the server if it is running

    Parameters
    ----------
    rdb_file : str, optional
        The redis rdb_file, default=None
    """
    if not rdb_file:
        rdb_file = RDB_FILE
    attempts_left = 10
    # Keep writing 'quit' until the server drops its status key (or we
    # exhaust our retries).
    while attempts_left and status(rdb_file):
        conn = redislite.StrictRedis(dbfilename=rdb_file)
        conn.setex(STATUS_KEY, 10, b'quit')
        attempts_left -= 1
        time.sleep(1)
    if attempts_left == 0:
        raise ValueError('Server shutdown failed')
def status(rdb_file=None):
    """
    Return the server status string, or None when not running

    Parameters
    ----------
    rdb_file : str, optional
        The redis rdb_file, default=None
    """
    if not rdb_file:
        rdb_file = RDB_FILE
    conn = redislite.StrictRedis(dbfilename=rdb_file)
    current = conn.get(STATUS_KEY)
    # Absent (or empty) key means the server is not running.
    if current:
        return current.decode()
| {
"content_hash": "ccfc47e2cdc0bcf8e3ed06c538041dc1",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 126,
"avg_line_length": 29.839285714285715,
"alnum_prop": 0.6271693596648713,
"repo_name": "dwighthubbard/micropython-cloudmanager",
"id": "ffed2471c7108f1449afed9e880a427aa7a79892",
"size": "3364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudmanager/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51203"
}
],
"symlink_target": ""
} |
class MapQuery(object):
    """Query to retrieve complete ways and relations in an area."""

    # Overpass QL bounding boxes are ordered (south, west, north, east).
    _QUERY_TEMPLATE = "(node({south},{west},{north},{east});<;);"

    def __init__(self, south, west, north, east):
        """
        Initialize query with given bounding box.

        :param south Southern latitude limit of the bounding box.
        :param west Western longitude limit of the bounding box.
        :param north Northern latitude limit of the bounding box.
        :param east Eastern longitude limit of the bounding box.

        (The previous docstring wrongly described the argument order as
        west, south, east, north.)
        """
        self.south = south
        self.west = west
        self.north = north
        self.east = east

    def __str__(self):
        """Render the query as an Overpass QL string."""
        return self._QUERY_TEMPLATE.format(
            south=self.south,
            west=self.west,
            north=self.north,
            east=self.east
        )
class WayQuery(object):
    """Query to retrieve a set of ways and their dependent nodes satisfying
    the input parameters."""

    _QUERY_TEMPLATE = "(way{query_parameters});(._;>;);"

    def __init__(self, query_parameters):
        """Initialize a query for ways matching the given parameters.

        :param query_parameters: Overpass QL query parameters
        """
        self.query_parameters = query_parameters

    def __str__(self):
        return self._QUERY_TEMPLATE.format(
            query_parameters=self.query_parameters)
| {
"content_hash": "068bf41fda3ce492ffe217272ad3ce31",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 80,
"avg_line_length": 30.024390243902438,
"alnum_prop": 0.5922014622258327,
"repo_name": "willemarcel/overpass-api-python-wrapper",
"id": "58d71fc94572172ecff437403a9e10f3166eecc6",
"size": "1257",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "overpass/queries.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10466"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create the OEmbedVideoPlugin table."""
    def forwards(self, orm):
        """Apply the migration: create the plugin table and notify South."""
        # Adding model 'OEmbedVideoPlugin'
        db.create_table('cmsplugin_oembedvideoplugin', (
            ('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=100)),
            ('width', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('height', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('html', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('aldryn_video', ['OEmbedVideoPlugin'])
    def backwards(self, orm):
        """Reverse the migration: drop the plugin table."""
        # Deleting model 'OEmbedVideoPlugin'
        db.delete_table('cmsplugin_oembedvideoplugin')
    # Frozen ORM definitions South uses to build the `orm` object above.
    models = {
        'aldryn_video.oembedvideoplugin': {
            'Meta': {'object_name': 'OEmbedVideoPlugin', 'db_table': "'cmsplugin_oembedvideoplugin'", '_ormbases': ['cms.CMSPlugin']},
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '100'}),
            'width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        # XXX intellectronica 2015-11-02 The CMSPlugin fields level, lft,
        # rght and tree_id have been commented-out in order to allow this
        # migration to run in later versions of the CMS where they do not exist.
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            # 'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            # 'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            # 'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            # 'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        }
    }
    complete_apps = ['aldryn_video']
| {
"content_hash": "376610b2d07f4f039e343312916ac29e",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 156,
"avg_line_length": 61.950819672131146,
"alnum_prop": 0.5885154802857899,
"repo_name": "aldryn/aldryn-video",
"id": "b6cfe79ff17b522c0ac54df660fbe70220983a35",
"size": "3803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aldryn_video/south_migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "605"
},
{
"name": "Python",
"bytes": "58197"
},
{
"name": "Shell",
"bytes": "101"
}
],
"symlink_target": ""
} |
import unittest, types
from pyvisdk import Vim
from tests.common import get_options
from pyvisdk.facade.task import TaskManager
def nothing():
    """Do nothing; a trivial callable used as a task step in the tests."""
    return None
def random_string(n):
    """Return a random string of length n of uppercase letters and digits."""
    import random
    import string
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(n))
# Task names of assorted lengths, one per test method below.
TASKS = ['hello world',
         'I hate VMware',
         'This is a very long task name, a very very long name',
         'school sucks',
         'one more',
         'last one']
class Test_Task(unittest.TestCase):
    """Integration tests for TaskManager against a live vSphere server."""
    @classmethod
    def setUpClass(cls):
        # Log in once for the whole class; tasks are attached to the first
        # host system returned by the server.
        cls.options = get_options()
        cls.vim = Vim(cls.options.server)
        cls.vim.login(cls.options.username, cls.options.password)
        cls.manager = TaskManager(cls.vim)
        cls.obj = cls.vim.getHostSystems()[0]
        cls.cleanUpStaleTasks()
    @classmethod
    def cleanUpStaleTasks(cls):
        # Mark leftover running/queued tasks from earlier runs as errored
        # so they do not interfere with these tests.
        for task in cls.manager._managed_object.recentTask:
            if task.info.state in ['running', 'queued']:
                task.SetTaskState('error', None, None)
    @classmethod
    def tearDownClass(cls):
        cls.vim.logout()
    def test_task(self):
        # The task context manager completes cleanly on a no-op body.
        with self.manager.task(self.obj, TASKS[0]):
            pass
    def test_task__error(self):
        # An exception raised inside the task body propagates to the caller.
        with self.assertRaises(Exception):
            with self.manager.task(self.obj, TASKS[1]):
                raise Exception()
    def test_wrap(self):
        # wraps() returns a callable that runs the function within the task.
        task = self.manager.task(self.obj, TASKS[2])
        func = task.wraps(nothing)
        func()
    def test_step(self):
        # step() runs each callable in sequence under the task.
        task = self.manager.task(self.obj, TASKS[3])
        task.step([nothing, nothing, nothing])
    def test_step_manually(self):
        # Progress can also be reported manually inside the context.
        with self.manager.task(self.obj, TASKS[4]) as task:
            task.update_progress(10)
            task.update_progress(20)
            task.update_progress(90)
if __name__ == "__main__":
    # Allow running this test module directly, e.g. `python test_task.py`.
    #import sys;sys.argv = ['', 'Test.testHosts']
    unittest.main()
| {
"content_hash": "ebbde5f8a081e9386582d15f2faed7e4",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 91,
"avg_line_length": 28.602941176470587,
"alnum_prop": 0.6082262210796915,
"repo_name": "xuru/pyvisdk",
"id": "fec6bba9e3063f400cf2468084195dba76bc8a9c",
"size": "1945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_task.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
} |
"""Tensorflow implementation of LogME for classification.
You, Kaichao, et al. "Logme: Practical assessment of pre-trained models for
transfer learning." International Conference on Machine Learning. PMLR, 2021.
http://proceedings.mlr.press/v139/you21b/you21b.pdf
We base our code on the Optimized Fixed Point Iterations implemented in:
https://github.com/thuml/LogME, proposed in the arxiv 2021 paper (Algorithm 3):
"Ranking and Tuning Pre-trained Models: A New Paradigm of Exploiting Model Hubs"
https://arxiv.org/abs/2110.10545
"""
import math
import tensorflow as tf
from stable_transfer.transferability import transfer_experiment
def truncated_svd(features):
  """Return the nonzero singular values and left singular vectors of `features`.

  The decomposition is obtained from the SVD of the Gram matrix
  features^T @ features; components with singular value <= 1e-10 are
  discarded (truncation to the numerical rank).
  """
  gram = tf.matmul(features, features, transpose_a=True)
  eigenvalues, _, right_vectors = tf.linalg.svd(gram)
  singular_values = tf.sqrt(eigenvalues)
  # F @ V = U @ Sigma, i.e. the left singular vectors scaled by sigma.
  u_times_sigma = tf.matmul(features, right_vectors)
  # Indices of numerically nonzero singular values.
  kept = tf.where(tf.greater(singular_values, 1e-10))
  singular_values = tf.gather_nd(singular_values, kept)
  left_vectors = tf.squeeze(
      tf.gather(u_times_sigma, kept, axis=-1)) / singular_values
  return singular_values, left_vectors
def get_per_class_evidence(d, n, sigma, alpha, beta, res2, m2):
  """Compute the evidence L(alpha, beta) as in the paper (eq. 2).

  Args:
    d: feature dimensionality.
    n: number of datapoints.
    sigma: squared singular values of the feature matrix.
    alpha: prior precision from get_optimized_per_class_params().
    beta: noise precision from get_optimized_per_class_params().
    res2: squared residual term from get_optimized_per_class_params().
    m2: squared-mean term from get_optimized_per_class_params().

  Returns:
    Scalar log evidence (half of the accumulated terms).
  """
  evidence = n * tf.math.log(beta)
  evidence += d * tf.math.log(alpha)
  evidence -= n * tf.math.log(2 * math.pi)
  evidence -= beta * res2
  evidence -= alpha * m2
  evidence -= tf.reduce_sum(tf.math.log(alpha + beta * sigma))
  return 0.5 * evidence
def get_optimized_per_class_params(n, sigma, u, y_c, tol=1e-2, max_iter=11):
  """Compute alpha and beta as in https://arxiv.org/abs/2110.10545 (Alg. 3).

  Runs the optimized fixed-point iteration on t = alpha / beta until the
  relative change in t drops below `tol` or `max_iter` iterations pass.

  Args:
    n: number of datapoints.
    sigma: squared singular values of the feature matrix.
    u: left singular vectors of the feature matrix.
    y_c: one-vs-rest {0, 1} target column for the current class.
    tol: relative tolerance on t used as the convergence criterion.
    max_iter: maximum number of fixed-point iterations.

  Returns:
    Tuple (alpha, beta, res2, m2): the optimized precisions plus the
    residual and mean terms consumed by get_per_class_evidence().
  """
  # Project the targets onto the singular directions once, outside the loop.
  x = tf.matmul(u, y_c, transpose_a=True)
  x2 = tf.squeeze(x ** 2)
  # Residual component that does not depend on (alpha, beta).
  res_x2 = tf.reduce_sum(y_c ** 2) - tf.reduce_sum(x2)
  alpha, beta = 1, 1
  for _ in range(max_iter):
    t = alpha / beta
    gamma = tf.reduce_sum(sigma / (sigma + t))
    m2 = tf.reduce_sum((sigma * x2 / ((t + sigma) ** 2)))
    res2 = tf.reduce_sum(x2 / ((1 + sigma / t) ** 2)) + res_x2
    alpha = gamma / (m2 + 1e-5)
    beta = (n - gamma) / (res2 + 1e-5)
    # Converged when t = alpha / beta stops moving (relative change <= tol).
    if tf.abs((alpha / beta) - t) / t <= tol:
      break
  return alpha, beta, res2, m2
def get_logme_score(features, target_labels):
  """Return the LogME score for classification.
  Args:
    features: matrix [N, D] of source features obtained from the target data,
      where N is the number of datapoints and D their dimensionionality.
    target_labels: ground truth target labels of dimension [N, 1].
  Returns:
    logme: transferability metric score.
  """
  d = features.shape[1]
  n = features.shape[0]
  # When D > N, obtain the decomposition via the Gram matrix path; otherwise
  # decompose the [N, D] feature matrix directly.
  if d > n:
    s, u, = truncated_svd(features)
  else:
    s, u, _ = tf.linalg.svd(features)
  # Squared singular values: the spectrum used by the evidence computation.
  sigma = (s ** 2)
  evidences = []
  unique_labels, _ = tf.unique(target_labels)
  num_target_classes = tf.reduce_max(target_labels) + 1
  if num_target_classes != unique_labels.shape[0]:
    raise ValueError('Labels need to be in the range [0, num_target_classes).')
  one_hot_targets = tf.one_hot(target_labels, depth=num_target_classes)
  # Accumulate the per-class (one-vs-rest) log evidence, normalized by N.
  for label in list(unique_labels):
    one_hot_label = tf.one_hot(label, depth=num_target_classes)
    # y_c is a {0, 1} column marking the datapoints belonging to this class.
    y_c = tf.matmul(one_hot_targets, tf.expand_dims(one_hot_label, axis=-1))
    alpha, beta, res2, m2 = get_optimized_per_class_params(n, sigma, u, y_c)
    evidences.append(
        get_per_class_evidence(d, n, sigma, alpha, beta, res2, m2) / n)
  logme = tf.reduce_mean(evidences)
  return logme
@transfer_experiment.load_or_compute
def get_train_logme(experiment):
  """Compute and return the LogME score on the target training data."""
  train_features, train_labels = experiment.model_output_on_target_train_dataset(
      'features')
  score = get_logme_score(train_features, train_labels)
  return {'logme': float(score)}
| {
"content_hash": "e2c5110dc1e7ffef11149e20fe19332f",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 80,
"avg_line_length": 35.1747572815534,
"alnum_prop": 0.6740270494065691,
"repo_name": "google-research/google-research",
"id": "5d05973924effc1cc51a319b21743f8171cd9328",
"size": "4231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stable_transfer/transferability/logme.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
"""Simple text browser for IDLE
"""
from tkinter import Toplevel, Text, TclError,\
HORIZONTAL, VERTICAL, NS, EW, NSEW, NONE, WORD, SUNKEN
from tkinter.ttk import Frame, Scrollbar, Button
from tkinter.messagebox import showerror
from functools import update_wrapper
from idlelib.colorizer import color_config
class AutoHideScrollbar(Scrollbar):
    """A scrollbar that hides itself when the whole range is visible.

    Only the grid geometry manager is supported.
    """
    def set(self, lo, hi):
        # Show the scrollbar only while part of the content is out of view;
        # grid_remove() remembers the grid options for re-display.
        fully_visible = float(lo) <= 0.0 and float(hi) >= 1.0
        if fully_visible:
            self.grid_remove()
        else:
            self.grid()
        super().set(lo, hi)

    def pack(self, **kwargs):
        raise TclError(f'{self.__class__.__name__} does not support "pack"')

    def place(self, **kwargs):
        raise TclError(f'{self.__class__.__name__} does not support "place"')
class ScrollableTextFrame(Frame):
    """Display text with scrollbar(s)."""

    def __init__(self, master, wrap=NONE, **kwargs):
        """Create a frame for Textview.

        master - master widget for this frame
        wrap - type of text wrapping to use ('word', 'char' or 'none')

        All parameters except for 'wrap' are passed to Frame.__init__().
        The Text widget is accessible via the 'text' attribute.

        Note: Changing the wrapping mode of the text widget after
        instantiation is not supported.
        """
        super().__init__(master, **kwargs)
        self.text = Text(self, wrap=wrap)
        self.text.grid(row=0, column=0, sticky=NSEW)
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure(0, weight=1)

        # Vertical scrollbar is always present.
        self.yscroll = self._make_scrollbar(VERTICAL, self.text.yview)
        self.yscroll.grid(row=0, column=1, sticky=NS)
        self.text['yscrollcommand'] = self.yscroll.set

        if wrap == NONE:
            # Lines can extend past the right edge only with wrapping off,
            # so a horizontal scrollbar is needed in that case alone.
            self.xscroll = self._make_scrollbar(HORIZONTAL, self.text.xview)
            self.xscroll.grid(row=1, column=0, sticky=EW)
            self.text['xscrollcommand'] = self.xscroll.set
        else:
            self.xscroll = None

    def _make_scrollbar(self, orient, command):
        """Return a non-focusable auto-hiding scrollbar for this frame."""
        return AutoHideScrollbar(self, orient=orient, takefocus=False,
                                 command=command)
class ViewFrame(Frame):
    "Display TextFrame and Close button."

    def __init__(self, parent, contents, wrap='word'):
        """Create a frame for viewing text with a "Close" button.

        parent - parent widget for this frame
        contents - text to display
        wrap - type of text wrapping to use ('word', 'char' or 'none')

        The Text widget is accessible via the 'text' attribute.
        """
        super().__init__(parent)
        self.parent = parent
        for sequence in ('<Return>', '<Escape>'):
            self.bind(sequence, self.ok)

        self.textframe = ScrollableTextFrame(self, relief=SUNKEN, height=700)
        self.text = self.textframe.text
        self.text.insert('1.0', contents)
        # Read-only viewer: disable editing after inserting the contents.
        self.text.configure(wrap=wrap, highlightthickness=0, state='disabled')
        color_config(self.text)
        self.text.focus_set()

        self.button_ok = Button(self, text='Close',
                                command=self.ok, takefocus=False)
        self.textframe.pack(side='top', expand=True, fill='both')
        self.button_ok.pack(side='bottom')

    def ok(self, event=None):
        """Dismiss text viewer dialog."""
        self.parent.destroy()
class ViewWindow(Toplevel):
    "A simple text viewer dialog for IDLE."

    def __init__(self, parent, title, contents, modal=True, wrap=WORD,
                 *, _htest=False, _utest=False):
        """Show the given text in a scrollable window with a 'close' button.

        If modal is left True, users cannot interact with other windows
        until the textview window is closed.

        parent - parent of this dialog
        title - string which is title of popup dialog
        contents - text to display in dialog
        wrap - type of text wrapping to use ('word', 'char' or 'none')
        _htest - bool; change box location when running htest.
        _utest - bool; don't wait_window when running unittest.
        """
        super().__init__(parent)
        self['borderwidth'] = 5
        # Place dialog below parent if running htest.
        x_offset = parent.winfo_rootx() + 10
        y_offset = parent.winfo_rooty() + (100 if _htest else 10)
        self.geometry(f'=750x500+{x_offset}+{y_offset}')

        self.title(title)
        self.viewframe = ViewFrame(self, contents, wrap=wrap)
        self.protocol("WM_DELETE_WINDOW", self.ok)
        # NOTE(review): this button is created but never packed here — the
        # visible Close button belongs to self.viewframe. Kept so the
        # 'button_ok' attribute remains available to existing callers.
        self.button_ok = Button(self, text='Close',
                                command=self.ok, takefocus=False)
        self.viewframe.pack(side='top', expand=True, fill='both')

        self.is_modal = modal
        if self.is_modal:
            self.transient(parent)
            self.grab_set()
            if not _utest:
                self.wait_window()

    def ok(self, event=None):
        """Dismiss text viewer dialog."""
        if self.is_modal:
            self.grab_release()
        self.destroy()
def view_text(parent, title, contents, modal=True, wrap='word', _utest=False):
    """Create a ViewWindow displaying the given text.

    parent - parent of this dialog
    title - string which is the title of popup dialog
    contents - text to display in this dialog
    modal - controls if users can interact with other windows while this
            dialog is displayed
    wrap - type of text wrapping to use ('word', 'char' or 'none')
    _utest - bool; controls wait_window on unittest
    """
    return ViewWindow(parent, title, contents, modal, wrap=wrap, _utest=_utest)
def view_file(parent, title, filename, encoding, modal=True, wrap='word',
              _utest=False):
    """Display the contents of a file in a text viewer dialog.

    Show an error dialog and return None if the file cannot be read or
    decoded; otherwise delegate to view_text with the file's contents.
    """
    try:
        with open(filename, 'r', encoding=encoding) as file:
            contents = file.read()
    except OSError:
        showerror(title='File Load Error',
                  message=f'Unable to load file {filename!r} .',
                  parent=parent)
        return None
    except UnicodeDecodeError as err:
        showerror(title='Unicode Decode Error',
                  message=str(err),
                  parent=parent)
        return None
    return view_text(parent, title, contents, modal, wrap=wrap,
                     _utest=_utest)
if __name__ == '__main__':
    # Run the unit tests without exiting, then show the widget for the
    # human-verified test.
    from unittest import main
    main('idlelib.idle_test.test_textview', verbosity=2, exit=False)
    from idlelib.idle_test.htest import run
    run(ViewWindow)
| {
"content_hash": "729b33045b3f757d83183bb240aa5c39",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 79,
"avg_line_length": 35.30927835051546,
"alnum_prop": 0.5963503649635037,
"repo_name": "batermj/algorithm-challenger",
"id": "808a2aefab4f71a3306413cad3abb024a85900fd",
"size": "6850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/idlelib/textview.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "655185"
},
{
"name": "Batchfile",
"bytes": "127416"
},
{
"name": "C",
"bytes": "33127630"
},
{
"name": "C++",
"bytes": "1364796"
},
{
"name": "CSS",
"bytes": "3163"
},
{
"name": "Common Lisp",
"bytes": "48962"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "DTrace",
"bytes": "2196"
},
{
"name": "Go",
"bytes": "26248"
},
{
"name": "HTML",
"bytes": "385719"
},
{
"name": "Haskell",
"bytes": "33612"
},
{
"name": "Java",
"bytes": "1084"
},
{
"name": "JavaScript",
"bytes": "20754"
},
{
"name": "M4",
"bytes": "403992"
},
{
"name": "Makefile",
"bytes": "238185"
},
{
"name": "Objective-C",
"bytes": "4934684"
},
{
"name": "PHP",
"bytes": "3513"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Perl",
"bytes": "649"
},
{
"name": "PostScript",
"bytes": "27606"
},
{
"name": "PowerShell",
"bytes": "21737"
},
{
"name": "Python",
"bytes": "55270625"
},
{
"name": "R",
"bytes": "29951"
},
{
"name": "Rich Text Format",
"bytes": "14551"
},
{
"name": "Roff",
"bytes": "292490"
},
{
"name": "Ruby",
"bytes": "519"
},
{
"name": "Scala",
"bytes": "846446"
},
{
"name": "Shell",
"bytes": "491113"
},
{
"name": "Swift",
"bytes": "881"
},
{
"name": "TeX",
"bytes": "337654"
},
{
"name": "VBScript",
"bytes": "140"
},
{
"name": "XSLT",
"bytes": "153"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from abc import abstractmethod, abstractproperty
from builtins import zip
from future.utils import binary_type, text_type
from twitter.common.collections import OrderedSet
from pants.util.collections_abc_backport import Iterable, OrderedDict, namedtuple
from pants.util.memo import memoized_classproperty
from pants.util.meta import AbstractClass, classproperty
from pants.util.strutil import pluralize
class TypeCheckError(TypeError):
  """TypeError whose message identifies the class that failed the type check."""

  # TODO: make some wrapper exception class to make this kind of
  # prefixing easy (maybe using a class field format string?).
  def __init__(self, type_name, msg, *args, **kwargs):
    prefixed_msg = "type check error in class {}: {}".format(type_name, msg)
    super(TypeCheckError, self).__init__(prefixed_msg, *args, **kwargs)
# TODO: remove the `.type_check_error_type` property in `DatatypeMixin` and just have mixers
# override a class object!
class TypedDatatypeInstanceConstructionError(TypeCheckError):
  """Raised when a datatype()'s fields fail a type check upon construction."""
  # Installed as the default `type_check_error_type` on datatype()-generated
  # classes (see DataType below), so it is what DataType.__new__ raises.
class DatatypeMixin(AbstractClass):
  """Decouple datatype logic from the way it's created to ease migration to python 3 dataclasses."""
  # NOTE: stacking @classproperty over @abstractmethod declares an abstract
  # class-level attribute that concrete mixers must provide.
  @classproperty
  @abstractmethod
  def type_check_error_type(cls):
    """The exception type to use in make_type_error()."""
  @classmethod
  def make_type_error(cls, msg, *args, **kwargs):
    """A helper method to generate an exception type for type checking errors.
    This method uses `cls.type_check_error_type` to ensure that type checking errors can be caught
    with a reliable exception type. The type returned by `cls.type_check_error_type` should ensure
    that the exception messages are prefixed with enough context to be useful and *not* confusing.
    """
    # The error type's constructor takes the mixing class's name first so
    # messages identify which datatype failed (see TypeCheckError above).
    return cls.type_check_error_type(cls.__name__, msg, *args, **kwargs)
  @abstractmethod
  def copy(self, **kwargs):
    """Return a new object of the same type, replacing specified fields with new values"""
# TODO(#7074): Migrate to python 3 dataclasses!
def datatype(field_decls, superclass_name=None, **kwargs):
  """A wrapper for `namedtuple` that accounts for the type of the object in equality.
  Field declarations can be a string, which declares a field with that name and
  no type checking. Field declarations can also be a tuple `('field_name',
  field_type)`, which declares a field named `field_name` which is type-checked
  at construction. If a type is given, the value provided to the constructor for
  that field must be exactly that type (i.e. `type(x) == field_type`), and not
  e.g. a subclass.
  :param field_decls: Iterable of field declarations.
  :return: A type object which can then be subclassed.
  :raises: :class:`TypeError`
  """
  # Parse the declarations into plain field names plus optional constraints.
  field_names = []
  fields_with_constraints = OrderedDict()
  for maybe_decl in field_decls:
    # ('field_name', type)
    if isinstance(maybe_decl, tuple):
      field_name, type_spec = maybe_decl
      if isinstance(type_spec, type):
        # A bare type means exact-type matching (no subclasses).
        type_constraint = Exactly(type_spec)
      elif isinstance(type_spec, TypeConstraint):
        type_constraint = type_spec
      else:
        raise TypeError(
          "type spec for field '{}' was not a type or TypeConstraint: was {!r} (type {!r})."
          .format(field_name, type_spec, type(type_spec).__name__))
      fields_with_constraints[field_name] = type_constraint
    else:
      # interpret it as a field name without a type to check
      field_name = maybe_decl
    # namedtuple() already checks field uniqueness
    field_names.append(field_name)
  if not superclass_name:
    superclass_name = '_anonymous_namedtuple_subclass'
  namedtuple_cls = namedtuple(superclass_name, field_names, **kwargs)
  # The generated subclass layers type checking and type-aware equality on
  # top of the plain namedtuple base.
  class DataType(namedtuple_cls, DatatypeMixin):
    type_check_error_type = TypedDatatypeInstanceConstructionError
    def __new__(cls, *args, **kwargs):
      # TODO: Ideally we could execute this exactly once per `cls` but it should be a
      # relatively cheap check.
      if not hasattr(cls.__eq__, '_eq_override_canary'):
        raise cls.make_type_error('Should not override __eq__.')
      try:
        this_object = super(DataType, cls).__new__(cls, *args, **kwargs)
      except TypeError as e:
        raise cls.make_type_error(
          "error in namedtuple() base constructor: {}".format(e))
      # TODO: Make this kind of exception pattern (filter for errors then display them all at once)
      # more ergonomic.
      type_failure_msgs = []
      for field_name, field_constraint in fields_with_constraints.items():
        field_value = getattr(this_object, field_name)
        try:
          field_constraint.validate_satisfied_by(field_value)
        except TypeConstraintError as e:
          type_failure_msgs.append(
            "field '{}' was invalid: {}".format(field_name, e))
      if type_failure_msgs:
        raise cls.make_type_error(
          '{} type checking constructor arguments:\n{}'
          .format(pluralize(len(type_failure_msgs), 'error'),
                  '\n'.join(type_failure_msgs)))
      return this_object
    def __eq__(self, other):
      if self is other:
        return True
      # Compare types and fields.
      if type(self) != type(other):
        return False
      # Explicitly return super.__eq__'s value in case super returns NotImplemented
      return super(DataType, self).__eq__(other)
    # We define an attribute on the `cls` level definition of `__eq__` that will allow us to detect
    # that it has been overridden.
    __eq__._eq_override_canary = None
    def __ne__(self, other):
      return not (self == other)
    # NB: in Python 3, whenever __eq__ is overridden, __hash__() must also be
    # explicitly implemented, otherwise Python will raise "unhashable type". See
    # https://docs.python.org/3/reference/datamodel.html#object.__hash__.
    def __hash__(self):
      try:
        return super(DataType, self).__hash__()
      except TypeError:
        # If any fields are unhashable, we want to be able to specify which ones in the error
        # message, but we don't want to slow down the normal __hash__ code path, so we take the time
        # to break it down by field if we know the __hash__ fails for some reason.
        for field_name, value in self._asdict().items():
          try:
            hash(value)
          except TypeError as e:
            raise TypeError("For datatype object {} (type '{}'): in field '{}': {}"
                            .format(self, type(self).__name__, field_name, e))
        # If the error doesn't seem to be with hashing any of the fields, just re-raise the
        # original error.
        raise
    # NB: As datatype is not iterable, we need to override both __iter__ and all of the
    # namedtuple methods that expect self to be iterable.
    def __iter__(self):
      raise self.make_type_error("datatype object is not iterable")
    def _super_iter(self):
      # Private escape hatch: the real tuple iteration, needed by the
      # namedtuple-derived helpers below since __iter__ is disabled.
      return super(DataType, self).__iter__()
    def _asdict(self):
      """Return a new OrderedDict which maps field names to their values.
      Overrides a namedtuple() method which calls __iter__.
      """
      return OrderedDict(zip(self._fields, self._super_iter()))
    def _replace(self, **kwargs):
      """Return a new datatype object replacing specified fields with new values.
      Overrides a namedtuple() method which calls __iter__.
      """
      field_dict = self._asdict()
      field_dict.update(**kwargs)
      return type(self)(**field_dict)
    def copy(self, **kwargs):
      return self._replace(**kwargs)
    # NB: it is *not* recommended to rely on the ordering of the tuple returned by this method.
    def __getnewargs__(self):
      """Return self as a plain tuple. Used by copy and pickle."""
      return tuple(self._super_iter())
    def __repr__(self):
      args_formatted = []
      for field_name in field_names:
        field_value = getattr(self, field_name)
        args_formatted.append("{}={!r}".format(field_name, field_value))
      return '{class_name}({args_joined})'.format(
        class_name=type(self).__name__,
        args_joined=', '.join(args_formatted))
    def __str__(self):
      elements_formatted = []
      for field_name in field_names:
        constraint_for_field = fields_with_constraints.get(field_name, None)
        field_value = getattr(self, field_name)
        if not constraint_for_field:
          elements_formatted.append(
            # TODO: consider using the repr of arguments in this method.
            "{field_name}={field_value}"
            .format(field_name=field_name,
                    field_value=field_value))
        else:
          elements_formatted.append(
            "{field_name}<{type_constraint}>={field_value}"
            .format(field_name=field_name,
                    type_constraint=constraint_for_field,
                    field_value=field_value))
      return '{class_name}({typed_tagged_elements})'.format(
        class_name=type(self).__name__,
        typed_tagged_elements=', '.join(elements_formatted))
  # Return a new type with the given name, inheriting from the DataType class
  # just defined, with an empty class body.
  try: # Python3
    return type(superclass_name, (DataType,), {})
  except TypeError: # Python2
    return type(superclass_name.encode('utf-8'), (DataType,), {})
class EnumVariantSelectionError(TypeCheckError):
  """Raised when an invalid variant for an enum() is constructed or matched against."""
  # Installed as the `type_check_error_type` of enum()-generated classes so
  # bad variants raise a more specific error than plain datatype() failures.
# TODO: look into merging this with pants.util.meta.Singleton!
class ChoicesMixin(AbstractClass):
  """A mixin which declares that the type has a fixed set of possible instances."""
  # NOTE: @classproperty over @abstractproperty declares an abstract
  # class-level attribute that concrete mixers must provide.
  @classproperty
  @abstractproperty
  def all_variants(cls):
    """Return an iterable containing a de-duplicated list of all possible instances of this type."""
def enum(all_values):
"""A datatype which can take on a finite set of values. This method is experimental and unstable.
Any enum subclass can be constructed with its create() classmethod. This method will use the first
element of `all_values` as the default value, but enum classes can override this behavior by
setting `default_value` in the class body.
If `all_values` contains only strings, then each variant is made into an attribute on the
generated enum class object. This allows code such as the following:
class MyResult(enum(['success', 'not-success'])):
pass
MyResult.success # The same as: MyResult('success')
MyResult.not_success # The same as: MyResult('not-success')
Note that like with option names, hyphenated ('-') enum values are converted into attribute names
with underscores ('_').
:param Iterable all_values: A nonempty iterable of objects representing all possible values for
the enum. This argument must be a finite, non-empty iterable with
unique values.
:raises: :class:`ValueError`
"""
# namedtuple() raises a ValueError if you try to use a field with a leading underscore.
field_name = 'value'
# This call to list() will eagerly evaluate any `all_values` which would otherwise be lazy, such
# as a generator.
all_values_realized = list(all_values)
unique_values = OrderedSet(all_values_realized)
if len(unique_values) == 0:
raise ValueError("all_values must be a non-empty iterable!")
elif len(unique_values) < len(all_values_realized):
raise ValueError("When converting all_values ({}) to a set, at least one duplicate "
"was detected. The unique elements of all_values were: {}."
.format(all_values_realized, list(unique_values)))
class ChoiceDatatype(datatype([field_name]), ChoicesMixin):
# Overriden from datatype() so providing an invalid variant is catchable as a TypeCheckError,
# but more specific.
type_check_error_type = EnumVariantSelectionError
@memoized_classproperty
def _singletons(cls):
"""Generate memoized instances of this enum wrapping each of this enum's allowed values.
NB: The implementation of enum() should use this property as the source of truth for allowed
values and enum instances from those values.
"""
return OrderedDict((value, cls._make_singleton(value)) for value in all_values_realized)
@classmethod
def _make_singleton(cls, value):
"""
We convert uses of the constructor to call create(), so we then need to go around __new__ to
bootstrap singleton creation from datatype()'s __new__.
"""
return super(ChoiceDatatype, cls).__new__(cls, value)
@classproperty
def _allowed_values(cls):
"""The values provided to the enum() type constructor, for use in error messages."""
return list(cls._singletons.keys())
def __new__(cls, value):
"""Create an instance of this enum.
:param value: Use this as the enum value. If `value` is an instance of this class, return it,
otherwise it is checked against the enum's allowed values.
"""
if isinstance(value, cls):
return value
if value not in cls._singletons:
raise cls.make_type_error(
"Value {!r} must be one of: {!r}."
.format(value, cls._allowed_values))
return cls._singletons[value]
# TODO: figure out if this will always trigger on primitives like strings, and what situations
# won't call this __eq__ (and therefore won't raise like we want). Also look into whether there
# is a way to return something more conventional like `NotImplemented` here that maintains the
# extra caution we're looking for.
def __eq__(self, other):
"""Redefine equality to avoid accidentally comparing against a non-enum."""
if other is None:
return False
if type(self) != type(other):
raise self.make_type_error(
"when comparing {!r} against {!r} with type '{}': "
"enum equality is only defined for instances of the same enum class!"
.format(self, other, type(other).__name__))
return super(ChoiceDatatype, self).__eq__(other)
# Redefine the canary so datatype __new__ doesn't raise.
__eq__._eq_override_canary = None
# NB: as noted in datatype(), __hash__ must be explicitly implemented whenever __eq__ is
# overridden. See https://docs.python.org/3/reference/datamodel.html#object.__hash__.
def __hash__(self):
return super(ChoiceDatatype, self).__hash__()
def resolve_for_enum_variant(self, mapping):
"""Return the object in `mapping` with the key corresponding to the enum value.
`mapping` is a dict mapping enum variant value -> arbitrary object. All variant values must be
provided.
NB: The objects in `mapping` should be made into lambdas if lazy execution is desired, as this
will "evaluate" all of the values in `mapping`.
"""
keys = frozenset(mapping.keys())
if keys != frozenset(self._allowed_values):
raise self.make_type_error(
"pattern matching must have exactly the keys {} (was: {})"
.format(self._allowed_values, list(keys)))
match_for_variant = mapping[self.value]
return match_for_variant
@classproperty
def all_variants(cls):
"""Iterate over all instances of this enum, in the declared order.
NB: resolve_for_enum_variant() should be used instead of this method for performing
conditional logic based on an enum instance's value.
"""
return cls._singletons.values()
# Python requires creating an explicit closure to save the value on each loop iteration.
accessor_generator = lambda case: lambda cls: cls(case)
for case in all_values_realized:
if _string_type_constraint.satisfied_by(case):
accessor = classproperty(accessor_generator(case))
attr_name = re.sub(r'-', '_', case)
setattr(ChoiceDatatype, attr_name, accessor)
return ChoiceDatatype
# TODO: make this error into an attribute on the `TypeConstraint` class object!
class TypeConstraintError(TypeError):
  """Indicates a :class:`TypeConstraint` violation.

  Raised (directly, or via `TypeConstraint.make_type_constraint_error()`) when a value fails
  validation against a constraint.
  """
class TypeConstraint(AbstractClass):
  """Represents a type constraint.

  Not intended for direct use; instead, use one of :class:`SuperclassesOf`, :class:`Exactly` or
  :class:`SubclassesOf`.
  """

  def __init__(self, description):
    """Create a type constraint described by `description`.

    The constraint as a whole is satisfied if it is satisfied for at least one of its types.

    :param str description: A concise, readable description of what the type constraint
                            represents. Used directly as the __str__ implementation.
    """
    self._description = description

  @abstractmethod
  def satisfied_by(self, obj):
    """Return `True` if the given object satisfies this type constraint.

    :rtype: bool
    """

  def make_type_constraint_error(self, obj, constraint):
    # Build the error eagerly so callers can either raise it or attach it to another error.
    message = ("value {!r} (with type {!r}) must satisfy this type constraint: {}."
               .format(obj, type(obj).__name__, constraint))
    return TypeConstraintError(message)

  # TODO: disallow overriding this method with some form of mixin/decorator along with datatype
  # __eq__!
  def validate_satisfied_by(self, obj):
    """Return `obj` if the object satisfies this type constraint, or raise.

    :raises: `TypeConstraintError` if `obj` does not satisfy the constraint.
    """
    if not self.satisfied_by(obj):
      raise self.make_type_constraint_error(obj, self)
    return obj

  def __ne__(self, other):
    return not (self == other)

  def __str__(self):
    return self._description
class TypeOnlyConstraint(TypeConstraint):
  """A `TypeConstraint` that inspects only the *type* of the object.

  Subclasses of `TypeConstraint` may override `.satisfied_by()` to run arbitrary validation
  against the object itself. This class instead guarantees that `.satisfied_by()` consults
  nothing but the object's `type`, delegating to `.satisfied_by_type()` -- a faster and simpler
  form of checking than the more general `.satisfied_by()`.
  """

  def __init__(self, *types):
    """Create a constraint that matches the given types in some subclass-defined way.

    NB: an implementation should treat the constraint as satisfied when it is satisfied for at
    least one of the given `types`.

    :param type *types: The types this constraint will match in some way.
    """
    if not types:
      raise ValueError('Must supply at least one type')
    non_types = [t for t in types if not isinstance(t, type)]
    if non_types:
      raise TypeError('Supplied types must be types. {!r}'.format(types))
    type_list = (types[0].__name__ if len(types) == 1
                 else ' or '.join(t.__name__ for t in types))
    description = '{}({})'.format(type(self).__name__, type_list)
    super(TypeOnlyConstraint, self).__init__(description=description)
    # Stored as a tuple so it can be handed straight to issubclass() and hashed.
    self._types = tuple(types)

  # TODO(#7114): remove this after the engine is converted to use `TypeId` instead of
  # `TypeConstraint`!
  @property
  def types(self):
    return self._types

  @abstractmethod
  def satisfied_by_type(self, obj_type):
    """Return `True` if objects of `obj_type` satisfy this type constraint.

    :rtype: bool
    """

  def satisfied_by(self, obj):
    return self.satisfied_by_type(type(obj))

  def __hash__(self):
    return hash((type(self), self._types))

  def __eq__(self, other):
    return type(self) == type(other) and self._types == other._types

  def __repr__(self):
    constrained_type = ', '.join(t.__name__ for t in self._types)
    return ('{type_constraint_type}({constrained_type})'
            .format(type_constraint_type=type(self).__name__,
                    constrained_type=constrained_type))
class SuperclassesOf(TypeOnlyConstraint):
  """Objects of the exact type as well as any super-types are allowed."""

  def satisfied_by_type(self, obj_type):
    # Satisfied when any constrained type is the same as, or a base class of, obj_type.
    for constrained_type in self._types:
      if issubclass(constrained_type, obj_type):
        return True
    return False
class Exactly(TypeOnlyConstraint):
  """Only objects of the exact type are allowed."""

  def satisfied_by_type(self, obj_type):
    return obj_type in self._types

  def graph_str(self):
    # A single-type constraint renders as just the type's name; otherwise fall back to repr().
    return self.types[0].__name__ if len(self.types) == 1 else repr(self)
class SubclassesOf(TypeOnlyConstraint):
  """Objects of the exact type as well as any sub-types are allowed."""

  def satisfied_by_type(self, obj_type):
    # issubclass() accepts a tuple of classes: satisfied if obj_type subclasses any of them.
    return issubclass(obj_type, self._types)
# Matches any string-like value: binary (`binary_type`) or text (`text_type`) strings.
_string_type_constraint = SubclassesOf(binary_type, text_type)
class TypedCollection(TypeConstraint):
  """A `TypeConstraint` which accepts a TypeOnlyConstraint and validates a collection."""

  @memoized_classproperty
  def iterable_constraint(cls):
    """The kind of collection inputs accepted by this type constraint.

    :rtype: TypeConstraint
    """
    return SubclassesOf(Iterable)

  # TODO: extend TypeConstraint to specify includes and excludes in a single constraint!
  @classproperty
  def exclude_iterable_constraint(cls):
    """Collection inputs that are *not* accepted, despite being iterable.

    Python strings iterate over their substrings, but only explicit collection types are wanted
    here.

    :rtype: TypeConstraint
    """
    return _string_type_constraint

  def __init__(self, constraint):
    """Create a `TypeConstraint` that validates each member of a collection with `constraint`.

    :param TypeOnlyConstraint constraint: the `TypeConstraint` to apply to each element. This is
                                          currently required to be a `TypeOnlyConstraint` to avoid
                                          complex prototypal type relationships.
    """
    if not isinstance(constraint, TypeOnlyConstraint):
      raise TypeError("constraint for collection must be a {}! was: {}"
                      .format(TypeOnlyConstraint.__name__, constraint))
    self._constraint = constraint
    super(TypedCollection, self).__init__(
      description='{}({})'.format(type(self).__name__, constraint))

  def _is_iterable(self, obj):
    if not self.iterable_constraint.satisfied_by(obj):
      return False
    return not self.exclude_iterable_constraint.satisfied_by(obj)

  # TODO: consider making this a private method of TypeConstraint, as it now duplicates the logic in
  # self.validate_satisfied_by()!
  def satisfied_by(self, obj):
    if not self._is_iterable(obj):
      return False
    return all(self._constraint.satisfied_by(element) for element in obj)

  def make_collection_type_constraint_error(self, base_obj, el):
    base_error = self.make_type_constraint_error(el, self._constraint)
    return TypeConstraintError("in wrapped constraint {} matching iterable object {}: {}"
                               .format(self, base_obj, base_error))

  def validate_satisfied_by(self, obj):
    if not self._is_iterable(obj):
      base_iterable_error = self.make_type_constraint_error(obj, self.iterable_constraint)
      raise TypeConstraintError(
        "in wrapped constraint {}: {}\nNote that objects matching {} are not considered iterable."
        .format(self, base_iterable_error, self.exclude_iterable_constraint))
    for element in obj:
      if not self._constraint.satisfied_by(element):
        raise self.make_collection_type_constraint_error(obj, element)
    return obj

  def __hash__(self):
    return hash((type(self), self._constraint))

  def __eq__(self, other):
    return type(self) == type(other) and self._constraint == other._constraint

  def __repr__(self):
    return ('{type_constraint_type}({constraint!r})'
            .format(type_constraint_type=type(self).__name__,
                    constraint=self._constraint))
# TODO(#6742): Useful type constraints for datatype fields before we start using mypy type hints!
hashable_collection_constraint = Exactly(tuple)
class HashableTypedCollection(TypedCollection):
  """A TypedCollection restricted to tuple inputs, so validated collections stay hashable."""
  iterable_constraint = hashable_collection_constraint
string_type = Exactly(text_type)
string_list = TypedCollection(string_type)
string_optional = Exactly(text_type, type(None))
hashable_string_list = HashableTypedCollection(string_type)
| {
"content_hash": "eb21e046d59e83393e91c135e40e3334",
"timestamp": "",
"source": "github",
"line_count": 628,
"max_line_length": 100,
"avg_line_length": 38.802547770700635,
"alnum_prop": 0.6721930400525279,
"repo_name": "twitter/pants",
"id": "52ebfa3f5edf4e0f74d6e2a34d9d17161711b4f3",
"size": "24515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/util/objects.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5639"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "85294"
},
{
"name": "Java",
"bytes": "498956"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "6700799"
},
{
"name": "Rust",
"bytes": "765598"
},
{
"name": "Scala",
"bytes": "89346"
},
{
"name": "Shell",
"bytes": "94395"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
import logging
import time
from threading import get_ident as get_thread_ident
from typing import Any, Callable, Dict, Tuple, TypeVar, Union
import cothread
from malcolm.compat import get_stack_size
from .errors import TimeoutError
# Type variable for the value produced by a spawned function (see Spawned.get).
T = TypeVar("T")
# Make a module level logger
log = logging.getLogger(__name__)
# Re-export cothread primitives under local names so callers can import them from here
sleep = cothread.Sleep
RLock = cothread.RLock
class Spawned:
    """Internal object keeping track of a spawned function.

    The function runs on a new cothread; completion is signalled through an
    internal Queue, and its return value (or raised exception) is captured in
    ``_result``.
    """

    # Sentinel meaning "the function has not finished yet". Compared with
    # `is`/`is not` (not `==`) so a result object with an exotic __eq__
    # implementation cannot break or mislead the check.
    NO_RESULT = object()

    def __init__(self, func: Callable[..., Any], args: Tuple, kwargs: Dict) -> None:
        self._result_queue = Queue()
        self._result: Union[Any, Exception] = self.NO_RESULT
        self._function = func
        self._args = args
        self._kwargs = kwargs
        cothread.Spawn(self.catching_function, stack_size=get_stack_size())

    def catching_function(self):
        """Run the wrapped function, capturing its return value or exception."""
        try:
            self._result = self._function(*self._args, **self._kwargs)
        except Exception as e:
            log.debug(
                "Exception calling %s(*%s, **%s)",
                self._function,
                self._args,
                self._kwargs,
                exc_info=True,
            )
            self._result = e
        # We finished running the function, so remove the reference to it
        # in case it's stopping garbage collection
        self._function = None
        self._args = None
        self._kwargs = None
        self._result_queue.put(None)

    def wait(self, timeout: Union[float, None] = None) -> None:
        """Block until the function finishes, or `timeout` seconds elapse."""
        # Only one person can wait on this at a time
        if self._result is self.NO_RESULT:
            self._result_queue.get(timeout)

    def ready(self) -> bool:
        """Return True if the spawned result has returned or errored"""
        return self._result is not self.NO_RESULT

    def get(self, timeout: Union[float, None] = None) -> T:
        """Return the result or raise the error the function has produced"""
        self.wait(timeout)
        if isinstance(self._result, Exception):
            raise self._result
        return self._result
class Queue:
    """Threadsafe and cothreadsafe queue with gets in calling thread"""
    def __init__(self):
        # Pick the queue flavour that matches the calling thread: cothread's own
        # scheduler thread uses a plain EventQueue, any other OS thread needs the
        # threaded variant.
        if get_thread_ident() == cothread.scheduler_thread_id:
            self._event_queue = cothread.EventQueue()
        else:
            self._event_queue = cothread.ThreadedEventQueue()
    def get(self, timeout=None):
        """Pop and return the next value, waiting up to `timeout` seconds.

        :param timeout: seconds to wait, or None to wait forever
        :raises TimeoutError: if nothing arrives in time, or `timeout` is negative
        """
        # In cothread's thread
        start = time.time()
        remaining_timeout = timeout
        # NOTE(review): the loop re-waits after a Timedout while wall-clock time
        # remains -- presumably to absorb early wakeups from Wait(); confirm
        # against cothread's EventQueue semantics.
        while remaining_timeout is None or remaining_timeout >= 0:
            try:
                return self._event_queue.Wait(timeout=remaining_timeout)
            except cothread.Timedout:
                if timeout is not None:
                    # Recompute the time budget left from the wall clock.
                    remaining_timeout = start + timeout - time.time()
                    if remaining_timeout < 0:
                        # Budget exhausted: surface our own TimeoutError.
                        raise TimeoutError("Queue().get() timed out")
        raise TimeoutError("Queue().get() given negative timeout")
    def put(self, value):
        """Push `value` onto the queue."""
        # In cothread's thread
        self._event_queue.Signal(value)
| {
"content_hash": "c76593ff2500b9adcb12aa4a1c127b30",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 84,
"avg_line_length": 31.75257731958763,
"alnum_prop": 0.5941558441558441,
"repo_name": "dls-controls/pymalcolm",
"id": "621becca925b5561305509530301bb39773b085b",
"size": "3080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "malcolm/core/concurrency.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "549"
},
{
"name": "Python",
"bytes": "1583458"
},
{
"name": "Shell",
"bytes": "580"
}
],
"symlink_target": ""
} |
from tacker.sol_refactored.api import api_version
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.tests import base
class TestAPIVersion(base.BaseTestCase):
    """Unit tests for api_version.APIVersion construction, validation and comparison."""

    def test_init_null(self):
        # Constructing with no argument yields the special "null" version.
        null_version = api_version.APIVersion()
        self.assertTrue(null_version.is_null())

    def test_init(self):
        supported_versions = ["3.1.4159", "2.0.0", "2.1.0"]
        cases = [
            ("2.0.0", "2.0.0"),
            ("2.1.0", "2.1.0"),
            ("3.1.4159", "3.1.4159"),
            ("2.0.0-impl:foobar", "2.0.0"),
        ]
        for version_input, expected_str in cases:
            parsed = api_version.APIVersion(version_input, supported_versions)
            self.assertEqual(str(parsed), expected_str)

    def test_init_exceptions(self):
        supported_versions = ["2.0.0"]
        # No version string at all.
        self.assertRaises(sol_ex.APIVersionMissing,
                          api_version.APIVersion, None, supported_versions)
        # Malformed suffix after the version number.
        self.assertRaises(sol_ex.InvalidAPIVersionString,
                          api_version.APIVersion,
                          "2.0.0-abc:foobar",
                          ["2.0.0"])
        self.assertRaises(sol_ex.InvalidAPIVersionString,
                          api_version.APIVersion, "0.1.2", supported_versions)
        # Well-formed but not in the supported list.
        self.assertRaises(sol_ex.APIVersionNotSupported,
                          api_version.APIVersion, "9.9.9", supported_versions)

    def test_compare(self):
        supported_versions = ["1.3.0", "1.3.1", "2.0.0"]
        older = api_version.APIVersion("1.3.0", supported_versions)
        middle = api_version.APIVersion("1.3.1", supported_versions)
        newer = api_version.APIVersion("2.0.0", supported_versions)
        self.assertTrue(older < middle)
        self.assertTrue(newer > middle)

    def test_matches(self):
        supported_versions = ["1.3.0", "1.3.1", "2.0.0"]
        vers = api_version.APIVersion("1.3.1")
        # Null bounds on either side behave as open-ended.
        self.assertTrue(
            vers.matches(api_version.APIVersion(), api_version.APIVersion()))
        self.assertTrue(
            vers.matches(api_version.APIVersion("1.3.0", supported_versions),
                         api_version.APIVersion()))
        self.assertTrue(
            vers.matches(api_version.APIVersion(),
                         api_version.APIVersion("2.0.0", supported_versions)))
        self.assertTrue(
            vers.matches(api_version.APIVersion("1.3.0", supported_versions),
                         api_version.APIVersion("2.0.0", supported_versions)))
| {
"content_hash": "eab695879ba4f42454fb8858ca67028a",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 78,
"avg_line_length": 40.725806451612904,
"alnum_prop": 0.5552475247524753,
"repo_name": "openstack/tacker",
"id": "3a96e869fdc7f696610e8b18dc75f34dfb6546d6",
"size": "3187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tacker/tests/unit/sol_refactored/api/test_api_version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "10809"
},
{
"name": "Mako",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "7648075"
},
{
"name": "Ruby",
"bytes": "2841"
},
{
"name": "Shell",
"bytes": "61750"
},
{
"name": "Smarty",
"bytes": "3624"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import random
import string
from unittest import TestCase
from mock import patch
from datetime import datetime
from elasticsearch import Elasticsearch
from elasticdata.manager import (
without,
group,
PersistedEntity,
EntityManager,
UPDATE,
REMOVE,
ADD,
RepositoryError,
EntityNotFound
)
from elasticdata import Type, TimestampedType
class ManagerTestType(Type):
    """Fixture Type with two field scopes, used by the scope-related tests below."""
    class Meta:
        # 'all' exposes both fields; 'small' exposes only 'foo'.
        scopes = {
            'all': ('foo', 'bar'),
            'small': ('foo', )
        }
class ManagerCallbacksTestType(Type):
    """Fixture Type whose lifecycle hooks leave observable traces.

    pre_create/pre_update copy the current 'foo' value into a marker field so tests
    can verify the hook ran; the remaining hooks are no-ops (the tests patch them
    with mocks to assert they were invoked).
    """
    def pre_create(self, em):
        self['pre_create'] = self.get('foo', None)
    def pre_update(self, em):
        self['pre_update'] = self.get('foo', None)
    def pre_delete(self, em):
        pass
    def post_create(self, em):
        pass
    def post_update(self, em):
        pass
    def post_delete(self, em):
        pass
class HelpersTestCase(TestCase):
    """Tests for the `without` and `group` helper functions."""

    def test_without(self):
        flat = {'key1': 1, 'key2': 2, 'key3': 3}
        self.assertDictEqual({'key1': 1, 'key2': 2}, without(['key3'], flat))
        nested = {'key1': 1, 'key2': 2, 'key3': {'key4': 4}}
        # move_up hoists the listed sub-keys of a removed key into the result.
        self.assertDictEqual(
            {'key1': 1, 'key2': 2, 'key4': 4},
            without(['key3'], nested, move_up={'key3': ['key4']}))

    def test_group(self):
        def by_type(item):
            return item['type']
        items = [{'type': 'a'}, {'type': 'a'}, {'type': 'b'}]
        grouped = group(items, by_type)
        self.assertEqual(len(grouped['a']), 2)
        self.assertEqual(len(grouped['b']), 1)
        self.assertListEqual(sorted(grouped.keys()), ['a', 'b'])
class PersistedEntityTestCase(TestCase):
    """Tests for the bulk statements PersistedEntity generates in each lifecycle state."""
    def test_new_entity(self):
        # A brand-new entity yields a 'create' bulk statement.
        e = ManagerTestType({'foo': 'bar'})
        pe = PersistedEntity(e)
        self.assertIs(e._persisted_entity, pe)
        self.assertTrue(pe.is_action_needed())
        self.assertDictEqual(pe.stmt, {'_index': 'default', '_source': {'foo': 'bar'},
                                       '_type': 'manager_test_type', '_op_type': 'create'})
        # A provided id is carried through as '_id'.
        e = ManagerTestType({'foo': 'bar', 'id': 1})
        pe = PersistedEntity(e)
        self.assertTrue(pe.is_action_needed())
        self.assertDictEqual(pe.stmt,
                             {'_index': 'default', '_source': {'foo': 'bar'}, '_type': 'manager_test_type',
                              '_id': 1, '_op_type': 'create'})
        # '_parent' is propagated as well, and excluded from the source document.
        e = ManagerTestType({'foo': 'bar', 'id': 1, '_parent': '2'})
        pe = PersistedEntity(e)
        self.assertTrue(pe.is_action_needed())
        self.assertDictEqual(pe.stmt,
                             {'_index': 'default', '_source': {'foo': 'bar'},
                              '_type': 'manager_test_type', '_id': 1, '_parent': '2', '_op_type': 'create'})
        self.assertEqual(e.diff, {'foo': 'bar'})
    def test_update_entity(self):
        e = ManagerTestType({'foo': 'bar', 'id': 1})
        pe = PersistedEntity(e, state=UPDATE)
        # Nothing changed yet, so no statement is needed.
        self.assertFalse(pe.is_action_needed())
        e['bar'] = 'baz'
        self.assertTrue(pe.is_action_needed())
        self.assertDictEqual(pe.stmt, {
            '_id': 1,
            '_index': 'default',
            '_op_type': 'update',
            '_type': 'manager_test_type',
            'doc': {'bar': 'baz'}
        })
        pe.reset_state()
        self.assertFalse(pe.is_action_needed())
        e['foo'] = 'baz'
        self.assertTrue(pe.is_action_needed())
        self.assertDictEqual(pe.stmt, {
            '_id': 1,
            '_index': 'default',
            '_op_type': 'update',
            '_type': 'manager_test_type',
            'doc': {'foo': 'baz'}
        })
        pe.reset_state()
        self.assertFalse(pe.is_action_needed())
        # Deleting a key is emitted as a None value in the partial doc.
        del e['bar']
        self.assertTrue(pe.is_action_needed())
        self.assertDictEqual(pe.stmt, {
            '_id': 1,
            '_index': 'default',
            '_op_type': 'update',
            '_type': 'manager_test_type',
            'doc': {'bar': None}
        })
        pe.reset_state()
        self.assertFalse(pe.is_action_needed())
        # Re-assigning the same value is not a change.
        e['foo'] = 'baz'
        self.assertFalse(pe.is_action_needed())
    def test_delete_entity(self):
        # Without an id there is nothing to delete server-side.
        e = ManagerTestType({'foo': 'bar'})
        pe = PersistedEntity(e, state=REMOVE)
        self.assertFalse(pe.is_action_needed())
        e = ManagerTestType({'foo': 'bar', 'id': '1'})
        pe = PersistedEntity(e, state=REMOVE)
        self.assertTrue(pe.is_action_needed())
        self.assertDictEqual(pe.stmt, {
            '_id': '1',
            '_index': 'default',
            '_op_type': 'delete',
            '_type': 'manager_test_type'
        })
class EntityManagerTestCase(TestCase):
    """Integration tests for EntityManager against a local Elasticsearch instance.

    setUpClass picks a random index name and tearDown deletes it, so each test
    method starts from an empty index. The `em` property returns a *new* manager
    on every access, letting a test use one manager as the writer and another as
    an independent reader session.
    """
    @classmethod
    def setUpClass(cls):
        super(EntityManagerTestCase, cls).setUpClass()
        # Random index name so repeated/concurrent runs don't collide.
        cls._index = ''.join(random.choice(string.ascii_lowercase) for _ in range(10))
    def tearDown(self):
        es = Elasticsearch()
        # ignore=[404]: the index may not exist if the test never flushed anything.
        es.indices.delete(index=self._index, ignore=[404])
    @property
    def em(self):
        return EntityManager(index=self._index)
    def test_persist(self):
        em = self.em
        e = ManagerTestType({'foo': 'bar'})
        em.persist(e)
        self.assertEqual(len(em._registry), 1)
        e2 = ManagerTestType({'bar': 'baz'})
        em.persist(e2)
        self.assertEqual(len(em._registry), 2)
        # Persisting the same entity twice must not register it again.
        em.persist(e)
        self.assertEqual(len(em._registry), 2)
        self.assertRaises(TypeError, em.persist, dict())
    def test_remove(self):
        em = self.em
        e = ManagerTestType({'foo': 'bar'})
        em.persist(e)
        self.assertEqual(list(em._registry.values())[0].state, ADD)
        em.remove(e)
        self.assertEqual(list(em._registry.values())[0].state, REMOVE)
    def test_flush(self):
        em = self.em
        e = ManagerTestType({'foo': 'bar'})
        em.persist(e)
        e2 = ManagerTestType({'bar': 'baz'})
        em.persist(e2)
        em.flush()
        # Flush assigns ids and transitions every entity into the UPDATE state.
        self.assertTrue('id' in e)
        self.assertTrue('id' in e2)
        self.assertTrue(all(map(lambda pe: pe.state == UPDATE, em._registry.values())))
        e2['bar'] = 'foo'
        self.assertTrue(all(map(lambda pe: pe.state == UPDATE, em._registry.values())))
        em.flush()
    def test_find(self):
        em = self.em
        em2 = self.em
        e = ManagerTestType({'foo': 'bar'})
        em.persist(e)
        em.flush()
        fe = em2.find(e['id'], ManagerTestType)
        self.assertDictEqual(e.to_representation(), fe.to_representation())
        self.assertRaises(EntityNotFound, em2.find, 'non-exists', ManagerTestType)
    def test_find_updated(self):
        em = self.em
        em2 = self.em
        e = ManagerTestType({'foo': 'bar'})
        em.persist(e)
        em.flush()
        e['bar'] = 'baz'
        em.flush()
        fe = em2.find(e['id'], ManagerTestType)
        self.assertDictEqual(e.to_representation(), fe.to_representation())
    def test_find_many(self):
        em = self.em
        em2 = self.em
        e = ManagerTestType({'foo': 'bar'})
        e2 = ManagerTestType({'bar': 'baz'})
        em.persist(e)
        em.persist(e2)
        em.flush()
        fe = em2.find_many([e['id'], e2['id']], ManagerTestType)
        self.assertDictEqual(e.to_representation(), fe[0].to_representation())
        self.assertDictEqual(e2.to_representation(), fe[1].to_representation())
        # Passing ids as a set must return the same entities (order-independent).
        fe2 = em2.find_many({e2['id'], e['id']}, ManagerTestType)
        def _ids_set(ents):
            return {en['id'] for en in ents}
        self.assertEqual(_ids_set(fe), _ids_set(fe2))
    def test_query(self):
        em = self.em
        em2 = self.em
        e = ManagerTestType({'foo': 'value', 'bar': 'baz', 'baz': 'foo'})
        e2 = ManagerTestType({'foo': 'value', 'bar': 'baz', 'baz': 'foo'})
        e3 = ManagerTestType({'foo': 'value', 'bar': 'baz', 'baz': 'foo'})
        em.persist(e)
        em.persist(e2)
        em.persist(e3)
        em.flush()
        # Refresh so the flushed documents are immediately visible to search.
        em.get_client().indices.refresh(index=self._index)
        fe, meta = em2.query({'query': {'term': {'foo': {'value': 'value'}}}}, ManagerTestType)
        self.assertEqual(len(fe), 3)
    def test_query_one(self):
        em = self.em
        em2 = self.em
        e = ManagerTestType({'foo': 'bar'})
        em.persist(e)
        em.flush()
        em.get_client().indices.refresh(index=self._index)
        fe = em2.query_one({'query': {'term': {'foo': {'value': 'bar'}}}}, ManagerTestType)
        self.assertEqual(fe['id'], e['id'])
        # query_one must raise when the query matches more than one document.
        e2 = ManagerTestType({'foo': 'bar'})
        em.persist(e2)
        em.flush()
        em.get_client().indices.refresh(index=self._index)
        self.assertRaises(RepositoryError, em2.query_one, {'query': {'term': {'foo': {'value': 'bar'}}}}, ManagerTestType)
    def test_find_scope(self):
        em = self.em
        em2 = self.em
        e = ManagerTestType({'foo': 'bar', 'bar': 'baz'})
        em.persist(e)
        em.flush()
        fe = em2.find(e['id'], ManagerTestType, scope='all')
        self.assertDictEqual({'foo': 'bar', 'bar': 'baz'}, fe.to_representation())
        fe2 = em2.find(e['id'], ManagerTestType, scope='small')
        self.assertDictEqual({'foo': 'bar'}, fe2.to_representation())
        # Real scope test
        for key in ['bar', 'baz']:
            self.assertFalse(key in fe2.keys())
    def test_find_many_scope(self):
        em = self.em
        em2 = self.em
        e = ManagerTestType({'foo': 'bar', 'bar': 'baz'})
        e2 = ManagerTestType({'foo': 'bar', 'bar': 'baz'})
        em.persist(e)
        em.persist(e2)
        em.flush()
        fe = em2.find_many([e['id'], e2['id']], ManagerTestType, scope='small')
        self.assertDictEqual({'foo': 'bar'}, fe[0].to_representation())
        self.assertDictEqual({'foo': 'bar'}, fe[1].to_representation())
        # Real scope test
        for key in ['bar', 'baz']:
            for e in fe:
                self.assertFalse(key in e.keys())
    def test_query_scope(self):
        em = self.em
        em2 = self.em
        e = ManagerTestType({'foo': 'value', 'bar': 'baz', 'baz': 'foo'})
        e2 = ManagerTestType({'foo': 'value', 'bar': 'baz', 'baz': 'foo'})
        e3 = ManagerTestType({'foo': 'value', 'bar': 'baz', 'baz': 'foo'})
        em.persist(e)
        em.persist(e2)
        em.persist(e3)
        em.flush()
        em.get_client().indices.refresh(index=self._index)
        fe, meta = em2.query({'query': {'term': {'foo': {'value': 'value'}}}}, ManagerTestType, scope='small')
        self.assertDictEqual({'foo': 'value'}, fe[0].to_representation())
        self.assertDictEqual({'foo': 'value'}, fe[1].to_representation())
        self.assertDictEqual({'foo': 'value'}, fe[2].to_representation())
        # Real scope test
        for key in ['bar', 'baz']:
            for e in fe:
                self.assertFalse(key in e.keys())
    def test_query_one_scope(self):
        em = self.em
        em2 = self.em
        e = ManagerTestType({'foo': 'value', 'bar': 'baz', 'baz': 'foo'})
        em.persist(e)
        em.flush()
        em.get_client().indices.refresh(index=self._index)
        fe = em2.query_one({'query': {'term': {'foo': {'value': 'value'}}}}, ManagerTestType, scope='small')
        self.assertDictEqual({'foo': 'value'}, fe.to_representation())
        # Real scope test
        for key in ['bar', 'baz']:
            self.assertFalse(key in fe.keys())
    def test_timestamps(self):
        em = self.em
        e = TimestampedType({'foo': 'bar'})
        em.persist(e)
        em.flush()
        # The first flush stamps identical created_at/updated_at datetimes.
        self.assertIsInstance(e['created_at'], datetime)
        self.assertIsInstance(e['updated_at'], datetime)
        self.assertEqual(e['created_at'], e['updated_at'])
        e['baz'] = 'bar'
        em.flush()
        # Subsequent flushes only advance updated_at.
        self.assertTrue(e['created_at'] < e['updated_at'])
        values = e.to_representation()
        self.assertIsInstance(values['created_at'], datetime)
        self.assertIsInstance(values['updated_at'], datetime)
    def test_pre_create_callback(self):
        em = self.em
        e = ManagerCallbacksTestType({'foo': 'bar'})
        em.persist(e)
        em.flush()
        # The marker written by pre_create must have been stored with the document.
        em2 = self.em
        e2 = em2.find(e['id'], ManagerCallbacksTestType)
        self.assertEqual(e2['pre_create'], 'bar')
    def test_post_create_callback(self):
        with patch.object(ManagerCallbacksTestType, 'post_create') as mock:
            em = self.em
            e = ManagerCallbacksTestType({'foo': 'bar'})
            em.persist(e)
            em.flush()
            mock.assert_called_with(em)
    def test_pre_update_callback(self):
        em = self.em
        e = ManagerCallbacksTestType({'foo': 'bar'})
        em.persist(e)
        em.flush()
        e['bar'] = 'baz'
        em.flush()
        em2 = self.em
        e2 = em2.find(e['id'], ManagerCallbacksTestType)
        self.assertEqual(e2['pre_update'], 'bar')
    def test_post_update_callback(self):
        with patch.object(ManagerCallbacksTestType, 'post_update') as mock:
            em = self.em
            e = ManagerCallbacksTestType({'foo': 'bar'})
            em.persist(e)
            em.flush()
            e['bar'] = 'baz'
            em.flush()
            mock.assert_called_with(em)
    def test_pre_delete_callback(self):
        with patch.object(ManagerCallbacksTestType, 'pre_delete') as mock:
            em = self.em
            e = ManagerCallbacksTestType({'foo': 'bar'})
            em.persist(e)
            em.flush()
            em.remove(e)
            em.flush()
            mock.assert_called_with(em)
    def test_post_delete_callback(self):
        with patch.object(ManagerCallbacksTestType, 'post_delete') as mock:
            em = self.em
            e = ManagerCallbacksTestType({'foo': 'bar'})
            em.persist(e)
            em.flush()
            em.remove(e)
            em.flush()
            mock.assert_called_with(em)
    def test_clear(self):
        em = self.em
        e = ManagerTestType({'foo': 'bar'})
        em.persist(e)
        self.assertEqual(len(em._registry), 1)
        em.clear()
        self.assertEqual(len(em._registry), 0)
    def test_highlight_query(self):
        em = self.em
        em2 = self.em
        e = ManagerTestType({'foo': 'bar foo'})
        em.persist(e)
        em.flush()
        em.get_client().indices.refresh(index=self._index)
        fe, meta = em2.query({'query': {'match': {'foo': 'bar'}}, 'highlight': {'fields': {'foo': {}}}}, ManagerTestType)
        self.assertDictEqual(fe[0].highlight, {'foo': ['<em>bar</em> foo']})
| {
"content_hash": "5742cc07bbba930b71f142d1c95d0515",
"timestamp": "",
"source": "github",
"line_count": 412,
"max_line_length": 122,
"avg_line_length": 35.567961165048544,
"alnum_prop": 0.5350757472362495,
"repo_name": "sicarrots/elasticdata",
"id": "82ba623a4ca8d41bad66c6e8db6627fb300c0354",
"size": "14678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "38670"
}
],
"symlink_target": ""
} |
# Scan every file listed in fileList.txt for 4-digit years between 1900 and 2017
# and write, for each line that contains at least one year, the *last* year found
# on that line to year.txt (one per output line). Digits that appear while inside
# a quoted span (toggled by any quote character in the scanning window) are ignored.
START_YEAR = 1900
END_YEAR = 2017

# Use context managers so file handles are closed even if a read fails.
with open("fileList.txt", "r") as file_list:
    file_names = file_list.readlines()

with open("year.txt", "w") as outfile:
    for raw_name in file_names:
        with open(raw_name.rstrip(), "r") as one_file:
            content = one_file.readlines()
        for line in content:
            years_found = []
            in_name = False  # True while scanning inside a quoted name
            # Slide a 4-character window across the line.
            for k in range(len(line) - 3):
                window = line[k:k + 4]
                if '"' in window or '\'' in window:
                    # A quote toggles the "inside a quoted name" state; such
                    # windows are never treated as years.
                    in_name = not in_name
                    continue
                if window.isdigit() and not in_name:
                    year = int(window)
                    if START_YEAR <= year <= END_YEAR:
                        years_found.append(str(year))
            if years_found:
                outfile.write(years_found[-1] + '\n')
| {
"content_hash": "6ff069c1b43b0f8fc1329ba71f33b281",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 84,
"avg_line_length": 30.838709677419356,
"alnum_prop": 0.5366108786610879,
"repo_name": "BIDS-collaborative/cega-trace",
"id": "ac9e6453120f0620416aa039185b6e543ca37e90",
"size": "956",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "findyear.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "53570"
},
{
"name": "R",
"bytes": "2611"
},
{
"name": "TeX",
"bytes": "12006"
}
],
"symlink_target": ""
} |
from random import shuffle
from deck import Deck
from copy import copy
from events import raise_strategy_card_events, raise_strategy_deck_events
from card_decoder.cards import Acquirable
from src.strategies.strategy import Strategy
# Number of apprentices and militia in each player's starting deck
NUM_APPRENTICE = 8
NUM_MILITIA = 2
# Number of cards drawn into a player's hand (used when dealing the opening hand)
HAND_SIZE = 5
def create_initial_player_deck(card_dictionary):
  """Build the standard starting deck: NUM_APPRENTICE Apprentices plus NUM_MILITIA Militia."""
  starting_cards = [card_dictionary.find_card("Apprentice")] * NUM_APPRENTICE
  starting_cards += [card_dictionary.find_card("Militia")] * NUM_MILITIA
  return Deck(starting_cards)
class Player(object):
  def __init__(self, board, strategy, player_index, card_dictionary):
    """Set up a player: empty piles, zeroed counters, a starting deck and an opening hand.

    :param board: the game board this player belongs to
    :param strategy: the Strategy object that makes this player's decisions
    :param player_index: this player's index on the board
    :param card_dictionary: card lookup used to build the starting deck
    """
    self.board = board
    self.player_index = player_index
    self.strategy = strategy
    # Per-turn resource counters.
    self.runes_remaining = 0
    self.power_remaining = 0
    # Honor tokens held directly; card honor is added on top in compute_honor().
    self.honor = 0
    # Card piles, all initially empty.
    self.played_cards = []
    self.acquired_cards = []
    self.discard = []
    self.constructs = []
    self.moves = []
    self.clear_per_turn_state()
    self.deck = create_initial_player_deck(card_dictionary)
    # Deal the opening hand.
    self.hand = []
    for i in xrange(HAND_SIZE):
      self.draw_card()
def all_cards(self):
return (self.deck.cards + self.played_cards + self.acquired_cards +
self.discard + self.constructs)
  def clear_per_turn_state(self):
    """Reset all state that only lasts for a single turn."""
    # keys in this dictionary should only be at most the number of that card
    # (keys are card names)
    self.num_times_construct_activated = {}
    # Rune discounts earned this turn toward construct purchases (see can_acquire_card).
    self.runes_toward_constructs = 0
    self.runes_toward_mechana_constructs = 0
    self.honor_for_lifebound_hero = 0
    self.should_take_additional_turn = False
    self.honor_for_defeating_monster = 0
    self.played_constructs = []
def compute_honor(self):
all_cards = self.all_cards()
assert all(isinstance(card, Acquirable) for card in all_cards)
return self.honor + sum(card.honor for card in all_cards)
# This is important to do because this will be used by the player when trying
# to decide what cards to play. We need to copy it because playing cards
# involves removing them from the hand, and we don't want to get in a weird
# state where we're modifying the hand while iterating on it.
def get_hand(self):
return copy(self.hand)
# This attmepts to draw a card from the deck (putting it into the hand).
# If there are no cards in the deck:
# If there are cards in the discard pile, it shuffles those into the deck
# Otherwise, it does nothing
def draw_card(self):
if not self.deck.has_cards_left():
if len(self.discard) == 0:
return
raise_strategy_deck_events(self.board, 'deck_finished')
self.deck.shuffle_in_cards(self.discard)
self.discard = []
self.hand.append(self.deck.get_next_card())
# Raises an exception if the card wasn't found. Returns the card
# that it removed.
def _remove_card_from_pile(self, pile_name, pile, card_name):
cards = [card for card in pile if card.name == card_name]
if len(cards) == 0:
pile_str = ', '.join(card.name for card in pile)
raise Exception('Card %s not found in %s (%s)' % (
card_name, pile_name, pile_str))
pile.remove(cards[0])
return cards[0]
def remove_card_from_hand(self, card_name):
return self._remove_card_from_pile("hand", self.hand, card_name)
def remove_card_from_discard(self, card_name):
return self._remove_card_from_pile("discard", self.discard, card_name)
def remove_card_from_constructs(self, card_name):
return self._remove_card_from_pile("constructs", self.constructs, card_name)
def remove_card_from_played_cards(self, card_name):
return self._remove_card_from_pile("played cards", self.played_cards, card_name)
def remove_card_from_acquired_cards(self, card_name):
return self._remove_card_from_pile("acquired cards", self.acquired_cards, card_name)
def acquire(self, card):
# Similar to why we don't play cards into the discard (see below)
self.acquired_cards.append(card)
def can_acquire_card(self, card):
rune_cost = card.cost
if self.considers_card_mechana_construct(card):
rune_cost -= self.runes_toward_mechana_constructs
if card.is_construct():
rune_cost -= self.runes_toward_constructs
return self.runes_remaining >= rune_cost
# Raises an exception if there aren't enough runes and credits to pay for it
def pay_for_acquired_card(self, card):
assert self.can_acquire_card(card)
cost = card.cost
if self.considers_card_mechana_construct(card):
paying = min(self.runes_toward_mechana_constructs, cost)
self.runes_toward_mechana_constructs -= paying
cost -= paying
if card.is_construct():
paying = min(self.runes_toward_constructs, cost)
self.runes_toward_constructs -= paying
cost -= paying
self.runes_remaining -= cost
assert self.runes_remaining >= 0, "Did not have enough runes to acquire %s" % (
card.name)
def has_played_mechana_construct(self):
return any(self.considers_card_mechana_construct(card) for card in self.played_constructs)
# Note that the cards don't go immediately into the discard. This would
# allow certain strategies that cycle through the deck several times
# in a turn. Instead, we hold the cards until the end of the turn, at
# which point we add them to the discard pile.
def play_card(self, card_name):
card = self.remove_card_from_hand(card_name)
if card.is_construct():
self.constructs.append(card)
self.played_constructs.append(card)
raise_strategy_card_events(self.board, 'construct_placed', card_name)
else:
self.played_cards.append(card)
def can_activate_construct(self, card_name):
count_of_construct = sum(1 for card in self.constructs
if card.name == card_name)
assert count_of_construct > 0
return card_name not in self.num_times_construct_activated or \
self.num_times_construct_activated[card_name] < count_of_construct
# Doesn't actually perform the effect. Just ensures the player can activate
def activate_construct(self, card_name):
count_of_construct = sum(1 for card in self.constructs
if card.name == card_name)
assert count_of_construct > 0, ("Player doesn't have %s in play, but tried" +
" to activate it" % card_name)
if card_name not in self.num_times_construct_activated:
self.num_times_construct_activated[card_name] = 0
assert self.num_times_construct_activated[card_name] < count_of_construct, ("Player" +
" has already activated %s as many times as he can (%d)" % (
card_name, count_of_construct))
self.num_times_construct_activated[card_name] += 1
# Move a given card to the discard pile. Raises an exception if the card
# wasn't found.
def discard_card(self, card_name):
card = self.remove_card_from_hand(card_name)
self.discard.append(card)
def has_hedron_in_play(self):
return any(construct.name == "Hedron Link Device" for construct in self.constructs)
def considers_card_mechana_construct(self, card):
return card.card_type == "Mechana Construct" or (
card.card_type == "Construct" and self.has_hedron_in_play())
# Discard all leftover cards and draw HAND_SIZE new cards (shuffling if need be).
# Also reset runes and power to 0.
def end_turn(self):
self.runes_remaining = 0
self.power_remaining = 0
self.discard.extend(self.hand)
self.discard.extend(self.played_cards)
self.discard.extend(self.acquired_cards)
self.hand = []
self.played_cards = []
self.acquired_cards = []
self.clear_per_turn_state()
for i in xrange(HAND_SIZE):
self.draw_card()
| {
"content_hash": "81898b21312fcebb382a04c3c4dc14ea",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 94,
"avg_line_length": 34.527027027027025,
"alnum_prop": 0.6919765166340509,
"repo_name": "obi1kenobi/ascension-bot",
"id": "78eeb8df84ad5c42930b5d1a2e0bd3c9f87cbf82",
"size": "7665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/player.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "145519"
}
],
"symlink_target": ""
} |
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.html import *
from gluon.storage import Storage
from s3.s3utils import s3_fullname
T = current.T
settings = current.deployment_settings
"""
Template settings for US
All settings which are to configure a specific template are located here
Deployers should ideally not need to edit any other files outside of their template folder
"""
# Pre-Populate
settings.base.prepopulate = ["NYC"]
settings.base.system_name = T("NYC Prepared")
settings.base.system_name_short = T("NYC Prepared")
# Theme (folder to use for views/layout.html)
settings.base.theme = "NYC"
settings.ui.formstyle_row = "bootstrap"
settings.ui.formstyle = "bootstrap"
settings.msg.parser = "NYC"
# Uncomment to Hide the language toolbar
settings.L10n.display_toolbar = False
# Default timezone for users
settings.L10n.utc_offset = "UTC -0500"
# Uncomment these to use US-style dates in English
settings.L10n.date_format = "%m-%d-%Y"
# Start week on Sunday
settings.L10n.firstDOW = 0
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Default Country Code for telephone numbers
settings.L10n.default_country_code = 1
# Enable this to change the label for 'Mobile Phone'
settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
settings.ui.label_postcode = "ZIP Code"
# PDF to Letter
settings.base.paper_size = T("Letter")
# Restrict the Location Selector to just certain countries
# NB This can also be over-ridden for specific contexts later
# e.g. Activities filtered to those of parent Project
settings.gis.countries = ["US"]
settings.fin.currencies = {
"USD" : T("United States Dollars"),
}
settings.L10n.languages = OrderedDict([
("en", "English"),
("es", "Español"),
])
# Authentication settings
# These settings should be changed _after_ the 1st (admin) user is
# registered in order to secure the deployment
# Should users be allowed to register themselves?
settings.security.self_registration = "index"
# Do new users need to verify their email address?
settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
settings.auth.registration_requires_approval = True
# Always notify the approver of a new (verified) user, even if the user is automatically approved
#settings.auth.always_notify_approver = False
# Uncomment this to request the Mobile Phone when a user registers
settings.auth.registration_requests_mobile_phone = True
# Uncomment this to request the Organisation when a user registers
settings.auth.registration_requests_organisation = True
# Uncomment this to request the Site when a user registers
#settings.auth.registration_requests_site = True
# Roles that newly-registered users get automatically
#settings.auth.registration_roles = { 0: ["comms_dispatch"]}
#settings.auth.registration_link_user_to = {"staff":T("Staff"),
# #"volunteer":T("Volunteer")
# }
settings.auth.registration_link_user_to_default = "staff"
settings.security.policy = 5 # Controller, Function & Table ACLs
settings.ui.update_label = "Edit"
settings.ui.label_attachments = "Media"
# Uncomment to disable checking that LatLons are within boundaries of their parent
#settings.gis.check_within_parent_boundaries = False
# Uncomment to show created_by/modified_by using Names not Emails
settings.ui.auth_user_represent = "name"
# Record Approval
settings.auth.record_approval = True
settings.auth.record_approval_required_for = ["org_organisation",
]
# -----------------------------------------------------------------------------
# Audit
def audit_write(method, tablename, form, record, representation):
    """Selective audit hook: record writes only when made by a logged-in
    user and only for user-visible resources."""
    # Skip anonymous writes (e.g. prepop)
    if not current.auth.user:
        return False
    # Only these user-visible resources get a normal Audit entry
    audited_tables = ("cms_post",
                      "org_facility",
                      "org_organisation",
                      "req_req",
                      )
    return tablename in audited_tables
settings.security.audit_write = audit_write
# -----------------------------------------------------------------------------
# CMS
# Uncomment to use Bookmarks in Newsfeed
settings.cms.bookmarks = True
# Uncomment to use have Filter form in Newsfeed be open by default
settings.cms.filter_open = True
# Uncomment to adjust filters in Newsfeed when clicking on locations instead of opening the profile page
settings.cms.location_click_filters = True
# Uncomment to use organisation_id instead of created_by in Newsfeed
settings.cms.organisation = "post_organisation.organisation_id"
# Uncomment to use org_group_id in Newsfeed
settings.cms.organisation_group = "post_organisation_group.group_id"
# Uncomment to use person_id instead of created_by in Newsfeed
settings.cms.person = "person_id"
# Uncomment to use Rich Text editor in Newsfeed
settings.cms.richtext = True
# Uncomment to show Links in Newsfeed
settings.cms.show_links = True
# Uncomment to show Tags in Newsfeed
settings.cms.show_tags = True
# Uncomment to show post Titles in Newsfeed
settings.cms.show_titles = True
# -----------------------------------------------------------------------------
# Inventory Management
# Uncomment to customise the label for Facilities in Inventory Management
settings.inv.facility_label = "Facility"
# Uncomment if you need a simpler (but less accountable) process for managing stock levels
#settings.inv.direct_stock_edits = True
# Uncomment to call Stock Adjustments, 'Stock Counts'
settings.inv.stock_count = True
# Uncomment to not track pack values
settings.inv.track_pack_values = False
settings.inv.send_show_org = False
# Types common to both Send and Receive
settings.inv.shipment_types = {
1: T("Other Warehouse")
}
settings.inv.send_types = {
#21: T("Distribution")
}
settings.inv.send_type_default = 1
settings.inv.item_status = {
#0: current.messages["NONE"],
#1: T("Dump"),
#2: T("Sale"),
#3: T("Reject"),
#4: T("Surplus")
}
# -----------------------------------------------------------------------------
# Organisations
#
# Enable the use of Organisation Groups
settings.org.groups = "Network"
# Make Services Hierarchical
settings.org.services_hierarchical = True
# Set the label for Sites
settings.org.site_label = "Facility"
#settings.org.site_label = "Location"
# Uncomment to show the date when a Site (Facilities-only for now) was last contacted
settings.org.site_last_contacted = True
# Enable certain fields just for specific Organisations
# empty list => disabled for all (including Admin)
#settings.org.dependent_fields = { \
# "pr_person_details.mother_name" : [],
# "pr_person_details.father_name" : [],
# "pr_person_details.company" : [],
# "pr_person_details.affiliations" : [],
# "vol_volunteer.active" : [],
# "vol_volunteer_cluster.vol_cluster_type_id" : [],
# "vol_volunteer_cluster.vol_cluster_id" : [],
# "vol_volunteer_cluster.vol_cluster_position_id" : [],
# }
# Uncomment to use an Autocomplete for Site lookup fields
settings.org.site_autocomplete = True
# Extra fields to search in Autocompletes & display in Representations
settings.org.site_autocomplete_fields = ("organisation_id$name",
"location_id$addr_street",
)
# Uncomment to hide inv & req tabs from Sites
#settings.org.site_inv_req_tabs = True
# -----------------------------------------------------------------------------
def facility_marker_fn(record):
    """
        Function to decide which Marker to use for Facilities Map
        - picks a base marker from the facility's type(s), then colour-codes
          it by the facility's open-request priority (record.reqs)
        @ToDo: Legend
        @ToDo: Use Symbology
    """
    db = current.db
    s3db = current.s3db
    # Look up all facility-type names linked to this site
    table = db.org_facility_type
    ltable = db.org_site_facility_type
    query = (ltable.site_id == record.site_id) & \
            (ltable.facility_type_id == table.id)
    rows = db(query).select(table.name)
    types = [row.name for row in rows]
    # Use Marker in preferential order
    if "Hub" in types:
        marker = "warehouse"
    elif "Medical Clinic" in types:
        marker = "hospital"
    elif "Food" in types:
        marker = "food"
    elif "Relief Site" in types:
        marker = "asset"
    elif "Residential Building" in types:
        marker = "residence"
    #elif "Shelter" in types:
    #    marker = "shelter"
    else:
        # Unknown
        marker = "office"
    if settings.has_module("req"):
        # Colour code by open/priority requests
        # NOTE(review): assumes record.reqs is 1/2/3 for low/medium/high —
        # set by the req module; verify against the calling layer config.
        reqs = record.reqs
        if reqs == 3:
            # High
            marker = "%s_red" % marker
        elif reqs == 2:
            # Medium
            marker = "%s_yellow" % marker
        elif reqs == 1:
            # Low
            marker = "%s_green" % marker
    # Resolve the marker name to a gis_marker record (image + dimensions)
    mtable = db.gis_marker
    try:
        marker = db(mtable.name == marker).select(mtable.image,
                                                  mtable.height,
                                                  mtable.width,
                                                  cache=s3db.cache,
                                                  limitby=(0, 1)
                                                  ).first()
    # NOTE(review): bare except; presumably guards a missing marker row /
    # DB error by falling back to the generic office marker — confirm and
    # narrow the exception type if possible.
    except:
        marker = db(mtable.name == "office").select(mtable.image,
                                                    mtable.height,
                                                    mtable.width,
                                                    cache=s3db.cache,
                                                    limitby=(0, 1)
                                                    ).first()
    return marker
# -----------------------------------------------------------------------------
def org_facility_onvalidation(form):
    """
        Default the name to the Street Address
        - falls back to the represented Location if no address was given
    """
    form_vars = form.vars
    if form_vars.get("name", None):
        # A name was supplied: nothing to default
        return
    address = form_vars.get("address", None)
    if address:
        form_vars.name = address
        return
    # We need a default: use the represented Location
    form_vars.name = current.db.org_facility.location_id.represent(form_vars.location_id)
# -----------------------------------------------------------------------------
def customise_org_facility_controller(**attr):
    """
        Customise org_facility controller
        - per-feature map markers, Private Residence filtering,
          hierarchical location widget, simplified popup create-form
    """
    s3db = current.s3db
    s3 = current.response.s3
    # Tell the client to request per-feature markers
    s3db.configure("org_facility", marker_fn=facility_marker_fn)
    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        # NOTE(review): the result of standard_prep is discarded here —
        # other customise_* preps in this file return it; confirm intentional.
        if callable(standard_prep):
            result = standard_prep(r)
        if r.method not in ("read", "update"):
            types = r.get_vars.get("site_facility_type.facility_type_id__belongs", None)
            if not types:
                # Hide Private Residences
                from s3.s3resource import S3FieldSelector
                s3.filter = S3FieldSelector("site_facility_type.facility_type_id$name") != "Private Residence"
        if r.interactive:
            tablename = "org_facility"
            table = s3db[tablename]
            if not r.component and r.method in (None, "create", "update"):
                from s3.s3validators import IS_LOCATION_SELECTOR2
                from s3.s3widgets import S3LocationSelectorWidget2, S3MultiSelectWidget
                field = table.location_id
                if r.method in ("create", "update"):
                    field.label = "" # Gets replaced by widget
                # Restrict the location selector to borough/neighborhood levels
                levels = ("L2", "L3")
                field.requires = IS_LOCATION_SELECTOR2(levels=levels)
                field.widget = S3LocationSelectorWidget2(levels=levels,
                                                         hide_lx=False,
                                                         reverse_lx=True,
                                                         show_address=True,
                                                         show_postcode=True,
                                                         )
                table.organisation_id.widget = S3MultiSelectWidget(multiple=False)
                if r.get_vars.get("format", None) == "popup":
                    # Coming from req/create form
                    # Hide most Fields
                    from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent
                    # We default this onvalidation
                    table.name.notnull = False
                    table.name.requires = None
                    crud_form = S3SQLCustomForm(S3SQLInlineComponent(
                                                    "site_facility_type",
                                                    label = T("Facility Type"),
                                                    fields = [("", "facility_type_id")],
                                                    multiple = False,
                                                    required = True,
                                                ),
                                                "name",
                                                "location_id",
                                                )
                    s3db.configure(tablename,
                                   crud_form = crud_form,
                                   onvalidation = org_facility_onvalidation,
                                   )
        return True
    s3.prep = custom_prep
    return attr
settings.customise_org_facility_controller = customise_org_facility_controller
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
    """
        Customise org_organisation controller
        - custom list fields, a large custom CRUD form (services, networks,
          contact channels, documents), filters, and custom rheader tabs
    """
    s3db = current.s3db
    s3 = current.response.s3
    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True
        if r.interactive or r.representation == "aadata":
            list_fields = ["id",
                           "name",
                           "acronym",
                           "organisation_type_id",
                           (T("Services"), "service.name"),
                           (T("Neighborhoods Served"), "location.name"),
                           ]
            s3db.configure("org_organisation",
                           list_fields = list_fields)
        if r.interactive:
            if not r.component:
                from gluon.html import DIV, INPUT
                from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget
                # Contact channels (phone2/email/rss/twitter/facebook) are all
                # pr_contact rows distinguished by their contact_method filter.
                crud_form = S3SQLCustomForm(
                    "name",
                    "acronym",
                    "organisation_type_id",
                    S3SQLInlineComponentMultiSelectWidget(
                        "service",
                        label = T("Services"),
                        field = "service_id",
                        #cols = 4,
                    ),
                    S3SQLInlineComponentMultiSelectWidget(
                        "group",
                        label = T("Network"),
                        field = "group_id",
                        #cols = 3,
                    ),
                    S3SQLInlineComponent(
                        "address",
                        label = T("Address"),
                        multiple = False,
                        # This is just Text - put into the Comments box for now
                        # Ultimately should go into location_id$addr_street
                        fields = [("", "comments")],
                    ),
                    S3SQLInlineComponentMultiSelectWidget(
                        "location",
                        label = T("Neighborhoods Served"),
                        field = "location_id",
                        filterby = dict(field = "level",
                                        options = "L4"
                                        ),
                        # @ToDo: GroupedCheckbox Widget or Hierarchical MultiSelectWidget
                        #cols = 5,
                    ),
                    "phone",
                    S3SQLInlineComponent(
                        "contact",
                        name = "phone2",
                        label = T("Phone2"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "WORK_PHONE"
                                        )
                    ),
                    S3SQLInlineComponent(
                        "contact",
                        name = "email",
                        label = T("Email"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "EMAIL"
                                        )
                    ),
                    "website",
                    S3SQLInlineComponent(
                        "contact",
                        # Checkbox lets the user opt out of auto-importing the
                        # feed (see pr_contact_onaccept)
                        comment = DIV(INPUT(_type="checkbox",
                                            _name="rss_no_import"),
                                      T("Don't Import Feed")),
                        name = "rss",
                        label = T("RSS"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "RSS"
                                        )
                    ),
                    S3SQLInlineComponent(
                        "document",
                        name = "iCal",
                        label = "iCAL",
                        multiple = False,
                        fields = [("", "url")],
                        filterby = dict(field = "name",
                                        options="iCal"
                                        )
                    ),
                    S3SQLInlineComponent(
                        "document",
                        name = "data",
                        label = T("Data"),
                        multiple = False,
                        fields = [("", "url")],
                        filterby = dict(field = "name",
                                        options="Data"
                                        )
                    ),
                    S3SQLInlineComponent(
                        "contact",
                        name = "twitter",
                        label = T("Twitter"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "TWITTER"
                                        )
                    ),
                    S3SQLInlineComponent(
                        "contact",
                        name = "facebook",
                        label = T("Facebook"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "FACEBOOK"
                                        )
                    ),
                    "comments",
                )
                from s3.s3filter import S3LocationFilter, S3OptionsFilter, S3TextFilter
                filter_widgets = [
                    S3TextFilter(["name", "acronym"],
                                 label = T("Name"),
                                 _class = "filter-search",
                                 ),
                    S3OptionsFilter("group_membership.group_id",
                                    label = T("Network"),
                                    represent = "%(name)s",
                                    #hidden = True,
                                    ),
                    S3LocationFilter("organisation_location.location_id",
                                     label = T("Neighborhood"),
                                     levels = ("L3", "L4"),
                                     #hidden = True,
                                     ),
                    S3OptionsFilter("service_organisation.service_id",
                                    #label = T("Service"),
                                    #hidden = True,
                                    ),
                    S3OptionsFilter("organisation_type_id",
                                    label = T("Type"),
                                    #hidden = True,
                                    ),
                    ]
                s3db.configure("org_organisation",
                               crud_form = crud_form,
                               filter_widgets = filter_widgets,
                               )
                # Auto-import RSS feeds saved via the inline contact component
                s3db.configure("pr_contact",
                               onaccept = pr_contact_onaccept,
                               )
            elif r.component_name == "facility":
                if r.method in (None, "create", "update"):
                    from s3.s3validators import IS_LOCATION_SELECTOR2
                    from s3.s3widgets import S3LocationSelectorWidget2
                    table = s3db.org_facility
                    field = table.location_id
                    if r.method in ("create", "update"):
                        field.label = "" # Gets replaced by widget
                    levels = ("L2", "L3")
                    field.requires = IS_LOCATION_SELECTOR2(levels=levels)
                    field.widget = S3LocationSelectorWidget2(levels=levels,
                                                             hide_lx=False,
                                                             reverse_lx=True,
                                                             show_address=True,
                                                             show_postcode=True,
                                                             )
            elif r.component_name == "human_resource":
                # Don't assume that user is from same org/site as Contacts they create
                r.component.table.site_id.default = None
        return result
    s3.prep = custom_prep
    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)
        if r.interactive and isinstance(output, dict):
            if "rheader" in output:
                # Custom Tabs
                tabs = [(T("Basic Details"), None),
                        (T("Contacts"), "human_resource"),
                        (T("Facilities"), "facility"),
                        (T("Projects"), "project"),
                        (T("Assets"), "asset"),
                        ]
                output["rheader"] = s3db.org_rheader(r, tabs=tabs)
        return output
    s3.postp = custom_postp
    return attr
settings.customise_org_organisation_controller = customise_org_organisation_controller
# -----------------------------------------------------------------------------
def customise_org_group_controller(**attr):
    """
        Customise org_group controller
        - location selector with polygon support (defaulting to Manhattan),
          custom CRUD form with contact channels/documents,
          relief-team defaults for the pr_group component
    """
    s3db = current.s3db
    s3 = current.response.s3
    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True
        if r.interactive:
            if not r.component:
                from gluon.html import DIV, INPUT
                from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget
                if r.method != "read":
                    from gluon.validators import IS_EMPTY_OR
                    from s3.s3validators import IS_LOCATION_SELECTOR2
                    from s3.s3widgets import S3LocationSelectorWidget2
                    field = s3db.org_group.location_id
                    field.label = "" # Gets replaced by widget
                    #field.requires = IS_LOCATION_SELECTOR2(levels=("L2",))
                    field.requires = IS_EMPTY_OR(
                                        IS_LOCATION_SELECTOR2(levels=("L2",))
                                        )
                    field.widget = S3LocationSelectorWidget2(levels=("L2",),
                                                             polygons=True,
                                                             )
                    # Default location to Manhattan
                    # NOTE(review): the lookup is by name "New York" at level
                    # L2 — presumably New York County == Manhattan; confirm
                    # against the gis_location prepop data.
                    db = current.db
                    gtable = db.gis_location
                    query = (gtable.name == "New York") & \
                            (gtable.level == "L2")
                    manhattan = db(query).select(gtable.id,
                                                 limitby=(0, 1)).first()
                    if manhattan:
                        field.default = manhattan.id
                # Contact channels are pr_contact rows keyed by contact_method
                crud_form = S3SQLCustomForm(
                    "name",
                    "location_id",
                    S3SQLInlineComponent(
                        "contact",
                        name = "phone",
                        label = T("Phone"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "WORK_PHONE"
                                        )
                    ),
                    S3SQLInlineComponent(
                        "contact",
                        name = "email",
                        label = T("Email"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "EMAIL"
                                        )
                    ),
                    "website",
                    S3SQLInlineComponent(
                        "contact",
                        # Checkbox lets the user opt out of auto-importing the
                        # feed (see pr_contact_onaccept)
                        comment = DIV(INPUT(_type="checkbox",
                                            _name="rss_no_import"),
                                      T("Don't Import Feed")),
                        name = "rss",
                        label = T("RSS"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "RSS"
                                        )
                    ),
                    S3SQLInlineComponent(
                        "document",
                        name = "iCal",
                        label = "iCAL",
                        multiple = False,
                        fields = [("", "url")],
                        filterby = dict(field = "name",
                                        options="iCal"
                                        )
                    ),
                    S3SQLInlineComponent(
                        "document",
                        name = "data",
                        label = T("Data"),
                        multiple = False,
                        fields = [("", "url")],
                        filterby = dict(field = "name",
                                        options="Data"
                                        )
                    ),
                    S3SQLInlineComponent(
                        "contact",
                        name = "twitter",
                        label = T("Twitter"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "TWITTER"
                                        )
                    ),
                    S3SQLInlineComponent(
                        "contact",
                        name = "facebook",
                        label = T("Facebook"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "FACEBOOK"
                                        )
                    ),
                    "comments",
                )
                s3db.configure("org_group",
                               crud_form = crud_form,
                               )
                # Auto-import RSS feeds saved via the inline contact component
                s3db.configure("pr_contact",
                               onaccept = pr_contact_onaccept,
                               )
            elif r.component_name == "pr_group":
                field = s3db.pr_group.group_type
                field.default = 3 # Relief Team, to show up in hrm/group
                field.readable = field.writable = False
        return result
    s3.prep = custom_prep
    # Allow components with components (such as org/group) to breakout from tabs
    attr["native"] = True
    return attr
settings.customise_org_group_controller = customise_org_group_controller
# -----------------------------------------------------------------------------
# Persons
# Uncomment to hide fields in S3AddPersonWidget
settings.pr.request_dob = False
settings.pr.request_gender = False
# Doesn't yet work (form fails to submit)
#settings.pr.select_existing = False
# -----------------------------------------------------------------------------
# Persons
def customise_pr_person_controller(**attr):
    """
        Customise pr_person controller
        - relabel pr_group_membership.group_head on the Groups tab
    """
    s3 = current.response.s3
    # Chain onto the default prep, if one is registered
    standard_prep = s3.prep
    def custom_prep(r):
        result = standard_prep(r) if callable(standard_prep) else True
        if r.interactive and r.component_name == "group_membership":
            current.s3db.pr_group_membership.group_head.label = T("Group Chairperson")
        return result
    s3.prep = custom_prep
    return attr
settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
# Groups
def chairperson(row):
    """
        Virtual Field to show the chairperson of a group
        - looks up the pr_group_membership row flagged group_head and
          renders a link to that person's HRM page
    """
    # Rows from joined queries wrap the pr_group record
    if hasattr(row, "pr_group"):
        row = row.pr_group
    try:
        group_id = row.id
    except:
        # not available
        return current.messages["NONE"]
    db = current.db
    s3db = current.s3db
    mtable = s3db.pr_group_membership
    ptable = db.pr_person
    query = (mtable.group_id == group_id) & \
            (mtable.group_head == True) & \
            (mtable.person_id == ptable.id)
    chair = db(query).select(ptable.first_name,
                             ptable.middle_name,
                             ptable.last_name,
                             ptable.id,
                             limitby=(0, 1)).first()
    if chair:
        # Only used in list view so HTML is OK
        return A(s3_fullname(chair),
                 _href=URL(c="hrm", f="person", args=chair.id))
    else:
        return current.messages["NONE"]
def customise_pr_group_controller(**attr):
    """
        Customise pr_group controller
        - adds the org_group_team component (Network), a custom CRUD form,
          filters, and relabels group_head
    """
    s3 = current.response.s3
    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True
        s3db = current.s3db
        # Format for filter_widgets & imports
        s3db.add_components("pr_group",
                            org_group_team="group_id")
        from s3.s3fields import S3Represent
        from s3.s3filter import S3TextFilter, S3OptionsFilter
        from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent
        s3db.org_group_team.org_group_id.represent = S3Represent(lookup="org_group",
                                                                 show_link=True)
        crud_form = S3SQLCustomForm("name",
                                    "description",
                                    S3SQLInlineComponent("group_team",
                                                         label = T("Network"),
                                                         fields = [("", "org_group_id")],
                                                         # @ToDo: Make this optional?
                                                         multiple = False,
                                                         ),
                                    "comments",
                                    )
        filter_widgets = [
            S3TextFilter(["name",
                          "description",
                          "comments",
                          "group_team.org_group_id$name",
                          ],
                         label = T("Search"),
                         # BUG FIX: removed the doubled "by by" in this
                         # user-facing help text
                         comment = T("You can search by group name, description or comments and by network name. You may use % as wildcard. Press 'Search' without input to list all."),
                         #_class = "filter-search",
                         ),
            S3OptionsFilter("group_team.org_group_id",
                            label = T("Network"),
                            #hidden = True,
                            ),
            ]
        s3db.configure("pr_group",
                       crud_form = crud_form,
                       filter_widgets = filter_widgets,
                       )
        #if r.component_name == "group_membership":
        s3db.pr_group_membership.group_head.label = T("Group Chairperson")
        #else:
        #    # RHeader wants a simplified version, but don't want inconsistent across tabs
        #    s3db.pr_group_membership.group_head.label = T("Chairperson")
        return result
    s3.prep = custom_prep
    return attr
# -----------------------------------------------------------------------------
def customise_pr_group_resource(r, tablename):
    """
        Customise pr_group resource (in group & org_group controllers)
        - runs after controller customisation
        - but runs before prep
    """
    from gluon import Field, URL
    from gluon.sqlhtml import TextWidget
    s3db = current.s3db
    table = s3db.pr_group
    # Increase size of widget
    table.description.widget = TextWidget.widget
    # Virtual field rendering the group-head member (see chairperson above)
    table.chairperson = Field.Method("chairperson", chairperson)
    list_fields = ["id",
                   (T("Network"), "group_team.org_group_id"),
                   "name",
                   "description",
                   (T("Chairperson"), "chairperson"),
                   "comments",
                   ]
    s3db.configure("pr_group",
                   # Redirect to member list when a new group has been created
                   create_next = URL(c="hrm", f="group",
                                     args=["[id]", "group_membership"]),
                   list_fields = list_fields,
                   )
settings.customise_pr_group_resource = customise_pr_group_resource
# -----------------------------------------------------------------------------
def pr_contact_onaccept(form):
    """
        Import Organisation/Network RSS Feeds
        - when an RSS contact is saved (and the "Don't Import Feed" checkbox
          wasn't ticked), create a msg_rss_channel for it, enable it,
          attach the NYC RSS parser, and schedule an immediate poll+parse
    """
    form_vars = form.vars
    # Only act on RSS contacts (falsy contact_method is also skipped)
    if form_vars.contact_method != "RSS":
        return
    no_import = current.request.post_vars.get("rss_no_import", None)
    if no_import:
        return
    url = form_vars.value
    db = current.db
    s3db = current.s3db
    table = s3db.msg_rss_channel
    # Don't create duplicate channels for the same feed URL
    exists = db(table.url == url).select(table.id, limitby=(0, 1))
    if exists:
        return
    # Lookup name of Org/Network
    pe_id = form_vars.pe_id
    etable = db.pr_pentity
    instance_type = db(etable.pe_id == pe_id).select(etable.instance_type,
                                                     limitby=(0, 1)
                                                     ).first().instance_type
    otable = db[instance_type]
    name = db(otable.pe_id == pe_id).select(otable.name,
                                            limitby=(0, 1)
                                            ).first().name
    # Add RSS Channel
    channel_record_id = table.insert(name=name, enabled=True, url=url)
    record = dict(id=channel_record_id)
    s3db.update_super(table, record)
    # Enable
    channel_id = record["channel_id"]
    s3db.msg_channel_enable("msg_rss_channel", channel_id)
    # Setup Parser
    table = s3db.msg_parser
    parser_id = table.insert(channel_id=channel_id, function_name="parse_rss",
                             enabled=True)
    s3db.msg_parser_enable(parser_id)
    # Check Now
    # BUG FIX: "async" is a reserved keyword in Python 3.7+, so neither a
    # local named "async" nor the attribute syntax "s3task.async" parses
    # there; use getattr to stay forward-compatible (same behavior on Py2).
    run_async = getattr(current.s3task, "async")
    run_async("msg_poll", args=["msg_rss_channel", channel_id])
    run_async("msg_parse", args=[channel_id, "parse_rss"])
# -----------------------------------------------------------------------------
# Human Resource Management
# Uncomment to change the label for 'Staff'
settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to allow Staff & Volunteers to be registered without an Organisation
settings.hrm.org_required = False
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable the use of HR Certificates
settings.hrm.use_certificates = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Disable the use of HR Education
settings.hrm.use_education = False
# Uncomment to disable the use of HR Skills
#settings.hrm.use_skills = False
# Uncomment to disable the use of HR Trainings
settings.hrm.use_trainings = False
# Uncomment to disable the use of HR Description
settings.hrm.use_description = False
# Change the label of "Teams" to "Groups"
settings.hrm.teams = "Groups"
# Custom label for Organisations in HR module
#settings.hrm.organisation_label = "National Society / Branch"
settings.hrm.organisation_label = "Organization"
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_controller(**attr):
    """
        Customise hrm_human_resource controller
        - custom filter widgets and a hierarchical org -> site dropdown
    """
    s3 = current.response.s3
    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True
        if r.interactive or r.representation == "aadata":
            if not r.component:
                from s3.s3filter import S3TextFilter, S3OptionsFilter, S3LocationFilter
                filter_widgets = [
                    S3TextFilter(["person_id$first_name",
                                  "person_id$middle_name",
                                  "person_id$last_name",
                                  ],
                                 label = T("Name"),
                                 ),
                    S3OptionsFilter("organisation_id",
                                    filter = True,
                                    header = "",
                                    hidden = True,
                                    ),
                    S3OptionsFilter("group_person.group_id",
                                    label = T("Network"),
                                    #filter = True,
                                    #header = "",
                                    hidden = True,
                                    ),
                    S3LocationFilter("location_id",
                                     label = T("Location"),
                                     levels = ("L1", "L2", "L3", "L4"),
                                     hidden = True,
                                     ),
                    S3OptionsFilter("site_id",
                                    hidden = True,
                                    ),
                    S3OptionsFilter("training.course_id",
                                    label = T("Training"),
                                    hidden = True,
                                    ),
                    S3OptionsFilter("group_membership.group_id",
                                    label = T("Team"),
                                    filter = True,
                                    header = "",
                                    hidden = True,
                                    ),
                    ]
                s3db = current.s3db
                s3db.configure("hrm_human_resource",
                               filter_widgets = filter_widgets,
                               )
                field = r.table.site_id
                # Don't assume that user is from same org/site as Contacts they create
                field.default = None
                # Use a hierarchical dropdown instead of AC
                field.widget = None
                # Client-side filter: repopulate site_id options whenever
                # organisation_id changes
                script = \
'''S3OptionsFilter({
 'triggerName':'organisation_id',
 'targetName':'site_id',
 'lookupResource':'site',
 'lookupURL':'/%s/org/sites_for_org/',
 'optional':true
})''' % r.application
                s3.jquery_ready.append(script)
        return result
    s3.prep = custom_prep
    return attr
settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_resource(r, tablename):
    """
        Customise hrm_human_resource resource (in facility, human_resource, organisation & person controllers)
        - runs after controller customisation
        - but runs before prep

        Installs a custom CRUD form (with inline Network component) and a
        custom list of fields for the datatable.

        @param r: the S3Request
        @param tablename: "hrm_human_resource"
    """

    s3db = current.s3db
    from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent
    crud_form = S3SQLCustomForm("person_id",
                                "organisation_id",
                                "site_id",
                                # Single Network membership, edited inline
                                S3SQLInlineComponent(
                                    "group_person",
                                    label = T("Network"),
                                    link = False,
                                    fields = [("", "group_id")],
                                    multiple = False,
                                ),
                                "job_title_id",
                                "start_date",
                                )
    list_fields = ["id",
                   "person_id",
                   "job_title_id",
                   "organisation_id",
                   (T("Network"), "group_person.group_id"),
                   (T("Groups"), "person_id$group_membership.group_id"),
                   "site_id",
                   #"site_contact",
                   (T("Email"), "email.value"),
                   (settings.get_ui_label_mobile_phone(), "phone.value"),
                   ]
    s3db.configure("hrm_human_resource",
                   crud_form = crud_form,
                   list_fields = list_fields,
                   )

settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource
# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):
    """
        Customise hrm_job_title controller:
        hide the Organisation and Type fields from Job Titles.

        @param attr: controller attributes dict, returned unchanged
    """

    s3 = current.response.s3

    # Chain onto any previously-installed prep
    standard_prep = s3.prep

    def prep(r):
        # Run the standard prep first (if any) and honour its result
        result = standard_prep(r) if callable(standard_prep) else True

        if r.interactive or r.representation == "aadata":
            table = current.s3db.hrm_job_title
            # Make both fields neither readable nor writable
            for fname in ("organisation_id", "type"):
                field = table[fname]
                field.readable = field.writable = False

        return result

    s3.prep = prep

    return attr

settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
# Projects
# Use codes for projects (called 'blurb' in NYC)
settings.project.codes = True
# Disable settings suitable for detailed Task management
settings.project.mode_task = False
# Use Activities for projects
settings.project.activities = True
# Don't use Milestones in project/task.
settings.project.milestones = False
# Disable Sectors in projects
settings.project.sectors = False
# Multiple partner organizations
settings.project.multiple_organisations = True
def customise_project_project_controller(**attr):
    """
        Customise project_project controller
        - custom CRUD form (inline Locations, Partner Orgs, Media URLs,
          Category checkboxes)
        - custom filter widgets and list fields

        @param attr: controller attributes dict, returned unchanged
    """

    s3 = current.response.s3

    # Custom prep: wrap any previously-installed prep so it still runs first
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        # Main record only, for interactive views and dataTables Ajax requests
        if not r.component and (r.interactive or r.representation == "aadata"):
            from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentCheckbox
            s3db = current.s3db
            table = r.table
            tablename = "project_project"
            table.code.label = T("Project blurb (max. 100 characters)")
            table.code.max_length = 100
            table.comments.label = T("How people can help")

            # Enforce the 100-char limit client-side as well
            script = '''$('#project_project_code').attr('maxlength','100')'''
            s3.jquery_ready.append(script)

            crud_form = S3SQLCustomForm(
                "organisation_id",
                "name",
                "code",
                "description",
                "status_id",
                "start_date",
                "end_date",
                "calendar",
                #"drr.hfa",
                #"objectives",
                "human_resource_id",
                # Activities
                S3SQLInlineComponent(
                    "location",
                    label = T("Location"),
                    fields = [("", "location_id")],
                ),
                # Partner Orgs
                S3SQLInlineComponent(
                    "organisation",
                    name = "partner",
                    label = T("Partner Organizations"),
                    fields = ["organisation_id",
                              "comments", # NB This is labelled 'Role' in DRRPP
                              ],
                    filterby = dict(field = "role",
                                    options = "2"
                                    )
                ),
                # NOTE(review): label below appears to be missing a closing ")"
                S3SQLInlineComponent(
                    "document",
                    name = "media",
                    label = T("URLs (media, fundraising, website, social media, etc."),
                    fields = ["document_id",
                              "name",
                              "url",
                              "comments",
                              ],
                    filterby = dict(field = "name")
                ),
                S3SQLInlineComponentCheckbox(
                    "activity_type",
                    label = T("Categories"),
                    field = "activity_type_id",
                    cols = 3,
                    # Filter Activity Type by Project
                    filter = {"linktable": "project_activity_type_project",
                              "lkey": "project_id",
                              "rkey": "activity_type_id",
                              },
                ),
                #"budget",
                #"currency",
                "comments",
            )

            from s3.s3filter import S3TextFilter, S3OptionsFilter, S3LocationFilter, S3DateFilter
            filter_widgets = [
                S3TextFilter(["name",
                              "code",
                              "description",
                              "organisation.name",
                              "organisation.acronym",
                              ],
                             label = T("Name"),
                             _class = "filter-search",
                             ),
                S3OptionsFilter("status_id",
                                label = T("Status"),
                                # Not translateable
                                #represent = "%(name)s",
                                cols = 3,
                                ),
                #S3OptionsFilter("theme_project.theme_id",
                #                label = T("Theme"),
                #                #hidden = True,
                #                ),
                S3LocationFilter("location.location_id",
                                 label = T("Location"),
                                 levels = ("L1", "L2", "L3", "L4"),
                                 #hidden = True,
                                 ),
                # @ToDo: Widget to handle Start & End in 1!
                S3DateFilter("start_date",
                             label = T("Start Date"),
                             hide_time = True,
                             #hidden = True,
                             ),
                S3DateFilter("end_date",
                             label = T("End Date"),
                             hide_time = True,
                             #hidden = True,
                             ),
                ]

            list_fields = ["id",
                           "name",
                           "code",
                           "organisation_id",
                           "start_date",
                           "end_date",
                           (T("Locations"), "location.location_id"),
                           ]

            s3db.configure(tablename,
                           crud_form = crud_form,
                           filter_widgets = filter_widgets,
                           list_fields = list_fields,
                           )

        return result
    s3.prep = custom_prep

    return attr

settings.customise_project_project_controller = customise_project_project_controller
# -----------------------------------------------------------------------------
# Requests Management
# Which types of Requests can be made: People and Stock (Summary disabled)
settings.req.req_type = ["People", "Stock"]#, "Summary"]
# Don't prompt the user to match requests against inventories
settings.req.prompt_match = False
#settings.req.use_commit = False
# Requester can be left empty
settings.req.requester_optional = True
# Request date is set automatically, not editable
settings.req.date_writable = False
# Quantities on Item/Skill requests are editable
settings.req.item_quantities_writable = True
settings.req.skill_quantities_writable = True
# Don't ask for a purpose on Item requests
settings.req.items_ask_purpose = False
#settings.req.use_req_number = False
# Label for Requester
settings.req.requester_label = "Site Contact"
# Filter Requester as being from the Site
settings.req.requester_from_site = True
# Label for Inventory Requests
settings.req.type_inv_label = "Supplies"
# Enable Summary 'Site Needs' tab for Offices/Facilities
settings.req.summary = True
# -----------------------------------------------------------------------------
def req_req_postprocess(form):
    """
        Runs after the req_req crud_form completes:
        - creates a cms_post on the newswire summarising the new Request
          (title from priority/date, body from the requested items or skills)
        - links the post back to the Request via a doc_document URL
        - @ToDo: Send out Tweets

        @param form: the CRUD form; form.vars.id is the req_req record id
    """

    req_id = form.vars.id

    db = current.db
    s3db = current.s3db
    rtable = s3db.req_req

    # Read the full record
    row = db(rtable.id == req_id).select(rtable.type,
                                         rtable.site_id,
                                         rtable.requester_id,
                                         rtable.priority,
                                         rtable.date_required,
                                         rtable.purpose,
                                         rtable.comments,
                                         limitby=(0, 1)
                                         ).first()

    # Build Title & Body from the Request details
    priority = rtable.priority.represent(row.priority)
    date_required = row.date_required
    if date_required:
        date = rtable.date_required.represent(date_required)
        title = "%(priority)s by %(date)s" % dict(priority=priority,
                                                  date=date)
    else:
        title = priority

    body = row.comments
    if row.type == 1:
        # Items: prepend one "<qty> <pack> <item>" line per requested item
        ritable = s3db.req_req_item
        items = db(ritable.req_id == req_id).select(ritable.item_id,
                                                    ritable.item_pack_id,
                                                    ritable.quantity)
        item_represent = s3db.supply_item_represent
        pack_represent = s3db.supply_item_pack_represent
        for item in items:
            line = "%s %s %s" % (item.quantity,
                                 pack_represent(item.item_pack_id),
                                 item_represent(item.item_id))
            body = "%s\n%s" % (line, body)
    else:
        # Skills: prepend the purpose and one "<qty> <skill>" line per skill
        body = "%s\n%s" % (row.purpose, body)
        rstable = s3db.req_req_skill
        skills = db(rstable.req_id == req_id).select(rstable.skill_id,
                                                     rstable.quantity)
        skill_represent = s3db.hrm_multi_skill_represent
        for skill in skills:
            line = "%s %s" % (skill.quantity, skill_represent(skill.skill_id))
            body = "%s\n%s" % (line, body)

    # Lookup series_id of the "Request" series
    stable = s3db.cms_series
    try:
        series_id = db(stable.name == "Request").select(stable.id,
                                                        cache=s3db.cache,
                                                        limitby=(0, 1)
                                                        ).first().id
    except AttributeError:
        # Prepop hasn't been run: no "Request" series, so .first() was None
        series_id = None

    # Location is that of the site
    otable = s3db.org_site
    location_id = db(otable.site_id == row.site_id).select(otable.location_id,
                                                           limitby=(0, 1)
                                                           ).first().location_id

    # Create Post
    ptable = s3db.cms_post
    post_id = ptable.insert(series_id=series_id,
                            title=title,
                            body=body,
                            location_id=location_id,
                            person_id=row.requester_id,
                            )
    record = dict(id=post_id)
    # Create the super-entity record (sets record["doc_id"])
    s3db.update_super(ptable, record)

    # Add source link back to the Request
    url = "%s%s" % (settings.get_base_public_url(),
                    URL(c="req", f="req", args=req_id))
    s3db.doc_document.insert(doc_id=record["doc_id"],
                             url=url,
                             )
# -----------------------------------------------------------------------------
def customise_req_req_resource(r, tablename):
    """
        Customise req_req resource:
        - add a popup link on the Site field to create a new Facility
        - attach the newswire postprocess to run after the CRUD form

        @param r: the S3Request
        @param tablename: "req_req"
    """

    from s3layouts import S3AddResourceLink

    site_field = current.s3db.req_req.site_id
    site_field.comment = S3AddResourceLink(c="org",
                                           f="facility",
                                           vars = dict(child="site_id"),
                                           title=T("Create Facility"),
                                           tooltip=current.messages.AUTOCOMPLETE_HELP,
                                           )

    current.response.s3.req_req_postprocess = req_req_postprocess

settings.customise_req_req_resource = customise_req_req_resource
# -----------------------------------------------------------------------------
# Comment/uncomment modules here to disable/enable them
# Each entry maps module prefix -> Storage of menu/ACL metadata;
# module_type controls menu position (None = not shown in the menu).
settings.modules = OrderedDict([
    # Core modules which shouldn't be disabled
    ("default", Storage(
            name_nice = T("Home"),
            restricted = False, # Use ACLs to control access to this module
            access = None,      # All Users (inc Anonymous) can see this module in the default menu & access the controller
            module_type = None  # This item is not shown in the menu
        )),
    ("admin", Storage(
            name_nice = T("Admin"),
            #description = "Site Administration",
            restricted = True,
            access = "|1|",     # Only Administrators can see this module in the default menu & access the controller
            module_type = None  # This item is handled separately for the menu
        )),
    ("appadmin", Storage(
            name_nice = T("Administration"),
            #description = "Site Administration",
            restricted = True,
            module_type = None  # No Menu
        )),
    ("errors", Storage(
            name_nice = T("Ticket Viewer"),
            #description = "Needed for Breadcrumbs",
            restricted = False,
            module_type = None  # No Menu
        )),
    ("sync", Storage(
            name_nice = T("Synchronization"),
            #description = "Synchronization",
            restricted = True,
            access = "|1|",     # Only Administrators can see this module in the default menu & access the controller
            module_type = None  # This item is handled separately for the menu
        )),
    # Uncomment to enable internal support requests
    #("support", Storage(
    #        name_nice = T("Support"),
    #        #description = "Support Requests",
    #        restricted = True,
    #        module_type = None  # This item is handled separately for the menu
    #    )),
    ("gis", Storage(
            name_nice = T("Map"),
            #description = "Situation Awareness & Geospatial Analysis",
            restricted = True,
            module_type = 9,     # 8th item in the menu
        )),
    ("pr", Storage(
            name_nice = T("Person Registry"),
            #description = "Central point to record details on People",
            restricted = True,
            access = "|1|",     # Only Administrators can see this module in the default menu (access to controller is possible to all still)
            module_type = 10
        )),
    ("org", Storage(
            name_nice = T("Locations"),
            #description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
            restricted = True,
            module_type = 4
        )),
    # All modules below here should be possible to disable safely
    ("hrm", Storage(
            name_nice = T("Contacts"),
            #description = "Human Resources Management",
            restricted = True,
            module_type = 3,
        )),
    #("vol", Storage(
    #        name_nice = T("Volunteers"),
    #        #description = "Human Resources Management",
    #        restricted = True,
    #        module_type = 2,
    #    )),
    ("cms", Storage(
            name_nice = T("Content Management"),
            #description = "Content Management System",
            restricted = True,
            module_type = 10,
        )),
    ("doc", Storage(
            name_nice = T("Documents"),
            #description = "A library of digital resources, such as photos, documents and reports",
            restricted = True,
            module_type = None,
        )),
    ("msg", Storage(
            name_nice = T("Messaging"),
            #description = "Sends & Receives Alerts via Email & SMS",
            restricted = True,
            # The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules.
            module_type = None,
        )),
    ("supply", Storage(
            name_nice = T("Supply Chain Management"),
            #description = "Used within Inventory Management, Request Management and Asset Management",
            restricted = True,
            module_type = None, # Not displayed
        )),
    ("inv", Storage(
            name_nice = T("Inventory"),
            #description = "Receiving and Sending Items",
            restricted = True,
            module_type = 10
        )),
    #("proc", Storage(
    #        name_nice = T("Procurement"),
    #        #description = "Ordering & Purchasing of Goods & Services",
    #        restricted = True,
    #        module_type = 10
    #    )),
    ("asset", Storage(
            name_nice = T("Assets"),
            #description = "Recording and Assigning Assets",
            restricted = True,
            module_type = 10,
        )),
    # Vehicle depends on Assets
    #("vehicle", Storage(
    #        name_nice = T("Vehicles"),
    #        #description = "Manage Vehicles",
    #        restricted = True,
    #        module_type = 10,
    #    )),
    ("req", Storage(
            name_nice = T("Requests"),
            #description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
            restricted = True,
            module_type = 1,
        )),
    ("project", Storage(
            name_nice = T("Projects"),
            #description = "Tracking of Projects, Activities and Tasks",
            restricted = True,
            module_type = 10
        )),
    ("assess", Storage(
            name_nice = T("Assessments"),
            #description = "Rapid Assessments & Flexible Impact Assessments",
            restricted = True,
            module_type = 5,
        )),
    ("survey", Storage(
            name_nice = T("Surveys"),
            #description = "Create, enter, and manage surveys.",
            restricted = True,
            module_type = 5,
        )),
    #("cr", Storage(
    #        name_nice = T("Shelters"),
    #        #description = "Tracks the location, capacity and breakdown of victims in Shelters",
    #        restricted = True,
    #        module_type = 10
    #    )),
    #("hms", Storage(
    #        name_nice = T("Hospitals"),
    #        #description = "Helps to monitor status of hospitals",
    #        restricted = True,
    #        module_type = 1
    #    )),
    #("irs", Storage(
    #        name_nice = T("Incidents"),
    #        #description = "Incident Reporting System",
    #        restricted = False,
    #        module_type = 10
    #    )),
    #("dvi", Storage(
    #       name_nice = T("Disaster Victim Identification"),
    #       #description = "Disaster Victim Identification",
    #       restricted = True,
    #       module_type = 10,
    #       #access = "|DVI|",      # Only users with the DVI role can see this module in the default menu & access the controller
    #   )),
    #("mpr", Storage(
    #       name_nice = T("Missing Person Registry"),
    #       #description = "Helps to report and search for missing persons",
    #       restricted = False,
    #       module_type = 10,
    #   )),
    #("dvr", Storage(
    #       name_nice = T("Disaster Victim Registry"),
    #       #description = "Allow affected individuals & households to register to receive compensation and distributions",
    #       restricted = False,
    #       module_type = 10,
    #   )),
    #("scenario", Storage(
    #        name_nice = T("Scenarios"),
    #        #description = "Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).",
    #        restricted = True,
    #        module_type = 10,
    #    )),
    #("event", Storage(
    #        name_nice = T("Events"),
    #        #description = "Activate Events (e.g. from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).",
    #        restricted = True,
    #        module_type = 10,
    #    )),
    #("fire", Storage(
    #       name_nice = T("Fire Stations"),
    #       #description = "Fire Station Management",
    #       restricted = True,
    #       module_type = 1,
    #   )),
    #("flood", Storage(
    #        name_nice = T("Flood Warnings"),
    #        #description = "Flood Gauges show water levels in various parts of the country",
    #        restricted = False,
    #        module_type = 10
    #    )),
    #("member", Storage(
    #       name_nice = T("Members"),
    #       #description = "Membership Management System",
    #       restricted = True,
    #       module_type = 10,
    #   )),
    #("patient", Storage(
    #        name_nice = T("Patient Tracking"),
    #        #description = "Tracking of Patients",
    #        restricted = True,
    #        module_type = 10
    #    )),
    #("security", Storage(
    #       name_nice = T("Security"),
    #       #description = "Security Management System",
    #       restricted = True,
    #       module_type = 10,
    #   )),
    # These are specialist modules
    # Requires RPy2
    #("climate", Storage(
    #        name_nice = T("Climate"),
    #        #description = "Climate data portal",
    #        restricted = True,
    #        module_type = 10,
    #)),
    #("delphi", Storage(
    #       name_nice = T("Delphi Decision Maker"),
    #       #description = "Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.",
    #       restricted = False,
    #       module_type = 10,
    #   )),
    # @ToDo: Rewrite in a modern style
    #("budget", Storage(
    #        name_nice = T("Budgeting Module"),
    #        #description = "Allows a Budget to be drawn up",
    #        restricted = True,
    #        module_type = 10
    #    )),
    # @ToDo: Port these Assessments to the Survey module
    #("building", Storage(
    #        name_nice = T("Building Assessments"),
    #        #description = "Building Safety Assessments",
    #        restricted = True,
    #        module_type = 10,
    #    )),
    #("impact", Storage(
    #        name_nice = T("Impacts"),
    #        #description = "Used by Assess",
    #        restricted = True,
    #        module_type = None,
    #    )),
    #("ocr", Storage(
    #       name_nice = T("Optical Character Recognition"),
    #       #description = "Optical Character Recognition for reading the scanned handwritten paper forms.",
    #       restricted = False,
    #       module_type = None
    #   )),
])
| {
"content_hash": "dc16cb2b19710f9985c28abeffe9ff8b",
"timestamp": "",
"source": "github",
"line_count": 1716,
"max_line_length": 187,
"avg_line_length": 39.273310023310025,
"alnum_prop": 0.4643212203047794,
"repo_name": "code-for-india/sahana_shelter_worldbank",
"id": "9cdc05d9974ce5283dc74f6beef3f0133f554397",
"size": "67419",
"binary": false,
"copies": "1",
"ref": "refs/heads/hackathon",
"path": "private/templates/NYC/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1214342"
},
{
"name": "JavaScript",
"bytes": "16755282"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "27298931"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "2245739"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.