text stringlengths 4 1.02M | meta dict |
|---|---|
from coffin.template import Library
from django.template.loader import render_to_string
from jinja2 import nodes
from jinja2.ext import Extension
register = Library()
class MerchantExtension(Extension):
    """Jinja2 extension providing the ``render_integration`` template tag.

    Template usage: ``{% render_integration obj %}`` where ``obj`` is an
    integration instance exposing a ``template`` attribute (template path).
    """
    tags = set(['render_integration'])

    def parse(self, parser):
        """Parse the tag: consume the tag token, read one expression argument,
        and emit an Output node that calls render_integration at render time.
        """
        # next() works on both py2 (.next()) and py3 (__next__) token streams,
        # unlike the original stream.next() which is py2-only.
        lineno = next(parser.stream).lineno
        obj = parser.parse_expression()
        call_node = self.call_method('render_integration', args=[obj])
        return nodes.Output([call_node]).set_lineno(lineno)

    @classmethod
    def render_integration(cls, obj):
        """Render the integration's own template with itself in context.

        Fixed: declared @classmethod but named its first parameter ``self``;
        the first argument of a classmethod is the class, so it is now ``cls``.
        """
        return render_to_string(obj.template, {'integration': obj})

register.tag(MerchantExtension)
| {
"content_hash": "f6d7d1296207b731d2aa8526f8891f56",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 71,
"avg_line_length": 25.178571428571427,
"alnum_prop": 0.6907801418439716,
"repo_name": "ericholscher/merchant",
"id": "1bf9bd1fa9d55695c3055ee597f20bed8a391c3c",
"size": "705",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "billing/templatetags/jinja2_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from oslo_log import log
from stevedore import extension
from ceilometer import agent
from ceilometer.pipeline import base
LOG = log.getLogger(__name__)
class SampleEndpoint(base.NotificationEndpoint):
    """Notification endpoint that converts incoming messages into samples."""

    def info(self, notifications):
        """Convert message at info level to Ceilometer sample.

        :param notifications: list of notifications
        """
        return self.process_notifications('info', notifications)

    def sample(self, notifications):
        """Convert message at sample level to Ceilometer Event.

        :param notifications: list of notifications
        """
        return self.process_notifications('sample', notifications)

    def process_notifications(self, priority, notifications):
        """Build and publish samples for each notification.

        Errors are logged per-message so one bad notification does not
        abort processing of the rest of the batch.
        """
        for message in notifications:
            try:
                with self.publisher as p:
                    p(list(self.build_sample(message)))
            except Exception:
                LOG.error('Fail to process notification', exc_info=True)

    @staticmethod
    def build_sample(notification):
        """Build sample from provided notification.

        Hook for subclasses. Fixed to be a @staticmethod: as a plain method
        the call ``self.build_sample(message)`` would bind the instance to
        ``notification`` and raise TypeError for the extra argument.
        """
        pass
class SampleSource(base.PipelineSource):
    """Represents a source of samples.

    In effect it is a set of notification handlers processing
    samples for a set of matching meters. Each source encapsulates meter name
    matching and mapping to one or more sinks for publication.
    """

    def __init__(self, cfg):
        # cfg: dict-like pipeline source definition; must contain 'meters'.
        super(SampleSource, self).__init__(cfg)
        try:
            self.meters = cfg['meters']
        except KeyError:
            # Translate the missing key into the pipeline's own error type so
            # callers only have to handle PipelineException.
            raise base.PipelineException("Missing meters value", cfg)
        try:
            self.check_source_filtering(self.meters, 'meters')
        except agent.SourceException as err:
            raise base.PipelineException(err.msg, cfg)

    def support_meter(self, meter_name):
        # True when meter_name matches this source's meter filter list.
        return self.is_supported(self.meters, meter_name)
class SampleSink(base.Sink):
    """Sink that fans samples out to its configured publishers."""

    def publish_samples(self, samples):
        """Push samples into pipeline for publishing.

        :param samples: Sample list.
        """
        if not samples:
            return
        for p in self.publishers:
            try:
                p.publish_samples(samples)
            except Exception:
                # Lazy logging args (instead of eager %-interpolation):
                # formatting is deferred until the record is emitted, so a
                # broken __str__ on a publisher cannot kill the loop, and
                # no work is done when the level is filtered out.
                LOG.error("Pipeline %(pipeline)s: Continue after "
                          "error from publisher %(pub)s",
                          {'pipeline': self, 'pub': p},
                          exc_info=True)

    @staticmethod
    def flush():
        # Samples are published eagerly; nothing is buffered in this sink.
        pass
class SamplePipeline(base.Pipeline):
    """Represents a pipeline for Samples."""

    def _validate_volume(self, s):
        """Return True when sample ``s`` carries a usable numeric volume.

        Samples with a missing or non-numeric volume are logged and dropped
        by the caller (publish_data).
        """
        volume = s.volume
        if volume is None:
            # Lazy %-style logger args instead of eager interpolation.
            LOG.warning(
                'metering data %(counter_name)s for %(resource_id)s '
                '@ %(timestamp)s has no volume (volume: None), the sample will'
                ' be dropped',
                {'counter_name': s.name,
                 'resource_id': s.resource_id,
                 'timestamp': s.timestamp if s.timestamp else 'NO TIMESTAMP'})
            return False
        if not isinstance(volume, (int, float)):
            try:
                volume = float(volume)
            except ValueError:
                LOG.warning(
                    'metering data %(counter_name)s for %(resource_id)s '
                    '@ %(timestamp)s has volume which is not a number '
                    '(volume: %(counter_volume)s), the sample will be dropped',
                    {'counter_name': s.name,
                     'resource_id': s.resource_id,
                     'timestamp': (
                         s.timestamp if s.timestamp else 'NO TIMESTAMP'),
                     'counter_volume': volume})
                return False
        return True

    def publish_data(self, samples):
        """Filter out unsupported/invalid samples and hand the rest to the sink."""
        if not isinstance(samples, list):
            samples = [samples]
        supported = [s for s in samples
                     if self.supported(s) and self._validate_volume(s)]
        self.sink.publish_samples(supported)

    def supported(self, sample):
        # A sample is supported when the source's meter filter matches its name.
        return self.source.support_meter(sample.name)
class SamplePipelineManager(base.PipelineManager):
    """Pipeline manager wired to the sample-specific pipeline primitives."""

    pm_type = 'sample'
    pm_pipeline = SamplePipeline
    pm_source = SampleSource
    pm_sink = SampleSink

    def __init__(self, conf):
        super(SamplePipelineManager, self).__init__(
            conf, conf.pipeline_cfg_file)

    def get_main_endpoints(self):
        """Load and return all 'ceilometer.sample.endpoint' plugin objects."""
        manager = extension.ExtensionManager(
            namespace='ceilometer.sample.endpoint',
            invoke_on_load=True,
            invoke_args=(self.conf, self.publisher()))
        return [loaded.obj for loaded in manager]
| {
"content_hash": "9be15e0b71df594011fc76470ba4628d",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 79,
"avg_line_length": 32.84027777777778,
"alnum_prop": 0.5730598435187143,
"repo_name": "openstack/ceilometer",
"id": "79999d0bbf2681f441f82297a9100a4f4ceb73fd",
"size": "5276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/pipeline/sample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1333367"
},
{
"name": "Shell",
"bytes": "18703"
}
],
"symlink_target": ""
} |
'''
MAVLink protocol implementation (auto-generated by mavgen.py)
Generated from: common.xml
Note: this file has been auto-generated. DO NOT EDIT
'''
import struct, array, mavutil, time, json
WIRE_PROTOCOL_VERSION = "1.0"

# some base types from mavlink_types.h
# NOTE(review): these ordinals come from the C-side mavlink_message_type_t
# enum — presumably they must stay in sync with mavlink_types.h; do not
# renumber independently.
MAVLINK_TYPE_CHAR = 0
MAVLINK_TYPE_UINT8_T = 1
MAVLINK_TYPE_INT8_T = 2
MAVLINK_TYPE_UINT16_T = 3
MAVLINK_TYPE_INT16_T = 4
MAVLINK_TYPE_UINT32_T = 5
MAVLINK_TYPE_INT32_T = 6
MAVLINK_TYPE_UINT64_T = 7
MAVLINK_TYPE_INT64_T = 8
MAVLINK_TYPE_FLOAT = 9
MAVLINK_TYPE_DOUBLE = 10
class MAVLink_header(object):
    '''Six-byte MAVLink 1.0 message header (marker, len, seq, sys, comp, id).'''

    def __init__(self, msgId, mlen=0, seq=0, srcSystem=0, srcComponent=0):
        self.msgId = msgId
        self.mlen = mlen
        self.seq = seq
        self.srcSystem = srcSystem
        self.srcComponent = srcComponent

    def pack(self):
        # 254 (0xFE) is the MAVLink 1.0 start-of-frame marker byte.
        return struct.pack('BBBBBB', 254, self.mlen, self.seq,
                           self.srcSystem, self.srcComponent, self.msgId)
class MAVLink_message(object):
    '''Base class for all MAVLink messages.

    Stores the shared header, raw payload, encoded buffer and CRC.
    Generated subclasses add named fields (listed in _fieldnames) and a
    pack() override supplying the per-message CRC-extra byte.
    '''

    def __init__(self, msgId, name):
        self._header = MAVLink_header(msgId)
        self._payload = None
        self._msgbuf = None
        self._crc = None
        self._fieldnames = []
        self._type = name

    def get_msgbuf(self):
        # _msgbuf is either a str (py2) or an array.array; normalise to str.
        if isinstance(self._msgbuf, str):
            return self._msgbuf
        return self._msgbuf.tostring()

    def get_header(self):
        return self._header

    def get_payload(self):
        return self._payload

    def get_crc(self):
        return self._crc

    def get_fieldnames(self):
        return self._fieldnames

    def get_type(self):
        return self._type

    def get_msgId(self):
        return self._header.msgId

    def get_srcSystem(self):
        return self._header.srcSystem

    def get_srcComponent(self):
        return self._header.srcComponent

    def get_seq(self):
        return self._header.seq

    def __str__(self):
        ret = '%s {' % self._type
        for a in self._fieldnames:
            v = getattr(self, a)
            ret += '%s : %s, ' % (a, v)
        ret = ret[0:-2] + '}'
        return ret

    def to_dict(self):
        d = dict({})
        d['mavpackettype'] = self._type
        for a in self._fieldnames:
            d[a] = getattr(self, a)
        return d

    def to_json(self):
        # BUG FIX: previously json.dumps(self.to_dict) serialized the bound
        # method object itself (raising TypeError); the method must be called.
        return json.dumps(self.to_dict())

    def pack(self, mav, crc_extra, payload):
        '''Encode header + payload + CRC-16/X25 (seeded with crc_extra)
        into _msgbuf and return it.'''
        self._payload = payload
        self._header = MAVLink_header(self._header.msgId, len(payload), mav.seq,
                                      mav.srcSystem, mav.srcComponent)
        self._msgbuf = self._header.pack() + payload
        # CRC covers everything after the start-of-frame marker byte.
        crc = mavutil.x25crc(self._msgbuf[1:])
        if True:  # using CRC extra
            crc.accumulate(chr(crc_extra))
        self._crc = crc.crc
        self._msgbuf += struct.pack('<H', self._crc)
        return self._msgbuf
# enums
# MAV_AUTOPILOT
MAV_AUTOPILOT_GENERIC = 0 # Generic autopilot, full support for everything
MAV_AUTOPILOT_PIXHAWK = 1 # PIXHAWK autopilot, http://pixhawk.ethz.ch
MAV_AUTOPILOT_SLUGS = 2 # SLUGS autopilot, http://slugsuav.soe.ucsc.edu
MAV_AUTOPILOT_ARDUPILOTMEGA = 3 # ArduPilotMega / ArduCopter, http://diydrones.com
MAV_AUTOPILOT_OPENPILOT = 4 # OpenPilot, http://openpilot.org
MAV_AUTOPILOT_GENERIC_WAYPOINTS_ONLY = 5 # Generic autopilot only supporting simple waypoints
MAV_AUTOPILOT_GENERIC_WAYPOINTS_AND_SIMPLE_NAVIGATION_ONLY = 6 # Generic autopilot supporting waypoints and other simple navigation
# commands
MAV_AUTOPILOT_GENERIC_MISSION_FULL = 7 # Generic autopilot supporting the full mission command set
MAV_AUTOPILOT_INVALID = 8 # No valid autopilot, e.g. a GCS or other MAVLink component
MAV_AUTOPILOT_PPZ = 9 # PPZ UAV - http://nongnu.org/paparazzi
MAV_AUTOPILOT_UDB = 10 # UAV Dev Board
MAV_AUTOPILOT_FP = 11 # FlexiPilot
MAV_AUTOPILOT_PX4 = 12 # PX4 Autopilot - http://pixhawk.ethz.ch/px4/
MAV_AUTOPILOT_ENUM_END = 13 #
# MAV_TYPE
MAV_TYPE_GENERIC = 0 # Generic micro air vehicle.
MAV_TYPE_FIXED_WING = 1 # Fixed wing aircraft.
MAV_TYPE_QUADROTOR = 2 # Quadrotor
MAV_TYPE_COAXIAL = 3 # Coaxial helicopter
MAV_TYPE_HELICOPTER = 4 # Normal helicopter with tail rotor.
MAV_TYPE_ANTENNA_TRACKER = 5 # Ground installation
MAV_TYPE_GCS = 6 # Operator control unit / ground control station
MAV_TYPE_AIRSHIP = 7 # Airship, controlled
MAV_TYPE_FREE_BALLOON = 8 # Free balloon, uncontrolled
MAV_TYPE_ROCKET = 9 # Rocket
MAV_TYPE_GROUND_ROVER = 10 # Ground rover
MAV_TYPE_SURFACE_BOAT = 11 # Surface vessel, boat, ship
MAV_TYPE_SUBMARINE = 12 # Submarine
MAV_TYPE_HEXAROTOR = 13 # Hexarotor
MAV_TYPE_OCTOROTOR = 14 # Octorotor
MAV_TYPE_TRICOPTER = 15 # Tricopter (generator had copy-pasted "Octorotor")
MAV_TYPE_FLAPPING_WING = 16 # Flapping wing
MAV_TYPE_KITE = 17 # Kite (generator had copy-pasted "Flapping wing")
MAV_TYPE_ENUM_END = 18 #
# MAV_MODE_FLAG
MAV_MODE_FLAG_CUSTOM_MODE_ENABLED = 1 # 0b00000001 Reserved for future use.
MAV_MODE_FLAG_TEST_ENABLED = 2 # 0b00000010 system has a test mode enabled. This flag is intended for
# temporary system tests and should not be
# used for stable implementations.
MAV_MODE_FLAG_AUTO_ENABLED = 4 # 0b00000100 autonomous mode enabled, system finds its own goal
# positions. Guided flag can be set or not,
# depends on the actual implementation.
MAV_MODE_FLAG_GUIDED_ENABLED = 8 # 0b00001000 guided mode enabled, system flies MISSIONs / mission items.
MAV_MODE_FLAG_STABILIZE_ENABLED = 16 # 0b00010000 system stabilizes electronically its attitude (and
# optionally position). It needs however
# further control inputs to move around.
MAV_MODE_FLAG_HIL_ENABLED = 32 # 0b00100000 hardware in the loop simulation. All motors / actuators are
# blocked, but internal software is full
# operational.
MAV_MODE_FLAG_MANUAL_INPUT_ENABLED = 64 # 0b01000000 remote control input is enabled.
MAV_MODE_FLAG_SAFETY_ARMED = 128 # 0b10000000 MAV safety set to armed. Motors are enabled / running / can
# start. Ready to fly.
MAV_MODE_FLAG_ENUM_END = 129 #
# MAV_MODE_FLAG_DECODE_POSITION
MAV_MODE_FLAG_DECODE_POSITION_CUSTOM_MODE = 1 # Eighth bit: 00000001
MAV_MODE_FLAG_DECODE_POSITION_TEST = 2 # Seventh bit: 00000010
MAV_MODE_FLAG_DECODE_POSITION_AUTO = 4 # Sixth bit: 00000100
MAV_MODE_FLAG_DECODE_POSITION_GUIDED = 8 # Fifth bit: 00001000
MAV_MODE_FLAG_DECODE_POSITION_STABILIZE = 16 # Fourth bit: 00010000
MAV_MODE_FLAG_DECODE_POSITION_HIL = 32 # Third bit: 00100000
MAV_MODE_FLAG_DECODE_POSITION_MANUAL = 64 # Second bit: 01000000
MAV_MODE_FLAG_DECODE_POSITION_SAFETY = 128 # First bit: 10000000
MAV_MODE_FLAG_DECODE_POSITION_ENUM_END = 129 #
# MAV_GOTO
MAV_GOTO_DO_HOLD = 0 # Hold at the current position.
MAV_GOTO_DO_CONTINUE = 1 # Continue with the next item in mission execution.
MAV_GOTO_HOLD_AT_CURRENT_POSITION = 2 # Hold at the current position of the system
MAV_GOTO_HOLD_AT_SPECIFIED_POSITION = 3 # Hold at the position specified in the parameters of the DO_HOLD action
MAV_GOTO_ENUM_END = 4 #
# MAV_MODE
MAV_MODE_PREFLIGHT = 0 # System is not ready to fly, booting, calibrating, etc. No flag is set.
MAV_MODE_MANUAL_DISARMED = 64 # System is allowed to be active, under manual (RC) control, no
# stabilization
MAV_MODE_TEST_DISARMED = 66 # UNDEFINED mode. This solely depends on the autopilot - use with
# caution, intended for developers only.
MAV_MODE_STABILIZE_DISARMED = 80 # System is allowed to be active, under assisted RC control.
MAV_MODE_GUIDED_DISARMED = 88 # System is allowed to be active, under autonomous control, manual
# setpoint
MAV_MODE_AUTO_DISARMED = 92 # System is allowed to be active, under autonomous control and
# navigation (the trajectory is decided
# onboard and not pre-programmed by MISSIONs)
MAV_MODE_MANUAL_ARMED = 192 # System is allowed to be active, under manual (RC) control, no
# stabilization
MAV_MODE_TEST_ARMED = 194 # UNDEFINED mode. This solely depends on the autopilot - use with
# caution, intended for developers only.
MAV_MODE_STABILIZE_ARMED = 208 # System is allowed to be active, under assisted RC control.
MAV_MODE_GUIDED_ARMED = 216 # System is allowed to be active, under autonomous control, manual
# setpoint
MAV_MODE_AUTO_ARMED = 220 # System is allowed to be active, under autonomous control and
# navigation (the trajectory is decided
# onboard and not pre-programmed by MISSIONs)
MAV_MODE_ENUM_END = 221 #
# MAV_STATE
MAV_STATE_UNINIT = 0 # Uninitialized system, state is unknown.
MAV_STATE_BOOT = 1 # System is booting up.
MAV_STATE_CALIBRATING = 2 # System is calibrating and not flight-ready.
MAV_STATE_STANDBY = 3 # System is grounded and on standby. It can be launched any time.
MAV_STATE_ACTIVE = 4 # System is active and might be already airborne. Motors are engaged.
MAV_STATE_CRITICAL = 5 # System is in a non-normal flight mode. It can however still navigate.
MAV_STATE_EMERGENCY = 6 # System is in a non-normal flight mode. It lost control over parts or
# over the whole airframe. It is in mayday and
# going down.
MAV_STATE_POWEROFF = 7 # System just initialized its power-down sequence, will shut down now.
MAV_STATE_ENUM_END = 8 #
# MAV_COMPONENT
MAV_COMP_ID_ALL = 0 #
MAV_COMP_ID_CAMERA = 100 #
MAV_COMP_ID_SERVO1 = 140 #
MAV_COMP_ID_SERVO2 = 141 #
MAV_COMP_ID_SERVO3 = 142 #
MAV_COMP_ID_SERVO4 = 143 #
MAV_COMP_ID_SERVO5 = 144 #
MAV_COMP_ID_SERVO6 = 145 #
MAV_COMP_ID_SERVO7 = 146 #
MAV_COMP_ID_SERVO8 = 147 #
MAV_COMP_ID_SERVO9 = 148 #
MAV_COMP_ID_SERVO10 = 149 #
MAV_COMP_ID_SERVO11 = 150 #
MAV_COMP_ID_SERVO12 = 151 #
MAV_COMP_ID_SERVO13 = 152 #
MAV_COMP_ID_SERVO14 = 153 #
MAV_COMP_ID_MAPPER = 180 #
MAV_COMP_ID_MISSIONPLANNER = 190 #
MAV_COMP_ID_PATHPLANNER = 195 #
MAV_COMP_ID_IMU = 200 #
MAV_COMP_ID_IMU_2 = 201 #
MAV_COMP_ID_IMU_3 = 202 #
MAV_COMP_ID_GPS = 220 #
MAV_COMP_ID_UDP_BRIDGE = 240 #
MAV_COMP_ID_UART_BRIDGE = 241 #
MAV_COMP_ID_SYSTEM_CONTROL = 250 #
MAV_COMPONENT_ENUM_END = 251 #
# MAV_FRAME
MAV_FRAME_GLOBAL = 0 # Global coordinate frame, WGS84 coordinate system. First value / x:
# latitude, second value / y: longitude, third
# value / z: positive altitude over mean sea
# level (MSL)
MAV_FRAME_LOCAL_NED = 1 # Local coordinate frame, Z-up (x: north, y: east, z: down).
MAV_FRAME_MISSION = 2 # NOT a coordinate frame, indicates a mission command.
MAV_FRAME_GLOBAL_RELATIVE_ALT = 3 # Global coordinate frame, WGS84 coordinate system, relative altitude
# over ground with respect to the home
# position. First value / x: latitude, second
# value / y: longitude, third value / z:
# positive altitude with 0 being at the
# altitude of the home location.
MAV_FRAME_LOCAL_ENU = 4 # Local coordinate frame, Z-down (x: east, y: north, z: up)
MAV_FRAME_ENUM_END = 5 #
# MAVLINK_DATA_STREAM_TYPE
MAVLINK_DATA_STREAM_IMG_JPEG = 1 #
MAVLINK_DATA_STREAM_IMG_BMP = 2 #
MAVLINK_DATA_STREAM_IMG_RAW8U = 3 #
MAVLINK_DATA_STREAM_IMG_RAW32U = 4 #
MAVLINK_DATA_STREAM_IMG_PGM = 5 #
MAVLINK_DATA_STREAM_IMG_PNG = 6 #
MAVLINK_DATA_STREAM_TYPE_ENUM_END = 7 #
# MAV_CMD
MAV_CMD_NAV_WAYPOINT = 16 # Navigate to MISSION.
MAV_CMD_NAV_LOITER_UNLIM = 17 # Loiter around this MISSION an unlimited amount of time
MAV_CMD_NAV_LOITER_TURNS = 18 # Loiter around this MISSION for X turns
MAV_CMD_NAV_LOITER_TIME = 19 # Loiter around this MISSION for X seconds
MAV_CMD_NAV_RETURN_TO_LAUNCH = 20 # Return to launch location
MAV_CMD_NAV_LAND = 21 # Land at location
MAV_CMD_NAV_TAKEOFF = 22 # Takeoff from ground / hand
MAV_CMD_NAV_ROI = 80 # Sets the region of interest (ROI) for a sensor set or the vehicle
# itself. This can then be used by the
# vehicles control system to control the
# vehicle attitude and the attitude of various
# sensors such as cameras.
MAV_CMD_NAV_PATHPLANNING = 81 # Control autonomous path planning on the MAV.
MAV_CMD_NAV_LAST = 95 # NOP - This command is only used to mark the upper limit of the
# NAV/ACTION commands in the enumeration
MAV_CMD_CONDITION_DELAY = 112 # Delay mission state machine.
MAV_CMD_CONDITION_CHANGE_ALT = 113 # Ascend/descend at rate. Delay mission state machine until desired
# altitude reached.
MAV_CMD_CONDITION_DISTANCE = 114 # Delay mission state machine until within desired distance of next NAV
# point.
MAV_CMD_CONDITION_YAW = 115 # Reach a certain target angle.
MAV_CMD_CONDITION_LAST = 159 # NOP - This command is only used to mark the upper limit of the
# CONDITION commands in the enumeration
MAV_CMD_DO_SET_MODE = 176 # Set system mode.
MAV_CMD_DO_JUMP = 177 # Jump to the desired command in the mission list. Repeat this action
# only the specified number of times
MAV_CMD_DO_CHANGE_SPEED = 178 # Change speed and/or throttle set points.
MAV_CMD_DO_SET_HOME = 179 # Changes the home location either to the current location or a
# specified location.
MAV_CMD_DO_SET_PARAMETER = 180 # Set a system parameter. Caution! Use of this command requires
# knowledge of the numeric enumeration value
# of the parameter.
MAV_CMD_DO_SET_RELAY = 181 # Set a relay to a condition.
MAV_CMD_DO_REPEAT_RELAY = 182 # Cycle a relay on and off for a desired number of cyles with a desired
# period.
MAV_CMD_DO_SET_SERVO = 183 # Set a servo to a desired PWM value.
MAV_CMD_DO_REPEAT_SERVO = 184 # Cycle a between its nominal setting and a desired PWM for a desired
# number of cycles with a desired period.
MAV_CMD_DO_CONTROL_VIDEO = 200 # Control onboard camera system.
MAV_CMD_DO_LAST = 240 # NOP - This command is only used to mark the upper limit of the DO
# commands in the enumeration
MAV_CMD_PREFLIGHT_CALIBRATION = 241 # Trigger calibration. This command will be only accepted if in pre-
# flight mode.
MAV_CMD_PREFLIGHT_SET_SENSOR_OFFSETS = 242 # Set sensor offsets. This command will be only accepted if in pre-
# flight mode.
MAV_CMD_PREFLIGHT_STORAGE = 245 # Request storage of different parameter values and logs. This command
# will be only accepted if in pre-flight mode.
MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN = 246 # Request the reboot or shutdown of system components.
MAV_CMD_OVERRIDE_GOTO = 252 # Hold / continue the current action
MAV_CMD_MISSION_START = 300 # start running a mission
MAV_CMD_COMPONENT_ARM_DISARM = 400 # Arms / Disarms a component
MAV_CMD_ENUM_END = 401 #
# MAV_DATA_STREAM
MAV_DATA_STREAM_ALL = 0 # Enable all data streams
MAV_DATA_STREAM_RAW_SENSORS = 1 # Enable IMU_RAW, GPS_RAW, GPS_STATUS packets.
MAV_DATA_STREAM_EXTENDED_STATUS = 2 # Enable GPS_STATUS, CONTROL_STATUS, AUX_STATUS
MAV_DATA_STREAM_RC_CHANNELS = 3 # Enable RC_CHANNELS_SCALED, RC_CHANNELS_RAW, SERVO_OUTPUT_RAW
MAV_DATA_STREAM_RAW_CONTROLLER = 4 # Enable ATTITUDE_CONTROLLER_OUTPUT, POSITION_CONTROLLER_OUTPUT,
# NAV_CONTROLLER_OUTPUT.
MAV_DATA_STREAM_POSITION = 6 # Enable LOCAL_POSITION, GLOBAL_POSITION/GLOBAL_POSITION_INT messages.
MAV_DATA_STREAM_EXTRA1 = 10 # Dependent on the autopilot
MAV_DATA_STREAM_EXTRA2 = 11 # Dependent on the autopilot
MAV_DATA_STREAM_EXTRA3 = 12 # Dependent on the autopilot
MAV_DATA_STREAM_ENUM_END = 13 #
# MAV_ROI
MAV_ROI_NONE = 0 # No region of interest.
MAV_ROI_WPNEXT = 1 # Point toward next MISSION.
MAV_ROI_WPINDEX = 2 # Point toward given MISSION.
MAV_ROI_LOCATION = 3 # Point toward fixed location.
MAV_ROI_TARGET = 4 # Point toward of given id.
MAV_ROI_ENUM_END = 5 #
# MAV_CMD_ACK
MAV_CMD_ACK_OK = 1 # Command / mission item is ok.
MAV_CMD_ACK_ERR_FAIL = 2 # Generic error message if none of the other reasons fails or if no
# detailed error reporting is implemented.
MAV_CMD_ACK_ERR_ACCESS_DENIED = 3 # The system is refusing to accept this command from this source /
# communication partner.
MAV_CMD_ACK_ERR_NOT_SUPPORTED = 4 # Command or mission item is not supported, other commands would be
# accepted.
MAV_CMD_ACK_ERR_COORDINATE_FRAME_NOT_SUPPORTED = 5 # The coordinate frame of this command / mission item is not supported.
MAV_CMD_ACK_ERR_COORDINATES_OUT_OF_RANGE = 6 # The coordinate frame of this command is ok, but he coordinate values
# exceed the safety limits of this system.
# This is a generic error, please use the more
# specific error messages below if possible.
MAV_CMD_ACK_ERR_X_LAT_OUT_OF_RANGE = 7 # The X or latitude value is out of range.
MAV_CMD_ACK_ERR_Y_LON_OUT_OF_RANGE = 8 # The Y or longitude value is out of range.
MAV_CMD_ACK_ERR_Z_ALT_OUT_OF_RANGE = 9 # The Z or altitude value is out of range.
MAV_CMD_ACK_ENUM_END = 10 #
# MAV_PARAM_TYPE
MAV_PARAM_TYPE_UINT8 = 1 # 8-bit unsigned integer
MAV_PARAM_TYPE_INT8 = 2 # 8-bit signed integer
MAV_PARAM_TYPE_UINT16 = 3 # 16-bit unsigned integer
MAV_PARAM_TYPE_INT16 = 4 # 16-bit signed integer
MAV_PARAM_TYPE_UINT32 = 5 # 32-bit unsigned integer
MAV_PARAM_TYPE_INT32 = 6 # 32-bit signed integer
MAV_PARAM_TYPE_UINT64 = 7 # 64-bit unsigned integer
MAV_PARAM_TYPE_INT64 = 8 # 64-bit signed integer
MAV_PARAM_TYPE_REAL32 = 9 # 32-bit floating-point
MAV_PARAM_TYPE_REAL64 = 10 # 64-bit floating-point
MAV_PARAM_TYPE_ENUM_END = 11 #
# MAV_RESULT
MAV_RESULT_ACCEPTED = 0 # Command ACCEPTED and EXECUTED
MAV_RESULT_TEMPORARILY_REJECTED = 1 # Command TEMPORARY REJECTED/DENIED
MAV_RESULT_DENIED = 2 # Command PERMANENTLY DENIED
MAV_RESULT_UNSUPPORTED = 3 # Command UNKNOWN/UNSUPPORTED
MAV_RESULT_FAILED = 4 # Command executed, but failed
MAV_RESULT_ENUM_END = 5 #
# MAV_MISSION_RESULT
MAV_MISSION_ACCEPTED = 0 # mission accepted OK
MAV_MISSION_ERROR = 1 # generic error / not accepting mission commands at all right now
MAV_MISSION_UNSUPPORTED_FRAME = 2 # coordinate frame is not supported
MAV_MISSION_UNSUPPORTED = 3 # command is not supported
MAV_MISSION_NO_SPACE = 4 # mission item exceeds storage space
MAV_MISSION_INVALID = 5 # one of the parameters has an invalid value
MAV_MISSION_INVALID_PARAM1 = 6 # param1 has an invalid value
MAV_MISSION_INVALID_PARAM2 = 7 # param2 has an invalid value
MAV_MISSION_INVALID_PARAM3 = 8 # param3 has an invalid value
MAV_MISSION_INVALID_PARAM4 = 9 # param4 has an invalid value
MAV_MISSION_INVALID_PARAM5_X = 10 # x/param5 has an invalid value
MAV_MISSION_INVALID_PARAM6_Y = 11 # y/param6 has an invalid value
MAV_MISSION_INVALID_PARAM7 = 12 # param7 has an invalid value
MAV_MISSION_INVALID_SEQUENCE = 13 # received waypoint out of sequence
MAV_MISSION_DENIED = 14 # not accepting any mission commands from this communication partner
MAV_MISSION_RESULT_ENUM_END = 15 #
# MAV_SEVERITY
MAV_SEVERITY_EMERGENCY = 0 # System is unusable. This is a "panic" condition.
MAV_SEVERITY_ALERT = 1 # Action should be taken immediately. Indicates error in non-critical
# systems.
MAV_SEVERITY_CRITICAL = 2 # Action must be taken immediately. Indicates failure in a primary
# system.
MAV_SEVERITY_ERROR = 3 # Indicates an error in secondary/redundant systems.
MAV_SEVERITY_WARNING = 4 # Indicates about a possible future error if this is not resolved within
# a given timeframe. Example would be a low
# battery warning.
MAV_SEVERITY_NOTICE = 5 # An unusual event has occured, though not an error condition. This
# should be investigated for the root cause.
MAV_SEVERITY_INFO = 6 # Normal operational messages. Useful for logging. No action is required
# for these messages.
MAV_SEVERITY_DEBUG = 7 # Useful non-operational messages that can assist in debugging. These
# should not occur during normal operation.
MAV_SEVERITY_ENUM_END = 8 #
# message IDs
MAVLINK_MSG_ID_BAD_DATA = -1
MAVLINK_MSG_ID_HEARTBEAT = 0
MAVLINK_MSG_ID_SYS_STATUS = 1
MAVLINK_MSG_ID_SYSTEM_TIME = 2
MAVLINK_MSG_ID_PING = 4
MAVLINK_MSG_ID_CHANGE_OPERATOR_CONTROL = 5
MAVLINK_MSG_ID_CHANGE_OPERATOR_CONTROL_ACK = 6
MAVLINK_MSG_ID_AUTH_KEY = 7
MAVLINK_MSG_ID_SET_MODE = 11
MAVLINK_MSG_ID_PARAM_REQUEST_READ = 20
MAVLINK_MSG_ID_PARAM_REQUEST_LIST = 21
MAVLINK_MSG_ID_PARAM_VALUE = 22
MAVLINK_MSG_ID_PARAM_SET = 23
MAVLINK_MSG_ID_GPS_RAW_INT = 24
MAVLINK_MSG_ID_GPS_STATUS = 25
MAVLINK_MSG_ID_SCALED_IMU = 26
MAVLINK_MSG_ID_RAW_IMU = 27
MAVLINK_MSG_ID_RAW_PRESSURE = 28
MAVLINK_MSG_ID_SCALED_PRESSURE = 29
MAVLINK_MSG_ID_ATTITUDE = 30
MAVLINK_MSG_ID_ATTITUDE_QUATERNION = 31
MAVLINK_MSG_ID_LOCAL_POSITION_NED = 32
MAVLINK_MSG_ID_GLOBAL_POSITION_INT = 33
MAVLINK_MSG_ID_RC_CHANNELS_SCALED = 34
MAVLINK_MSG_ID_RC_CHANNELS_RAW = 35
MAVLINK_MSG_ID_SERVO_OUTPUT_RAW = 36
MAVLINK_MSG_ID_MISSION_REQUEST_PARTIAL_LIST = 37
MAVLINK_MSG_ID_MISSION_WRITE_PARTIAL_LIST = 38
MAVLINK_MSG_ID_MISSION_ITEM = 39
MAVLINK_MSG_ID_MISSION_REQUEST = 40
MAVLINK_MSG_ID_MISSION_SET_CURRENT = 41
MAVLINK_MSG_ID_MISSION_CURRENT = 42
MAVLINK_MSG_ID_MISSION_REQUEST_LIST = 43
MAVLINK_MSG_ID_MISSION_COUNT = 44
MAVLINK_MSG_ID_MISSION_CLEAR_ALL = 45
MAVLINK_MSG_ID_MISSION_ITEM_REACHED = 46
MAVLINK_MSG_ID_MISSION_ACK = 47
MAVLINK_MSG_ID_SET_GPS_GLOBAL_ORIGIN = 48
MAVLINK_MSG_ID_GPS_GLOBAL_ORIGIN = 49
MAVLINK_MSG_ID_SET_LOCAL_POSITION_SETPOINT = 50
MAVLINK_MSG_ID_LOCAL_POSITION_SETPOINT = 51
MAVLINK_MSG_ID_GLOBAL_POSITION_SETPOINT_INT = 52
MAVLINK_MSG_ID_SET_GLOBAL_POSITION_SETPOINT_INT = 53
MAVLINK_MSG_ID_SAFETY_SET_ALLOWED_AREA = 54
MAVLINK_MSG_ID_SAFETY_ALLOWED_AREA = 55
MAVLINK_MSG_ID_SET_ROLL_PITCH_YAW_THRUST = 56
MAVLINK_MSG_ID_SET_ROLL_PITCH_YAW_SPEED_THRUST = 57
MAVLINK_MSG_ID_ROLL_PITCH_YAW_THRUST_SETPOINT = 58
MAVLINK_MSG_ID_ROLL_PITCH_YAW_SPEED_THRUST_SETPOINT = 59
MAVLINK_MSG_ID_SET_QUAD_MOTORS_SETPOINT = 60
MAVLINK_MSG_ID_SET_QUAD_SWARM_ROLL_PITCH_YAW_THRUST = 61
MAVLINK_MSG_ID_NAV_CONTROLLER_OUTPUT = 62
MAVLINK_MSG_ID_SET_QUAD_SWARM_LED_ROLL_PITCH_YAW_THRUST = 63
MAVLINK_MSG_ID_STATE_CORRECTION = 64
MAVLINK_MSG_ID_REQUEST_DATA_STREAM = 66
MAVLINK_MSG_ID_DATA_STREAM = 67
MAVLINK_MSG_ID_MANUAL_CONTROL = 69
MAVLINK_MSG_ID_RC_CHANNELS_OVERRIDE = 70
MAVLINK_MSG_ID_VFR_HUD = 74
MAVLINK_MSG_ID_COMMAND_LONG = 76
MAVLINK_MSG_ID_COMMAND_ACK = 77
MAVLINK_MSG_ID_ROLL_PITCH_YAW_RATES_THRUST_SETPOINT = 80
MAVLINK_MSG_ID_MANUAL_SETPOINT = 81
MAVLINK_MSG_ID_LOCAL_POSITION_NED_SYSTEM_GLOBAL_OFFSET = 89
MAVLINK_MSG_ID_HIL_STATE = 90
MAVLINK_MSG_ID_HIL_CONTROLS = 91
MAVLINK_MSG_ID_HIL_RC_INPUTS_RAW = 92
MAVLINK_MSG_ID_OPTICAL_FLOW = 100
MAVLINK_MSG_ID_GLOBAL_VISION_POSITION_ESTIMATE = 101
MAVLINK_MSG_ID_VISION_POSITION_ESTIMATE = 102
MAVLINK_MSG_ID_VISION_SPEED_ESTIMATE = 103
MAVLINK_MSG_ID_VICON_POSITION_ESTIMATE = 104
MAVLINK_MSG_ID_HIGHRES_IMU = 105
MAVLINK_MSG_ID_FILE_TRANSFER_START = 110
MAVLINK_MSG_ID_FILE_TRANSFER_DIR_LIST = 111
MAVLINK_MSG_ID_FILE_TRANSFER_RES = 112
MAVLINK_MSG_ID_BATTERY_STATUS = 147
MAVLINK_MSG_ID_SETPOINT_8DOF = 148
MAVLINK_MSG_ID_SETPOINT_6DOF = 149
MAVLINK_MSG_ID_MEMORY_VECT = 249
MAVLINK_MSG_ID_DEBUG_VECT = 250
MAVLINK_MSG_ID_NAMED_VALUE_FLOAT = 251
MAVLINK_MSG_ID_NAMED_VALUE_INT = 252
MAVLINK_MSG_ID_STATUSTEXT = 253
MAVLINK_MSG_ID_DEBUG = 254
class MAVLink_heartbeat_message(MAVLink_message):
    '''
    Periodic liveness message. Carries the vehicle type, autopilot type,
    mode flags, custom mode, system status and protocol version, letting a
    receiving system adapt its handling (e.g. its user interface) to the
    sender.
    '''

    def __init__(self, type, autopilot, base_mode, custom_mode, system_status, mavlink_version):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_HEARTBEAT, 'HEARTBEAT')
        self._fieldnames = ['type', 'autopilot', 'base_mode', 'custom_mode',
                            'system_status', 'mavlink_version']
        # Bind each constructor argument to the attribute of the same name.
        values = (type, autopilot, base_mode, custom_mode,
                  system_status, mavlink_version)
        for fieldname, fieldvalue in zip(self._fieldnames, values):
            setattr(self, fieldname, fieldvalue)

    def pack(self, mav):
        # CRC-extra 50; the wire layout puts the uint32 custom_mode first.
        payload = struct.pack('<IBBBBB', self.custom_mode, self.type,
                              self.autopilot, self.base_mode,
                              self.system_status, self.mavlink_version)
        return MAVLink_message.pack(self, mav, 50, payload)
class MAVLink_sys_status_message(MAVLink_message):
    '''
    The general system state. If the system is following the
    MAVLink standard, the system state is mainly defined by three
    orthogonal states/modes: The system mode, which is either
    LOCKED (motors shut down and locked), MANUAL (system under RC
    control), GUIDED (system with autonomous position control,
    position setpoint controlled manually) or AUTO (system guided
    by path/waypoint planner). The NAV_MODE defined the current
    flight state: LIFTOFF (often an open-loop maneuver), LANDING,
    WAYPOINTS or VECTOR. This represents the internal navigation
    state machine. The system status shows wether the system is
    currently active or not and if an emergency occured. During
    the CRITICAL and EMERGENCY states the MAV is still considered
    to be active, but should start emergency procedures
    autonomously. After a failure occured it should first move
    from active to critical to allow manual intervention and then
    move to emergency after a certain timeout.
    '''

    def __init__(self, onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_SYS_STATUS, 'SYS_STATUS')
        self._fieldnames = ['onboard_control_sensors_present', 'onboard_control_sensors_enabled', 'onboard_control_sensors_health', 'load', 'voltage_battery', 'current_battery', 'battery_remaining', 'drop_rate_comm', 'errors_comm', 'errors_count1', 'errors_count2', 'errors_count3', 'errors_count4']
        self.onboard_control_sensors_present = onboard_control_sensors_present
        self.onboard_control_sensors_enabled = onboard_control_sensors_enabled
        self.onboard_control_sensors_health = onboard_control_sensors_health
        self.load = load
        self.voltage_battery = voltage_battery
        self.current_battery = current_battery
        self.battery_remaining = battery_remaining
        self.drop_rate_comm = drop_rate_comm
        self.errors_comm = errors_comm
        self.errors_count1 = errors_count1
        self.errors_count2 = errors_count2
        self.errors_count3 = errors_count3
        self.errors_count4 = errors_count4

    def pack(self, mav):
        # CRC-extra 124. NOTE: the wire order differs from the field list —
        # battery_remaining is the final int8 ('b'), packed after the error
        # counters, matching the '<IIIHHhHHHHHHb' format.
        return MAVLink_message.pack(self, mav, 124, struct.pack('<IIIHHhHHHHHHb', self.onboard_control_sensors_present, self.onboard_control_sensors_enabled, self.onboard_control_sensors_health, self.load, self.voltage_battery, self.current_battery, self.drop_rate_comm, self.errors_comm, self.errors_count1, self.errors_count2, self.errors_count3, self.errors_count4, self.battery_remaining))
class MAVLink_system_time_message(MAVLink_message):
    '''
    Master-clock time, typically the computer clock of the main
    onboard computer: a Unix-epoch microsecond timestamp plus the
    boot-relative millisecond counter.
    '''

    def __init__(self, time_unix_usec, time_boot_ms):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_SYSTEM_TIME, 'SYSTEM_TIME')
        self._fieldnames = ['time_unix_usec', 'time_boot_ms']
        self.time_unix_usec = time_unix_usec
        self.time_boot_ms = time_boot_ms

    def pack(self, mav):
        # CRC-extra 137; uint64 epoch microseconds, uint32 boot milliseconds.
        payload = struct.pack('<QI', self.time_unix_usec, self.time_boot_ms)
        return MAVLink_message.pack(self, mav, 137, payload)
class MAVLink_ping_message(MAVLink_message):
    '''
    A ping message either requesting or responding to a ping. This
    allows to measure the system latencies, including serial port,
    radio modem and UDP connections.
    '''

    def __init__(self, time_usec, seq, target_system, target_component):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_PING, 'PING')
        self._fieldnames = ['time_usec', 'seq', 'target_system', 'target_component']
        self.time_usec = time_usec
        self.seq = seq
        self.target_system = target_system
        self.target_component = target_component

    def pack(self, mav):
        # CRC-extra 237; '<QIBB' = uint64 time_usec, uint32 seq, two uint8 ids.
        return MAVLink_message.pack(self, mav, 237, struct.pack('<QIBB', self.time_usec, self.seq, self.target_system, self.target_component))
class MAVLink_change_operator_control_message(MAVLink_message):
    '''
    Request to control this MAV
    '''

    def __init__(self, target_system, control_request, version, passkey):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_CHANGE_OPERATOR_CONTROL, 'CHANGE_OPERATOR_CONTROL')
        self._fieldnames = ['target_system', 'control_request', 'version', 'passkey']
        self.target_system = target_system
        self.control_request = control_request
        self.version = version
        self.passkey = passkey

    def pack(self, mav):
        # CRC-extra 217; passkey occupies a fixed 25-byte field ('25s') and is
        # zero-padded/truncated by struct accordingly.
        return MAVLink_message.pack(self, mav, 217, struct.pack('<BBB25s', self.target_system, self.control_request, self.version, self.passkey))
class MAVLink_change_operator_control_ack_message(MAVLink_message):
        '''
        Accept or deny an operator-control request for this MAV.
        '''
        def __init__(self, gcs_system_id, control_request, ack):
                # Register with the base class as a CHANGE_OPERATOR_CONTROL_ACK message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_CHANGE_OPERATOR_CONTROL_ACK, 'CHANGE_OPERATOR_CONTROL_ACK')
                self._fieldnames = ['gcs_system_id', 'control_request', 'ack']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (gcs_system_id, control_request, ack)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<BBB', self.gcs_system_id, self.control_request, self.ack)
                return MAVLink_message.pack(self, mav, 104, payload)
class MAVLink_auth_key_message(MAVLink_message):
        '''
        Emit an encrypted signature / key identifying this system. NOTE:
        the protocol is deliberately simple, so truly safe key transport
        requires an encrypted channel.
        '''
        def __init__(self, key):
                # Register with the base class as an AUTH_KEY message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_AUTH_KEY, 'AUTH_KEY')
                self._fieldnames = ['key']
                # Bind the single constructor argument to its attribute.
                for fieldname, fieldvalue in zip(self._fieldnames, (key,)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # The key is a fixed 32-byte string field.
                payload = struct.pack('<32s', self.key)
                return MAVLink_message.pack(self, mav, 119, payload)
class MAVLink_set_mode_message(MAVLink_message):
        '''
        Set the system mode (enum MAV_MODE). There is no target component
        id because the mode applies to the whole aircraft, not a single
        component.
        '''
        def __init__(self, target_system, base_mode, custom_mode):
                # Register with the base class as a SET_MODE message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_MODE, 'SET_MODE')
                self._fieldnames = ['target_system', 'base_mode', 'custom_mode']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, base_mode, custom_mode)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order differs from constructor order: fields are sorted
                # by size, so the uint32 custom_mode comes first.
                payload = struct.pack('<IBB', self.custom_mode, self.target_system, self.base_mode)
                return MAVLink_message.pack(self, mav, 89, payload)
class MAVLink_param_request_read_message(MAVLink_message):
        '''
        Request one onboard parameter by its param_id string. Onboard
        parameters are key[const char*] -> value[float] pairs, letting a
        GCS exchange parameters with any autopilot without prior
        knowledge of the parameter names. See
        http://qgroundcontrol.org/parameter_interface for the full
        protocol description.
        '''
        def __init__(self, target_system, target_component, param_id, param_index):
                # Register with the base class as a PARAM_REQUEST_READ message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_PARAM_REQUEST_READ, 'PARAM_REQUEST_READ')
                self._fieldnames = ['target_system', 'target_component', 'param_id', 'param_index']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, param_id, param_index)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order: int16 param_index first, then the byte ids, then
                # the fixed 16-byte param_id string.
                payload = struct.pack('<hBB16s', self.param_index, self.target_system, self.target_component, self.param_id)
                return MAVLink_message.pack(self, mav, 214, payload)
class MAVLink_param_request_list_message(MAVLink_message):
        '''
        Request every parameter of this component; the component answers
        by emitting all of its parameters.
        '''
        def __init__(self, target_system, target_component):
                # Register with the base class as a PARAM_REQUEST_LIST message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_PARAM_REQUEST_LIST, 'PARAM_REQUEST_LIST')
                self._fieldnames = ['target_system', 'target_component']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<BB', self.target_system, self.target_component)
                return MAVLink_message.pack(self, mav, 159, payload)
class MAVLink_param_value_message(MAVLink_message):
        '''
        Emit the value of one onboard parameter. Including param_count
        and param_index lets the recipient track which parameters it has
        received and re-request any that were lost or timed out.
        '''
        def __init__(self, param_id, param_value, param_type, param_count, param_index):
                # Register with the base class as a PARAM_VALUE message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_PARAM_VALUE, 'PARAM_VALUE')
                self._fieldnames = ['param_id', 'param_value', 'param_type', 'param_count', 'param_index']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (param_id, param_value, param_type, param_count, param_index)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order (by size): float value, the two uint16 counters,
                # the 16-byte id string, then the type byte.
                payload = struct.pack('<fHH16sB', self.param_value, self.param_count, self.param_index, self.param_id, self.param_type)
                return MAVLink_message.pack(self, mav, 220, payload)
class MAVLink_param_set_message(MAVLink_message):
        '''
        Set a parameter value TEMPORARILY in RAM (reset on reboot; use
        the MAV_ACTION_STORAGE_WRITE action to persist to EEPROM).
        IMPORTANT: the receiver should acknowledge by broadcasting a
        PARAM_VALUE so every GCS stays up to date; a GCS that sees no
        PARAM_VALUE within its timeout should re-send this PARAM_SET.
        '''
        def __init__(self, target_system, target_component, param_id, param_value, param_type):
                # Register with the base class as a PARAM_SET message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_PARAM_SET, 'PARAM_SET')
                self._fieldnames = ['target_system', 'target_component', 'param_id', 'param_value', 'param_type']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, param_id, param_value, param_type)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order (by size): float value first, then the byte ids,
                # the 16-byte id string, then the type byte.
                payload = struct.pack('<fBB16sB', self.param_value, self.target_system, self.target_component, self.param_id, self.param_type)
                return MAVLink_message.pack(self, mav, 168, payload)
class MAVLink_gps_raw_int_message(MAVLink_message):
        '''
        Global position as reported by the GPS receiver. This is a RAW
        sensor value, NOT the system's fused global position estimate
        (see GLOBAL_POSITION for that). Coordinate frame is right-handed,
        Z-axis up (GPS frame).
        '''
        def __init__(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible):
                # Register with the base class as a GPS_RAW_INT message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_GPS_RAW_INT, 'GPS_RAW_INT')
                self._fieldnames = ['time_usec', 'fix_type', 'lat', 'lon', 'alt', 'eph', 'epv', 'vel', 'cog', 'satellites_visible']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order sorts by size, pushing the fix_type byte after the
                # uint16 fields even though it is second in the constructor.
                payload = struct.pack('<QiiiHHHHBB', self.time_usec, self.lat, self.lon, self.alt, self.eph, self.epv, self.vel, self.cog, self.fix_type, self.satellites_visible)
                return MAVLink_message.pack(self, mav, 24, payload)
class MAVLink_gps_status_message(MAVLink_message):
        '''
        GPS positioning status, intended for displaying per-satellite
        information for every satellite the receiver can see (up to 20).
        See GLOBAL_POSITION for the fused global position estimate.
        '''
        def __init__(self, satellites_visible, satellite_prn, satellite_used, satellite_elevation, satellite_azimuth, satellite_snr):
                # Register with the base class as a GPS_STATUS message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_GPS_STATUS, 'GPS_STATUS')
                self._fieldnames = ['satellites_visible', 'satellite_prn', 'satellite_used', 'satellite_elevation', 'satellite_azimuth', 'satellite_snr']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (satellites_visible, satellite_prn, satellite_used, satellite_elevation, satellite_azimuth, satellite_snr)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Each per-satellite array travels as a fixed 20-byte field.
                payload = struct.pack('<B20s20s20s20s20s', self.satellites_visible, self.satellite_prn, self.satellite_used, self.satellite_elevation, self.satellite_azimuth, self.satellite_snr)
                return MAVLink_message.pack(self, mav, 23, payload)
class MAVLink_scaled_imu_message(MAVLink_message):
        '''
        IMU readings for the usual 9DOF sensor setup, scaled to the
        described units.
        '''
        def __init__(self, time_boot_ms, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag):
                # Register with the base class as a SCALED_IMU message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_SCALED_IMU, 'SCALED_IMU')
                self._fieldnames = ['time_boot_ms', 'xacc', 'yacc', 'zacc', 'xgyro', 'ygyro', 'zgyro', 'xmag', 'ymag', 'zmag']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<Ihhhhhhhhh', self.time_boot_ms, self.xacc, self.yacc, self.zacc, self.xgyro, self.ygyro, self.zgyro, self.xmag, self.ymag, self.zmag)
                return MAVLink_message.pack(self, mav, 170, payload)
class MAVLink_raw_imu_message(MAVLink_message):
        '''
        RAW IMU readings for the usual 9DOF sensor setup: always the true
        unscaled values, for data capture and system debugging.
        '''
        def __init__(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag):
                # Register with the base class as a RAW_IMU message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_RAW_IMU, 'RAW_IMU')
                self._fieldnames = ['time_usec', 'xacc', 'yacc', 'zacc', 'xgyro', 'ygyro', 'zgyro', 'xmag', 'ymag', 'zmag']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<Qhhhhhhhhh', self.time_usec, self.xacc, self.yacc, self.zacc, self.xgyro, self.ygyro, self.zgyro, self.xmag, self.ymag, self.zmag)
                return MAVLink_message.pack(self, mav, 144, payload)
class MAVLink_raw_pressure_message(MAVLink_message):
        '''
        RAW pressure readings for the typical one-absolute-plus-one-
        differential pressure sensor setup; values are raw, UNSCALED ADC
        counts.
        '''
        def __init__(self, time_usec, press_abs, press_diff1, press_diff2, temperature):
                # Register with the base class as a RAW_PRESSURE message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_RAW_PRESSURE, 'RAW_PRESSURE')
                self._fieldnames = ['time_usec', 'press_abs', 'press_diff1', 'press_diff2', 'temperature']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (time_usec, press_abs, press_diff1, press_diff2, temperature)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<Qhhhh', self.time_usec, self.press_abs, self.press_diff1, self.press_diff2, self.temperature)
                return MAVLink_message.pack(self, mav, 67, payload)
class MAVLink_scaled_pressure_message(MAVLink_message):
        '''
        Scaled pressure readings for the typical absolute-plus-
        differential sensor setup; units are as specified per field.
        '''
        def __init__(self, time_boot_ms, press_abs, press_diff, temperature):
                # Register with the base class as a SCALED_PRESSURE message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_SCALED_PRESSURE, 'SCALED_PRESSURE')
                self._fieldnames = ['time_boot_ms', 'press_abs', 'press_diff', 'temperature']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, press_abs, press_diff, temperature)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<Iffh', self.time_boot_ms, self.press_abs, self.press_diff, self.temperature)
                return MAVLink_message.pack(self, mav, 115, payload)
class MAVLink_attitude_message(MAVLink_message):
        '''
        Attitude in the aeronautical frame (right-handed: Z-down,
        X-front, Y-right).
        '''
        def __init__(self, time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed):
                # Register with the base class as an ATTITUDE message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_ATTITUDE, 'ATTITUDE')
                self._fieldnames = ['time_boot_ms', 'roll', 'pitch', 'yaw', 'rollspeed', 'pitchspeed', 'yawspeed']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<Iffffff', self.time_boot_ms, self.roll, self.pitch, self.yaw, self.rollspeed, self.pitchspeed, self.yawspeed)
                return MAVLink_message.pack(self, mav, 39, payload)
class MAVLink_attitude_quaternion_message(MAVLink_message):
        '''
        Attitude in the aeronautical frame (right-handed: Z-down,
        X-front, Y-right), expressed as a quaternion.
        '''
        def __init__(self, time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed):
                # Register with the base class as an ATTITUDE_QUATERNION message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_ATTITUDE_QUATERNION, 'ATTITUDE_QUATERNION')
                self._fieldnames = ['time_boot_ms', 'q1', 'q2', 'q3', 'q4', 'rollspeed', 'pitchspeed', 'yawspeed']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<Ifffffff', self.time_boot_ms, self.q1, self.q2, self.q3, self.q4, self.rollspeed, self.pitchspeed, self.yawspeed)
                return MAVLink_message.pack(self, mav, 246, payload)
class MAVLink_local_position_ned_message(MAVLink_message):
        '''
        Filtered local position (e.g. fused computer vision and
        accelerometers). Coordinate frame is right-handed, Z-axis down
        (aeronautical NED / north-east-down convention).
        '''
        def __init__(self, time_boot_ms, x, y, z, vx, vy, vz):
                # Register with the base class as a LOCAL_POSITION_NED message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_LOCAL_POSITION_NED, 'LOCAL_POSITION_NED')
                self._fieldnames = ['time_boot_ms', 'x', 'y', 'z', 'vx', 'vy', 'vz']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, x, y, z, vx, vy, vz)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<Iffffff', self.time_boot_ms, self.x, self.y, self.z, self.vx, self.vy, self.vz)
                return MAVLink_message.pack(self, mav, 185, payload)
class MAVLink_global_position_int_message(MAVLink_message):
        '''
        Filtered global position (e.g. fused GPS and accelerometers) in
        the GPS frame (right-handed, Z-up), sent as scaled integers
        because float resolution is insufficient.
        '''
        def __init__(self, time_boot_ms, lat, lon, alt, relative_alt, vx, vy, vz, hdg):
                # Register with the base class as a GLOBAL_POSITION_INT message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_GLOBAL_POSITION_INT, 'GLOBAL_POSITION_INT')
                self._fieldnames = ['time_boot_ms', 'lat', 'lon', 'alt', 'relative_alt', 'vx', 'vy', 'vz', 'hdg']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, lat, lon, alt, relative_alt, vx, vy, vz, hdg)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<IiiiihhhH', self.time_boot_ms, self.lat, self.lon, self.alt, self.relative_alt, self.vx, self.vy, self.vz, self.hdg)
                return MAVLink_message.pack(self, mav, 104, payload)
class MAVLink_rc_channels_scaled_message(MAVLink_message):
        '''
        Scaled values of the received RC channels: -10000 (-100%),
        0 (0%), 10000 (100%). Inactive channels should be set to 65535.
        '''
        def __init__(self, time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi):
                # Register with the base class as an RC_CHANNELS_SCALED message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_RC_CHANNELS_SCALED, 'RC_CHANNELS_SCALED')
                self._fieldnames = ['time_boot_ms', 'port', 'chan1_scaled', 'chan2_scaled', 'chan3_scaled', 'chan4_scaled', 'chan5_scaled', 'chan6_scaled', 'chan7_scaled', 'chan8_scaled', 'rssi']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order sorts by size: the port byte moves behind the
                # int16 channel values.
                payload = struct.pack('<IhhhhhhhhBB', self.time_boot_ms, self.chan1_scaled, self.chan2_scaled, self.chan3_scaled, self.chan4_scaled, self.chan5_scaled, self.chan6_scaled, self.chan7_scaled, self.chan8_scaled, self.port, self.rssi)
                return MAVLink_message.pack(self, mav, 237, payload)
class MAVLink_rc_channels_raw_message(MAVLink_message):
        '''
        RAW values of the received RC channels. Standard PPM modulation:
        1000 us = 0%, 2000 us = 100%; individual receivers/transmitters
        might violate this.
        '''
        def __init__(self, time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi):
                # Register with the base class as an RC_CHANNELS_RAW message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_RC_CHANNELS_RAW, 'RC_CHANNELS_RAW')
                self._fieldnames = ['time_boot_ms', 'port', 'chan1_raw', 'chan2_raw', 'chan3_raw', 'chan4_raw', 'chan5_raw', 'chan6_raw', 'chan7_raw', 'chan8_raw', 'rssi']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order sorts by size: the port byte moves behind the
                # uint16 channel values.
                payload = struct.pack('<IHHHHHHHHBB', self.time_boot_ms, self.chan1_raw, self.chan2_raw, self.chan3_raw, self.chan4_raw, self.chan5_raw, self.chan6_raw, self.chan7_raw, self.chan8_raw, self.port, self.rssi)
                return MAVLink_message.pack(self, mav, 244, payload)
class MAVLink_servo_output_raw_message(MAVLink_message):
        '''
        RAW values of the servo outputs (for RC input from the remote,
        use the RC_CHANNELS messages). Standard PPM modulation: 1000 us =
        0%, 2000 us = 100%.
        '''
        def __init__(self, time_boot_ms, port, servo1_raw, servo2_raw, servo3_raw, servo4_raw, servo5_raw, servo6_raw, servo7_raw, servo8_raw):
                # Register with the base class as a SERVO_OUTPUT_RAW message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_SERVO_OUTPUT_RAW, 'SERVO_OUTPUT_RAW')
                self._fieldnames = ['time_boot_ms', 'port', 'servo1_raw', 'servo2_raw', 'servo3_raw', 'servo4_raw', 'servo5_raw', 'servo6_raw', 'servo7_raw', 'servo8_raw']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, port, servo1_raw, servo2_raw, servo3_raw, servo4_raw, servo5_raw, servo6_raw, servo7_raw, servo8_raw)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order sorts by size: the port byte trails the uint16
                # servo values.
                payload = struct.pack('<IHHHHHHHHB', self.time_boot_ms, self.servo1_raw, self.servo2_raw, self.servo3_raw, self.servo4_raw, self.servo5_raw, self.servo6_raw, self.servo7_raw, self.servo8_raw, self.port)
                return MAVLink_message.pack(self, mav, 242, payload)
class MAVLink_mission_request_partial_list_message(MAVLink_message):
        '''
        Request a partial list of mission items from the
        system/component
        (http://qgroundcontrol.org/mavlink/waypoint_protocol). If start
        and end index are the same, just send one waypoint.
        '''
        def __init__(self, target_system, target_component, start_index, end_index):
                # Register with the base class as a MISSION_REQUEST_PARTIAL_LIST message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_REQUEST_PARTIAL_LIST, 'MISSION_REQUEST_PARTIAL_LIST')
                self._fieldnames = ['target_system', 'target_component', 'start_index', 'end_index']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, start_index, end_index)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order: the int16 indices precede the byte-sized ids.
                payload = struct.pack('<hhBB', self.start_index, self.end_index, self.target_system, self.target_component)
                return MAVLink_message.pack(self, mav, 212, payload)
class MAVLink_mission_write_partial_list_message(MAVLink_message):
        '''
        Sent to the MAV to write a partial mission item list. If start
        index == end index, only one item is transmitted / updated. A
        start index that is non-zero and beyond the current list size
        should be REJECTED!
        '''
        def __init__(self, target_system, target_component, start_index, end_index):
                # Register with the base class as a MISSION_WRITE_PARTIAL_LIST message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_WRITE_PARTIAL_LIST, 'MISSION_WRITE_PARTIAL_LIST')
                self._fieldnames = ['target_system', 'target_component', 'start_index', 'end_index']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, start_index, end_index)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order: the int16 indices precede the byte-sized ids.
                payload = struct.pack('<hhBB', self.start_index, self.end_index, self.target_system, self.target_component)
                return MAVLink_message.pack(self, mav, 9, payload)
class MAVLink_mission_item_message(MAVLink_message):
        '''
        Encodes one mission item, emitted both to announce the item's
        presence and to set an item on the system. Coordinates are either
        x, y, z meters (type: LOCAL) or x:lat, y:lon, z:altitude. The
        local frame is Z-down right-handed (NED); the global frame is
        Z-up right-handed (ENU). See also
        http://qgroundcontrol.org/mavlink/waypoint_protocol.
        '''
        def __init__(self, target_system, target_component, seq, frame, command, current, autocontinue, param1, param2, param3, param4, x, y, z):
                # Register with the base class as a MISSION_ITEM message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_ITEM, 'MISSION_ITEM')
                self._fieldnames = ['target_system', 'target_component', 'seq', 'frame', 'command', 'current', 'autocontinue', 'param1', 'param2', 'param3', 'param4', 'x', 'y', 'z']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, seq, frame, command, current, autocontinue, param1, param2, param3, param4, x, y, z)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order sorts by size: floats first, then the uint16 seq
                # and command, then the byte-sized fields.
                payload = struct.pack('<fffffffHHBBBBB', self.param1, self.param2, self.param3, self.param4, self.x, self.y, self.z, self.seq, self.command, self.target_system, self.target_component, self.frame, self.current, self.autocontinue)
                return MAVLink_message.pack(self, mav, 254, payload)
class MAVLink_mission_request_message(MAVLink_message):
        '''
        Request information about the mission item with sequence number
        seq; the system should answer with a MISSION_ITEM message.
        http://qgroundcontrol.org/mavlink/waypoint_protocol
        '''
        def __init__(self, target_system, target_component, seq):
                # Register with the base class as a MISSION_REQUEST message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_REQUEST, 'MISSION_REQUEST')
                self._fieldnames = ['target_system', 'target_component', 'seq']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, seq)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order: the uint16 seq precedes the byte-sized ids.
                payload = struct.pack('<HBB', self.seq, self.target_system, self.target_component)
                return MAVLink_message.pack(self, mav, 230, payload)
class MAVLink_mission_set_current_message(MAVLink_message):
        '''
        Set the mission item with sequence number seq as the current
        item: the MAV continues to it on the shortest path rather than
        following the in-between mission items.
        '''
        def __init__(self, target_system, target_component, seq):
                # Register with the base class as a MISSION_SET_CURRENT message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_SET_CURRENT, 'MISSION_SET_CURRENT')
                self._fieldnames = ['target_system', 'target_component', 'seq']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, seq)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order: the uint16 seq precedes the byte-sized ids.
                payload = struct.pack('<HBB', self.seq, self.target_system, self.target_component)
                return MAVLink_message.pack(self, mav, 28, payload)
class MAVLink_mission_current_message(MAVLink_message):
        '''
        Announces the sequence number of the currently active mission
        item, which the MAV will fly towards.
        '''
        def __init__(self, seq):
                # Register with the base class as a MISSION_CURRENT message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_CURRENT, 'MISSION_CURRENT')
                self._fieldnames = ['seq']
                # Bind the single constructor argument to its attribute.
                for fieldname, fieldvalue in zip(self._fieldnames, (seq,)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<H', self.seq)
                return MAVLink_message.pack(self, mav, 28, payload)
class MAVLink_mission_request_list_message(MAVLink_message):
        '''
        Request the overall list of mission items from the
        system/component.
        '''
        def __init__(self, target_system, target_component):
                # Register with the base class as a MISSION_REQUEST_LIST message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_REQUEST_LIST, 'MISSION_REQUEST_LIST')
                self._fieldnames = ['target_system', 'target_component']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<BB', self.target_system, self.target_component)
                return MAVLink_message.pack(self, mav, 132, payload)
class MAVLink_mission_count_message(MAVLink_message):
        '''
        Emitted by the MAV in response to MISSION_REQUEST_LIST and to
        initiate a write transaction; knowing the total count lets the
        GCS request each mission item individually.
        '''
        def __init__(self, target_system, target_component, count):
                # Register with the base class as a MISSION_COUNT message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_COUNT, 'MISSION_COUNT')
                self._fieldnames = ['target_system', 'target_component', 'count']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, count)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order: the uint16 count precedes the byte-sized ids.
                payload = struct.pack('<HBB', self.count, self.target_system, self.target_component)
                return MAVLink_message.pack(self, mav, 221, payload)
class MAVLink_mission_clear_all_message(MAVLink_message):
        '''
        Delete all mission items at once.
        '''
        def __init__(self, target_system, target_component):
                # Register with the base class as a MISSION_CLEAR_ALL message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_CLEAR_ALL, 'MISSION_CLEAR_ALL')
                self._fieldnames = ['target_system', 'target_component']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<BB', self.target_system, self.target_component)
                return MAVLink_message.pack(self, mav, 232, payload)
class MAVLink_mission_item_reached_message(MAVLink_message):
        '''
        A mission item has been reached. The system either holds this
        position (or circles on the orbit) or, if autocontinue was set on
        the WP, continues to the next MISSION.
        '''
        def __init__(self, seq):
                # Register with the base class as a MISSION_ITEM_REACHED message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_ITEM_REACHED, 'MISSION_ITEM_REACHED')
                self._fieldnames = ['seq']
                # Bind the single constructor argument to its attribute.
                for fieldname, fieldvalue in zip(self._fieldnames, (seq,)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<H', self.seq)
                return MAVLink_message.pack(self, mav, 11, payload)
class MAVLink_mission_ack_message(MAVLink_message):
        '''
        Ack message during MISSION handling: type=0 is a positive ack,
        non-zero signals an error.
        '''
        def __init__(self, target_system, target_component, type):
                # NOTE: the 'type' parameter name shadows the builtin but is
                # fixed by the generated MAVLink field name.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_ACK, 'MISSION_ACK')
                self._fieldnames = ['target_system', 'target_component', 'type']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, type)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<BBB', self.target_system, self.target_component, self.type)
                return MAVLink_message.pack(self, mav, 153, payload)
class MAVLink_set_gps_global_origin_message(MAVLink_message):
        '''
        As local waypoints exist, the global MISSION reference allows
        transforming between the local and the global (GPS) coordinate
        frame — needed e.g. when indoor and outdoor settings are
        connected and the MAV moves from in- to outdoor.
        '''
        def __init__(self, target_system, latitude, longitude, altitude):
                # Register with the base class as a SET_GPS_GLOBAL_ORIGIN message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_GPS_GLOBAL_ORIGIN, 'SET_GPS_GLOBAL_ORIGIN')
                self._fieldnames = ['target_system', 'latitude', 'longitude', 'altitude']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, latitude, longitude, altitude)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order: the int32 coordinates precede the target byte.
                payload = struct.pack('<iiiB', self.latitude, self.longitude, self.altitude, self.target_system)
                return MAVLink_message.pack(self, mav, 41, payload)
class MAVLink_gps_global_origin_message(MAVLink_message):
        '''
        Announces the (0,0,0) origin position once the MAV sets a new
        GPS-local correspondence.
        '''
        def __init__(self, latitude, longitude, altitude):
                # Register with the base class as a GPS_GLOBAL_ORIGIN message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_GPS_GLOBAL_ORIGIN, 'GPS_GLOBAL_ORIGIN')
                self._fieldnames = ['latitude', 'longitude', 'altitude']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (latitude, longitude, altitude)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                payload = struct.pack('<iii', self.latitude, self.longitude, self.altitude)
                return MAVLink_message.pack(self, mav, 39, payload)
class MAVLink_set_local_position_setpoint_message(MAVLink_message):
        '''
        Set the setpoint for a local position controller: the local
        coordinates the MAV should fly to, sent by the path/MISSION
        planner to the onboard position controller. The desired yaw angle
        is included because some MAVs (helicopters/quadrotors) have a
        free yaw degree of freedom.
        '''
        def __init__(self, target_system, target_component, coordinate_frame, x, y, z, yaw):
                # Register with the base class as a SET_LOCAL_POSITION_SETPOINT message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_LOCAL_POSITION_SETPOINT, 'SET_LOCAL_POSITION_SETPOINT')
                self._fieldnames = ['target_system', 'target_component', 'coordinate_frame', 'x', 'y', 'z', 'yaw']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, coordinate_frame, x, y, z, yaw)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order: floats first, then the byte-sized ids and frame.
                payload = struct.pack('<ffffBBB', self.x, self.y, self.z, self.yaw, self.target_system, self.target_component, self.coordinate_frame)
                return MAVLink_message.pack(self, mav, 214, payload)
class MAVLink_local_position_setpoint_message(MAVLink_message):
        '''
        Transmit the controller's current local setpoint to other MAVs
        (collision avoidance) and to the GCS.
        '''
        def __init__(self, coordinate_frame, x, y, z, yaw):
                # Register with the base class as a LOCAL_POSITION_SETPOINT message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_LOCAL_POSITION_SETPOINT, 'LOCAL_POSITION_SETPOINT')
                self._fieldnames = ['coordinate_frame', 'x', 'y', 'z', 'yaw']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (coordinate_frame, x, y, z, yaw)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order: floats first, then the frame byte.
                payload = struct.pack('<ffffB', self.x, self.y, self.z, self.yaw, self.coordinate_frame)
                return MAVLink_message.pack(self, mav, 223, payload)
class MAVLink_global_position_setpoint_int_message(MAVLink_message):
        '''
        Transmit the controller's current global setpoint to other MAVs
        (collision avoidance) and to the GCS.
        '''
        def __init__(self, coordinate_frame, latitude, longitude, altitude, yaw):
                # Register with the base class as a GLOBAL_POSITION_SETPOINT_INT message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_GLOBAL_POSITION_SETPOINT_INT, 'GLOBAL_POSITION_SETPOINT_INT')
                self._fieldnames = ['coordinate_frame', 'latitude', 'longitude', 'altitude', 'yaw']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (coordinate_frame, latitude, longitude, altitude, yaw)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order: int32 coordinates, int16 yaw, then the frame byte.
                payload = struct.pack('<iiihB', self.latitude, self.longitude, self.altitude, self.yaw, self.coordinate_frame)
                return MAVLink_message.pack(self, mav, 141, payload)
class MAVLink_set_global_position_setpoint_int_message(MAVLink_message):
        '''
        Set the current global position setpoint.
        '''
        def __init__(self, coordinate_frame, latitude, longitude, altitude, yaw):
                # Register with the base class as a SET_GLOBAL_POSITION_SETPOINT_INT message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_GLOBAL_POSITION_SETPOINT_INT, 'SET_GLOBAL_POSITION_SETPOINT_INT')
                self._fieldnames = ['coordinate_frame', 'latitude', 'longitude', 'altitude', 'yaw']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (coordinate_frame, latitude, longitude, altitude, yaw)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order: int32 coordinates, int16 yaw, then the frame byte.
                payload = struct.pack('<iiihB', self.latitude, self.longitude, self.altitude, self.yaw, self.coordinate_frame)
                return MAVLink_message.pack(self, mav, 33, payload)
class MAVLink_safety_set_allowed_area_message(MAVLink_message):
        '''
        Set a safety zone (volume) defined by two corners of a cube,
        telling the MAV which setpoints/MISSIONs to accept or reject.
        Safety areas are often enforced by national or competition
        regulations.
        '''
        def __init__(self, target_system, target_component, frame, p1x, p1y, p1z, p2x, p2y, p2z):
                # Register with the base class as a SAFETY_SET_ALLOWED_AREA message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_SAFETY_SET_ALLOWED_AREA, 'SAFETY_SET_ALLOWED_AREA')
                self._fieldnames = ['target_system', 'target_component', 'frame', 'p1x', 'p1y', 'p1z', 'p2x', 'p2y', 'p2z']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, frame, p1x, p1y, p1z, p2x, p2y, p2z)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order: the six corner floats, then the byte-sized ids
                # and frame.
                payload = struct.pack('<ffffffBBB', self.p1x, self.p1y, self.p1z, self.p2x, self.p2y, self.p2z, self.target_system, self.target_component, self.frame)
                return MAVLink_message.pack(self, mav, 15, payload)
class MAVLink_safety_allowed_area_message(MAVLink_message):
        '''
        Read out the safety zone the MAV currently assumes.
        '''
        def __init__(self, frame, p1x, p1y, p1z, p2x, p2y, p2z):
                # Register with the base class as a SAFETY_ALLOWED_AREA message.
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_SAFETY_ALLOWED_AREA, 'SAFETY_ALLOWED_AREA')
                self._fieldnames = ['frame', 'p1x', 'p1y', 'p1z', 'p2x', 'p2y', 'p2z']
                # Bind every constructor argument to the attribute of the same name.
                for fieldname, fieldvalue in zip(self._fieldnames, (frame, p1x, p1y, p1z, p2x, p2y, p2z)):
                        setattr(self, fieldname, fieldvalue)
        def pack(self, mav):
                # Wire order: the six corner floats, then the frame byte.
                payload = struct.pack('<ffffffB', self.p1x, self.p1y, self.p1z, self.p2x, self.p2y, self.p2z, self.frame)
                return MAVLink_message.pack(self, mav, 3, payload)
class MAVLink_set_roll_pitch_yaw_thrust_message(MAVLink_message):
    '''
    Set roll, pitch and yaw (plus thrust) on a target system.
    '''
    def __init__(self, target_system, target_component, roll, pitch, yaw, thrust):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_ROLL_PITCH_YAW_THRUST, 'SET_ROLL_PITCH_YAW_THRUST')
        self._fieldnames = ['target_system', 'target_component', 'roll', 'pitch', 'yaw', 'thrust']
        for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, roll, pitch, yaw, thrust)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: floats first, target bytes last.
        payload = struct.pack('<ffffBB', self.roll, self.pitch, self.yaw, self.thrust, self.target_system, self.target_component)
        return MAVLink_message.pack(self, mav, 100, payload)
class MAVLink_set_roll_pitch_yaw_speed_thrust_message(MAVLink_message):
    '''
    Set roll, pitch and yaw angular speeds (plus thrust) on a target system.
    '''
    def __init__(self, target_system, target_component, roll_speed, pitch_speed, yaw_speed, thrust):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_ROLL_PITCH_YAW_SPEED_THRUST, 'SET_ROLL_PITCH_YAW_SPEED_THRUST')
        self._fieldnames = ['target_system', 'target_component', 'roll_speed', 'pitch_speed', 'yaw_speed', 'thrust']
        for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, roll_speed, pitch_speed, yaw_speed, thrust)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: floats first, target bytes last.
        payload = struct.pack('<ffffBB', self.roll_speed, self.pitch_speed, self.yaw_speed, self.thrust, self.target_system, self.target_component)
        return MAVLink_message.pack(self, mav, 24, payload)
class MAVLink_roll_pitch_yaw_thrust_setpoint_message(MAVLink_message):
    '''
    Setpoint in roll, pitch, yaw currently active on the system.
    '''
    def __init__(self, time_boot_ms, roll, pitch, yaw, thrust):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_ROLL_PITCH_YAW_THRUST_SETPOINT, 'ROLL_PITCH_YAW_THRUST_SETPOINT')
        self._fieldnames = ['time_boot_ms', 'roll', 'pitch', 'yaw', 'thrust']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, roll, pitch, yaw, thrust)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order matches the field order here.
        payload = struct.pack('<Iffff', self.time_boot_ms, self.roll, self.pitch, self.yaw, self.thrust)
        return MAVLink_message.pack(self, mav, 239, payload)
class MAVLink_roll_pitch_yaw_speed_thrust_setpoint_message(MAVLink_message):
    '''
    Setpoint in rollspeed, pitchspeed, yawspeed currently active on the
    system.
    '''
    def __init__(self, time_boot_ms, roll_speed, pitch_speed, yaw_speed, thrust):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_ROLL_PITCH_YAW_SPEED_THRUST_SETPOINT, 'ROLL_PITCH_YAW_SPEED_THRUST_SETPOINT')
        self._fieldnames = ['time_boot_ms', 'roll_speed', 'pitch_speed', 'yaw_speed', 'thrust']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, roll_speed, pitch_speed, yaw_speed, thrust)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        payload = struct.pack('<Iffff', self.time_boot_ms, self.roll_speed, self.pitch_speed, self.yaw_speed, self.thrust)
        return MAVLink_message.pack(self, mav, 238, payload)
class MAVLink_set_quad_motors_setpoint_message(MAVLink_message):
    '''
    Setpoint in the four motor speeds.
    '''
    def __init__(self, target_system, motor_front_nw, motor_right_ne, motor_back_se, motor_left_sw):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_QUAD_MOTORS_SETPOINT, 'SET_QUAD_MOTORS_SETPOINT')
        self._fieldnames = ['target_system', 'motor_front_nw', 'motor_right_ne', 'motor_back_se', 'motor_left_sw']
        for fieldname, fieldvalue in zip(self._fieldnames, (target_system, motor_front_nw, motor_right_ne, motor_back_se, motor_left_sw)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: four uint16 motor values, then the target byte.
        payload = struct.pack('<HHHHB', self.motor_front_nw, self.motor_right_ne, self.motor_back_se, self.motor_left_sw, self.target_system)
        return MAVLink_message.pack(self, mav, 30, payload)
class MAVLink_set_quad_swarm_roll_pitch_yaw_thrust_message(MAVLink_message):
    '''
    Setpoint for up to four quadrotors in a group / wing.

    roll, pitch, yaw and thrust are 4-element sequences (one entry per
    quadrotor), matching the '4h'/'4H' array fields in the wire format.
    '''
    def __init__(self, group, mode, roll, pitch, yaw, thrust):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_QUAD_SWARM_ROLL_PITCH_YAW_THRUST, 'SET_QUAD_SWARM_ROLL_PITCH_YAW_THRUST')
        self._fieldnames = ['group', 'mode', 'roll', 'pitch', 'yaw', 'thrust']
        self.group = group
        self.mode = mode
        self.roll = roll
        self.pitch = pitch
        self.yaw = yaw
        self.thrust = thrust
    def pack(self, mav):
        # Fix: '<4h4h4h4HBB' requires four scalars per array field (18 values
        # total), but the original passed each sequence as a single argument,
        # so struct.pack always raised struct.error. Flatten the arrays.
        values = list(self.roll) + list(self.pitch) + list(self.yaw) + list(self.thrust)
        values += [self.group, self.mode]
        return MAVLink_message.pack(self, mav, 240, struct.pack('<4h4h4h4HBB', *values))
class MAVLink_nav_controller_output_message(MAVLink_message):
    '''
    Outputs of the APM navigation controller. Primarily used to check the
    controller's response and signs before actual flight, and to assist
    with tuning controller parameters.
    '''
    def __init__(self, nav_roll, nav_pitch, nav_bearing, target_bearing, wp_dist, alt_error, aspd_error, xtrack_error):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_NAV_CONTROLLER_OUTPUT, 'NAV_CONTROLLER_OUTPUT')
        self._fieldnames = ['nav_roll', 'nav_pitch', 'nav_bearing', 'target_bearing', 'wp_dist', 'alt_error', 'aspd_error', 'xtrack_error']
        for fieldname, fieldvalue in zip(self._fieldnames, (nav_roll, nav_pitch, nav_bearing, target_bearing, wp_dist, alt_error, aspd_error, xtrack_error)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order differs from field order (floats, then int16s, then uint16).
        payload = struct.pack('<fffffhhH', self.nav_roll, self.nav_pitch, self.alt_error, self.aspd_error, self.xtrack_error, self.nav_bearing, self.target_bearing, self.wp_dist)
        return MAVLink_message.pack(self, mav, 183, payload)
class MAVLink_set_quad_swarm_led_roll_pitch_yaw_thrust_message(MAVLink_message):
    '''
    Setpoint (with LED colors) for up to four quadrotors in a group / wing.

    roll, pitch, yaw and thrust are 4-element sequences (one entry per
    quadrotor); led_red/led_blue/led_green are 4-byte strings ('4s').
    '''
    def __init__(self, group, mode, led_red, led_blue, led_green, roll, pitch, yaw, thrust):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_QUAD_SWARM_LED_ROLL_PITCH_YAW_THRUST, 'SET_QUAD_SWARM_LED_ROLL_PITCH_YAW_THRUST')
        self._fieldnames = ['group', 'mode', 'led_red', 'led_blue', 'led_green', 'roll', 'pitch', 'yaw', 'thrust']
        self.group = group
        self.mode = mode
        self.led_red = led_red
        self.led_blue = led_blue
        self.led_green = led_green
        self.roll = roll
        self.pitch = pitch
        self.yaw = yaw
        self.thrust = thrust
    def pack(self, mav):
        # Fix: '<4h4h4h4HBB4s4s4s' requires four scalars per numeric array
        # field (21 values total), but the original passed each sequence as a
        # single argument, so struct.pack always raised struct.error. The
        # '4s' LED fields legitimately take one bytes value each.
        values = list(self.roll) + list(self.pitch) + list(self.yaw) + list(self.thrust)
        values += [self.group, self.mode, self.led_red, self.led_blue, self.led_green]
        return MAVLink_message.pack(self, mav, 130, struct.pack('<4h4h4h4HBB4s4s4s', *values))
class MAVLink_state_correction_message(MAVLink_message):
    '''
    Corrects the system's state by adding an error correction term to the
    position and velocity, and by rotating the attitude by a correction
    angle.
    '''
    def __init__(self, xErr, yErr, zErr, rollErr, pitchErr, yawErr, vxErr, vyErr, vzErr):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_STATE_CORRECTION, 'STATE_CORRECTION')
        self._fieldnames = ['xErr', 'yErr', 'zErr', 'rollErr', 'pitchErr', 'yawErr', 'vxErr', 'vyErr', 'vzErr']
        for fieldname, fieldvalue in zip(self._fieldnames, (xErr, yErr, zErr, rollErr, pitchErr, yawErr, vxErr, vyErr, vzErr)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Nine floats, wire order matches field order.
        payload = struct.pack('<fffffffff', self.xErr, self.yErr, self.zErr, self.rollErr, self.pitchErr, self.yawErr, self.vxErr, self.vyErr, self.vzErr)
        return MAVLink_message.pack(self, mav, 130, payload)
class MAVLink_request_data_stream_message(MAVLink_message):
    '''
    Request a data stream at a given rate from the target system.
    '''
    def __init__(self, target_system, target_component, req_stream_id, req_message_rate, start_stop):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_REQUEST_DATA_STREAM, 'REQUEST_DATA_STREAM')
        self._fieldnames = ['target_system', 'target_component', 'req_stream_id', 'req_message_rate', 'start_stop']
        for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, req_stream_id, req_message_rate, start_stop)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: uint16 rate first, then the four single bytes.
        payload = struct.pack('<HBBBB', self.req_message_rate, self.target_system, self.target_component, self.req_stream_id, self.start_stop)
        return MAVLink_message.pack(self, mav, 148, payload)
class MAVLink_data_stream_message(MAVLink_message):
    '''
    State of a single data stream (id, rate, on/off).
    '''
    def __init__(self, stream_id, message_rate, on_off):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_DATA_STREAM, 'DATA_STREAM')
        self._fieldnames = ['stream_id', 'message_rate', 'on_off']
        for fieldname, fieldvalue in zip(self._fieldnames, (stream_id, message_rate, on_off)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: uint16 rate first, then the two single bytes.
        payload = struct.pack('<HBB', self.message_rate, self.stream_id, self.on_off)
        return MAVLink_message.pack(self, mav, 21, payload)
class MAVLink_manual_control_message(MAVLink_message):
    '''
    API for manually controlling the vehicle using standard joystick axes
    nomenclature with a joystick-like input device. Unused axes can be
    disabled and buttons are transmitted as boolean values.
    '''
    def __init__(self, target, x, y, z, r, buttons):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_MANUAL_CONTROL, 'MANUAL_CONTROL')
        self._fieldnames = ['target', 'x', 'y', 'z', 'r', 'buttons']
        for fieldname, fieldvalue in zip(self._fieldnames, (target, x, y, z, r, buttons)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: four int16 axes, uint16 buttons bitmask, target byte.
        payload = struct.pack('<hhhhHB', self.x, self.y, self.z, self.r, self.buttons, self.target)
        return MAVLink_message.pack(self, mav, 243, payload)
class MAVLink_rc_channels_override_message(MAVLink_message):
    '''
    RAW values of the RC channels sent to the MAV to override info
    received from the RC radio. A value of -1 means no change to that
    channel; 0 means control should be released back to the RC radio.
    Standard PPM modulation: 1000 us = 0%, 2000 us = 100%. Individual
    receivers/transmitters might violate this specification.
    '''
    def __init__(self, target_system, target_component, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_RC_CHANNELS_OVERRIDE, 'RC_CHANNELS_OVERRIDE')
        self._fieldnames = ['target_system', 'target_component', 'chan1_raw', 'chan2_raw', 'chan3_raw', 'chan4_raw', 'chan5_raw', 'chan6_raw', 'chan7_raw', 'chan8_raw']
        for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: eight uint16 channels first, then the target bytes.
        payload = struct.pack('<HHHHHHHHBB', self.chan1_raw, self.chan2_raw, self.chan3_raw, self.chan4_raw, self.chan5_raw, self.chan6_raw, self.chan7_raw, self.chan8_raw, self.target_system, self.target_component)
        return MAVLink_message.pack(self, mav, 124, payload)
class MAVLink_vfr_hud_message(MAVLink_message):
    '''
    Metrics typically displayed on a HUD for fixed wing aircraft.
    '''
    def __init__(self, airspeed, groundspeed, heading, throttle, alt, climb):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_VFR_HUD, 'VFR_HUD')
        self._fieldnames = ['airspeed', 'groundspeed', 'heading', 'throttle', 'alt', 'climb']
        for fieldname, fieldvalue in zip(self._fieldnames, (airspeed, groundspeed, heading, throttle, alt, climb)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order differs from field order (floats, int16 heading, uint16 throttle).
        payload = struct.pack('<ffffhH', self.airspeed, self.groundspeed, self.alt, self.climb, self.heading, self.throttle)
        return MAVLink_message.pack(self, mav, 20, payload)
class MAVLink_command_long_message(MAVLink_message):
    '''
    Send a command with up to seven float parameters to the MAV.
    '''
    def __init__(self, target_system, target_component, command, confirmation, param1, param2, param3, param4, param5, param6, param7):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_COMMAND_LONG, 'COMMAND_LONG')
        self._fieldnames = ['target_system', 'target_component', 'command', 'confirmation', 'param1', 'param2', 'param3', 'param4', 'param5', 'param6', 'param7']
        for fieldname, fieldvalue in zip(self._fieldnames, (target_system, target_component, command, confirmation, param1, param2, param3, param4, param5, param6, param7)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: seven float params, uint16 command id, then bytes.
        payload = struct.pack('<fffffffHBBB', self.param1, self.param2, self.param3, self.param4, self.param5, self.param6, self.param7, self.command, self.target_system, self.target_component, self.confirmation)
        return MAVLink_message.pack(self, mav, 152, payload)
class MAVLink_command_ack_message(MAVLink_message):
    '''
    Report status of a command, including feedback on whether the command
    was executed.
    '''
    def __init__(self, command, result):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_COMMAND_ACK, 'COMMAND_ACK')
        self._fieldnames = ['command', 'result']
        for fieldname, fieldvalue in zip(self._fieldnames, (command, result)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        payload = struct.pack('<HB', self.command, self.result)
        return MAVLink_message.pack(self, mav, 143, payload)
class MAVLink_roll_pitch_yaw_rates_thrust_setpoint_message(MAVLink_message):
    '''
    Setpoint in roll, pitch, yaw rates and thrust currently active on the
    system.
    '''
    def __init__(self, time_boot_ms, roll_rate, pitch_rate, yaw_rate, thrust):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_ROLL_PITCH_YAW_RATES_THRUST_SETPOINT, 'ROLL_PITCH_YAW_RATES_THRUST_SETPOINT')
        self._fieldnames = ['time_boot_ms', 'roll_rate', 'pitch_rate', 'yaw_rate', 'thrust']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, roll_rate, pitch_rate, yaw_rate, thrust)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        payload = struct.pack('<Iffff', self.time_boot_ms, self.roll_rate, self.pitch_rate, self.yaw_rate, self.thrust)
        return MAVLink_message.pack(self, mav, 127, payload)
class MAVLink_manual_setpoint_message(MAVLink_message):
    '''
    Setpoint in roll, pitch, yaw and thrust from the operator.
    '''
    def __init__(self, time_boot_ms, roll, pitch, yaw, thrust, mode_switch, manual_override_switch):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_MANUAL_SETPOINT, 'MANUAL_SETPOINT')
        self._fieldnames = ['time_boot_ms', 'roll', 'pitch', 'yaw', 'thrust', 'mode_switch', 'manual_override_switch']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, roll, pitch, yaw, thrust, mode_switch, manual_override_switch)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        payload = struct.pack('<IffffBB', self.time_boot_ms, self.roll, self.pitch, self.yaw, self.thrust, self.mode_switch, self.manual_override_switch)
        return MAVLink_message.pack(self, mav, 106, payload)
class MAVLink_local_position_ned_system_global_offset_message(MAVLink_message):
    '''
    Offset in X, Y, Z and yaw between the LOCAL_POSITION_NED messages of
    MAV X and the global coordinate frame in NED coordinates. Coordinate
    frame is right-handed, Z-axis down (aeronautical NED convention).
    '''
    def __init__(self, time_boot_ms, x, y, z, roll, pitch, yaw):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_LOCAL_POSITION_NED_SYSTEM_GLOBAL_OFFSET, 'LOCAL_POSITION_NED_SYSTEM_GLOBAL_OFFSET')
        self._fieldnames = ['time_boot_ms', 'x', 'y', 'z', 'roll', 'pitch', 'yaw']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, x, y, z, roll, pitch, yaw)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        payload = struct.pack('<Iffffff', self.time_boot_ms, self.x, self.y, self.z, self.roll, self.pitch, self.yaw)
        return MAVLink_message.pack(self, mav, 231, payload)
class MAVLink_hil_state_message(MAVLink_message):
    '''
    Sent from simulation to autopilot. Useful for high throughput
    applications such as hardware-in-the-loop simulations.
    '''
    def __init__(self, time_usec, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed, lat, lon, alt, vx, vy, vz, xacc, yacc, zacc):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_HIL_STATE, 'HIL_STATE')
        self._fieldnames = ['time_usec', 'roll', 'pitch', 'yaw', 'rollspeed', 'pitchspeed', 'yawspeed', 'lat', 'lon', 'alt', 'vx', 'vy', 'vz', 'xacc', 'yacc', 'zacc']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_usec, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed, lat, lon, alt, vx, vy, vz, xacc, yacc, zacc)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # uint64 timestamp, six floats, three int32s, six int16s.
        payload = struct.pack('<Qffffffiiihhhhhh', self.time_usec, self.roll, self.pitch, self.yaw, self.rollspeed, self.pitchspeed, self.yawspeed, self.lat, self.lon, self.alt, self.vx, self.vy, self.vz, self.xacc, self.yacc, self.zacc)
        return MAVLink_message.pack(self, mav, 183, payload)
class MAVLink_hil_controls_message(MAVLink_message):
    '''
    Sent from autopilot to simulation. Hardware-in-the-loop control
    outputs.
    '''
    def __init__(self, time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_HIL_CONTROLS, 'HIL_CONTROLS')
        self._fieldnames = ['time_usec', 'roll_ailerons', 'pitch_elevator', 'yaw_rudder', 'throttle', 'aux1', 'aux2', 'aux3', 'aux4', 'mode', 'nav_mode']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        payload = struct.pack('<QffffffffBB', self.time_usec, self.roll_ailerons, self.pitch_elevator, self.yaw_rudder, self.throttle, self.aux1, self.aux2, self.aux3, self.aux4, self.mode, self.nav_mode)
        return MAVLink_message.pack(self, mav, 63, payload)
class MAVLink_hil_rc_inputs_raw_message(MAVLink_message):
    '''
    Sent from simulation to autopilot: RAW values of the RC channels
    received. Standard PPM modulation: 1000 us = 0%, 2000 us = 100%.
    Individual receivers/transmitters might violate this specification.
    '''
    def __init__(self, time_usec, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, chan9_raw, chan10_raw, chan11_raw, chan12_raw, rssi):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_HIL_RC_INPUTS_RAW, 'HIL_RC_INPUTS_RAW')
        self._fieldnames = ['time_usec', 'chan1_raw', 'chan2_raw', 'chan3_raw', 'chan4_raw', 'chan5_raw', 'chan6_raw', 'chan7_raw', 'chan8_raw', 'chan9_raw', 'chan10_raw', 'chan11_raw', 'chan12_raw', 'rssi']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_usec, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, chan9_raw, chan10_raw, chan11_raw, chan12_raw, rssi)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        payload = struct.pack('<QHHHHHHHHHHHHB', self.time_usec, self.chan1_raw, self.chan2_raw, self.chan3_raw, self.chan4_raw, self.chan5_raw, self.chan6_raw, self.chan7_raw, self.chan8_raw, self.chan9_raw, self.chan10_raw, self.chan11_raw, self.chan12_raw, self.rssi)
        return MAVLink_message.pack(self, mav, 54, payload)
class MAVLink_optical_flow_message(MAVLink_message):
    '''
    Optical flow from a flow sensor (e.g. optical mouse sensor).
    '''
    def __init__(self, time_usec, sensor_id, flow_x, flow_y, flow_comp_m_x, flow_comp_m_y, quality, ground_distance):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_OPTICAL_FLOW, 'OPTICAL_FLOW')
        self._fieldnames = ['time_usec', 'sensor_id', 'flow_x', 'flow_y', 'flow_comp_m_x', 'flow_comp_m_y', 'quality', 'ground_distance']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_usec, sensor_id, flow_x, flow_y, flow_comp_m_x, flow_comp_m_y, quality, ground_distance)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order differs from field order (uint64, floats, int16s, bytes).
        payload = struct.pack('<QfffhhBB', self.time_usec, self.flow_comp_m_x, self.flow_comp_m_y, self.ground_distance, self.flow_x, self.flow_y, self.sensor_id, self.quality)
        return MAVLink_message.pack(self, mav, 175, payload)
class MAVLink_global_vision_position_estimate_message(MAVLink_message):
    '''
    GLOBAL_VISION_POSITION_ESTIMATE: timestamped position (x, y, z) and
    attitude (roll, pitch, yaw) estimate.
    '''
    def __init__(self, usec, x, y, z, roll, pitch, yaw):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_GLOBAL_VISION_POSITION_ESTIMATE, 'GLOBAL_VISION_POSITION_ESTIMATE')
        self._fieldnames = ['usec', 'x', 'y', 'z', 'roll', 'pitch', 'yaw']
        for fieldname, fieldvalue in zip(self._fieldnames, (usec, x, y, z, roll, pitch, yaw)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        payload = struct.pack('<Qffffff', self.usec, self.x, self.y, self.z, self.roll, self.pitch, self.yaw)
        return MAVLink_message.pack(self, mav, 102, payload)
class MAVLink_vision_position_estimate_message(MAVLink_message):
    '''
    VISION_POSITION_ESTIMATE: timestamped position (x, y, z) and attitude
    (roll, pitch, yaw) estimate.
    '''
    def __init__(self, usec, x, y, z, roll, pitch, yaw):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_VISION_POSITION_ESTIMATE, 'VISION_POSITION_ESTIMATE')
        self._fieldnames = ['usec', 'x', 'y', 'z', 'roll', 'pitch', 'yaw']
        for fieldname, fieldvalue in zip(self._fieldnames, (usec, x, y, z, roll, pitch, yaw)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        payload = struct.pack('<Qffffff', self.usec, self.x, self.y, self.z, self.roll, self.pitch, self.yaw)
        return MAVLink_message.pack(self, mav, 158, payload)
class MAVLink_vision_speed_estimate_message(MAVLink_message):
    '''
    VISION_SPEED_ESTIMATE: timestamped speed estimate (x, y, z).
    '''
    def __init__(self, usec, x, y, z):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_VISION_SPEED_ESTIMATE, 'VISION_SPEED_ESTIMATE')
        self._fieldnames = ['usec', 'x', 'y', 'z']
        for fieldname, fieldvalue in zip(self._fieldnames, (usec, x, y, z)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        payload = struct.pack('<Qfff', self.usec, self.x, self.y, self.z)
        return MAVLink_message.pack(self, mav, 208, payload)
class MAVLink_vicon_position_estimate_message(MAVLink_message):
    '''
    VICON_POSITION_ESTIMATE: timestamped position (x, y, z) and attitude
    (roll, pitch, yaw) estimate from a Vicon motion capture system.
    '''
    def __init__(self, usec, x, y, z, roll, pitch, yaw):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_VICON_POSITION_ESTIMATE, 'VICON_POSITION_ESTIMATE')
        self._fieldnames = ['usec', 'x', 'y', 'z', 'roll', 'pitch', 'yaw']
        for fieldname, fieldvalue in zip(self._fieldnames, (usec, x, y, z, roll, pitch, yaw)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        payload = struct.pack('<Qffffff', self.usec, self.x, self.y, self.z, self.roll, self.pitch, self.yaw)
        return MAVLink_message.pack(self, mav, 56, payload)
class MAVLink_highres_imu_message(MAVLink_message):
    '''
    The IMU readings in SI units in NED body frame.
    '''
    def __init__(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_HIGHRES_IMU, 'HIGHRES_IMU')
        self._fieldnames = ['time_usec', 'xacc', 'yacc', 'zacc', 'xgyro', 'ygyro', 'zgyro', 'xmag', 'ymag', 'zmag', 'abs_pressure', 'diff_pressure', 'pressure_alt', 'temperature', 'fields_updated']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # uint64 timestamp, thirteen floats, uint16 fields_updated bitmask.
        payload = struct.pack('<QfffffffffffffH', self.time_usec, self.xacc, self.yacc, self.zacc, self.xgyro, self.ygyro, self.zgyro, self.xmag, self.ymag, self.zmag, self.abs_pressure, self.diff_pressure, self.pressure_alt, self.temperature, self.fields_updated)
        return MAVLink_message.pack(self, mav, 93, payload)
class MAVLink_file_transfer_start_message(MAVLink_message):
    '''
    Begin file transfer.
    '''
    def __init__(self, transfer_uid, dest_path, direction, file_size, flags):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_FILE_TRANSFER_START, 'FILE_TRANSFER_START')
        self._fieldnames = ['transfer_uid', 'dest_path', 'direction', 'file_size', 'flags']
        for fieldname, fieldvalue in zip(self._fieldnames, (transfer_uid, dest_path, direction, file_size, flags)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: uint64 uid, uint32 size, 240-byte path, two bytes.
        payload = struct.pack('<QI240sBB', self.transfer_uid, self.file_size, self.dest_path, self.direction, self.flags)
        return MAVLink_message.pack(self, mav, 235, payload)
class MAVLink_file_transfer_dir_list_message(MAVLink_message):
    '''
    Get directory listing.
    '''
    def __init__(self, transfer_uid, dir_path, flags):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_FILE_TRANSFER_DIR_LIST, 'FILE_TRANSFER_DIR_LIST')
        self._fieldnames = ['transfer_uid', 'dir_path', 'flags']
        for fieldname, fieldvalue in zip(self._fieldnames, (transfer_uid, dir_path, flags)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        payload = struct.pack('<Q240sB', self.transfer_uid, self.dir_path, self.flags)
        return MAVLink_message.pack(self, mav, 93, payload)
class MAVLink_file_transfer_res_message(MAVLink_message):
    '''
    File transfer result.
    '''
    def __init__(self, transfer_uid, result):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_FILE_TRANSFER_RES, 'FILE_TRANSFER_RES')
        self._fieldnames = ['transfer_uid', 'result']
        for fieldname, fieldvalue in zip(self._fieldnames, (transfer_uid, result)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        payload = struct.pack('<QB', self.transfer_uid, self.result)
        return MAVLink_message.pack(self, mav, 124, payload)
class MAVLink_battery_status_message(MAVLink_message):
    '''
    Transmit battery information for an accu pack.
    '''
    def __init__(self, accu_id, voltage_cell_1, voltage_cell_2, voltage_cell_3, voltage_cell_4, voltage_cell_5, voltage_cell_6, current_battery, battery_remaining):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_BATTERY_STATUS, 'BATTERY_STATUS')
        self._fieldnames = ['accu_id', 'voltage_cell_1', 'voltage_cell_2', 'voltage_cell_3', 'voltage_cell_4', 'voltage_cell_5', 'voltage_cell_6', 'current_battery', 'battery_remaining']
        for fieldname, fieldvalue in zip(self._fieldnames, (accu_id, voltage_cell_1, voltage_cell_2, voltage_cell_3, voltage_cell_4, voltage_cell_5, voltage_cell_6, current_battery, battery_remaining)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: six uint16 cell voltages, int16 current, uint8 id, int8 remaining.
        payload = struct.pack('<HHHHHHhBb', self.voltage_cell_1, self.voltage_cell_2, self.voltage_cell_3, self.voltage_cell_4, self.voltage_cell_5, self.voltage_cell_6, self.current_battery, self.accu_id, self.battery_remaining)
        return MAVLink_message.pack(self, mav, 42, payload)
class MAVLink_setpoint_8dof_message(MAVLink_message):
    '''
    Set the 8 DOF setpoint for a controller.
    '''
    def __init__(self, target_system, val1, val2, val3, val4, val5, val6, val7, val8):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_SETPOINT_8DOF, 'SETPOINT_8DOF')
        self._fieldnames = ['target_system', 'val1', 'val2', 'val3', 'val4', 'val5', 'val6', 'val7', 'val8']
        for fieldname, fieldvalue in zip(self._fieldnames, (target_system, val1, val2, val3, val4, val5, val6, val7, val8)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: eight floats, then the target byte.
        payload = struct.pack('<ffffffffB', self.val1, self.val2, self.val3, self.val4, self.val5, self.val6, self.val7, self.val8, self.target_system)
        return MAVLink_message.pack(self, mav, 241, payload)
class MAVLink_setpoint_6dof_message(MAVLink_message):
    '''
    Set the 6 DOF setpoint for an attitude and position controller.
    '''
    def __init__(self, target_system, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_SETPOINT_6DOF, 'SETPOINT_6DOF')
        self._fieldnames = ['target_system', 'trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z']
        for fieldname, fieldvalue in zip(self._fieldnames, (target_system, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: six floats, then the target byte.
        payload = struct.pack('<ffffffB', self.trans_x, self.trans_y, self.trans_z, self.rot_x, self.rot_y, self.rot_z, self.target_system)
        return MAVLink_message.pack(self, mav, 15, payload)
class MAVLink_memory_vect_message(MAVLink_message):
    '''
    Send raw controller memory. Discouraged for normal packets, but quite
    an efficient way for testing new messages and getting experimental
    debug output.
    '''
    def __init__(self, address, ver, type, value):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_MEMORY_VECT, 'MEMORY_VECT')
        self._fieldnames = ['address', 'ver', 'type', 'value']
        for fieldname, fieldvalue in zip(self._fieldnames, (address, ver, type, value)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # uint16 address, two bytes, 32-byte raw value.
        payload = struct.pack('<HBB32s', self.address, self.ver, self.type, self.value)
        return MAVLink_message.pack(self, mav, 204, payload)
class MAVLink_debug_vect_message(MAVLink_message):
    '''
    DEBUG_VECT: named, timestamped debug vector (x, y, z).
    '''
    def __init__(self, name, time_usec, x, y, z):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_DEBUG_VECT, 'DEBUG_VECT')
        self._fieldnames = ['name', 'time_usec', 'x', 'y', 'z']
        for fieldname, fieldvalue in zip(self._fieldnames, (name, time_usec, x, y, z)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: uint64 timestamp, three floats, 10-byte name last.
        payload = struct.pack('<Qfff10s', self.time_usec, self.x, self.y, self.z, self.name)
        return MAVLink_message.pack(self, mav, 49, payload)
class MAVLink_named_value_float_message(MAVLink_message):
    '''
    Send a key-value pair as float. Discouraged for normal packets, but
    quite an efficient way for testing new messages and getting
    experimental debug output.
    '''
    def __init__(self, time_boot_ms, name, value):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_NAMED_VALUE_FLOAT, 'NAMED_VALUE_FLOAT')
        self._fieldnames = ['time_boot_ms', 'name', 'value']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, name, value)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: uint32 timestamp, float value, 10-byte name last.
        payload = struct.pack('<If10s', self.time_boot_ms, self.value, self.name)
        return MAVLink_message.pack(self, mav, 170, payload)
class MAVLink_named_value_int_message(MAVLink_message):
    '''
    Send a key-value pair as integer. Discouraged for normal packets, but
    quite an efficient way for testing new messages and getting
    experimental debug output.
    '''
    def __init__(self, time_boot_ms, name, value):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_NAMED_VALUE_INT, 'NAMED_VALUE_INT')
        self._fieldnames = ['time_boot_ms', 'name', 'value']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, name, value)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: uint32 timestamp, int32 value, 10-byte name last.
        payload = struct.pack('<Ii10s', self.time_boot_ms, self.value, self.name)
        return MAVLink_message.pack(self, mav, 44, payload)
class MAVLink_statustext_message(MAVLink_message):
    '''
    Status text message. Printed in yellow in the COMM console of
    QGroundControl. WARNING: consumes quite some bandwidth, so use only
    for important status and error messages. If implemented wisely, these
    messages are buffered on the MCU and sent only at a limited rate
    (e.g. 10 Hz).
    '''
    def __init__(self, severity, text):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_STATUSTEXT, 'STATUSTEXT')
        self._fieldnames = ['severity', 'text']
        for fieldname, fieldvalue in zip(self._fieldnames, (severity, text)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # severity byte followed by a 50-byte text field.
        payload = struct.pack('<B50s', self.severity, self.text)
        return MAVLink_message.pack(self, mav, 83, payload)
class MAVLink_debug_message(MAVLink_message):
    '''
    Send a debug value. The index is used to discriminate between values;
    these show up in the plot of QGroundControl as DEBUG N.
    '''
    def __init__(self, time_boot_ms, ind, value):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_DEBUG, 'DEBUG')
        self._fieldnames = ['time_boot_ms', 'ind', 'value']
        for fieldname, fieldvalue in zip(self._fieldnames, (time_boot_ms, ind, value)):
            setattr(self, fieldname, fieldvalue)
    def pack(self, mav):
        # Wire order: uint32 timestamp, float value, index byte last.
        payload = struct.pack('<IfB', self.time_boot_ms, self.value, self.ind)
        return MAVLink_message.pack(self, mav, 46, payload)
# Decode table used by MAVLink.decode(): maps a message id to a 4-tuple of
#   (struct format of the size-sorted wire payload,
#    message class to instantiate,
#    order_map translating wire order back to field declaration order,
#    CRC-extra byte folded into the checksum for this message type).
mavlink_map = {
        MAVLINK_MSG_ID_HEARTBEAT : ( '<IBBBBB', MAVLink_heartbeat_message, [1, 2, 3, 0, 4, 5], 50 ),
        MAVLINK_MSG_ID_SYS_STATUS : ( '<IIIHHhHHHHHHb', MAVLink_sys_status_message, [0, 1, 2, 3, 4, 5, 12, 6, 7, 8, 9, 10, 11], 124 ),
        MAVLINK_MSG_ID_SYSTEM_TIME : ( '<QI', MAVLink_system_time_message, [0, 1], 137 ),
        MAVLINK_MSG_ID_PING : ( '<QIBB', MAVLink_ping_message, [0, 1, 2, 3], 237 ),
        MAVLINK_MSG_ID_CHANGE_OPERATOR_CONTROL : ( '<BBB25s', MAVLink_change_operator_control_message, [0, 1, 2, 3], 217 ),
        MAVLINK_MSG_ID_CHANGE_OPERATOR_CONTROL_ACK : ( '<BBB', MAVLink_change_operator_control_ack_message, [0, 1, 2], 104 ),
        MAVLINK_MSG_ID_AUTH_KEY : ( '<32s', MAVLink_auth_key_message, [0], 119 ),
        MAVLINK_MSG_ID_SET_MODE : ( '<IBB', MAVLink_set_mode_message, [1, 2, 0], 89 ),
        MAVLINK_MSG_ID_PARAM_REQUEST_READ : ( '<hBB16s', MAVLink_param_request_read_message, [1, 2, 3, 0], 214 ),
        MAVLINK_MSG_ID_PARAM_REQUEST_LIST : ( '<BB', MAVLink_param_request_list_message, [0, 1], 159 ),
        MAVLINK_MSG_ID_PARAM_VALUE : ( '<fHH16sB', MAVLink_param_value_message, [3, 0, 4, 1, 2], 220 ),
        MAVLINK_MSG_ID_PARAM_SET : ( '<fBB16sB', MAVLink_param_set_message, [1, 2, 3, 0, 4], 168 ),
        MAVLINK_MSG_ID_GPS_RAW_INT : ( '<QiiiHHHHBB', MAVLink_gps_raw_int_message, [0, 8, 1, 2, 3, 4, 5, 6, 7, 9], 24 ),
        MAVLINK_MSG_ID_GPS_STATUS : ( '<B20s20s20s20s20s', MAVLink_gps_status_message, [0, 1, 2, 3, 4, 5], 23 ),
        MAVLINK_MSG_ID_SCALED_IMU : ( '<Ihhhhhhhhh', MAVLink_scaled_imu_message, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 170 ),
        MAVLINK_MSG_ID_RAW_IMU : ( '<Qhhhhhhhhh', MAVLink_raw_imu_message, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 144 ),
        MAVLINK_MSG_ID_RAW_PRESSURE : ( '<Qhhhh', MAVLink_raw_pressure_message, [0, 1, 2, 3, 4], 67 ),
        MAVLINK_MSG_ID_SCALED_PRESSURE : ( '<Iffh', MAVLink_scaled_pressure_message, [0, 1, 2, 3], 115 ),
        MAVLINK_MSG_ID_ATTITUDE : ( '<Iffffff', MAVLink_attitude_message, [0, 1, 2, 3, 4, 5, 6], 39 ),
        MAVLINK_MSG_ID_ATTITUDE_QUATERNION : ( '<Ifffffff', MAVLink_attitude_quaternion_message, [0, 1, 2, 3, 4, 5, 6, 7], 246 ),
        MAVLINK_MSG_ID_LOCAL_POSITION_NED : ( '<Iffffff', MAVLink_local_position_ned_message, [0, 1, 2, 3, 4, 5, 6], 185 ),
        MAVLINK_MSG_ID_GLOBAL_POSITION_INT : ( '<IiiiihhhH', MAVLink_global_position_int_message, [0, 1, 2, 3, 4, 5, 6, 7, 8], 104 ),
        MAVLINK_MSG_ID_RC_CHANNELS_SCALED : ( '<IhhhhhhhhBB', MAVLink_rc_channels_scaled_message, [0, 9, 1, 2, 3, 4, 5, 6, 7, 8, 10], 237 ),
        MAVLINK_MSG_ID_RC_CHANNELS_RAW : ( '<IHHHHHHHHBB', MAVLink_rc_channels_raw_message, [0, 9, 1, 2, 3, 4, 5, 6, 7, 8, 10], 244 ),
        MAVLINK_MSG_ID_SERVO_OUTPUT_RAW : ( '<IHHHHHHHHB', MAVLink_servo_output_raw_message, [0, 9, 1, 2, 3, 4, 5, 6, 7, 8], 242 ),
        MAVLINK_MSG_ID_MISSION_REQUEST_PARTIAL_LIST : ( '<hhBB', MAVLink_mission_request_partial_list_message, [2, 3, 0, 1], 212 ),
        MAVLINK_MSG_ID_MISSION_WRITE_PARTIAL_LIST : ( '<hhBB', MAVLink_mission_write_partial_list_message, [2, 3, 0, 1], 9 ),
        MAVLINK_MSG_ID_MISSION_ITEM : ( '<fffffffHHBBBBB', MAVLink_mission_item_message, [9, 10, 7, 11, 8, 12, 13, 0, 1, 2, 3, 4, 5, 6], 254 ),
        MAVLINK_MSG_ID_MISSION_REQUEST : ( '<HBB', MAVLink_mission_request_message, [1, 2, 0], 230 ),
        MAVLINK_MSG_ID_MISSION_SET_CURRENT : ( '<HBB', MAVLink_mission_set_current_message, [1, 2, 0], 28 ),
        MAVLINK_MSG_ID_MISSION_CURRENT : ( '<H', MAVLink_mission_current_message, [0], 28 ),
        MAVLINK_MSG_ID_MISSION_REQUEST_LIST : ( '<BB', MAVLink_mission_request_list_message, [0, 1], 132 ),
        MAVLINK_MSG_ID_MISSION_COUNT : ( '<HBB', MAVLink_mission_count_message, [1, 2, 0], 221 ),
        MAVLINK_MSG_ID_MISSION_CLEAR_ALL : ( '<BB', MAVLink_mission_clear_all_message, [0, 1], 232 ),
        MAVLINK_MSG_ID_MISSION_ITEM_REACHED : ( '<H', MAVLink_mission_item_reached_message, [0], 11 ),
        MAVLINK_MSG_ID_MISSION_ACK : ( '<BBB', MAVLink_mission_ack_message, [0, 1, 2], 153 ),
        MAVLINK_MSG_ID_SET_GPS_GLOBAL_ORIGIN : ( '<iiiB', MAVLink_set_gps_global_origin_message, [3, 0, 1, 2], 41 ),
        MAVLINK_MSG_ID_GPS_GLOBAL_ORIGIN : ( '<iii', MAVLink_gps_global_origin_message, [0, 1, 2], 39 ),
        MAVLINK_MSG_ID_SET_LOCAL_POSITION_SETPOINT : ( '<ffffBBB', MAVLink_set_local_position_setpoint_message, [4, 5, 6, 0, 1, 2, 3], 214 ),
        MAVLINK_MSG_ID_LOCAL_POSITION_SETPOINT : ( '<ffffB', MAVLink_local_position_setpoint_message, [4, 0, 1, 2, 3], 223 ),
        MAVLINK_MSG_ID_GLOBAL_POSITION_SETPOINT_INT : ( '<iiihB', MAVLink_global_position_setpoint_int_message, [4, 0, 1, 2, 3], 141 ),
        MAVLINK_MSG_ID_SET_GLOBAL_POSITION_SETPOINT_INT : ( '<iiihB', MAVLink_set_global_position_setpoint_int_message, [4, 0, 1, 2, 3], 33 ),
        MAVLINK_MSG_ID_SAFETY_SET_ALLOWED_AREA : ( '<ffffffBBB', MAVLink_safety_set_allowed_area_message, [6, 7, 8, 0, 1, 2, 3, 4, 5], 15 ),
        MAVLINK_MSG_ID_SAFETY_ALLOWED_AREA : ( '<ffffffB', MAVLink_safety_allowed_area_message, [6, 0, 1, 2, 3, 4, 5], 3 ),
        MAVLINK_MSG_ID_SET_ROLL_PITCH_YAW_THRUST : ( '<ffffBB', MAVLink_set_roll_pitch_yaw_thrust_message, [4, 5, 0, 1, 2, 3], 100 ),
        MAVLINK_MSG_ID_SET_ROLL_PITCH_YAW_SPEED_THRUST : ( '<ffffBB', MAVLink_set_roll_pitch_yaw_speed_thrust_message, [4, 5, 0, 1, 2, 3], 24 ),
        MAVLINK_MSG_ID_ROLL_PITCH_YAW_THRUST_SETPOINT : ( '<Iffff', MAVLink_roll_pitch_yaw_thrust_setpoint_message, [0, 1, 2, 3, 4], 239 ),
        MAVLINK_MSG_ID_ROLL_PITCH_YAW_SPEED_THRUST_SETPOINT : ( '<Iffff', MAVLink_roll_pitch_yaw_speed_thrust_setpoint_message, [0, 1, 2, 3, 4], 238 ),
        MAVLINK_MSG_ID_SET_QUAD_MOTORS_SETPOINT : ( '<HHHHB', MAVLink_set_quad_motors_setpoint_message, [4, 0, 1, 2, 3], 30 ),
        MAVLINK_MSG_ID_SET_QUAD_SWARM_ROLL_PITCH_YAW_THRUST : ( '<4h4h4h4HBB', MAVLink_set_quad_swarm_roll_pitch_yaw_thrust_message, [4, 5, 0, 1, 2, 3], 240 ),
        MAVLINK_MSG_ID_NAV_CONTROLLER_OUTPUT : ( '<fffffhhH', MAVLink_nav_controller_output_message, [0, 1, 5, 6, 7, 2, 3, 4], 183 ),
        MAVLINK_MSG_ID_SET_QUAD_SWARM_LED_ROLL_PITCH_YAW_THRUST : ( '<4h4h4h4HBB4s4s4s', MAVLink_set_quad_swarm_led_roll_pitch_yaw_thrust_message, [4, 5, 6, 7, 8, 0, 1, 2, 3], 130 ),
        MAVLINK_MSG_ID_STATE_CORRECTION : ( '<fffffffff', MAVLink_state_correction_message, [0, 1, 2, 3, 4, 5, 6, 7, 8], 130 ),
        MAVLINK_MSG_ID_REQUEST_DATA_STREAM : ( '<HBBBB', MAVLink_request_data_stream_message, [1, 2, 3, 0, 4], 148 ),
        MAVLINK_MSG_ID_DATA_STREAM : ( '<HBB', MAVLink_data_stream_message, [1, 0, 2], 21 ),
        MAVLINK_MSG_ID_MANUAL_CONTROL : ( '<hhhhHB', MAVLink_manual_control_message, [5, 0, 1, 2, 3, 4], 243 ),
        MAVLINK_MSG_ID_RC_CHANNELS_OVERRIDE : ( '<HHHHHHHHBB', MAVLink_rc_channels_override_message, [8, 9, 0, 1, 2, 3, 4, 5, 6, 7], 124 ),
        MAVLINK_MSG_ID_VFR_HUD : ( '<ffffhH', MAVLink_vfr_hud_message, [0, 1, 4, 5, 2, 3], 20 ),
        MAVLINK_MSG_ID_COMMAND_LONG : ( '<fffffffHBBB', MAVLink_command_long_message, [8, 9, 7, 10, 0, 1, 2, 3, 4, 5, 6], 152 ),
        MAVLINK_MSG_ID_COMMAND_ACK : ( '<HB', MAVLink_command_ack_message, [0, 1], 143 ),
        MAVLINK_MSG_ID_ROLL_PITCH_YAW_RATES_THRUST_SETPOINT : ( '<Iffff', MAVLink_roll_pitch_yaw_rates_thrust_setpoint_message, [0, 1, 2, 3, 4], 127 ),
        MAVLINK_MSG_ID_MANUAL_SETPOINT : ( '<IffffBB', MAVLink_manual_setpoint_message, [0, 1, 2, 3, 4, 5, 6], 106 ),
        MAVLINK_MSG_ID_LOCAL_POSITION_NED_SYSTEM_GLOBAL_OFFSET : ( '<Iffffff', MAVLink_local_position_ned_system_global_offset_message, [0, 1, 2, 3, 4, 5, 6], 231 ),
        MAVLINK_MSG_ID_HIL_STATE : ( '<Qffffffiiihhhhhh', MAVLink_hil_state_message, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 183 ),
        MAVLINK_MSG_ID_HIL_CONTROLS : ( '<QffffffffBB', MAVLink_hil_controls_message, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 63 ),
        MAVLINK_MSG_ID_HIL_RC_INPUTS_RAW : ( '<QHHHHHHHHHHHHB', MAVLink_hil_rc_inputs_raw_message, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], 54 ),
        MAVLINK_MSG_ID_OPTICAL_FLOW : ( '<QfffhhBB', MAVLink_optical_flow_message, [0, 6, 4, 5, 1, 2, 7, 3], 175 ),
        MAVLINK_MSG_ID_GLOBAL_VISION_POSITION_ESTIMATE : ( '<Qffffff', MAVLink_global_vision_position_estimate_message, [0, 1, 2, 3, 4, 5, 6], 102 ),
        MAVLINK_MSG_ID_VISION_POSITION_ESTIMATE : ( '<Qffffff', MAVLink_vision_position_estimate_message, [0, 1, 2, 3, 4, 5, 6], 158 ),
        MAVLINK_MSG_ID_VISION_SPEED_ESTIMATE : ( '<Qfff', MAVLink_vision_speed_estimate_message, [0, 1, 2, 3], 208 ),
        MAVLINK_MSG_ID_VICON_POSITION_ESTIMATE : ( '<Qffffff', MAVLink_vicon_position_estimate_message, [0, 1, 2, 3, 4, 5, 6], 56 ),
        MAVLINK_MSG_ID_HIGHRES_IMU : ( '<QfffffffffffffH', MAVLink_highres_imu_message, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], 93 ),
        MAVLINK_MSG_ID_FILE_TRANSFER_START : ( '<QI240sBB', MAVLink_file_transfer_start_message, [0, 2, 3, 1, 4], 235 ),
        MAVLINK_MSG_ID_FILE_TRANSFER_DIR_LIST : ( '<Q240sB', MAVLink_file_transfer_dir_list_message, [0, 1, 2], 93 ),
        MAVLINK_MSG_ID_FILE_TRANSFER_RES : ( '<QB', MAVLink_file_transfer_res_message, [0, 1], 124 ),
        MAVLINK_MSG_ID_BATTERY_STATUS : ( '<HHHHHHhBb', MAVLink_battery_status_message, [7, 0, 1, 2, 3, 4, 5, 6, 8], 42 ),
        MAVLINK_MSG_ID_SETPOINT_8DOF : ( '<ffffffffB', MAVLink_setpoint_8dof_message, [8, 0, 1, 2, 3, 4, 5, 6, 7], 241 ),
        MAVLINK_MSG_ID_SETPOINT_6DOF : ( '<ffffffB', MAVLink_setpoint_6dof_message, [6, 0, 1, 2, 3, 4, 5], 15 ),
        MAVLINK_MSG_ID_MEMORY_VECT : ( '<HBB32s', MAVLink_memory_vect_message, [0, 1, 2, 3], 204 ),
        MAVLINK_MSG_ID_DEBUG_VECT : ( '<Qfff10s', MAVLink_debug_vect_message, [4, 0, 1, 2, 3], 49 ),
        MAVLINK_MSG_ID_NAMED_VALUE_FLOAT : ( '<If10s', MAVLink_named_value_float_message, [0, 2, 1], 170 ),
        MAVLINK_MSG_ID_NAMED_VALUE_INT : ( '<Ii10s', MAVLink_named_value_int_message, [0, 2, 1], 44 ),
        MAVLINK_MSG_ID_STATUSTEXT : ( '<B50s', MAVLink_statustext_message, [0, 1], 83 ),
        MAVLINK_MSG_ID_DEBUG : ( '<IfB', MAVLink_debug_message, [0, 2, 1], 46 ),
}
class MAVError(Exception):
        '''Raised for any MAVLink framing, length, CRC or decoding problem.'''
        def __init__(self, msg):
                Exception.__init__(self, msg)
                # Keep a .message attribute: parse_char reads it when building
                # MAVLink_bad_data records under robust parsing.
                self.message = msg
class MAVString(str):
        '''String subclass that displays like a C-style NUL-terminated string:
        str() shows only the text before the first NUL byte.'''
        def __init__(self, s):
                # str is immutable; the value was fixed by __new__, so there is
                # nothing left to initialise here.
                str.__init__(self)
        def __str__(self):
                # Everything before the first NUL (or the whole string if none).
                return self.partition(chr(0))[0]
class MAVLink_bad_data(MAVLink_message):
        '''
        A span of bytes from a MAVLink stream that could not be parsed,
        produced by the parser when robust_parsing is enabled. `reason` is the
        human-readable explanation of the failure.
        '''
        def __init__(self, data, reason):
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_BAD_DATA, 'BAD_DATA')
                self._fieldnames = ['data', 'reason']
                self.reason = reason
                self.data = data
                # Expose the raw bytes through the normal message-buffer slot too.
                self._msgbuf = data
class MAVLink(object):
'''MAVLink protocol handling class'''
        def __init__(self, file, srcSystem=0, srcComponent=0):
                '''Create a MAVLink protocol handler.

                file: writeable object that send() writes packed bytes to.
                srcSystem/srcComponent: ids stamped into outgoing packet headers.
                '''
                # outgoing sequence counter, incremented by send()
                self.seq = 0
                self.file = file
                self.srcSystem = srcSystem
                self.srcComponent = srcComponent
                # optional per-message callback installed via set_callback()
                self.callback = None
                self.callback_args = None
                self.callback_kwargs = None
                # receive buffer of raw bytes awaiting a complete frame
                self.buf = array.array('B')
                # bytes needed before the next parse attempt (6 = header size)
                self.expected_length = 6
                self.have_prefix_error = False
                # when True, parse errors yield MAVLink_bad_data instead of raising
                self.robust_parsing = False
                # MAVLink 1.0 start-of-frame byte (0xFE)
                self.protocol_marker = 254
                self.little_endian = True
                self.crc_extra = True
                self.sort_fields = True
                # link statistics
                self.total_packets_sent = 0
                self.total_bytes_sent = 0
                self.total_packets_received = 0
                self.total_bytes_received = 0
                self.total_receive_errors = 0
                self.startup_time = time.time()
def set_callback(self, callback, *args, **kwargs):
self.callback = callback
self.callback_args = args
self.callback_kwargs = kwargs
def send(self, mavmsg):
'''send a MAVLink message'''
buf = mavmsg.pack(self)
self.file.write(buf)
self.seq = (self.seq + 1) % 255
self.total_packets_sent += 1
self.total_bytes_sent += len(buf)
def bytes_needed(self):
'''return number of bytes needed for next parsing stage'''
ret = self.expected_length - len(self.buf)
if ret <= 0:
return 1
return ret
        def parse_char(self, c):
                '''input some data bytes, possibly returning a new message'''
                # Append the new bytes to the receive buffer. str input uses
                # array.fromstring; anything else (bytes/iterable) uses extend.
                if isinstance(c, str):
                    self.buf.fromstring(c)
                else:
                    self.buf.extend(c)
                self.total_bytes_received += len(c)
                # Resynchronise: drop one byte at a time until the buffer starts
                # with the MAVLink 1.0 magic byte 0xFE (254).
                if len(self.buf) >= 1 and self.buf[0] != 254:
                    magic = self.buf[0]
                    self.buf = self.buf[1:]
                    if self.robust_parsing:
                        # report the stray byte as a BAD_DATA pseudo-message
                        m = MAVLink_bad_data(chr(magic), "Bad prefix")
                        if self.callback:
                            self.callback(m, *self.callback_args, **self.callback_kwargs)
                        self.expected_length = 6
                        self.total_receive_errors += 1
                        return m
                    # raise only once per run of bad prefix bytes
                    if self.have_prefix_error:
                        return None
                    self.have_prefix_error = True
                    self.total_receive_errors += 1
                    raise MAVError("invalid MAVLink prefix '%s'" % magic)
                self.have_prefix_error = False
                # With magic + length byte available, compute the full frame
                # size: payload length + 8 framing bytes (6 header + 2 CRC).
                if len(self.buf) >= 2:
                    (magic, self.expected_length) = struct.unpack('BB', self.buf[0:2])
                    self.expected_length += 8
                # A complete frame is buffered: slice it off and decode it.
                if self.expected_length >= 8 and len(self.buf) >= self.expected_length:
                    mbuf = self.buf[0:self.expected_length]
                    self.buf = self.buf[self.expected_length:]
                    self.expected_length = 6
                    if self.robust_parsing:
                        try:
                            m = self.decode(mbuf)
                            self.total_packets_received += 1
                        except MAVError as reason:
                            # wrap the undecodable frame instead of raising
                            m = MAVLink_bad_data(mbuf, reason.message)
                            self.total_receive_errors += 1
                    else:
                        m = self.decode(mbuf)
                        self.total_packets_received += 1
                    if self.callback:
                        self.callback(m, *self.callback_args, **self.callback_kwargs)
                    return m
                # not enough data yet for a complete frame
                return None
def parse_buffer(self, s):
'''input some data bytes, possibly returning a list of new messages'''
m = self.parse_char(s)
if m is None:
return None
ret = [m]
while True:
m = self.parse_char("")
if m is None:
return ret
ret.append(m)
return ret
        def decode(self, msgbuf):
                '''decode a buffer as a MAVLink message'''
                # decode the header: magic, payload length, sequence number,
                # source system, source component, message id
                try:
                    magic, mlen, seq, srcSystem, srcComponent, msgId = struct.unpack('cBBBBB', msgbuf[:6])
                except struct.error as emsg:
                    raise MAVError('Unable to unpack MAVLink header: %s' % emsg)
                # 0xFE is the MAVLink 1.0 start-of-frame marker
                if ord(magic) != 254:
                    raise MAVError("invalid MAVLink prefix '%s'" % magic)
                # total frame = 6 header bytes + payload + 2 CRC bytes
                if mlen != len(msgbuf)-8:
                    raise MAVError('invalid MAVLink message length. Got %u expected %u, msgId=%u' % (len(msgbuf)-8, mlen, msgId))
                if not msgId in mavlink_map:
                    raise MAVError('unknown MAVLink message ID %u' % msgId)
                # decode the payload
                # NOTE: 'type' deliberately shadows the builtin here; it is the
                # message class to instantiate (see mavlink_map)
                (fmt, type, order_map, crc_extra) = mavlink_map[msgId]
                # decode the checksum (little-endian uint16 trailing the frame)
                try:
                    crc, = struct.unpack('<H', msgbuf[-2:])
                except struct.error as emsg:
                    raise MAVError('Unable to unpack MAVLink CRC: %s' % emsg)
                # X.25 CRC over everything after the magic byte, excluding the
                # CRC itself, then folded with the per-message CRC-extra byte
                crc2 = mavutil.x25crc(msgbuf[1:-2])
                if True: # using CRC extra
                    crc2.accumulate(chr(crc_extra))
                if crc != crc2.crc:
                    raise MAVError('invalid MAVLink CRC in msgID %u 0x%04x should be 0x%04x' % (msgId, crc, crc2.crc))
                try:
                    t = struct.unpack(fmt, msgbuf[6:-2])
                except struct.error as emsg:
                    raise MAVError('Unable to unpack MAVLink payload type=%s fmt=%s payloadLength=%u: %s' % (
                        type, fmt, len(msgbuf[6:-2]), emsg))
                tlist = list(t)
                # handle sorted fields: the wire carries fields size-sorted, so
                # order_map restores the declaration order the constructor expects
                if True:
                    t = tlist[:]
                    for i in range(0, len(tlist)):
                        tlist[i] = t[order_map[i]]
                # terminate any strings (MAVString renders up to the first NUL)
                for i in range(0, len(tlist)):
                    if isinstance(tlist[i], str):
                        tlist[i] = MAVString(tlist[i])
                t = tuple(tlist)
                # construct the message object
                try:
                    m = type(*t)
                except Exception as emsg:
                    raise MAVError('Unable to instantiate MAVLink message of type %s : %s' % (type, emsg))
                # attach raw buffer, payload slice, CRC and header for callers
                m._msgbuf = msgbuf
                m._payload = msgbuf[6:-2]
                m._crc = crc
                m._header = MAVLink_header(msgId, mlen, seq, srcSystem, srcComponent)
                return m
def heartbeat_encode(self, type, autopilot, base_mode, custom_mode, system_status, mavlink_version=3):
'''
The heartbeat message shows that a system is present and responding.
The type of the MAV and Autopilot hardware allow the
receiving system to treat further messages from this
system appropriate (e.g. by laying out the user
interface based on the autopilot).
type : Type of the MAV (quadrotor, helicopter, etc., up to 15 types, defined in MAV_TYPE ENUM) (uint8_t)
autopilot : Autopilot type / class. defined in MAV_AUTOPILOT ENUM (uint8_t)
base_mode : System mode bitfield, see MAV_MODE_FLAGS ENUM in mavlink/include/mavlink_types.h (uint8_t)
custom_mode : A bitfield for use for autopilot-specific flags. (uint32_t)
system_status : System status flag, see MAV_STATE ENUM (uint8_t)
mavlink_version : MAVLink version, not writable by user, gets added by protocol because of magic data type: uint8_t_mavlink_version (uint8_t)
'''
msg = MAVLink_heartbeat_message(type, autopilot, base_mode, custom_mode, system_status, mavlink_version)
msg.pack(self)
return msg
def heartbeat_send(self, type, autopilot, base_mode, custom_mode, system_status, mavlink_version=3):
'''
The heartbeat message shows that a system is present and responding.
The type of the MAV and Autopilot hardware allow the
receiving system to treat further messages from this
system appropriate (e.g. by laying out the user
interface based on the autopilot).
type : Type of the MAV (quadrotor, helicopter, etc., up to 15 types, defined in MAV_TYPE ENUM) (uint8_t)
autopilot : Autopilot type / class. defined in MAV_AUTOPILOT ENUM (uint8_t)
base_mode : System mode bitfield, see MAV_MODE_FLAGS ENUM in mavlink/include/mavlink_types.h (uint8_t)
custom_mode : A bitfield for use for autopilot-specific flags. (uint32_t)
system_status : System status flag, see MAV_STATE ENUM (uint8_t)
mavlink_version : MAVLink version, not writable by user, gets added by protocol because of magic data type: uint8_t_mavlink_version (uint8_t)
'''
return self.send(self.heartbeat_encode(type, autopilot, base_mode, custom_mode, system_status, mavlink_version))
def sys_status_encode(self, onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4):
'''
The general system state. If the system is following the MAVLink
standard, the system state is mainly defined by three
orthogonal states/modes: The system mode, which is
either LOCKED (motors shut down and locked), MANUAL
(system under RC control), GUIDED (system with
autonomous position control, position setpoint
controlled manually) or AUTO (system guided by
path/waypoint planner). The NAV_MODE defined the
current flight state: LIFTOFF (often an open-loop
maneuver), LANDING, WAYPOINTS or VECTOR. This
represents the internal navigation state machine. The
system status shows wether the system is currently
active or not and if an emergency occured. During the
CRITICAL and EMERGENCY states the MAV is still
considered to be active, but should start emergency
procedures autonomously. After a failure occured it
should first move from active to critical to allow
manual intervention and then move to emergency after a
certain timeout.
onboard_control_sensors_present : Bitmask showing which onboard controllers and sensors are present. Value of 0: not present. Value of 1: present. Indices: 0: 3D gyro, 1: 3D acc, 2: 3D mag, 3: absolute pressure, 4: differential pressure, 5: GPS, 6: optical flow, 7: computer vision position, 8: laser based position, 9: external ground-truth (Vicon or Leica). Controllers: 10: 3D angular rate control 11: attitude stabilization, 12: yaw position, 13: z/altitude control, 14: x/y position control, 15: motor outputs / control (uint32_t)
onboard_control_sensors_enabled : Bitmask showing which onboard controllers and sensors are enabled: Value of 0: not enabled. Value of 1: enabled. Indices: 0: 3D gyro, 1: 3D acc, 2: 3D mag, 3: absolute pressure, 4: differential pressure, 5: GPS, 6: optical flow, 7: computer vision position, 8: laser based position, 9: external ground-truth (Vicon or Leica). Controllers: 10: 3D angular rate control 11: attitude stabilization, 12: yaw position, 13: z/altitude control, 14: x/y position control, 15: motor outputs / control (uint32_t)
onboard_control_sensors_health : Bitmask showing which onboard controllers and sensors are operational or have an error: Value of 0: not enabled. Value of 1: enabled. Indices: 0: 3D gyro, 1: 3D acc, 2: 3D mag, 3: absolute pressure, 4: differential pressure, 5: GPS, 6: optical flow, 7: computer vision position, 8: laser based position, 9: external ground-truth (Vicon or Leica). Controllers: 10: 3D angular rate control 11: attitude stabilization, 12: yaw position, 13: z/altitude control, 14: x/y position control, 15: motor outputs / control (uint32_t)
load : Maximum usage in percent of the mainloop time, (0%: 0, 100%: 1000) should be always below 1000 (uint16_t)
voltage_battery : Battery voltage, in millivolts (1 = 1 millivolt) (uint16_t)
current_battery : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t)
battery_remaining : Remaining battery energy: (0%: 0, 100%: 100), -1: autopilot estimate the remaining battery (int8_t)
drop_rate_comm : Communication drops in percent, (0%: 0, 100%: 10'000), (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t)
errors_comm : Communication errors (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t)
errors_count1 : Autopilot-specific errors (uint16_t)
errors_count2 : Autopilot-specific errors (uint16_t)
errors_count3 : Autopilot-specific errors (uint16_t)
errors_count4 : Autopilot-specific errors (uint16_t)
'''
msg = MAVLink_sys_status_message(onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4)
msg.pack(self)
return msg
def sys_status_send(self, onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4):
'''
The general system state. If the system is following the MAVLink
standard, the system state is mainly defined by three
orthogonal states/modes: The system mode, which is
either LOCKED (motors shut down and locked), MANUAL
(system under RC control), GUIDED (system with
autonomous position control, position setpoint
controlled manually) or AUTO (system guided by
path/waypoint planner). The NAV_MODE defined the
current flight state: LIFTOFF (often an open-loop
maneuver), LANDING, WAYPOINTS or VECTOR. This
represents the internal navigation state machine. The
system status shows wether the system is currently
active or not and if an emergency occured. During the
CRITICAL and EMERGENCY states the MAV is still
considered to be active, but should start emergency
procedures autonomously. After a failure occured it
should first move from active to critical to allow
manual intervention and then move to emergency after a
certain timeout.
onboard_control_sensors_present : Bitmask showing which onboard controllers and sensors are present. Value of 0: not present. Value of 1: present. Indices: 0: 3D gyro, 1: 3D acc, 2: 3D mag, 3: absolute pressure, 4: differential pressure, 5: GPS, 6: optical flow, 7: computer vision position, 8: laser based position, 9: external ground-truth (Vicon or Leica). Controllers: 10: 3D angular rate control 11: attitude stabilization, 12: yaw position, 13: z/altitude control, 14: x/y position control, 15: motor outputs / control (uint32_t)
onboard_control_sensors_enabled : Bitmask showing which onboard controllers and sensors are enabled: Value of 0: not enabled. Value of 1: enabled. Indices: 0: 3D gyro, 1: 3D acc, 2: 3D mag, 3: absolute pressure, 4: differential pressure, 5: GPS, 6: optical flow, 7: computer vision position, 8: laser based position, 9: external ground-truth (Vicon or Leica). Controllers: 10: 3D angular rate control 11: attitude stabilization, 12: yaw position, 13: z/altitude control, 14: x/y position control, 15: motor outputs / control (uint32_t)
onboard_control_sensors_health : Bitmask showing which onboard controllers and sensors are operational or have an error: Value of 0: not enabled. Value of 1: enabled. Indices: 0: 3D gyro, 1: 3D acc, 2: 3D mag, 3: absolute pressure, 4: differential pressure, 5: GPS, 6: optical flow, 7: computer vision position, 8: laser based position, 9: external ground-truth (Vicon or Leica). Controllers: 10: 3D angular rate control 11: attitude stabilization, 12: yaw position, 13: z/altitude control, 14: x/y position control, 15: motor outputs / control (uint32_t)
load : Maximum usage in percent of the mainloop time, (0%: 0, 100%: 1000) should be always below 1000 (uint16_t)
voltage_battery : Battery voltage, in millivolts (1 = 1 millivolt) (uint16_t)
current_battery : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t)
battery_remaining : Remaining battery energy: (0%: 0, 100%: 100), -1: autopilot estimate the remaining battery (int8_t)
drop_rate_comm : Communication drops in percent, (0%: 0, 100%: 10'000), (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t)
errors_comm : Communication errors (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t)
errors_count1 : Autopilot-specific errors (uint16_t)
errors_count2 : Autopilot-specific errors (uint16_t)
errors_count3 : Autopilot-specific errors (uint16_t)
errors_count4 : Autopilot-specific errors (uint16_t)
'''
return self.send(self.sys_status_encode(onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4))
def system_time_encode(self, time_unix_usec, time_boot_ms):
'''
The system time is the time of the master clock, typically the
computer clock of the main onboard computer.
time_unix_usec : Timestamp of the master clock in microseconds since UNIX epoch. (uint64_t)
time_boot_ms : Timestamp of the component clock since boot time in milliseconds. (uint32_t)
'''
msg = MAVLink_system_time_message(time_unix_usec, time_boot_ms)
msg.pack(self)
return msg
def system_time_send(self, time_unix_usec, time_boot_ms):
'''
The system time is the time of the master clock, typically the
computer clock of the main onboard computer.
time_unix_usec : Timestamp of the master clock in microseconds since UNIX epoch. (uint64_t)
time_boot_ms : Timestamp of the component clock since boot time in milliseconds. (uint32_t)
'''
return self.send(self.system_time_encode(time_unix_usec, time_boot_ms))
def ping_encode(self, time_usec, seq, target_system, target_component):
'''
A ping message either requesting or responding to a ping. This allows
to measure the system latencies, including serial
port, radio modem and UDP connections.
time_usec : Unix timestamp in microseconds (uint64_t)
seq : PING sequence (uint32_t)
target_system : 0: request ping from all receiving systems, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t)
target_component : 0: request ping from all receiving components, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t)
'''
msg = MAVLink_ping_message(time_usec, seq, target_system, target_component)
msg.pack(self)
return msg
def ping_send(self, time_usec, seq, target_system, target_component):
'''
A ping message either requesting or responding to a ping. This allows
to measure the system latencies, including serial
port, radio modem and UDP connections.
time_usec : Unix timestamp in microseconds (uint64_t)
seq : PING sequence (uint32_t)
target_system : 0: request ping from all receiving systems, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t)
target_component : 0: request ping from all receiving components, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t)
'''
return self.send(self.ping_encode(time_usec, seq, target_system, target_component))
def change_operator_control_encode(self, target_system, control_request, version, passkey):
'''
Request to control this MAV
target_system : System the GCS requests control for (uint8_t)
control_request : 0: request control of this MAV, 1: Release control of this MAV (uint8_t)
version : 0: key as plaintext, 1-255: future, different hashing/encryption variants. The GCS should in general use the safest mode possible initially and then gradually move down the encryption level if it gets a NACK message indicating an encryption mismatch. (uint8_t)
passkey : Password / Key, depending on version plaintext or encrypted. 25 or less characters, NULL terminated. The characters may involve A-Z, a-z, 0-9, and "!?,.-" (char)
'''
msg = MAVLink_change_operator_control_message(target_system, control_request, version, passkey)
msg.pack(self)
return msg
def change_operator_control_send(self, target_system, control_request, version, passkey):
'''
Request to control this MAV
target_system : System the GCS requests control for (uint8_t)
control_request : 0: request control of this MAV, 1: Release control of this MAV (uint8_t)
version : 0: key as plaintext, 1-255: future, different hashing/encryption variants. The GCS should in general use the safest mode possible initially and then gradually move down the encryption level if it gets a NACK message indicating an encryption mismatch. (uint8_t)
passkey : Password / Key, depending on version plaintext or encrypted. 25 or less characters, NULL terminated. The characters may involve A-Z, a-z, 0-9, and "!?,.-" (char)
'''
return self.send(self.change_operator_control_encode(target_system, control_request, version, passkey))
def change_operator_control_ack_encode(self, gcs_system_id, control_request, ack):
'''
Accept / deny control of this MAV
gcs_system_id : ID of the GCS this message (uint8_t)
control_request : 0: request control of this MAV, 1: Release control of this MAV (uint8_t)
ack : 0: ACK, 1: NACK: Wrong passkey, 2: NACK: Unsupported passkey encryption method, 3: NACK: Already under control (uint8_t)
'''
msg = MAVLink_change_operator_control_ack_message(gcs_system_id, control_request, ack)
msg.pack(self)
return msg
def change_operator_control_ack_send(self, gcs_system_id, control_request, ack):
'''
Accept / deny control of this MAV
gcs_system_id : ID of the GCS this message (uint8_t)
control_request : 0: request control of this MAV, 1: Release control of this MAV (uint8_t)
ack : 0: ACK, 1: NACK: Wrong passkey, 2: NACK: Unsupported passkey encryption method, 3: NACK: Already under control (uint8_t)
'''
return self.send(self.change_operator_control_ack_encode(gcs_system_id, control_request, ack))
def auth_key_encode(self, key):
'''
Emit an encrypted signature / key identifying this system. PLEASE
NOTE: This protocol has been kept simple, so
transmitting the key requires an encrypted channel for
true safety.
key : key (char)
'''
msg = MAVLink_auth_key_message(key)
msg.pack(self)
return msg
def auth_key_send(self, key):
'''
Emit an encrypted signature / key identifying this system. PLEASE
NOTE: This protocol has been kept simple, so
transmitting the key requires an encrypted channel for
true safety.
key : key (char)
'''
return self.send(self.auth_key_encode(key))
def set_mode_encode(self, target_system, base_mode, custom_mode):
'''
Set the system mode, as defined by enum MAV_MODE. There is no target
component id as the mode is by definition for the
overall aircraft, not only for one component.
target_system : The system setting the mode (uint8_t)
base_mode : The new base mode (uint8_t)
custom_mode : The new autopilot-specific mode. This field can be ignored by an autopilot. (uint32_t)
'''
msg = MAVLink_set_mode_message(target_system, base_mode, custom_mode)
msg.pack(self)
return msg
def set_mode_send(self, target_system, base_mode, custom_mode):
'''
Set the system mode, as defined by enum MAV_MODE. There is no target
component id as the mode is by definition for the
overall aircraft, not only for one component.
target_system : The system setting the mode (uint8_t)
base_mode : The new base mode (uint8_t)
custom_mode : The new autopilot-specific mode. This field can be ignored by an autopilot. (uint32_t)
'''
return self.send(self.set_mode_encode(target_system, base_mode, custom_mode))
def param_request_read_encode(self, target_system, target_component, param_id, param_index):
    """Build a packed PARAM_REQUEST_READ message.

    Requests the onboard parameter identified by param_id. Parameters are
    stored as key[const char*] -> value[float], so any component (e.g. a
    GCS) can request them without prior knowledge of parameter names. See
    http://qgroundcontrol.org/parameter_interface for full documentation.

    target_system    : System ID (uint8_t)
    target_component : Component ID (uint8_t)
    param_id         : parameter id string, NULL-terminated only if shorter
                       than 16 chars; callers need 16+1 bytes of storage (char)
    param_index      : parameter index; send -1 to select by param_id instead (int16_t)
    """
    packed = MAVLink_param_request_read_message(target_system, target_component, param_id, param_index)
    packed.pack(self)
    return packed
def param_request_read_send(self, target_system, target_component, param_id, param_index):
    """Encode a PARAM_REQUEST_READ message and transmit it on this channel.

    Field semantics are documented on param_request_read_encode.
    """
    message = self.param_request_read_encode(target_system, target_component, param_id, param_index)
    return self.send(message)
def param_request_list_encode(self, target_system, target_component):
    """Build a packed PARAM_REQUEST_LIST message.

    Asks a component for all of its parameters; after this request all
    parameters are emitted.

    target_system    : System ID (uint8_t)
    target_component : Component ID (uint8_t)
    """
    packed = MAVLink_param_request_list_message(target_system, target_component)
    packed.pack(self)
    return packed
def param_request_list_send(self, target_system, target_component):
    """Encode a PARAM_REQUEST_LIST message and transmit it on this channel.

    Field semantics are documented on param_request_list_encode.
    """
    message = self.param_request_list_encode(target_system, target_component)
    return self.send(message)
def param_value_encode(self, param_id, param_value, param_type, param_count, param_index):
    """Build a packed PARAM_VALUE message.

    Emits the value of one onboard parameter. Including param_count and
    param_index lets the recipient track received parameters and
    re-request any that were lost or timed out.

    param_id    : parameter id string, NULL-terminated only if shorter than
                  16 chars; callers need 16+1 bytes of storage (char)
    param_value : onboard parameter value (float)
    param_type  : parameter type, see the MAV_PARAM_TYPE enum (uint8_t)
    param_count : total number of onboard parameters (uint16_t)
    param_index : index of this onboard parameter (uint16_t)
    """
    packed = MAVLink_param_value_message(param_id, param_value, param_type, param_count, param_index)
    packed.pack(self)
    return packed
def param_value_send(self, param_id, param_value, param_type, param_count, param_index):
    """Encode a PARAM_VALUE message and transmit it on this channel.

    Field semantics are documented on param_value_encode.
    """
    message = self.param_value_encode(param_id, param_value, param_type, param_count, param_index)
    return self.send(message)
def param_set_encode(self, target_system, target_component, param_id, param_value, param_type):
    """Build a packed PARAM_SET message.

    Sets a parameter value TEMPORARILY in RAM (reset to default on
    reboot); send MAV_ACTION_STORAGE_WRITE to persist RAM contents to
    EEPROM. IMPORTANT: the receiver should acknowledge by broadcasting a
    PARAM_VALUE message so all GCS stay up to date; a GCS that gets no
    PARAM_VALUE within its timeout should re-send this PARAM_SET.

    target_system    : System ID (uint8_t)
    target_component : Component ID (uint8_t)
    param_id         : parameter id string, NULL-terminated only if shorter
                       than 16 chars; callers need 16+1 bytes of storage (char)
    param_value      : onboard parameter value (float)
    param_type       : parameter type, see the MAV_PARAM_TYPE enum (uint8_t)
    """
    packed = MAVLink_param_set_message(target_system, target_component, param_id, param_value, param_type)
    packed.pack(self)
    return packed
def param_set_send(self, target_system, target_component, param_id, param_value, param_type):
    """Encode a PARAM_SET message and transmit it on this channel.

    Field semantics are documented on param_set_encode.
    """
    message = self.param_set_encode(target_system, target_component, param_id, param_value, param_type)
    return self.send(message)
def gps_raw_int_encode(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible):
    """Build a packed GPS_RAW_INT message.

    Raw GPS sensor value -- NOT the system's global position estimate
    (see GLOBAL_POSITION for that). Coordinate frame is right-handed,
    Z-axis up (GPS frame).

    time_usec          : timestamp, microseconds since UNIX epoch or system boot (uint64_t)
    fix_type           : 0-1: no fix, 2: 2D fix, 3: 3D fix (uint8_t)
    lat, lon           : latitude / longitude in 1E7 degrees (int32_t)
    alt                : altitude in millimeters above MSL (int32_t)
    eph, epv           : GPS HDOP / VDOP in cm (m*100); 65535 if unknown (uint16_t)
    vel                : ground speed in m/s * 100; 65535 if unknown (uint16_t)
    cog                : course over ground, degrees * 100; 65535 if unknown (uint16_t)
    satellites_visible : number of satellites visible; 255 if unknown (uint8_t)
    """
    packed = MAVLink_gps_raw_int_message(time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible)
    packed.pack(self)
    return packed
def gps_raw_int_send(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible):
    """Encode a GPS_RAW_INT message and transmit it on this channel.

    Field semantics are documented on gps_raw_int_encode.
    """
    message = self.gps_raw_int_encode(time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible)
    return self.send(message)
def gps_status_encode(self, satellites_visible, satellite_prn, satellite_used, satellite_elevation, satellite_azimuth, satellite_snr):
    """Build a packed GPS_STATUS message.

    Per-satellite status from the GPS receiver, for up to 20 satellites;
    see GLOBAL_POSITION for the global position estimate.

    satellites_visible  : number of satellites visible (uint8_t)
    satellite_prn       : global satellite ID (uint8_t)
    satellite_used      : 0: not used, 1: used for localization (uint8_t)
    satellite_elevation : elevation, 0: overhead .. 90: horizon (uint8_t)
    satellite_azimuth   : direction, 0: 0 deg .. 255: 360 deg (uint8_t)
    satellite_snr       : signal to noise ratio (uint8_t)
    """
    packed = MAVLink_gps_status_message(satellites_visible, satellite_prn, satellite_used, satellite_elevation, satellite_azimuth, satellite_snr)
    packed.pack(self)
    return packed
def gps_status_send(self, satellites_visible, satellite_prn, satellite_used, satellite_elevation, satellite_azimuth, satellite_snr):
    """Encode a GPS_STATUS message and transmit it on this channel.

    Field semantics are documented on gps_status_encode.
    """
    message = self.gps_status_encode(satellites_visible, satellite_prn, satellite_used, satellite_elevation, satellite_azimuth, satellite_snr)
    return self.send(message)
def scaled_imu_encode(self, time_boot_ms, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag):
    """Build a packed SCALED_IMU message.

    9DOF IMU readings, scaled to the units below (contrast RAW_IMU,
    which carries unscaled values).

    time_boot_ms        : timestamp, milliseconds since system boot (uint32_t)
    xacc, yacc, zacc    : acceleration per axis (mg) (int16_t)
    xgyro, ygyro, zgyro : angular speed per axis (millirad/sec) (int16_t)
    xmag, ymag, zmag    : magnetic field per axis (milli tesla) (int16_t)
    """
    packed = MAVLink_scaled_imu_message(time_boot_ms, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag)
    packed.pack(self)
    return packed
def scaled_imu_send(self, time_boot_ms, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag):
    """Encode a SCALED_IMU message and transmit it on this channel.

    Field semantics are documented on scaled_imu_encode.
    """
    message = self.scaled_imu_encode(time_boot_ms, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag)
    return self.send(message)
def raw_imu_encode(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag):
    """Build a packed RAW_IMU message.

    9DOF IMU readings as true raw, unscaled values, for data capture and
    system debugging (contrast SCALED_IMU).

    time_usec           : timestamp, microseconds since UNIX epoch or system boot (uint64_t)
    xacc, yacc, zacc    : raw acceleration per axis (int16_t)
    xgyro, ygyro, zgyro : raw angular speed per axis (int16_t)
    xmag, ymag, zmag    : raw magnetic field per axis (int16_t)
    """
    packed = MAVLink_raw_imu_message(time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag)
    packed.pack(self)
    return packed
def raw_imu_send(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag):
    """Encode a RAW_IMU message and transmit it on this channel.

    Field semantics are documented on raw_imu_encode.
    """
    message = self.raw_imu_encode(time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag)
    return self.send(message)
def raw_pressure_encode(self, time_usec, press_abs, press_diff1, press_diff2, temperature):
    """Build a packed RAW_PRESSURE message.

    Raw, UNSCALED ADC values for one absolute and one differential
    pressure sensor.

    time_usec   : timestamp, microseconds since UNIX epoch or system boot (uint64_t)
    press_abs   : absolute pressure (raw) (int16_t)
    press_diff1 : differential pressure 1 (raw) (int16_t)
    press_diff2 : differential pressure 2 (raw) (int16_t)
    temperature : raw temperature measurement (int16_t)
    """
    packed = MAVLink_raw_pressure_message(time_usec, press_abs, press_diff1, press_diff2, temperature)
    packed.pack(self)
    return packed
def raw_pressure_send(self, time_usec, press_abs, press_diff1, press_diff2, temperature):
    """Encode a RAW_PRESSURE message and transmit it on this channel.

    Field semantics are documented on raw_pressure_encode.
    """
    message = self.raw_pressure_encode(time_usec, press_abs, press_diff1, press_diff2, temperature)
    return self.send(message)
def scaled_pressure_encode(self, time_boot_ms, press_abs, press_diff, temperature):
    """Build a packed SCALED_PRESSURE message.

    Scaled readings from one absolute and one differential pressure
    sensor, in the units below.

    time_boot_ms : timestamp, milliseconds since system boot (uint32_t)
    press_abs    : absolute pressure (hectopascal) (float)
    press_diff   : differential pressure 1 (hectopascal) (float)
    temperature  : temperature (0.01 degrees celsius) (int16_t)
    """
    packed = MAVLink_scaled_pressure_message(time_boot_ms, press_abs, press_diff, temperature)
    packed.pack(self)
    return packed
def scaled_pressure_send(self, time_boot_ms, press_abs, press_diff, temperature):
    """Encode a SCALED_PRESSURE message and transmit it on this channel.

    Field semantics are documented on scaled_pressure_encode.
    """
    message = self.scaled_pressure_encode(time_boot_ms, press_abs, press_diff, temperature)
    return self.send(message)
def attitude_encode(self, time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed):
    """Build a packed ATTITUDE message.

    Attitude in the aeronautical frame (right-handed, Z-down, X-front,
    Y-right).

    time_boot_ms     : timestamp, milliseconds since system boot (uint32_t)
    roll, pitch, yaw : Euler angles (rad, -pi..+pi) (float)
    rollspeed, pitchspeed, yawspeed : angular speeds (rad/s) (float)
    """
    packed = MAVLink_attitude_message(time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed)
    packed.pack(self)
    return packed
def attitude_send(self, time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed):
    """Encode an ATTITUDE message and transmit it on this channel.

    Field semantics are documented on attitude_encode.
    """
    message = self.attitude_encode(time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed)
    return self.send(message)
def attitude_quaternion_encode(self, time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed):
    """Build a packed ATTITUDE_QUATERNION message.

    Attitude in the aeronautical frame (right-handed, Z-down, X-front,
    Y-right), expressed as a quaternion.

    time_boot_ms   : timestamp, milliseconds since system boot (uint32_t)
    q1, q2, q3, q4 : quaternion components 1-4 (float)
    rollspeed, pitchspeed, yawspeed : angular speeds (rad/s) (float)
    """
    packed = MAVLink_attitude_quaternion_message(time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed)
    packed.pack(self)
    return packed
def attitude_quaternion_send(self, time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed):
    """Encode an ATTITUDE_QUATERNION message and transmit it on this channel.

    Field semantics are documented on attitude_quaternion_encode.
    """
    message = self.attitude_quaternion_encode(time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed)
    return self.send(message)
def local_position_ned_encode(self, time_boot_ms, x, y, z, vx, vy, vz):
    """Build a packed LOCAL_POSITION_NED message.

    Filtered local position (e.g. fused computer vision and
    accelerometers). Coordinate frame is right-handed, Z-axis down
    (aeronautical / NED, north-east-down convention).

    time_boot_ms : timestamp, milliseconds since system boot (uint32_t)
    x, y, z      : position per axis (float)
    vx, vy, vz   : speed per axis (float)
    """
    packed = MAVLink_local_position_ned_message(time_boot_ms, x, y, z, vx, vy, vz)
    packed.pack(self)
    return packed
def local_position_ned_send(self, time_boot_ms, x, y, z, vx, vy, vz):
    """Encode a LOCAL_POSITION_NED message and transmit it on this channel.

    Field semantics are documented on local_position_ned_encode.
    """
    message = self.local_position_ned_encode(time_boot_ms, x, y, z, vx, vy, vz)
    return self.send(message)
def global_position_int_encode(self, time_boot_ms, lat, lon, alt, relative_alt, vx, vy, vz, hdg):
    """Build a packed GLOBAL_POSITION_INT message.

    Filtered global position (e.g. fused GPS and accelerometers) in
    GPS-frame (right-handed, Z-up), as scaled integers because float
    resolution is insufficient.

    time_boot_ms : timestamp, milliseconds since system boot (uint32_t)
    lat, lon     : latitude / longitude * 1E7 (int32_t)
    alt          : altitude above MSL, millimeters (int32_t)
    relative_alt : altitude above ground, millimeters (int32_t)
    vx, vy, vz   : ground speed (lat/lon/alt axes), m/s * 100 (int16_t)
    hdg          : compass heading, degrees * 100; 65535 if unknown (uint16_t)
    """
    packed = MAVLink_global_position_int_message(time_boot_ms, lat, lon, alt, relative_alt, vx, vy, vz, hdg)
    packed.pack(self)
    return packed
def global_position_int_send(self, time_boot_ms, lat, lon, alt, relative_alt, vx, vy, vz, hdg):
    """Encode a GLOBAL_POSITION_INT message and transmit it on this channel.

    Field semantics are documented on global_position_int_encode.
    """
    message = self.global_position_int_encode(time_boot_ms, lat, lon, alt, relative_alt, vx, vy, vz, hdg)
    return self.send(message)
def rc_channels_scaled_encode(self, time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi):
    """Build a packed RC_CHANNELS_SCALED message.

    Scaled RC channel values: (-100%) -10000, (0%) 0, (100%) 10000;
    inactive channels should be set to 65535.

    time_boot_ms : timestamp, milliseconds since system boot (uint32_t)
    port         : servo output port (set of 8 outputs = 1 port); allows
                   more than 8 servos (uint8_t)
    chan1_scaled .. chan8_scaled : channel values scaled as above,
                   (invalid) 32767 (int16_t)
    rssi         : receive signal strength, 0: 0%, 100: 100%,
                   255: invalid/unknown (uint8_t)
    """
    packed = MAVLink_rc_channels_scaled_message(time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi)
    packed.pack(self)
    return packed
def rc_channels_scaled_send(self, time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi):
    """Encode an RC_CHANNELS_SCALED message and transmit it on this channel.

    Field semantics are documented on rc_channels_scaled_encode.
    """
    message = self.rc_channels_scaled_encode(time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi)
    return self.send(message)
def rc_channels_raw_encode(self, time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi):
    """Build a packed RC_CHANNELS_RAW message.

    Raw RC channel values. Standard PPM modulation: 1000 us = 0%,
    2000 us = 100%; individual receivers/transmitters may violate this.

    time_boot_ms : timestamp, milliseconds since system boot (uint32_t)
    port         : servo output port (set of 8 outputs = 1 port); allows
                   more than 8 servos (uint8_t)
    chan1_raw .. chan8_raw : channel values in microseconds; 65535 means
                   the channel is unused (uint16_t)
    rssi         : receive signal strength, 0: 0%, 100: 100%,
                   255: invalid/unknown (uint8_t)
    """
    packed = MAVLink_rc_channels_raw_message(time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi)
    packed.pack(self)
    return packed
def rc_channels_raw_send(self, time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi):
    """Encode an RC_CHANNELS_RAW message and transmit it on this channel.

    Field semantics are documented on rc_channels_raw_encode.
    """
    message = self.rc_channels_raw_encode(time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi)
    return self.send(message)
def servo_output_raw_encode(self, time_boot_ms, port, servo1_raw, servo2_raw, servo3_raw, servo4_raw, servo5_raw, servo6_raw, servo7_raw, servo8_raw):
    """Build a packed SERVO_OUTPUT_RAW message.

    Raw servo output values (for RC input from the remote use the
    RC_CHANNELS messages). Standard PPM modulation: 1000 us = 0%,
    2000 us = 100%.

    time_boot_ms : timestamp, microseconds since system boot (uint32_t)
    port         : servo output port (set of 8 outputs = 1 port); allows
                   encoding more than 8 servos (uint8_t)
    servo1_raw .. servo8_raw : servo output values in microseconds (uint16_t)
    """
    packed = MAVLink_servo_output_raw_message(time_boot_ms, port, servo1_raw, servo2_raw, servo3_raw, servo4_raw, servo5_raw, servo6_raw, servo7_raw, servo8_raw)
    packed.pack(self)
    return packed
def servo_output_raw_send(self, time_boot_ms, port, servo1_raw, servo2_raw, servo3_raw, servo4_raw, servo5_raw, servo6_raw, servo7_raw, servo8_raw):
'''
The RAW values of the servo outputs (for RC input from the remote, use
the RC_CHANNELS messages). The standard PPM modulation
is as follows: 1000 microseconds: 0%, 2000
microseconds: 100%.
time_boot_ms : Timestamp (microseconds since system boot) (uint32_t)
port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows to encode more than 8 servos. (uint8_t)
servo1_raw : Servo output 1 value, in microseconds (uint16_t)
servo2_raw : Servo output 2 value, in microseconds (uint16_t)
servo3_raw : Servo output 3 value, in microseconds (uint16_t)
servo4_raw : Servo output 4 value, in microseconds (uint16_t)
servo5_raw : Servo output 5 value, in microseconds (uint16_t)
servo6_raw : Servo output 6 value, in microseconds (uint16_t)
servo7_raw : Servo output 7 value, in microseconds (uint16_t)
servo8_raw : Servo output 8 value, in microseconds (uint16_t)
'''
return self.send(self.servo_output_raw_encode(time_boot_ms, port, servo1_raw, servo2_raw, servo3_raw, servo4_raw, servo5_raw, servo6_raw, servo7_raw, servo8_raw))
def mission_request_partial_list_encode(self, target_system, target_component, start_index, end_index):
'''
Request a partial list of mission items from the system/component.
http://qgroundcontrol.org/mavlink/waypoint_protocol.
If start and end index are the same, just send one
waypoint.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
start_index : Start index, 0 by default (int16_t)
end_index : End index, -1 by default (-1: send list to end). Else a valid index of the list (int16_t)
'''
msg = MAVLink_mission_request_partial_list_message(target_system, target_component, start_index, end_index)
msg.pack(self)
return msg
def mission_request_partial_list_send(self, target_system, target_component, start_index, end_index):
'''
Request a partial list of mission items from the system/component.
http://qgroundcontrol.org/mavlink/waypoint_protocol.
If start and end index are the same, just send one
waypoint.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
start_index : Start index, 0 by default (int16_t)
end_index : End index, -1 by default (-1: send list to end). Else a valid index of the list (int16_t)
'''
return self.send(self.mission_request_partial_list_encode(target_system, target_component, start_index, end_index))
def mission_write_partial_list_encode(self, target_system, target_component, start_index, end_index):
'''
This message is sent to the MAV to write a partial list. If start
index == end index, only one item will be transmitted
/ updated. If the start index is NOT 0 and above the
current list size, this request should be REJECTED!
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
start_index : Start index, 0 by default and smaller / equal to the largest index of the current onboard list. (int16_t)
end_index : End index, equal or greater than start index. (int16_t)
'''
msg = MAVLink_mission_write_partial_list_message(target_system, target_component, start_index, end_index)
msg.pack(self)
return msg
def mission_write_partial_list_send(self, target_system, target_component, start_index, end_index):
'''
This message is sent to the MAV to write a partial list. If start
index == end index, only one item will be transmitted
/ updated. If the start index is NOT 0 and above the
current list size, this request should be REJECTED!
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
start_index : Start index, 0 by default and smaller / equal to the largest index of the current onboard list. (int16_t)
end_index : End index, equal or greater than start index. (int16_t)
'''
return self.send(self.mission_write_partial_list_encode(target_system, target_component, start_index, end_index))
def mission_item_encode(self, target_system, target_component, seq, frame, command, current, autocontinue, param1, param2, param3, param4, x, y, z):
'''
Message encoding a mission item. This message is emitted to announce
the presence of a mission item and to set a mission
item on the system. The mission item can be either in
x, y, z meters (type: LOCAL) or x:lat, y:lon,
z:altitude. Local frame is Z-down, right handed (NED),
global frame is Z-up, right handed (ENU). See also
http://qgroundcontrol.org/mavlink/waypoint_protocol.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
seq : Sequence (uint16_t)
frame : The coordinate system of the MISSION. see MAV_FRAME in mavlink_types.h (uint8_t)
command : The scheduled action for the MISSION. see MAV_CMD in common.xml MAVLink specs (uint16_t)
current : false:0, true:1 (uint8_t)
autocontinue : autocontinue to next wp (uint8_t)
param1 : PARAM1 / For NAV command MISSIONs: Radius in which the MISSION is accepted as reached, in meters (float)
param2 : PARAM2 / For NAV command MISSIONs: Time that the MAV should stay inside the PARAM1 radius before advancing, in milliseconds (float)
param3 : PARAM3 / For LOITER command MISSIONs: Orbit to circle around the MISSION, in meters. If positive the orbit direction should be clockwise, if negative the orbit direction should be counter-clockwise. (float)
param4 : PARAM4 / For NAV and LOITER command MISSIONs: Yaw orientation in degrees, [0..360] 0 = NORTH (float)
x : PARAM5 / local: x position, global: latitude (float)
y : PARAM6 / y position: global: longitude (float)
z : PARAM7 / z position: global: altitude (float)
'''
msg = MAVLink_mission_item_message(target_system, target_component, seq, frame, command, current, autocontinue, param1, param2, param3, param4, x, y, z)
msg.pack(self)
return msg
def mission_item_send(self, target_system, target_component, seq, frame, command, current, autocontinue, param1, param2, param3, param4, x, y, z):
'''
Message encoding a mission item. This message is emitted to announce
the presence of a mission item and to set a mission
item on the system. The mission item can be either in
x, y, z meters (type: LOCAL) or x:lat, y:lon,
z:altitude. Local frame is Z-down, right handed (NED),
global frame is Z-up, right handed (ENU). See also
http://qgroundcontrol.org/mavlink/waypoint_protocol.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
seq : Sequence (uint16_t)
frame : The coordinate system of the MISSION. see MAV_FRAME in mavlink_types.h (uint8_t)
command : The scheduled action for the MISSION. see MAV_CMD in common.xml MAVLink specs (uint16_t)
current : false:0, true:1 (uint8_t)
autocontinue : autocontinue to next wp (uint8_t)
param1 : PARAM1 / For NAV command MISSIONs: Radius in which the MISSION is accepted as reached, in meters (float)
param2 : PARAM2 / For NAV command MISSIONs: Time that the MAV should stay inside the PARAM1 radius before advancing, in milliseconds (float)
param3 : PARAM3 / For LOITER command MISSIONs: Orbit to circle around the MISSION, in meters. If positive the orbit direction should be clockwise, if negative the orbit direction should be counter-clockwise. (float)
param4 : PARAM4 / For NAV and LOITER command MISSIONs: Yaw orientation in degrees, [0..360] 0 = NORTH (float)
x : PARAM5 / local: x position, global: latitude (float)
y : PARAM6 / y position: global: longitude (float)
z : PARAM7 / z position: global: altitude (float)
'''
return self.send(self.mission_item_encode(target_system, target_component, seq, frame, command, current, autocontinue, param1, param2, param3, param4, x, y, z))
def mission_request_encode(self, target_system, target_component, seq):
'''
Request the information of the mission item with the sequence number
seq. The response of the system to this message should
be a MISSION_ITEM message.
http://qgroundcontrol.org/mavlink/waypoint_protocol
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
seq : Sequence (uint16_t)
'''
msg = MAVLink_mission_request_message(target_system, target_component, seq)
msg.pack(self)
return msg
def mission_request_send(self, target_system, target_component, seq):
'''
Request the information of the mission item with the sequence number
seq. The response of the system to this message should
be a MISSION_ITEM message.
http://qgroundcontrol.org/mavlink/waypoint_protocol
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
seq : Sequence (uint16_t)
'''
return self.send(self.mission_request_encode(target_system, target_component, seq))
def mission_set_current_encode(self, target_system, target_component, seq):
'''
Set the mission item with sequence number seq as current item. This
means that the MAV will continue to this mission item
on the shortest path (not following the mission items
in-between).
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
seq : Sequence (uint16_t)
'''
msg = MAVLink_mission_set_current_message(target_system, target_component, seq)
msg.pack(self)
return msg
def mission_set_current_send(self, target_system, target_component, seq):
'''
Set the mission item with sequence number seq as current item. This
means that the MAV will continue to this mission item
on the shortest path (not following the mission items
in-between).
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
seq : Sequence (uint16_t)
'''
return self.send(self.mission_set_current_encode(target_system, target_component, seq))
def mission_current_encode(self, seq):
'''
Message that announces the sequence number of the current active
mission item. The MAV will fly towards this mission
item.
seq : Sequence (uint16_t)
'''
msg = MAVLink_mission_current_message(seq)
msg.pack(self)
return msg
def mission_current_send(self, seq):
'''
Message that announces the sequence number of the current active
mission item. The MAV will fly towards this mission
item.
seq : Sequence (uint16_t)
'''
return self.send(self.mission_current_encode(seq))
def mission_request_list_encode(self, target_system, target_component):
'''
Request the overall list of mission items from the system/component.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
'''
msg = MAVLink_mission_request_list_message(target_system, target_component)
msg.pack(self)
return msg
def mission_request_list_send(self, target_system, target_component):
'''
Request the overall list of mission items from the system/component.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
'''
return self.send(self.mission_request_list_encode(target_system, target_component))
def mission_count_encode(self, target_system, target_component, count):
'''
This message is emitted as response to MISSION_REQUEST_LIST by the MAV
and to initiate a write transaction. The GCS can then
request the individual mission item based on the
knowledge of the total number of MISSIONs.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
count : Number of mission items in the sequence (uint16_t)
'''
msg = MAVLink_mission_count_message(target_system, target_component, count)
msg.pack(self)
return msg
def mission_count_send(self, target_system, target_component, count):
'''
This message is emitted as response to MISSION_REQUEST_LIST by the MAV
and to initiate a write transaction. The GCS can then
request the individual mission item based on the
knowledge of the total number of MISSIONs.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
count : Number of mission items in the sequence (uint16_t)
'''
return self.send(self.mission_count_encode(target_system, target_component, count))
def mission_clear_all_encode(self, target_system, target_component):
'''
Delete all mission items at once.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
'''
msg = MAVLink_mission_clear_all_message(target_system, target_component)
msg.pack(self)
return msg
def mission_clear_all_send(self, target_system, target_component):
'''
Delete all mission items at once.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
'''
return self.send(self.mission_clear_all_encode(target_system, target_component))
def mission_item_reached_encode(self, seq):
'''
A certain mission item has been reached. The system will either hold
this position (or circle on the orbit) or (if the
autocontinue on the WP was set) continue to the next
MISSION.
seq : Sequence (uint16_t)
'''
msg = MAVLink_mission_item_reached_message(seq)
msg.pack(self)
return msg
def mission_item_reached_send(self, seq):
'''
A certain mission item has been reached. The system will either hold
this position (or circle on the orbit) or (if the
autocontinue on the WP was set) continue to the next
MISSION.
seq : Sequence (uint16_t)
'''
return self.send(self.mission_item_reached_encode(seq))
def mission_ack_encode(self, target_system, target_component, type):
'''
Ack message during MISSION handling. The type field states if this
message is a positive ack (type=0) or if an error
happened (type=non-zero).
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
type : See MAV_MISSION_RESULT enum (uint8_t)
'''
msg = MAVLink_mission_ack_message(target_system, target_component, type)
msg.pack(self)
return msg
def mission_ack_send(self, target_system, target_component, type):
'''
Ack message during MISSION handling. The type field states if this
message is a positive ack (type=0) or if an error
happened (type=non-zero).
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
type : See MAV_MISSION_RESULT enum (uint8_t)
'''
return self.send(self.mission_ack_encode(target_system, target_component, type))
def set_gps_global_origin_encode(self, target_system, latitude, longitude, altitude):
'''
As local waypoints exist, the global MISSION reference allows to
transform between the local coordinate frame and the
global (GPS) coordinate frame. This can be necessary
when e.g. in- and outdoor settings are connected and
the MAV should move from in- to outdoor.
target_system : System ID (uint8_t)
latitude : global position * 1E7 (int32_t)
longitude : global position * 1E7 (int32_t)
altitude : global position * 1000 (int32_t)
'''
msg = MAVLink_set_gps_global_origin_message(target_system, latitude, longitude, altitude)
msg.pack(self)
return msg
def set_gps_global_origin_send(self, target_system, latitude, longitude, altitude):
'''
As local waypoints exist, the global MISSION reference allows to
transform between the local coordinate frame and the
global (GPS) coordinate frame. This can be necessary
when e.g. in- and outdoor settings are connected and
the MAV should move from in- to outdoor.
target_system : System ID (uint8_t)
latitude : global position * 1E7 (int32_t)
longitude : global position * 1E7 (int32_t)
altitude : global position * 1000 (int32_t)
'''
return self.send(self.set_gps_global_origin_encode(target_system, latitude, longitude, altitude))
def gps_global_origin_encode(self, latitude, longitude, altitude):
'''
Once the MAV sets a new GPS-Local correspondence, this message
announces the origin (0,0,0) position
latitude : Latitude (WGS84), expressed as * 1E7 (int32_t)
longitude : Longitude (WGS84), expressed as * 1E7 (int32_t)
altitude : Altitude(WGS84), expressed as * 1000 (int32_t)
'''
msg = MAVLink_gps_global_origin_message(latitude, longitude, altitude)
msg.pack(self)
return msg
def gps_global_origin_send(self, latitude, longitude, altitude):
'''
Once the MAV sets a new GPS-Local correspondence, this message
announces the origin (0,0,0) position
latitude : Latitude (WGS84), expressed as * 1E7 (int32_t)
longitude : Longitude (WGS84), expressed as * 1E7 (int32_t)
altitude : Altitude(WGS84), expressed as * 1000 (int32_t)
'''
return self.send(self.gps_global_origin_encode(latitude, longitude, altitude))
def set_local_position_setpoint_encode(self, target_system, target_component, coordinate_frame, x, y, z, yaw):
'''
Set the setpoint for a local position controller. This is the position
in local coordinates the MAV should fly to. This
message is sent by the path/MISSION planner to the
onboard position controller. As some MAVs have a
degree of freedom in yaw (e.g. all
helicopters/quadrotors), the desired yaw angle is part
of the message.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_LOCAL_NED or MAV_FRAME_LOCAL_ENU (uint8_t)
x : x position (float)
y : y position (float)
z : z position (float)
yaw : Desired yaw angle (float)
'''
msg = MAVLink_set_local_position_setpoint_message(target_system, target_component, coordinate_frame, x, y, z, yaw)
msg.pack(self)
return msg
def set_local_position_setpoint_send(self, target_system, target_component, coordinate_frame, x, y, z, yaw):
'''
Set the setpoint for a local position controller. This is the position
in local coordinates the MAV should fly to. This
message is sent by the path/MISSION planner to the
onboard position controller. As some MAVs have a
degree of freedom in yaw (e.g. all
helicopters/quadrotors), the desired yaw angle is part
of the message.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_LOCAL_NED or MAV_FRAME_LOCAL_ENU (uint8_t)
x : x position (float)
y : y position (float)
z : z position (float)
yaw : Desired yaw angle (float)
'''
return self.send(self.set_local_position_setpoint_encode(target_system, target_component, coordinate_frame, x, y, z, yaw))
def local_position_setpoint_encode(self, coordinate_frame, x, y, z, yaw):
'''
Transmit the current local setpoint of the controller to other MAVs
(collision avoidance) and to the GCS.
coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_LOCAL_NED or MAV_FRAME_LOCAL_ENU (uint8_t)
x : x position (float)
y : y position (float)
z : z position (float)
yaw : Desired yaw angle (float)
'''
msg = MAVLink_local_position_setpoint_message(coordinate_frame, x, y, z, yaw)
msg.pack(self)
return msg
def local_position_setpoint_send(self, coordinate_frame, x, y, z, yaw):
'''
Transmit the current local setpoint of the controller to other MAVs
(collision avoidance) and to the GCS.
coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_LOCAL_NED or MAV_FRAME_LOCAL_ENU (uint8_t)
x : x position (float)
y : y position (float)
z : z position (float)
yaw : Desired yaw angle (float)
'''
return self.send(self.local_position_setpoint_encode(coordinate_frame, x, y, z, yaw))
def global_position_setpoint_int_encode(self, coordinate_frame, latitude, longitude, altitude, yaw):
'''
Transmit the current local setpoint of the controller to other MAVs
(collision avoidance) and to the GCS.
coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_GLOBAL or MAV_FRAME_GLOBAL_RELATIVE_ALT (uint8_t)
latitude : WGS84 Latitude position in degrees * 1E7 (int32_t)
longitude : WGS84 Longitude position in degrees * 1E7 (int32_t)
altitude : WGS84 Altitude in meters * 1000 (positive for up) (int32_t)
yaw : Desired yaw angle in degrees * 100 (int16_t)
'''
msg = MAVLink_global_position_setpoint_int_message(coordinate_frame, latitude, longitude, altitude, yaw)
msg.pack(self)
return msg
def global_position_setpoint_int_send(self, coordinate_frame, latitude, longitude, altitude, yaw):
'''
Transmit the current local setpoint of the controller to other MAVs
(collision avoidance) and to the GCS.
coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_GLOBAL or MAV_FRAME_GLOBAL_RELATIVE_ALT (uint8_t)
latitude : WGS84 Latitude position in degrees * 1E7 (int32_t)
longitude : WGS84 Longitude position in degrees * 1E7 (int32_t)
altitude : WGS84 Altitude in meters * 1000 (positive for up) (int32_t)
yaw : Desired yaw angle in degrees * 100 (int16_t)
'''
return self.send(self.global_position_setpoint_int_encode(coordinate_frame, latitude, longitude, altitude, yaw))
def set_global_position_setpoint_int_encode(self, coordinate_frame, latitude, longitude, altitude, yaw):
'''
Set the current global position setpoint.
coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_GLOBAL or MAV_FRAME_GLOBAL_RELATIVE_ALT (uint8_t)
latitude : WGS84 Latitude position in degrees * 1E7 (int32_t)
longitude : WGS84 Longitude position in degrees * 1E7 (int32_t)
altitude : WGS84 Altitude in meters * 1000 (positive for up) (int32_t)
yaw : Desired yaw angle in degrees * 100 (int16_t)
'''
msg = MAVLink_set_global_position_setpoint_int_message(coordinate_frame, latitude, longitude, altitude, yaw)
msg.pack(self)
return msg
def set_global_position_setpoint_int_send(self, coordinate_frame, latitude, longitude, altitude, yaw):
'''
Set the current global position setpoint.
coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_GLOBAL or MAV_FRAME_GLOBAL_RELATIVE_ALT (uint8_t)
latitude : WGS84 Latitude position in degrees * 1E7 (int32_t)
longitude : WGS84 Longitude position in degrees * 1E7 (int32_t)
altitude : WGS84 Altitude in meters * 1000 (positive for up) (int32_t)
yaw : Desired yaw angle in degrees * 100 (int16_t)
'''
return self.send(self.set_global_position_setpoint_int_encode(coordinate_frame, latitude, longitude, altitude, yaw))
def safety_set_allowed_area_encode(self, target_system, target_component, frame, p1x, p1y, p1z, p2x, p2y, p2z):
'''
Set a safety zone (volume), which is defined by two corners of a cube.
This message can be used to tell the MAV which
setpoints/MISSIONs to accept and which to reject.
Safety areas are often enforced by national or
competition regulations.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
frame : Coordinate frame, as defined by MAV_FRAME enum in mavlink_types.h. Can be either global, GPS, right-handed with Z axis up or local, right handed, Z axis down. (uint8_t)
p1x : x position 1 / Latitude 1 (float)
p1y : y position 1 / Longitude 1 (float)
p1z : z position 1 / Altitude 1 (float)
p2x : x position 2 / Latitude 2 (float)
p2y : y position 2 / Longitude 2 (float)
p2z : z position 2 / Altitude 2 (float)
'''
msg = MAVLink_safety_set_allowed_area_message(target_system, target_component, frame, p1x, p1y, p1z, p2x, p2y, p2z)
msg.pack(self)
return msg
def safety_set_allowed_area_send(self, target_system, target_component, frame, p1x, p1y, p1z, p2x, p2y, p2z):
'''
Set a safety zone (volume), which is defined by two corners of a cube.
This message can be used to tell the MAV which
setpoints/MISSIONs to accept and which to reject.
Safety areas are often enforced by national or
competition regulations.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
frame : Coordinate frame, as defined by MAV_FRAME enum in mavlink_types.h. Can be either global, GPS, right-handed with Z axis up or local, right handed, Z axis down. (uint8_t)
p1x : x position 1 / Latitude 1 (float)
p1y : y position 1 / Longitude 1 (float)
p1z : z position 1 / Altitude 1 (float)
p2x : x position 2 / Latitude 2 (float)
p2y : y position 2 / Longitude 2 (float)
p2z : z position 2 / Altitude 2 (float)
'''
return self.send(self.safety_set_allowed_area_encode(target_system, target_component, frame, p1x, p1y, p1z, p2x, p2y, p2z))
def safety_allowed_area_encode(self, frame, p1x, p1y, p1z, p2x, p2y, p2z):
'''
Read out the safety zone the MAV currently assumes.
frame : Coordinate frame, as defined by MAV_FRAME enum in mavlink_types.h. Can be either global, GPS, right-handed with Z axis up or local, right handed, Z axis down. (uint8_t)
p1x : x position 1 / Latitude 1 (float)
p1y : y position 1 / Longitude 1 (float)
p1z : z position 1 / Altitude 1 (float)
p2x : x position 2 / Latitude 2 (float)
p2y : y position 2 / Longitude 2 (float)
p2z : z position 2 / Altitude 2 (float)
'''
msg = MAVLink_safety_allowed_area_message(frame, p1x, p1y, p1z, p2x, p2y, p2z)
msg.pack(self)
return msg
def safety_allowed_area_send(self, frame, p1x, p1y, p1z, p2x, p2y, p2z):
'''
Read out the safety zone the MAV currently assumes.
frame : Coordinate frame, as defined by MAV_FRAME enum in mavlink_types.h. Can be either global, GPS, right-handed with Z axis up or local, right handed, Z axis down. (uint8_t)
p1x : x position 1 / Latitude 1 (float)
p1y : y position 1 / Longitude 1 (float)
p1z : z position 1 / Altitude 1 (float)
p2x : x position 2 / Latitude 2 (float)
p2y : y position 2 / Longitude 2 (float)
p2z : z position 2 / Altitude 2 (float)
'''
return self.send(self.safety_allowed_area_encode(frame, p1x, p1y, p1z, p2x, p2y, p2z))
def set_roll_pitch_yaw_thrust_encode(self, target_system, target_component, roll, pitch, yaw, thrust):
'''
Set roll, pitch and yaw.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
roll : Desired roll angle in radians (float)
pitch : Desired pitch angle in radians (float)
yaw : Desired yaw angle in radians (float)
thrust : Collective thrust, normalized to 0 .. 1 (float)
'''
msg = MAVLink_set_roll_pitch_yaw_thrust_message(target_system, target_component, roll, pitch, yaw, thrust)
msg.pack(self)
return msg
def set_roll_pitch_yaw_thrust_send(self, target_system, target_component, roll, pitch, yaw, thrust):
'''
Set roll, pitch and yaw.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
roll : Desired roll angle in radians (float)
pitch : Desired pitch angle in radians (float)
yaw : Desired yaw angle in radians (float)
thrust : Collective thrust, normalized to 0 .. 1 (float)
'''
return self.send(self.set_roll_pitch_yaw_thrust_encode(target_system, target_component, roll, pitch, yaw, thrust))
def set_roll_pitch_yaw_speed_thrust_encode(self, target_system, target_component, roll_speed, pitch_speed, yaw_speed, thrust):
'''
Set roll, pitch and yaw.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
roll_speed : Desired roll angular speed in rad/s (float)
pitch_speed : Desired pitch angular speed in rad/s (float)
yaw_speed : Desired yaw angular speed in rad/s (float)
thrust : Collective thrust, normalized to 0 .. 1 (float)
'''
msg = MAVLink_set_roll_pitch_yaw_speed_thrust_message(target_system, target_component, roll_speed, pitch_speed, yaw_speed, thrust)
msg.pack(self)
return msg
def set_roll_pitch_yaw_speed_thrust_send(self, target_system, target_component, roll_speed, pitch_speed, yaw_speed, thrust):
'''
Set roll, pitch and yaw.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
roll_speed : Desired roll angular speed in rad/s (float)
pitch_speed : Desired pitch angular speed in rad/s (float)
yaw_speed : Desired yaw angular speed in rad/s (float)
thrust : Collective thrust, normalized to 0 .. 1 (float)
'''
return self.send(self.set_roll_pitch_yaw_speed_thrust_encode(target_system, target_component, roll_speed, pitch_speed, yaw_speed, thrust))
def roll_pitch_yaw_thrust_setpoint_encode(self, time_boot_ms, roll, pitch, yaw, thrust):
'''
Setpoint in roll, pitch, yaw currently active on the system.
time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
roll : Desired roll angle in radians (float)
pitch : Desired pitch angle in radians (float)
yaw : Desired yaw angle in radians (float)
thrust : Collective thrust, normalized to 0 .. 1 (float)
'''
msg = MAVLink_roll_pitch_yaw_thrust_setpoint_message(time_boot_ms, roll, pitch, yaw, thrust)
msg.pack(self)
return msg
def roll_pitch_yaw_thrust_setpoint_send(self, time_boot_ms, roll, pitch, yaw, thrust):
'''
Setpoint in roll, pitch, yaw currently active on the system.
time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
roll : Desired roll angle in radians (float)
pitch : Desired pitch angle in radians (float)
yaw : Desired yaw angle in radians (float)
thrust : Collective thrust, normalized to 0 .. 1 (float)
'''
return self.send(self.roll_pitch_yaw_thrust_setpoint_encode(time_boot_ms, roll, pitch, yaw, thrust))
def roll_pitch_yaw_speed_thrust_setpoint_encode(self, time_boot_ms, roll_speed, pitch_speed, yaw_speed, thrust):
'''
Setpoint in rollspeed, pitchspeed, yawspeed currently active on the
system.
time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
roll_speed : Desired roll angular speed in rad/s (float)
pitch_speed : Desired pitch angular speed in rad/s (float)
yaw_speed : Desired yaw angular speed in rad/s (float)
thrust : Collective thrust, normalized to 0 .. 1 (float)
'''
msg = MAVLink_roll_pitch_yaw_speed_thrust_setpoint_message(time_boot_ms, roll_speed, pitch_speed, yaw_speed, thrust)
msg.pack(self)
return msg
def roll_pitch_yaw_speed_thrust_setpoint_send(self, time_boot_ms, roll_speed, pitch_speed, yaw_speed, thrust):
'''
Setpoint in rollspeed, pitchspeed, yawspeed currently active on the
system.
time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
roll_speed : Desired roll angular speed in rad/s (float)
pitch_speed : Desired pitch angular speed in rad/s (float)
yaw_speed : Desired yaw angular speed in rad/s (float)
thrust : Collective thrust, normalized to 0 .. 1 (float)
'''
return self.send(self.roll_pitch_yaw_speed_thrust_setpoint_encode(time_boot_ms, roll_speed, pitch_speed, yaw_speed, thrust))
def set_quad_motors_setpoint_encode(self, target_system, motor_front_nw, motor_right_ne, motor_back_se, motor_left_sw):
'''
Setpoint in the four motor speeds
target_system : System ID of the system that should set these motor commands (uint8_t)
motor_front_nw : Front motor in + configuration, front left motor in x configuration (uint16_t)
motor_right_ne : Right motor in + configuration, front right motor in x configuration (uint16_t)
motor_back_se : Back motor in + configuration, back right motor in x configuration (uint16_t)
motor_left_sw : Left motor in + configuration, back left motor in x configuration (uint16_t)
'''
msg = MAVLink_set_quad_motors_setpoint_message(target_system, motor_front_nw, motor_right_ne, motor_back_se, motor_left_sw)
msg.pack(self)
return msg
def set_quad_motors_setpoint_send(self, target_system, motor_front_nw, motor_right_ne, motor_back_se, motor_left_sw):
'''
Setpoint in the four motor speeds
target_system : System ID of the system that should set these motor commands (uint8_t)
motor_front_nw : Front motor in + configuration, front left motor in x configuration (uint16_t)
motor_right_ne : Right motor in + configuration, front right motor in x configuration (uint16_t)
motor_back_se : Back motor in + configuration, back right motor in x configuration (uint16_t)
motor_left_sw : Left motor in + configuration, back left motor in x configuration (uint16_t)
'''
return self.send(self.set_quad_motors_setpoint_encode(target_system, motor_front_nw, motor_right_ne, motor_back_se, motor_left_sw))
def set_quad_swarm_roll_pitch_yaw_thrust_encode(self, group, mode, roll, pitch, yaw, thrust):
'''
Setpoint for up to four quadrotors in a group / wing
group : ID of the quadrotor group (0 - 255, up to 256 groups supported) (uint8_t)
mode : ID of the flight mode (0 - 255, up to 256 modes supported) (uint8_t)
roll : Desired roll angle in radians +-PI (+-32767) (int16_t)
pitch : Desired pitch angle in radians +-PI (+-32767) (int16_t)
yaw : Desired yaw angle in radians, scaled to int16 +-PI (+-32767) (int16_t)
thrust : Collective thrust, scaled to uint16 (0..65535) (uint16_t)
'''
msg = MAVLink_set_quad_swarm_roll_pitch_yaw_thrust_message(group, mode, roll, pitch, yaw, thrust)
msg.pack(self)
return msg
def set_quad_swarm_roll_pitch_yaw_thrust_send(self, group, mode, roll, pitch, yaw, thrust):
'''
Setpoint for up to four quadrotors in a group / wing
group : ID of the quadrotor group (0 - 255, up to 256 groups supported) (uint8_t)
mode : ID of the flight mode (0 - 255, up to 256 modes supported) (uint8_t)
roll : Desired roll angle in radians +-PI (+-32767) (int16_t)
pitch : Desired pitch angle in radians +-PI (+-32767) (int16_t)
yaw : Desired yaw angle in radians, scaled to int16 +-PI (+-32767) (int16_t)
thrust : Collective thrust, scaled to uint16 (0..65535) (uint16_t)
'''
return self.send(self.set_quad_swarm_roll_pitch_yaw_thrust_encode(group, mode, roll, pitch, yaw, thrust))
def nav_controller_output_encode(self, nav_roll, nav_pitch, nav_bearing, target_bearing, wp_dist, alt_error, aspd_error, xtrack_error):
        '''
        Build (but do not transmit) a NAV_CONTROLLER_OUTPUT message.
        The primary use of this message is to check the response and signs
        of the controller before actual flight and to assist with tuning
        controller parameters.

        nav_roll : Current desired roll in degrees (float)
        nav_pitch : Current desired pitch in degrees (float)
        nav_bearing : Current desired heading in degrees (int16_t)
        target_bearing : Bearing to current MISSION/target in degrees (int16_t)
        wp_dist : Distance to active MISSION in meters (uint16_t)
        alt_error : Current altitude error in meters (float)
        aspd_error : Current airspeed error in meters/second (float)
        xtrack_error : Current crosstrack error on x-y plane in meters (float)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_nav_controller_output_message(nav_roll, nav_pitch, nav_bearing, target_bearing, wp_dist, alt_error, aspd_error, xtrack_error)
        packed.pack(self)
        return packed
def nav_controller_output_send(self, nav_roll, nav_pitch, nav_bearing, target_bearing, wp_dist, alt_error, aspd_error, xtrack_error):
        '''
        Encode and transmit a NAV_CONTROLLER_OUTPUT message. The primary
        use of this message is to check the response and signs of the
        controller before actual flight and to assist with tuning
        controller parameters.

        nav_roll : Current desired roll in degrees (float)
        nav_pitch : Current desired pitch in degrees (float)
        nav_bearing : Current desired heading in degrees (int16_t)
        target_bearing : Bearing to current MISSION/target in degrees (int16_t)
        wp_dist : Distance to active MISSION in meters (uint16_t)
        alt_error : Current altitude error in meters (float)
        aspd_error : Current airspeed error in meters/second (float)
        xtrack_error : Current crosstrack error on x-y plane in meters (float)
        '''
        packed = self.nav_controller_output_encode(nav_roll, nav_pitch, nav_bearing, target_bearing, wp_dist, alt_error, aspd_error, xtrack_error)
        return self.send(packed)
def set_quad_swarm_led_roll_pitch_yaw_thrust_encode(self, group, mode, led_red, led_blue, led_green, roll, pitch, yaw, thrust):
        '''
        Build (but do not transmit) a setpoint-with-LED message for up to
        four quadrotors in a group / wing.

        NOTE(review): the upstream field descriptions label led_blue as the
        green channel and led_green as the blue channel; this mismatch comes
        from the message definition and is preserved here — confirm against
        the MAVLink XML before relying on channel order.

        group : ID of the quadrotor group (0 - 255, up to 256 groups supported) (uint8_t)
        mode : ID of the flight mode (0 - 255, up to 256 modes supported) (uint8_t)
        led_red : RGB red channel (0-255) (uint8_t)
        led_blue : RGB green channel (0-255) (uint8_t)
        led_green : RGB blue channel (0-255) (uint8_t)
        roll : Desired roll angle in radians +-PI (+-32767) (int16_t)
        pitch : Desired pitch angle in radians +-PI (+-32767) (int16_t)
        yaw : Desired yaw angle in radians, scaled to int16 +-PI (+-32767) (int16_t)
        thrust : Collective thrust, scaled to uint16 (0..65535) (uint16_t)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_set_quad_swarm_led_roll_pitch_yaw_thrust_message(group, mode, led_red, led_blue, led_green, roll, pitch, yaw, thrust)
        packed.pack(self)
        return packed
def set_quad_swarm_led_roll_pitch_yaw_thrust_send(self, group, mode, led_red, led_blue, led_green, roll, pitch, yaw, thrust):
        '''
        Encode and transmit a setpoint-with-LED message for up to four
        quadrotors in a group / wing.

        group : ID of the quadrotor group (0 - 255, up to 256 groups supported) (uint8_t)
        mode : ID of the flight mode (0 - 255, up to 256 modes supported) (uint8_t)
        led_red : RGB red channel (0-255) (uint8_t)
        led_blue : RGB green channel (0-255) (uint8_t)
        led_green : RGB blue channel (0-255) (uint8_t)
        roll : Desired roll angle in radians +-PI (+-32767) (int16_t)
        pitch : Desired pitch angle in radians +-PI (+-32767) (int16_t)
        yaw : Desired yaw angle in radians, scaled to int16 +-PI (+-32767) (int16_t)
        thrust : Collective thrust, scaled to uint16 (0..65535) (uint16_t)
        '''
        packed = self.set_quad_swarm_led_roll_pitch_yaw_thrust_encode(group, mode, led_red, led_blue, led_green, roll, pitch, yaw, thrust)
        return self.send(packed)
def state_correction_encode(self, xErr, yErr, zErr, rollErr, pitchErr, yawErr, vxErr, vyErr, vzErr):
        '''
        Build (but do not transmit) a STATE_CORRECTION message, which
        corrects the system's state by adding an error correction term to
        the position and velocity, and by rotating the attitude by a
        correction angle.

        xErr : x position error (float)
        yErr : y position error (float)
        zErr : z position error (float)
        rollErr : roll error (radians) (float)
        pitchErr : pitch error (radians) (float)
        yawErr : yaw error (radians) (float)
        vxErr : x velocity (float)
        vyErr : y velocity (float)
        vzErr : z velocity (float)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_state_correction_message(xErr, yErr, zErr, rollErr, pitchErr, yawErr, vxErr, vyErr, vzErr)
        packed.pack(self)
        return packed
def state_correction_send(self, xErr, yErr, zErr, rollErr, pitchErr, yawErr, vxErr, vyErr, vzErr):
        '''
        Encode and transmit a STATE_CORRECTION message, which corrects the
        system's state by adding an error correction term to the position
        and velocity, and by rotating the attitude by a correction angle.

        xErr : x position error (float)
        yErr : y position error (float)
        zErr : z position error (float)
        rollErr : roll error (radians) (float)
        pitchErr : pitch error (radians) (float)
        yawErr : yaw error (radians) (float)
        vxErr : x velocity (float)
        vyErr : y velocity (float)
        vzErr : z velocity (float)
        '''
        packed = self.state_correction_encode(xErr, yErr, zErr, rollErr, pitchErr, yawErr, vxErr, vyErr, vzErr)
        return self.send(packed)
def request_data_stream_encode(self, target_system, target_component, req_stream_id, req_message_rate, start_stop):
        '''
        Build (but do not transmit) a REQUEST_DATA_STREAM message.

        target_system : The target requested to send the message stream. (uint8_t)
        target_component : The target requested to send the message stream. (uint8_t)
        req_stream_id : The ID of the requested data stream (uint8_t)
        req_message_rate : The requested interval between two messages of this type (uint16_t)
        start_stop : 1 to start sending, 0 to stop sending. (uint8_t)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_request_data_stream_message(target_system, target_component, req_stream_id, req_message_rate, start_stop)
        packed.pack(self)
        return packed
def request_data_stream_send(self, target_system, target_component, req_stream_id, req_message_rate, start_stop):
        '''
        Encode and transmit a REQUEST_DATA_STREAM message.

        target_system : The target requested to send the message stream. (uint8_t)
        target_component : The target requested to send the message stream. (uint8_t)
        req_stream_id : The ID of the requested data stream (uint8_t)
        req_message_rate : The requested interval between two messages of this type (uint16_t)
        start_stop : 1 to start sending, 0 to stop sending. (uint8_t)
        '''
        packed = self.request_data_stream_encode(target_system, target_component, req_stream_id, req_message_rate, start_stop)
        return self.send(packed)
def data_stream_encode(self, stream_id, message_rate, on_off):
        '''
        Build (but do not transmit) a DATA_STREAM message.

        stream_id : The ID of the requested data stream (uint8_t)
        message_rate : The requested interval between two messages of this type (uint16_t)
        on_off : 1 stream is enabled, 0 stream is stopped. (uint8_t)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_data_stream_message(stream_id, message_rate, on_off)
        packed.pack(self)
        return packed
def data_stream_send(self, stream_id, message_rate, on_off):
        '''
        Encode and transmit a DATA_STREAM message.

        stream_id : The ID of the requested data stream (uint8_t)
        message_rate : The requested interval between two messages of this type (uint16_t)
        on_off : 1 stream is enabled, 0 stream is stopped. (uint8_t)
        '''
        packed = self.data_stream_encode(stream_id, message_rate, on_off)
        return self.send(packed)
def manual_control_encode(self, target, x, y, z, r, buttons):
        '''
        Build (but do not transmit) a MANUAL_CONTROL message. This message
        provides an API for manually controlling the vehicle using standard
        joystick axes nomenclature, along with a joystick-like input
        device. Unused axes can be disabled, and buttons are transmitted as
        boolean values of their current state.

        target : The system to be controlled. (uint8_t)
        x : X-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to forward(1000)-backward(-1000) movement on a joystick and the pitch of a vehicle. (int16_t)
        y : Y-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to left(-1000)-right(1000) movement on a joystick and the roll of a vehicle. (int16_t)
        z : Z-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to a separate slider movement with maximum being 1000 and minimum being -1000 on a joystick and the thrust of a vehicle. (int16_t)
        r : R-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to a twisting of the joystick, with counter-clockwise being 1000 and clockwise being -1000, and the yaw of a vehicle. (int16_t)
        buttons : A bitfield corresponding to the joystick buttons' current state, 1 for pressed, 0 for released. The lowest bit corresponds to Button 1. (uint16_t)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_manual_control_message(target, x, y, z, r, buttons)
        packed.pack(self)
        return packed
def manual_control_send(self, target, x, y, z, r, buttons):
        '''
        Encode and transmit a MANUAL_CONTROL message. This message provides
        an API for manually controlling the vehicle using standard joystick
        axes nomenclature, along with a joystick-like input device. Unused
        axes can be disabled, and buttons are transmitted as boolean values
        of their current state.

        target : The system to be controlled. (uint8_t)
        x : X-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to forward(1000)-backward(-1000) movement on a joystick and the pitch of a vehicle. (int16_t)
        y : Y-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to left(-1000)-right(1000) movement on a joystick and the roll of a vehicle. (int16_t)
        z : Z-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to a separate slider movement with maximum being 1000 and minimum being -1000 on a joystick and the thrust of a vehicle. (int16_t)
        r : R-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to a twisting of the joystick, with counter-clockwise being 1000 and clockwise being -1000, and the yaw of a vehicle. (int16_t)
        buttons : A bitfield corresponding to the joystick buttons' current state, 1 for pressed, 0 for released. The lowest bit corresponds to Button 1. (uint16_t)
        '''
        packed = self.manual_control_encode(target, x, y, z, r, buttons)
        return self.send(packed)
def rc_channels_override_encode(self, target_system, target_component, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw):
        '''
        Build (but do not transmit) an RC_CHANNELS_OVERRIDE message: the
        RAW values of the RC channels sent to the MAV to override info
        received from the RC radio. A value of -1 means no change to that
        channel. A value of 0 means control of that channel should be
        released back to the RC radio. The standard PPM modulation is as
        follows: 1000 microseconds: 0%, 2000 microseconds: 100%. Individual
        receivers/transmitters might violate this specification.

        target_system : System ID (uint8_t)
        target_component : Component ID (uint8_t)
        chan1_raw : RC channel 1 value, in microseconds (uint16_t)
        chan2_raw : RC channel 2 value, in microseconds (uint16_t)
        chan3_raw : RC channel 3 value, in microseconds (uint16_t)
        chan4_raw : RC channel 4 value, in microseconds (uint16_t)
        chan5_raw : RC channel 5 value, in microseconds (uint16_t)
        chan6_raw : RC channel 6 value, in microseconds (uint16_t)
        chan7_raw : RC channel 7 value, in microseconds (uint16_t)
        chan8_raw : RC channel 8 value, in microseconds (uint16_t)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_rc_channels_override_message(target_system, target_component, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw)
        packed.pack(self)
        return packed
def rc_channels_override_send(self, target_system, target_component, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw):
        '''
        Encode and transmit an RC_CHANNELS_OVERRIDE message: the RAW values
        of the RC channels sent to the MAV to override info received from
        the RC radio. A value of -1 means no change to that channel. A
        value of 0 means control of that channel should be released back to
        the RC radio. The standard PPM modulation is as follows: 1000
        microseconds: 0%, 2000 microseconds: 100%. Individual
        receivers/transmitters might violate this specification.

        target_system : System ID (uint8_t)
        target_component : Component ID (uint8_t)
        chan1_raw : RC channel 1 value, in microseconds (uint16_t)
        chan2_raw : RC channel 2 value, in microseconds (uint16_t)
        chan3_raw : RC channel 3 value, in microseconds (uint16_t)
        chan4_raw : RC channel 4 value, in microseconds (uint16_t)
        chan5_raw : RC channel 5 value, in microseconds (uint16_t)
        chan6_raw : RC channel 6 value, in microseconds (uint16_t)
        chan7_raw : RC channel 7 value, in microseconds (uint16_t)
        chan8_raw : RC channel 8 value, in microseconds (uint16_t)
        '''
        packed = self.rc_channels_override_encode(target_system, target_component, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw)
        return self.send(packed)
def vfr_hud_encode(self, airspeed, groundspeed, heading, throttle, alt, climb):
        '''
        Build (but do not transmit) a VFR_HUD message: metrics typically
        displayed on a HUD for fixed wing aircraft.

        airspeed : Current airspeed in m/s (float)
        groundspeed : Current ground speed in m/s (float)
        heading : Current heading in degrees, in compass units (0..360, 0=north) (int16_t)
        throttle : Current throttle setting in integer percent, 0 to 100 (uint16_t)
        alt : Current altitude (MSL), in meters (float)
        climb : Current climb rate in meters/second (float)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_vfr_hud_message(airspeed, groundspeed, heading, throttle, alt, climb)
        packed.pack(self)
        return packed
def vfr_hud_send(self, airspeed, groundspeed, heading, throttle, alt, climb):
        '''
        Encode and transmit a VFR_HUD message: metrics typically displayed
        on a HUD for fixed wing aircraft.

        airspeed : Current airspeed in m/s (float)
        groundspeed : Current ground speed in m/s (float)
        heading : Current heading in degrees, in compass units (0..360, 0=north) (int16_t)
        throttle : Current throttle setting in integer percent, 0 to 100 (uint16_t)
        alt : Current altitude (MSL), in meters (float)
        climb : Current climb rate in meters/second (float)
        '''
        packed = self.vfr_hud_encode(airspeed, groundspeed, heading, throttle, alt, climb)
        return self.send(packed)
def command_long_encode(self, target_system, target_component, command, confirmation, param1, param2, param3, param4, param5, param6, param7):
        '''
        Build (but do not transmit) a COMMAND_LONG message: a command with
        up to seven parameters for the MAV. (The original generated
        docstring said "four"; the message carries seven parameters.)

        target_system : System which should execute the command (uint8_t)
        target_component : Component which should execute the command, 0 for all components (uint8_t)
        command : Command ID, as defined by MAV_CMD enum. (uint16_t)
        confirmation : 0: First transmission of this command. 1-255: Confirmation transmissions (e.g. for kill command) (uint8_t)
        param1 : Parameter 1, as defined by MAV_CMD enum. (float)
        param2 : Parameter 2, as defined by MAV_CMD enum. (float)
        param3 : Parameter 3, as defined by MAV_CMD enum. (float)
        param4 : Parameter 4, as defined by MAV_CMD enum. (float)
        param5 : Parameter 5, as defined by MAV_CMD enum. (float)
        param6 : Parameter 6, as defined by MAV_CMD enum. (float)
        param7 : Parameter 7, as defined by MAV_CMD enum. (float)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_command_long_message(target_system, target_component, command, confirmation, param1, param2, param3, param4, param5, param6, param7)
        packed.pack(self)
        return packed
def command_long_send(self, target_system, target_component, command, confirmation, param1, param2, param3, param4, param5, param6, param7):
        '''
        Encode and transmit a COMMAND_LONG message: a command with up to
        seven parameters for the MAV.

        target_system : System which should execute the command (uint8_t)
        target_component : Component which should execute the command, 0 for all components (uint8_t)
        command : Command ID, as defined by MAV_CMD enum. (uint16_t)
        confirmation : 0: First transmission of this command. 1-255: Confirmation transmissions (e.g. for kill command) (uint8_t)
        param1 : Parameter 1, as defined by MAV_CMD enum. (float)
        param2 : Parameter 2, as defined by MAV_CMD enum. (float)
        param3 : Parameter 3, as defined by MAV_CMD enum. (float)
        param4 : Parameter 4, as defined by MAV_CMD enum. (float)
        param5 : Parameter 5, as defined by MAV_CMD enum. (float)
        param6 : Parameter 6, as defined by MAV_CMD enum. (float)
        param7 : Parameter 7, as defined by MAV_CMD enum. (float)
        '''
        packed = self.command_long_encode(target_system, target_component, command, confirmation, param1, param2, param3, param4, param5, param6, param7)
        return self.send(packed)
def command_ack_encode(self, command, result):
        '''
        Build (but do not transmit) a COMMAND_ACK message: a report of the
        status of a command, including feedback whether the command was
        executed.

        command : Command ID, as defined by MAV_CMD enum. (uint16_t)
        result : See MAV_RESULT enum (uint8_t)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_command_ack_message(command, result)
        packed.pack(self)
        return packed
def command_ack_send(self, command, result):
        '''
        Encode and transmit a COMMAND_ACK message: a report of the status
        of a command, including feedback whether the command was executed.

        command : Command ID, as defined by MAV_CMD enum. (uint16_t)
        result : See MAV_RESULT enum (uint8_t)
        '''
        packed = self.command_ack_encode(command, result)
        return self.send(packed)
def roll_pitch_yaw_rates_thrust_setpoint_encode(self, time_boot_ms, roll_rate, pitch_rate, yaw_rate, thrust):
        '''
        Build (but do not transmit) the setpoint in roll, pitch, yaw rates
        and thrust currently active on the system.

        time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
        roll_rate : Desired roll rate in radians per second (float)
        pitch_rate : Desired pitch rate in radians per second (float)
        yaw_rate : Desired yaw rate in radians per second (float)
        thrust : Collective thrust, normalized to 0 .. 1 (float)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_roll_pitch_yaw_rates_thrust_setpoint_message(time_boot_ms, roll_rate, pitch_rate, yaw_rate, thrust)
        packed.pack(self)
        return packed
def roll_pitch_yaw_rates_thrust_setpoint_send(self, time_boot_ms, roll_rate, pitch_rate, yaw_rate, thrust):
        '''
        Encode and transmit the setpoint in roll, pitch, yaw rates and
        thrust currently active on the system.

        time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
        roll_rate : Desired roll rate in radians per second (float)
        pitch_rate : Desired pitch rate in radians per second (float)
        yaw_rate : Desired yaw rate in radians per second (float)
        thrust : Collective thrust, normalized to 0 .. 1 (float)
        '''
        packed = self.roll_pitch_yaw_rates_thrust_setpoint_encode(time_boot_ms, roll_rate, pitch_rate, yaw_rate, thrust)
        return self.send(packed)
def manual_setpoint_encode(self, time_boot_ms, roll, pitch, yaw, thrust, mode_switch, manual_override_switch):
        '''
        Build (but do not transmit) a MANUAL_SETPOINT message: the setpoint
        in roll, pitch, yaw and thrust from the operator.

        time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
        roll : Desired roll rate in radians per second (float)
        pitch : Desired pitch rate in radians per second (float)
        yaw : Desired yaw rate in radians per second (float)
        thrust : Collective thrust, normalized to 0 .. 1 (float)
        mode_switch : Flight mode switch position, 0.. 255 (uint8_t)
        manual_override_switch : Override mode switch position, 0.. 255 (uint8_t)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_manual_setpoint_message(time_boot_ms, roll, pitch, yaw, thrust, mode_switch, manual_override_switch)
        packed.pack(self)
        return packed
def manual_setpoint_send(self, time_boot_ms, roll, pitch, yaw, thrust, mode_switch, manual_override_switch):
        '''
        Encode and transmit a MANUAL_SETPOINT message: the setpoint in
        roll, pitch, yaw and thrust from the operator.

        time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
        roll : Desired roll rate in radians per second (float)
        pitch : Desired pitch rate in radians per second (float)
        yaw : Desired yaw rate in radians per second (float)
        thrust : Collective thrust, normalized to 0 .. 1 (float)
        mode_switch : Flight mode switch position, 0.. 255 (uint8_t)
        manual_override_switch : Override mode switch position, 0.. 255 (uint8_t)
        '''
        packed = self.manual_setpoint_encode(time_boot_ms, roll, pitch, yaw, thrust, mode_switch, manual_override_switch)
        return self.send(packed)
def local_position_ned_system_global_offset_encode(self, time_boot_ms, x, y, z, roll, pitch, yaw):
        '''
        Build (but do not transmit) the offset in X, Y, Z and yaw between
        the LOCAL_POSITION_NED messages of MAV X and the global coordinate
        frame in NED coordinates. Coordinate frame is right-handed, Z-axis
        down (aeronautical frame, NED / north-east-down convention).

        time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
        x : X Position (float)
        y : Y Position (float)
        z : Z Position (float)
        roll : Roll (float)
        pitch : Pitch (float)
        yaw : Yaw (float)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_local_position_ned_system_global_offset_message(time_boot_ms, x, y, z, roll, pitch, yaw)
        packed.pack(self)
        return packed
def local_position_ned_system_global_offset_send(self, time_boot_ms, x, y, z, roll, pitch, yaw):
        '''
        Encode and transmit the offset in X, Y, Z and yaw between the
        LOCAL_POSITION_NED messages of MAV X and the global coordinate
        frame in NED coordinates. Coordinate frame is right-handed, Z-axis
        down (aeronautical frame, NED / north-east-down convention).

        time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
        x : X Position (float)
        y : Y Position (float)
        z : Z Position (float)
        roll : Roll (float)
        pitch : Pitch (float)
        yaw : Yaw (float)
        '''
        packed = self.local_position_ned_system_global_offset_encode(time_boot_ms, x, y, z, roll, pitch, yaw)
        return self.send(packed)
def hil_state_encode(self, time_usec, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed, lat, lon, alt, vx, vy, vz, xacc, yacc, zacc):
        '''
        Build (but do not transmit) a HIL_STATE message, sent from
        simulation to autopilot. This packet is useful for high throughput
        applications such as hardware in the loop simulations.

        time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
        roll : Roll angle (rad) (float)
        pitch : Pitch angle (rad) (float)
        yaw : Yaw angle (rad) (float)
        rollspeed : Roll angular speed (rad/s) (float)
        pitchspeed : Pitch angular speed (rad/s) (float)
        yawspeed : Yaw angular speed (rad/s) (float)
        lat : Latitude, expressed as * 1E7 (int32_t)
        lon : Longitude, expressed as * 1E7 (int32_t)
        alt : Altitude in meters, expressed as * 1000 (millimeters) (int32_t)
        vx : Ground X Speed (Latitude), expressed as m/s * 100 (int16_t)
        vy : Ground Y Speed (Longitude), expressed as m/s * 100 (int16_t)
        vz : Ground Z Speed (Altitude), expressed as m/s * 100 (int16_t)
        xacc : X acceleration (mg) (int16_t)
        yacc : Y acceleration (mg) (int16_t)
        zacc : Z acceleration (mg) (int16_t)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_hil_state_message(time_usec, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed, lat, lon, alt, vx, vy, vz, xacc, yacc, zacc)
        packed.pack(self)
        return packed
def hil_state_send(self, time_usec, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed, lat, lon, alt, vx, vy, vz, xacc, yacc, zacc):
        '''
        Encode and transmit a HIL_STATE message, sent from simulation to
        autopilot. This packet is useful for high throughput applications
        such as hardware in the loop simulations.

        time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
        roll : Roll angle (rad) (float)
        pitch : Pitch angle (rad) (float)
        yaw : Yaw angle (rad) (float)
        rollspeed : Roll angular speed (rad/s) (float)
        pitchspeed : Pitch angular speed (rad/s) (float)
        yawspeed : Yaw angular speed (rad/s) (float)
        lat : Latitude, expressed as * 1E7 (int32_t)
        lon : Longitude, expressed as * 1E7 (int32_t)
        alt : Altitude in meters, expressed as * 1000 (millimeters) (int32_t)
        vx : Ground X Speed (Latitude), expressed as m/s * 100 (int16_t)
        vy : Ground Y Speed (Longitude), expressed as m/s * 100 (int16_t)
        vz : Ground Z Speed (Altitude), expressed as m/s * 100 (int16_t)
        xacc : X acceleration (mg) (int16_t)
        yacc : Y acceleration (mg) (int16_t)
        zacc : Z acceleration (mg) (int16_t)
        '''
        packed = self.hil_state_encode(time_usec, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed, lat, lon, alt, vx, vy, vz, xacc, yacc, zacc)
        return self.send(packed)
def hil_controls_encode(self, time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode):
        '''
        Build (but do not transmit) a HIL_CONTROLS message, sent from
        autopilot to simulation: hardware-in-the-loop control outputs.

        time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
        roll_ailerons : Control output -1 .. 1 (float)
        pitch_elevator : Control output -1 .. 1 (float)
        yaw_rudder : Control output -1 .. 1 (float)
        throttle : Throttle 0 .. 1 (float)
        aux1 : Aux 1, -1 .. 1 (float)
        aux2 : Aux 2, -1 .. 1 (float)
        aux3 : Aux 3, -1 .. 1 (float)
        aux4 : Aux 4, -1 .. 1 (float)
        mode : System mode (MAV_MODE) (uint8_t)
        nav_mode : Navigation mode (MAV_NAV_MODE) (uint8_t)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_hil_controls_message(time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode)
        packed.pack(self)
        return packed
def hil_controls_send(self, time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode):
        '''
        Encode and transmit a HIL_CONTROLS message, sent from autopilot to
        simulation: hardware-in-the-loop control outputs.

        time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
        roll_ailerons : Control output -1 .. 1 (float)
        pitch_elevator : Control output -1 .. 1 (float)
        yaw_rudder : Control output -1 .. 1 (float)
        throttle : Throttle 0 .. 1 (float)
        aux1 : Aux 1, -1 .. 1 (float)
        aux2 : Aux 2, -1 .. 1 (float)
        aux3 : Aux 3, -1 .. 1 (float)
        aux4 : Aux 4, -1 .. 1 (float)
        mode : System mode (MAV_MODE) (uint8_t)
        nav_mode : Navigation mode (MAV_NAV_MODE) (uint8_t)
        '''
        packed = self.hil_controls_encode(time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode)
        return self.send(packed)
def hil_rc_inputs_raw_encode(self, time_usec, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, chan9_raw, chan10_raw, chan11_raw, chan12_raw, rssi):
        '''
        Build (but do not transmit) a HIL_RC_INPUTS_RAW message, sent from
        simulation to autopilot: the RAW values of the RC channels
        received. The standard PPM modulation is as follows: 1000
        microseconds: 0%, 2000 microseconds: 100%. Individual
        receivers/transmitters might violate this specification.

        time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
        chan1_raw : RC channel 1 value, in microseconds (uint16_t)
        chan2_raw : RC channel 2 value, in microseconds (uint16_t)
        chan3_raw : RC channel 3 value, in microseconds (uint16_t)
        chan4_raw : RC channel 4 value, in microseconds (uint16_t)
        chan5_raw : RC channel 5 value, in microseconds (uint16_t)
        chan6_raw : RC channel 6 value, in microseconds (uint16_t)
        chan7_raw : RC channel 7 value, in microseconds (uint16_t)
        chan8_raw : RC channel 8 value, in microseconds (uint16_t)
        chan9_raw : RC channel 9 value, in microseconds (uint16_t)
        chan10_raw : RC channel 10 value, in microseconds (uint16_t)
        chan11_raw : RC channel 11 value, in microseconds (uint16_t)
        chan12_raw : RC channel 12 value, in microseconds (uint16_t)
        rssi : Receive signal strength indicator, 0: 0%, 255: 100% (uint8_t)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_hil_rc_inputs_raw_message(time_usec, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, chan9_raw, chan10_raw, chan11_raw, chan12_raw, rssi)
        packed.pack(self)
        return packed
def hil_rc_inputs_raw_send(self, time_usec, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, chan9_raw, chan10_raw, chan11_raw, chan12_raw, rssi):
        '''
        Encode and transmit a HIL_RC_INPUTS_RAW message, sent from
        simulation to autopilot: the RAW values of the RC channels
        received. The standard PPM modulation is as follows: 1000
        microseconds: 0%, 2000 microseconds: 100%. Individual
        receivers/transmitters might violate this specification.

        time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
        chan1_raw : RC channel 1 value, in microseconds (uint16_t)
        chan2_raw : RC channel 2 value, in microseconds (uint16_t)
        chan3_raw : RC channel 3 value, in microseconds (uint16_t)
        chan4_raw : RC channel 4 value, in microseconds (uint16_t)
        chan5_raw : RC channel 5 value, in microseconds (uint16_t)
        chan6_raw : RC channel 6 value, in microseconds (uint16_t)
        chan7_raw : RC channel 7 value, in microseconds (uint16_t)
        chan8_raw : RC channel 8 value, in microseconds (uint16_t)
        chan9_raw : RC channel 9 value, in microseconds (uint16_t)
        chan10_raw : RC channel 10 value, in microseconds (uint16_t)
        chan11_raw : RC channel 11 value, in microseconds (uint16_t)
        chan12_raw : RC channel 12 value, in microseconds (uint16_t)
        rssi : Receive signal strength indicator, 0: 0%, 255: 100% (uint8_t)
        '''
        packed = self.hil_rc_inputs_raw_encode(time_usec, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, chan9_raw, chan10_raw, chan11_raw, chan12_raw, rssi)
        return self.send(packed)
def optical_flow_encode(self, time_usec, sensor_id, flow_x, flow_y, flow_comp_m_x, flow_comp_m_y, quality, ground_distance):
        '''
        Build (but do not transmit) an OPTICAL_FLOW message: optical flow
        from a flow sensor (e.g. optical mouse sensor).

        time_usec : Timestamp (UNIX) (uint64_t)
        sensor_id : Sensor ID (uint8_t)
        flow_x : Flow in pixels in x-sensor direction (int16_t)
        flow_y : Flow in pixels in y-sensor direction (int16_t)
        flow_comp_m_x : Flow in meters in x-sensor direction, angular-speed compensated (float)
        flow_comp_m_y : Flow in meters in y-sensor direction, angular-speed compensated (float)
        quality : Optical flow quality / confidence. 0: bad, 255: maximum quality (uint8_t)
        ground_distance : Ground distance in meters. Positive value: distance known. Negative value: Unknown distance (float)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_optical_flow_message(time_usec, sensor_id, flow_x, flow_y, flow_comp_m_x, flow_comp_m_y, quality, ground_distance)
        packed.pack(self)
        return packed
def optical_flow_send(self, time_usec, sensor_id, flow_x, flow_y, flow_comp_m_x, flow_comp_m_y, quality, ground_distance):
        '''
        Encode and transmit an OPTICAL_FLOW message: optical flow from a
        flow sensor (e.g. optical mouse sensor).

        time_usec : Timestamp (UNIX) (uint64_t)
        sensor_id : Sensor ID (uint8_t)
        flow_x : Flow in pixels in x-sensor direction (int16_t)
        flow_y : Flow in pixels in y-sensor direction (int16_t)
        flow_comp_m_x : Flow in meters in x-sensor direction, angular-speed compensated (float)
        flow_comp_m_y : Flow in meters in y-sensor direction, angular-speed compensated (float)
        quality : Optical flow quality / confidence. 0: bad, 255: maximum quality (uint8_t)
        ground_distance : Ground distance in meters. Positive value: distance known. Negative value: Unknown distance (float)
        '''
        packed = self.optical_flow_encode(time_usec, sensor_id, flow_x, flow_y, flow_comp_m_x, flow_comp_m_y, quality, ground_distance)
        return self.send(packed)
def global_vision_position_estimate_encode(self, usec, x, y, z, roll, pitch, yaw):
        '''
        Build (but do not transmit) a GLOBAL_VISION_POSITION_ESTIMATE
        message.

        usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
        x : Global X position (float)
        y : Global Y position (float)
        z : Global Z position (float)
        roll : Roll angle in rad (float)
        pitch : Pitch angle in rad (float)
        yaw : Yaw angle in rad (float)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_global_vision_position_estimate_message(usec, x, y, z, roll, pitch, yaw)
        packed.pack(self)
        return packed
def global_vision_position_estimate_send(self, usec, x, y, z, roll, pitch, yaw):
        '''
        Encode and transmit a GLOBAL_VISION_POSITION_ESTIMATE message.

        usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
        x : Global X position (float)
        y : Global Y position (float)
        z : Global Z position (float)
        roll : Roll angle in rad (float)
        pitch : Pitch angle in rad (float)
        yaw : Yaw angle in rad (float)
        '''
        packed = self.global_vision_position_estimate_encode(usec, x, y, z, roll, pitch, yaw)
        return self.send(packed)
def vision_position_estimate_encode(self, usec, x, y, z, roll, pitch, yaw):
        '''
        Build (but do not transmit) a VISION_POSITION_ESTIMATE message.

        usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
        x : Global X position (float)
        y : Global Y position (float)
        z : Global Z position (float)
        roll : Roll angle in rad (float)
        pitch : Pitch angle in rad (float)
        yaw : Yaw angle in rad (float)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_vision_position_estimate_message(usec, x, y, z, roll, pitch, yaw)
        packed.pack(self)
        return packed
def vision_position_estimate_send(self, usec, x, y, z, roll, pitch, yaw):
        '''
        Encode and transmit a VISION_POSITION_ESTIMATE message.

        usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
        x : Global X position (float)
        y : Global Y position (float)
        z : Global Z position (float)
        roll : Roll angle in rad (float)
        pitch : Pitch angle in rad (float)
        yaw : Yaw angle in rad (float)
        '''
        packed = self.vision_position_estimate_encode(usec, x, y, z, roll, pitch, yaw)
        return self.send(packed)
def vision_speed_estimate_encode(self, usec, x, y, z):
        '''
        Build (but do not transmit) a VISION_SPEED_ESTIMATE message.

        usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
        x : Global X speed (float)
        y : Global Y speed (float)
        z : Global Z speed (float)

        Returns the packed MAVLink message object.
        '''
        packed = MAVLink_vision_speed_estimate_message(usec, x, y, z)
        packed.pack(self)
        return packed
def vision_speed_estimate_send(self, usec, x, y, z):
    '''
    Encode and transmit a VISION_SPEED_ESTIMATE message over this
    MAVLink connection.

    usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
    x    : Global X speed (float)
    y    : Global Y speed (float)
    z    : Global Z speed (float)
    '''
    return self.send(self.vision_speed_estimate_encode(usec, x, y, z))
def vicon_position_estimate_encode(self, usec, x, y, z, roll, pitch, yaw):
    '''
    Build (but do not transmit) a VICON_POSITION_ESTIMATE message.

    usec  : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
    x     : Global X position (float)
    y     : Global Y position (float)
    z     : Global Z position (float)
    roll  : Roll angle in rad (float)
    pitch : Pitch angle in rad (float)
    yaw   : Yaw angle in rad (float)
    '''
    msg = MAVLink_vicon_position_estimate_message(usec, x, y, z, roll, pitch, yaw)
    # Serialize using this connection (self) as packing context.
    msg.pack(self)
    return msg
def vicon_position_estimate_send(self, usec, x, y, z, roll, pitch, yaw):
    '''
    Encode and transmit a VICON_POSITION_ESTIMATE message over this
    MAVLink connection.

    usec  : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
    x     : Global X position (float)
    y     : Global Y position (float)
    z     : Global Z position (float)
    roll  : Roll angle in rad (float)
    pitch : Pitch angle in rad (float)
    yaw   : Yaw angle in rad (float)
    '''
    return self.send(self.vicon_position_estimate_encode(usec, x, y, z, roll, pitch, yaw))
def highres_imu_encode(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated):
    '''
    Build (but do not transmit) a HIGHRES_IMU message: the IMU readings
    in SI units in NED body frame.

    time_usec      : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
    xacc           : X acceleration (m/s^2) (float)
    yacc           : Y acceleration (m/s^2) (float)
    zacc           : Z acceleration (m/s^2) (float)
    xgyro          : Angular speed around X axis (rad / sec) (float)
    ygyro          : Angular speed around Y axis (rad / sec) (float)
    zgyro          : Angular speed around Z axis (rad / sec) (float)
    xmag           : X Magnetic field (Gauss) (float)
    ymag           : Y Magnetic field (Gauss) (float)
    zmag           : Z Magnetic field (Gauss) (float)
    abs_pressure   : Absolute pressure in millibar (float)
    diff_pressure  : Differential pressure in millibar (float)
    pressure_alt   : Altitude calculated from pressure (float)
    temperature    : Temperature in degrees celsius (float)
    fields_updated : Bitmask for fields that have updated since last message, bit 0 = xacc, bit 12: temperature (uint16_t)
    '''
    msg = MAVLink_highres_imu_message(time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated)
    # Serialize using this connection (self) as packing context.
    msg.pack(self)
    return msg
def highres_imu_send(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated):
    '''
    Encode and transmit a HIGHRES_IMU message (IMU readings in SI units
    in NED body frame) over this MAVLink connection.

    time_usec      : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
    xacc           : X acceleration (m/s^2) (float)
    yacc           : Y acceleration (m/s^2) (float)
    zacc           : Z acceleration (m/s^2) (float)
    xgyro          : Angular speed around X axis (rad / sec) (float)
    ygyro          : Angular speed around Y axis (rad / sec) (float)
    zgyro          : Angular speed around Z axis (rad / sec) (float)
    xmag           : X Magnetic field (Gauss) (float)
    ymag           : Y Magnetic field (Gauss) (float)
    zmag           : Z Magnetic field (Gauss) (float)
    abs_pressure   : Absolute pressure in millibar (float)
    diff_pressure  : Differential pressure in millibar (float)
    pressure_alt   : Altitude calculated from pressure (float)
    temperature    : Temperature in degrees celsius (float)
    fields_updated : Bitmask for fields that have updated since last message, bit 0 = xacc, bit 12: temperature (uint16_t)
    '''
    return self.send(self.highres_imu_encode(time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated))
def file_transfer_start_encode(self, transfer_uid, dest_path, direction, file_size, flags):
    '''
    Build (but do not transmit) a FILE_TRANSFER_START message: begin
    file transfer.

    transfer_uid : Unique transfer ID (uint64_t)
    dest_path    : Destination path (char)
    direction    : Transfer direction: 0: from requester, 1: to requester (uint8_t)
    file_size    : File size in bytes (uint32_t)
    flags        : RESERVED (uint8_t)
    '''
    msg = MAVLink_file_transfer_start_message(transfer_uid, dest_path, direction, file_size, flags)
    # Serialize using this connection (self) as packing context.
    msg.pack(self)
    return msg
def file_transfer_start_send(self, transfer_uid, dest_path, direction, file_size, flags):
    '''
    Encode and transmit a FILE_TRANSFER_START message (begin file
    transfer) over this MAVLink connection.

    transfer_uid : Unique transfer ID (uint64_t)
    dest_path    : Destination path (char)
    direction    : Transfer direction: 0: from requester, 1: to requester (uint8_t)
    file_size    : File size in bytes (uint32_t)
    flags        : RESERVED (uint8_t)
    '''
    return self.send(self.file_transfer_start_encode(transfer_uid, dest_path, direction, file_size, flags))
def file_transfer_dir_list_encode(self, transfer_uid, dir_path, flags):
    '''
    Build (but do not transmit) a FILE_TRANSFER_DIR_LIST message: get
    directory listing.

    transfer_uid : Unique transfer ID (uint64_t)
    dir_path     : Directory path to list (char)
    flags        : RESERVED (uint8_t)
    '''
    msg = MAVLink_file_transfer_dir_list_message(transfer_uid, dir_path, flags)
    # Serialize using this connection (self) as packing context.
    msg.pack(self)
    return msg
def file_transfer_dir_list_send(self, transfer_uid, dir_path, flags):
    '''
    Encode and transmit a FILE_TRANSFER_DIR_LIST message (get directory
    listing) over this MAVLink connection.

    transfer_uid : Unique transfer ID (uint64_t)
    dir_path     : Directory path to list (char)
    flags        : RESERVED (uint8_t)
    '''
    return self.send(self.file_transfer_dir_list_encode(transfer_uid, dir_path, flags))
def file_transfer_res_encode(self, transfer_uid, result):
    '''
    Build (but do not transmit) a FILE_TRANSFER_RES message: file
    transfer result.

    transfer_uid : Unique transfer ID (uint64_t)
    result       : 0: OK, 1: not permitted, 2: bad path / file name, 3: no space left on device (uint8_t)
    '''
    msg = MAVLink_file_transfer_res_message(transfer_uid, result)
    # Serialize using this connection (self) as packing context.
    msg.pack(self)
    return msg
def file_transfer_res_send(self, transfer_uid, result):
    '''
    Encode and transmit a FILE_TRANSFER_RES message (file transfer
    result) over this MAVLink connection.

    transfer_uid : Unique transfer ID (uint64_t)
    result       : 0: OK, 1: not permitted, 2: bad path / file name, 3: no space left on device (uint8_t)
    '''
    return self.send(self.file_transfer_res_encode(transfer_uid, result))
def battery_status_encode(self, accu_id, voltage_cell_1, voltage_cell_2, voltage_cell_3, voltage_cell_4, voltage_cell_5, voltage_cell_6, current_battery, battery_remaining):
    '''
    Build (but do not transmit) a BATTERY_STATUS message: transmit
    battery information for an accu pack.

    accu_id           : Accupack ID (uint8_t)
    voltage_cell_1    : Battery voltage of cell 1, in millivolts (1 = 1 millivolt) (uint16_t)
    voltage_cell_2    : Battery voltage of cell 2, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
    voltage_cell_3    : Battery voltage of cell 3, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
    voltage_cell_4    : Battery voltage of cell 4, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
    voltage_cell_5    : Battery voltage of cell 5, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
    voltage_cell_6    : Battery voltage of cell 6, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
    current_battery   : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t)
    battery_remaining : Remaining battery energy: (0%: 0, 100%: 100), -1: autopilot does not estimate the remaining battery (int8_t)
    '''
    msg = MAVLink_battery_status_message(accu_id, voltage_cell_1, voltage_cell_2, voltage_cell_3, voltage_cell_4, voltage_cell_5, voltage_cell_6, current_battery, battery_remaining)
    # Serialize using this connection (self) as packing context.
    msg.pack(self)
    return msg
def battery_status_send(self, accu_id, voltage_cell_1, voltage_cell_2, voltage_cell_3, voltage_cell_4, voltage_cell_5, voltage_cell_6, current_battery, battery_remaining):
    '''
    Encode and transmit a BATTERY_STATUS message (battery information
    for an accu pack) over this MAVLink connection.

    accu_id           : Accupack ID (uint8_t)
    voltage_cell_1    : Battery voltage of cell 1, in millivolts (1 = 1 millivolt) (uint16_t)
    voltage_cell_2    : Battery voltage of cell 2, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
    voltage_cell_3    : Battery voltage of cell 3, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
    voltage_cell_4    : Battery voltage of cell 4, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
    voltage_cell_5    : Battery voltage of cell 5, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
    voltage_cell_6    : Battery voltage of cell 6, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
    current_battery   : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t)
    battery_remaining : Remaining battery energy: (0%: 0, 100%: 100), -1: autopilot does not estimate the remaining battery (int8_t)
    '''
    return self.send(self.battery_status_encode(accu_id, voltage_cell_1, voltage_cell_2, voltage_cell_3, voltage_cell_4, voltage_cell_5, voltage_cell_6, current_battery, battery_remaining))
def setpoint_8dof_encode(self, target_system, val1, val2, val3, val4, val5, val6, val7, val8):
    '''
    Build (but do not transmit) a SETPOINT_8DOF message: set the 8 DOF
    setpoint for a controller.

    target_system : System ID (uint8_t)
    val1          : Value 1 (float)
    val2          : Value 2 (float)
    val3          : Value 3 (float)
    val4          : Value 4 (float)
    val5          : Value 5 (float)
    val6          : Value 6 (float)
    val7          : Value 7 (float)
    val8          : Value 8 (float)
    '''
    msg = MAVLink_setpoint_8dof_message(target_system, val1, val2, val3, val4, val5, val6, val7, val8)
    # Serialize using this connection (self) as packing context.
    msg.pack(self)
    return msg
def setpoint_8dof_send(self, target_system, val1, val2, val3, val4, val5, val6, val7, val8):
    '''
    Encode and transmit a SETPOINT_8DOF message (8 DOF setpoint for a
    controller) over this MAVLink connection.

    target_system : System ID (uint8_t)
    val1          : Value 1 (float)
    val2          : Value 2 (float)
    val3          : Value 3 (float)
    val4          : Value 4 (float)
    val5          : Value 5 (float)
    val6          : Value 6 (float)
    val7          : Value 7 (float)
    val8          : Value 8 (float)
    '''
    return self.send(self.setpoint_8dof_encode(target_system, val1, val2, val3, val4, val5, val6, val7, val8))
def setpoint_6dof_encode(self, target_system, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z):
    '''
    Build (but do not transmit) a SETPOINT_6DOF message: set the 6 DOF
    setpoint for an attitude and position controller.

    target_system : System ID (uint8_t)
    trans_x       : Translational Component in x (float)
    trans_y       : Translational Component in y (float)
    trans_z       : Translational Component in z (float)
    rot_x         : Rotational Component in x (float)
    rot_y         : Rotational Component in y (float)
    rot_z         : Rotational Component in z (float)
    '''
    msg = MAVLink_setpoint_6dof_message(target_system, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z)
    # Serialize using this connection (self) as packing context.
    msg.pack(self)
    return msg
def setpoint_6dof_send(self, target_system, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z):
    '''
    Encode and transmit a SETPOINT_6DOF message (6 DOF setpoint for an
    attitude and position controller) over this MAVLink connection.

    target_system : System ID (uint8_t)
    trans_x       : Translational Component in x (float)
    trans_y       : Translational Component in y (float)
    trans_z       : Translational Component in z (float)
    rot_x         : Rotational Component in x (float)
    rot_y         : Rotational Component in y (float)
    rot_z         : Rotational Component in z (float)
    '''
    return self.send(self.setpoint_6dof_encode(target_system, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z))
def memory_vect_encode(self, address, ver, type, value):
    '''
    Build (but do not transmit) a MEMORY_VECT message: send raw
    controller memory. The use of this message is discouraged for
    normal packets, but a quite efficient way for testing new messages
    and getting experimental debug output.

    address : Starting address of the debug variables (uint16_t)
    ver     : Version code of the type variable. 0=unknown, type ignored and assumed int16_t. 1=as below (uint8_t)
    type    : Type code of the memory variables. for ver = 1: 0=16 x int16_t, 1=16 x uint16_t, 2=16 x Q15, 3=16 x 1Q14 (uint8_t)
    value   : Memory contents at specified address (int8_t)
    '''
    # NOTE: the parameter 'type' shadows the builtin, but the name comes
    # from the generated MAVLink API and is kept for keyword-arg callers.
    msg = MAVLink_memory_vect_message(address, ver, type, value)
    # Serialize using this connection (self) as packing context.
    msg.pack(self)
    return msg
def memory_vect_send(self, address, ver, type, value):
    '''
    Encode and transmit a MEMORY_VECT message (raw controller memory)
    over this MAVLink connection. The use of this message is
    discouraged for normal packets, but a quite efficient way for
    testing new messages and getting experimental debug output.

    address : Starting address of the debug variables (uint16_t)
    ver     : Version code of the type variable. 0=unknown, type ignored and assumed int16_t. 1=as below (uint8_t)
    type    : Type code of the memory variables. for ver = 1: 0=16 x int16_t, 1=16 x uint16_t, 2=16 x Q15, 3=16 x 1Q14 (uint8_t)
    value   : Memory contents at specified address (int8_t)
    '''
    # NOTE: 'type' shadows the builtin; the name is part of the generated API.
    return self.send(self.memory_vect_encode(address, ver, type, value))
def debug_vect_encode(self, name, time_usec, x, y, z):
    '''
    Build (but do not transmit) a DEBUG_VECT message.

    name      : Name (char)
    time_usec : Timestamp (uint64_t)
    x         : x (float)
    y         : y (float)
    z         : z (float)
    '''
    msg = MAVLink_debug_vect_message(name, time_usec, x, y, z)
    # Serialize using this connection (self) as packing context.
    msg.pack(self)
    return msg
def debug_vect_send(self, name, time_usec, x, y, z):
    '''
    Encode and transmit a DEBUG_VECT message over this MAVLink
    connection.

    name      : Name (char)
    time_usec : Timestamp (uint64_t)
    x         : x (float)
    y         : y (float)
    z         : z (float)
    '''
    return self.send(self.debug_vect_encode(name, time_usec, x, y, z))
def named_value_float_encode(self, time_boot_ms, name, value):
    '''
    Build (but do not transmit) a NAMED_VALUE_FLOAT message: send a
    key-value pair as float. The use of this message is discouraged for
    normal packets, but a quite efficient way for testing new messages
    and getting experimental debug output.

    time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
    name         : Name of the debug variable (char)
    value        : Floating point value (float)
    '''
    msg = MAVLink_named_value_float_message(time_boot_ms, name, value)
    # Serialize using this connection (self) as packing context.
    msg.pack(self)
    return msg
def named_value_float_send(self, time_boot_ms, name, value):
    '''
    Encode and transmit a NAMED_VALUE_FLOAT message (key-value pair as
    float) over this MAVLink connection. The use of this message is
    discouraged for normal packets, but a quite efficient way for
    testing new messages and getting experimental debug output.

    time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
    name         : Name of the debug variable (char)
    value        : Floating point value (float)
    '''
    return self.send(self.named_value_float_encode(time_boot_ms, name, value))
def named_value_int_encode(self, time_boot_ms, name, value):
    '''
    Build (but do not transmit) a NAMED_VALUE_INT message: send a
    key-value pair as integer. The use of this message is discouraged
    for normal packets, but a quite efficient way for testing new
    messages and getting experimental debug output.

    time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
    name         : Name of the debug variable (char)
    value        : Signed integer value (int32_t)
    '''
    msg = MAVLink_named_value_int_message(time_boot_ms, name, value)
    # Serialize using this connection (self) as packing context.
    msg.pack(self)
    return msg
def named_value_int_send(self, time_boot_ms, name, value):
    '''
    Encode and transmit a NAMED_VALUE_INT message (key-value pair as
    integer) over this MAVLink connection. The use of this message is
    discouraged for normal packets, but a quite efficient way for
    testing new messages and getting experimental debug output.

    time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
    name         : Name of the debug variable (char)
    value        : Signed integer value (int32_t)
    '''
    return self.send(self.named_value_int_encode(time_boot_ms, name, value))
def statustext_encode(self, severity, text):
    '''
    Build (but do not transmit) a STATUSTEXT message. These messages
    are printed in yellow in the COMM console of QGroundControl.
    WARNING: They consume quite some bandwidth, so use only for
    important status and error messages. If implemented wisely, these
    messages are buffered on the MCU and sent only at a limited rate
    (e.g. 10 Hz).

    severity : Severity of status. Relies on the definitions within RFC-5424. See enum MAV_SEVERITY. (uint8_t)
    text     : Status text message, without null termination character (char)
    '''
    msg = MAVLink_statustext_message(severity, text)
    # Serialize using this connection (self) as packing context.
    msg.pack(self)
    return msg
def statustext_send(self, severity, text):
    '''
    Encode and transmit a STATUSTEXT message over this MAVLink
    connection. These messages are printed in yellow in the COMM
    console of QGroundControl. WARNING: They consume quite some
    bandwidth, so use only for important status and error messages. If
    implemented wisely, these messages are buffered on the MCU and sent
    only at a limited rate (e.g. 10 Hz).

    severity : Severity of status. Relies on the definitions within RFC-5424. See enum MAV_SEVERITY. (uint8_t)
    text     : Status text message, without null termination character (char)
    '''
    return self.send(self.statustext_encode(severity, text))
def debug_encode(self, time_boot_ms, ind, value):
    '''
    Build (but do not transmit) a DEBUG message: send a debug value.
    The index is used to discriminate between values. These values show
    up in the plot of QGroundControl as DEBUG N.

    time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
    ind          : index of debug variable (uint8_t)
    value        : DEBUG value (float)
    '''
    msg = MAVLink_debug_message(time_boot_ms, ind, value)
    # Serialize using this connection (self) as packing context.
    msg.pack(self)
    return msg
def debug_send(self, time_boot_ms, ind, value):
    '''
    Encode and transmit a DEBUG message (a debug value) over this
    MAVLink connection. The index is used to discriminate between
    values. These values show up in the plot of QGroundControl as
    DEBUG N.

    time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
    ind          : index of debug variable (uint8_t)
    value        : DEBUG value (float)
    '''
    return self.send(self.debug_encode(time_boot_ms, ind, value))
| {
"content_hash": "93b6f2842bfbb04c3fb6d3d8531b929b",
"timestamp": "",
"source": "github",
"line_count": 5268,
"max_line_length": 579,
"avg_line_length": 59.31435079726651,
"alnum_prop": 0.5540503347542788,
"repo_name": "Yndal/ArduPilot-SensorPlatform",
"id": "bbad57b7d44ba31e051097ea3b7b4fb665a2a046",
"size": "312468",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "PX4Firmware/Tools/mavlink_px4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "12462"
},
{
"name": "Assembly",
"bytes": "799628"
},
{
"name": "Batchfile",
"bytes": "68199"
},
{
"name": "C",
"bytes": "55034159"
},
{
"name": "C#",
"bytes": "9917"
},
{
"name": "C++",
"bytes": "13663242"
},
{
"name": "CMake",
"bytes": "13681"
},
{
"name": "CSS",
"bytes": "6280"
},
{
"name": "EmberScript",
"bytes": "19928"
},
{
"name": "GDB",
"bytes": "744"
},
{
"name": "Groff",
"bytes": "43610"
},
{
"name": "HTML",
"bytes": "9849"
},
{
"name": "Io",
"bytes": "286"
},
{
"name": "Java",
"bytes": "4394945"
},
{
"name": "Lex",
"bytes": "13878"
},
{
"name": "Lua",
"bytes": "87871"
},
{
"name": "M4",
"bytes": "15467"
},
{
"name": "Makefile",
"bytes": "8807880"
},
{
"name": "Matlab",
"bytes": "185473"
},
{
"name": "Objective-C",
"bytes": "24203"
},
{
"name": "OpenEdge ABL",
"bytes": "12712"
},
{
"name": "PHP",
"bytes": "484"
},
{
"name": "Pascal",
"bytes": "253102"
},
{
"name": "Perl",
"bytes": "17902"
},
{
"name": "Processing",
"bytes": "168008"
},
{
"name": "Python",
"bytes": "1785059"
},
{
"name": "Ruby",
"bytes": "7108"
},
{
"name": "Scilab",
"bytes": "1502"
},
{
"name": "Shell",
"bytes": "1276765"
},
{
"name": "Yacc",
"bytes": "30289"
}
],
"symlink_target": ""
} |
"""
Constraint module
Functions:
None
Classes:
Constraint: dict-like object to hold requirements as key:value pairs
Exceptions:
None
"""
from __future__ import print_function

import collections
import collections.abc
import copy

from ec2rlcore.logutil import LogUtil
class Constraint(dict):
    """
    Dict-like container holding parsed module metadata and command line
    arguments as requirement-name -> list-of-acceptable-values pairs.

    All values are normalized to lists by update()/__setitem__():
    scalars are wrapped, space-delimited strings are split, nested dicts
    are flattened into the Constraint itself.

    Methods:
        without_keys: return a Constraint with all keys except those specified
        with_keys: return a Constraint with only the specified keys
        update: merge the specified dict into the Constraint's existing
                key:value pairs
        __setitem__: add or update the key:value pair using the specified
                     values
        __contains__: return whether the Constraint contains the specified
                      object or value
    """

    def __init__(self, arg=None, **kwargs):
        """
        Perform initial configuration of the object.

        Parameters:
            arg: a dict of key:value pairs
            kwargs: optional key:value pairs where the kwarg == key and
                    arg value == value
        """
        self.logger = LogUtil.get_root_logger()
        self.logger.debug("constraint.Constraint.__init__({}, {})".format(arg, kwargs))
        # The dict is empty at this point, so no seed data is passed
        # (the previous code passed self, which was a no-op).
        super(Constraint, self).__init__()
        if arg and isinstance(arg, dict):
            self.logger.debug("arg is dict")
            self.update(arg)
        if kwargs:
            self.update(kwargs)
        self.logger.debug("resulting constraint dict = {}".format(self))

    def without_keys(self, keylist):
        """Return a new Constraint with all keys except those in keylist."""
        self.logger.debug("constraint.Constraint.without_keys()")
        assert isinstance(keylist, (list, tuple, set))
        return Constraint({key: self[key] for key in self if key not in keylist})

    def with_keys(self, keylist):
        """Return a new Constraint with only the keys in keylist."""
        self.logger.debug("constraint.Constraint.with_keys()")
        assert isinstance(keylist, (list, tuple, set))
        return Constraint({key: self[key] for key in self if key in keylist})

    def update(self, other):
        """
        Recurse through the "other" dict, updating existing key:value
        pairs and adding missing new:value pairs. Values are normalized:
        None -> [], dicts are flattened, sets/tuples become lists,
        space-delimited strings are split, scalar strings are wrapped.

        Parameters:
            other (dict): the dict whose key:value pairs will be added
                          to the Constraint

        Returns:
            True (bool)

        Raises:
            TypeError: if other is not a dict (or dict subclass)
        """
        def merge_values(key):
            """
            Given a key, merge other's key's values with the
            constraint's key's values, skipping duplicates.

            Parameters:
                key (str): the key whose values should be merged
            """
            self.logger.debug("merge existing '{}'".format(key))
            for item in other[key]:
                # Only add the item if it is not a duplicate
                if item not in self[key]:
                    self[key].append(item)

        self.logger.debug("constraint.Constraint.update({})".format(other))
        # Verify "other" is a dict (isinstance also accepts Constraint)
        if not isinstance(other, dict):
            self.logger.debug("TypeError: expected dict")
            raise TypeError("{0!r} is not a dict or Constraint mapping".format(other))
        for okey in other.keys():
            # Recursive cases. Recurse another level to find key:value pairs.
            # Case: value is empty. Recurse with an empty list.
            if other[okey] is None:
                self.logger.debug("None case: recursing on '{}'".format(okey))
                self.update({okey: []})
            # Case: value is a dict -- flatten its pairs into this
            # Constraint. isinstance (rather than type ==) also handles
            # dict subclasses such as nested Constraint values, which the
            # previous exact-type check would have mis-merged as lists.
            elif isinstance(other[okey], dict):
                self.logger.debug("is dict case: recursing on '{}'".format(okey))
                self.update(other[okey])
            # Case: value is a set, tuple, or str. Recurse with a list
            # created from the value.
            elif isinstance(other[okey], (set, tuple)):
                self.logger.debug("isinstance case: recursing on '{}'".format(okey))
                self.update({okey: list(other[okey])})
            elif isinstance(other[okey], str):
                self.logger.debug("isinstance case: recursing on '{}'".format(okey))
                # If the string is a space-delimited sequence of values
                # then split the string and recurse on the list
                if " " in other[okey]:
                    self.update({okey: other[okey].split()})
                # If the okey value is an empty string then the
                # resultant value should be an empty list
                elif not other[okey]:
                    self.update({okey: []})
                else:
                    self.update({okey: [other[okey]]})
            # Base case: value is a list. Merge in the list or set the
            # list as the key's value.
            else:
                # Case: key exists -> merge in the values in the list
                if okey in self:
                    merge_values(okey)
                # Case: key doesn't exist -> deep-copy the list so later
                # mutation of the source does not alias into self
                else:
                    self.logger.debug("setting new '{}'".format(okey))
                    self[okey] = copy.deepcopy(other[okey])
        return True

    def __setitem__(self, key, val):
        """
        Update/set the Constraint key:value pair. Non-list values are
        normalized: strings are wrapped in a list, other iterables are
        converted to lists; non-iterable scalars are silently dropped
        (historical behavior, preserved).

        Parameters:
            key (str): key representing the constraint name (e.g. "distro")
            val: value representing the constraint value or values
                 (e.g. "alami")

        Returns:
            True (bool)
        """
        if not isinstance(val, list):
            if isinstance(val, str):
                self.__setitem__(key, [val])
            # collections.abc.Iterable: the bare collections.Iterable
            # alias was removed in Python 3.10.
            elif isinstance(val, collections.abc.Iterable):
                self.__setitem__(key, list(val))
        else:
            # Delete first so a re-set key moves to the end (fresh
            # insertion order), then store via the base class to avoid
            # re-entering this method.
            if key in self:
                del self[key]
            dict.__setitem__(self, key, val)
        return True

    def __contains__(self, other):
        """
        Return whether the Constraint contains a key, list of keys, or a
        dictionary (all members must be present for a collection).

        Parameters:
            other: the object to check whether is present in the Constraint

        Returns:
            rv (bool): whether the value is contained in the Constraint
        """
        rv = False

        def rebool(item_to_search):
            """Return False if any member is False, else the truthiness
            of item_to_search (an empty list is False)."""
            return False if False in item_to_search else bool(item_to_search)

        if isinstance(other, (list, tuple)):
            # All listed items must be contained
            rv = rebool([self.__contains__(item) for item in other])
        elif not isinstance(other, dict):
            # Plain key lookup
            rv = dict.__contains__(self, other)
        # Equivalent to "elif isinstance(other, dict):"
        else:
            for okey in other.keys():
                if isinstance(other[okey], (list, tuple)):
                    # Every value listed for okey must be present
                    rv = rebool([self.__contains__(dict([(okey, value)]))
                                 for value in other[okey]])
                elif okey in self:
                    rv = other[okey] in self[okey]
                else:
                    rv = False
        return rv
| {
"content_hash": "d345da90b6304a71504e259eb3ed7f19",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 117,
"avg_line_length": 38.6551724137931,
"alnum_prop": 0.5664585191793042,
"repo_name": "gregbdunn/aws-ec2rescue-linux",
"id": "2c0c5f7b826aef8ec9512957a38ab3977a12d05e",
"size": "8416",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "ec2rlcore/constraint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "701"
},
{
"name": "Makefile",
"bytes": "5044"
},
{
"name": "Python",
"bytes": "4595518"
},
{
"name": "Shell",
"bytes": "5229"
}
],
"symlink_target": ""
} |
from flask import Blueprint

# Blueprint collecting the v1.0 API endpoints; registered by the app factory.
api = Blueprint('api', __name__)

# The view modules are imported last (after `api` exists) so their route
# decorators can attach to the blueprint -- this ordering avoids a circular
# import between this package and its view modules.
from . import authentication, comment, decorators, errors, post, user
"content_hash": "e2770ef928cce59959d7280f3b057026",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 69,
"avg_line_length": 26.4,
"alnum_prop": 0.7424242424242424,
"repo_name": "Ivicel/flasky",
"id": "34c00c09954b13260611f514c18bd3636922fb49",
"size": "132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/api_1_0/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1134"
},
{
"name": "HTML",
"bytes": "17128"
},
{
"name": "JavaScript",
"bytes": "36"
},
{
"name": "Python",
"bytes": "48932"
}
],
"symlink_target": ""
} |
"""RevKit support for control function oracles."""
from projectq.ops import BasicGate
from ._utils import _exec
class ControlFunctionOracle:  # pylint: disable=too-few-public-methods
    """
    Synthesize a negation controlled by an arbitrary control function.

    This creates a circuit for a NOT gate which is controlled by an arbitrary
    Boolean control function. The control function is provided as integer
    representation of the function's truth table in binary notation. For
    example, for the majority-of-three function, which truth table 11101000,
    the value for function can be, e.g., ``0b11101000``, ``0xe8``, or ``232``.

    Example:
        This example creates a circuit that causes to invert qubit ``d``,
        the majority-of-three function evaluates to true for the control
        qubits ``a``, ``b``, and ``c``.

        .. code-block:: python

            ControlFunctionOracle(0x8E) | ([a, b, c], d)
    """
    # NOTE(review): the example above uses 0x8E while the description uses
    # 0xe8 (= 0b11101000) for majority-of-three; 0x8E is the bit-reversed
    # reading -- confirm which truth-table bit ordering RevKit expects.

    def __init__(self, function, **kwargs):
        """
        Initialize a control function oracle.

        Args:
            function (int): Function truth table. A non-int argument is
                compiled to a truth table via the optional dormouse library.

        Keyword Args:
            synth: A RevKit synthesis command which creates a reversible
                   circuit based on a truth table and requires no additional
                   ancillae (e.g., ``revkit.esopbs``). Can also be a nullary
                   lambda that calls several RevKit commands.
                   **Default:** ``revkit.esopbs``

        Raises:
            RuntimeError: if dormouse is needed but not installed.
            AttributeError: if the resulting truth-table value is negative.
        """
        if isinstance(function, int):
            self.function = function
        else:
            try:
                import dormouse  # pylint: disable=import-outside-toplevel

                self.function = dormouse.to_truth_table(function)
            except ImportError as err:  # pragma: no cover
                raise RuntimeError(
                    "The dormouse library needs to be installed in order to "
                    "automatically compile Python code into functions. Try "
                    "to install dormouse with 'pip install dormouse'."
                ) from err
        self.kwargs = kwargs

        self._check_function()

    def __or__(self, qubits):
        """
        Apply control function to qubits (and synthesizes circuit).

        Args:
            qubits (tuple<Qureg>): Qubits to which the control function is
                                   being applied. The first `n` qubits are for
                                   the controls, the last qubit is for the
                                   target qubit.

        Raises:
            RuntimeError: if the RevKit Python library is missing, or if the
                synthesized circuit's qubit count does not match ``qubits``.
            AttributeError: if the truth table needs more control qubits
                than were supplied.
        """
        try:
            import revkit  # pylint: disable=import-outside-toplevel
        except ImportError as err:  # pragma: no cover
            raise RuntimeError(
                "The RevKit Python library needs to be installed and in the "
                "PYTHONPATH in order to call this function"
            ) from err

        # pylint: disable=invalid-name
        # Flatten the (qureg-tuple) argument into a single flat qubit list:
        # controls first, target last.
        qs = []
        for item in BasicGate.make_tuple_of_qureg(qubits):
            qs += item if isinstance(item, list) else [item]

        # A truth table over n control qubits has 2**n bits, so its integer
        # value must be below 2**(2**n); function truth table cannot be
        # larger than the number of control qubits allows.
        if 2 ** (2 ** (len(qs) - 1)) <= self.function:
            raise AttributeError("Function truth table exceeds number of control qubits")

        # Render the truth table as a "0x..."-prefixed hex literal for
        # revkit.tt. NOTE(review): hex_length is 2**n // 4 but the format
        # width also covers the "0x" prefix, so wide tables are not
        # zero-padded to their full digit count -- confirm whether
        # revkit.tt requires full-width padding.
        hex_length = max(2 ** (len(qs) - 1) // 4, 1)
        revkit.tt(table=f"{self.function:#0{hex_length}x}")

        # Create a reversible circuit from the truth table (these revkit
        # calls operate on the library's implicit global session state).
        self.kwargs.get("synth", revkit.esopbs)()

        # Check whether circuit has correct signature
        if revkit.ps(mct=True, silent=True)['qubits'] != len(qs):
            raise RuntimeError("Generated circuit lines does not match provided qubits")

        # Convert the reversible circuit to ProjectQ code and execute it
        # against the flattened qubit list.
        _exec(revkit.to_projectq(mct=True), qs)

    def _check_function(self):
        """Check whether function is valid (non-negative)."""
        # function must be positive. We check in __or__ whether function is
        # too large
        if self.function < 0:
            raise AttributeError("Function must be a positive integer")
| {
"content_hash": "894767056d97fe2eada6ae5f6c6907e8",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 89,
"avg_line_length": 39.083333333333336,
"alnum_prop": 0.5929874437337124,
"repo_name": "ProjectQ-Framework/ProjectQ",
"id": "40a07fe9f87fe1a92f0361f51bd9287cc2944c5f",
"size": "4842",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "projectq/libs/revkit/_control_function.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "158833"
},
{
"name": "Python",
"bytes": "1483141"
}
],
"symlink_target": ""
} |
"""
Train script
"""
import csv
import classifier
def main():
    """Load the training CSV and fit the classifier on its rows.

    Reads ../data/train.csv as a list of dicts (one per row) and passes
    it to classifier.train().
    """
    print('Reading CSV...')
    # Context manager guarantees the handle is closed (the original left
    # the file open); newline='' is required by the csv module.
    with open('../data/train.csv', newline='') as csvfile:
        data = list(csv.DictReader(csvfile))

    print('Training...')
    classifier.train(data)


if __name__ == "__main__":
    main()
| {
"content_hash": "aaf86c8e78c5891ec6e128261e4e6851",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 51,
"avg_line_length": 16.105263157894736,
"alnum_prop": 0.5947712418300654,
"repo_name": "jramcast/ml_weather",
"id": "11921a464837dc1077b5c6dd2b906ab4dfb83f09",
"size": "329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example9/train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35819"
}
],
"symlink_target": ""
} |
import os
from filecmp import cmp
import numpy as np
from pandas.util.testing import assert_frame_equal
from moldynplot.dataset.HSQCDataset import HSQCDataset
from moldynplot.dataset.SequenceDataset import SequenceDataset
from moldynplot.dataset.TimeSeriesDataset import TimeSeriesDataset
################################## FUNCTIONS ##################################
def h5_cmp(file_1, file_2):
    """
    Compare two hdf5 files using the external ``h5diff`` tool.

    Args:
        file_1 (str): Path to first hdf5 file
        file_2 (str): Path to second hdf5 file

    Returns:
        bool: True if the files are identical, false otherwise
    """
    from subprocess import DEVNULL, PIPE, run

    # h5diff prints differences to stdout and nothing when the files match;
    # stderr is discarded. An argument list (shell=False) avoids shell
    # quoting issues with unusual file names.
    result = run(["h5diff", file_1, file_2], stdout=PIPE, stderr=DEVNULL)
    # result.stdout is bytes on Python 3; the original compared it to the
    # str "" which is always False, so identical files were reported as
    # different.
    return result.stdout.strip() == b""
#################################### TESTS ####################################
def test_hsqc():
    """Round-trip an HSQC dataset between NMRPipe, text, and hdf5 formats."""
    # Read NMRPipe
    pipe = HSQCDataset(infile="data/mocvnh3/hsqc.ft")

    # Read text; must match the NMRPipe-derived frame
    text = HSQCDataset(infile="data/mocvnh3/hsqc.dat")
    assert_frame_equal(pipe.hsqc_df, text.hsqc_df)

    # Read hdf5
    hdf5 = HSQCDataset(infile="data/mocvnh3/hsqc.h5")
    assert_frame_equal(pipe.hsqc_df, hdf5.hsqc_df)

    # Write text (assert truthiness directly; `== True` is redundant)
    pipe.write(dataframe=pipe.hsqc_df, outfile="hsqc.dat")
    assert cmp("hsqc.dat", "data/mocvnh3/hsqc.dat")

    # Write hdf5; remove stale output first so h5diff sees a fresh file
    if os.path.exists("hsqc.h5"):
        os.remove("hsqc.h5")
    pipe.write(dataframe=pipe.hsqc_df, outfile="hsqc.h5")
    assert h5_cmp("hsqc.h5", "data/mocvnh3/hsqc.h5")
def test_sequence():
    """Read, merge, and write sequence datasets in text and hdf5 formats."""
    # Read text
    text = SequenceDataset(infile="data/gb3/relax_s2.dat")
    # Read hdf5
    hdf5 = SequenceDataset(infile="data/gb3/relax_s2.h5")
    assert_frame_equal(text.sequence_df, hdf5.sequence_df)
    # Merge text
    merged_text = SequenceDataset(
        infiles=["data/gb3/relax.dat", "data/gb3/s2.dat"])
    assert_frame_equal(text.sequence_df, merged_text.sequence_df)
    # Merge hdf5
    merged_hdf5 = SequenceDataset(
        infiles=["data/gb3/relax.h5", "data/gb3/s2.h5"])
    assert_frame_equal(text.sequence_df, merged_hdf5.sequence_df)
    # Write text; cmp()/h5_cmp() return bool, so no "== True" needed
    text.write(dataframe=text.sequence_df, outfile="relax_s2.dat")
    assert cmp("relax_s2.dat", "data/gb3/relax_s2.dat")
    # Write hdf5
    if os.path.exists("relax_s2.h5"):
        os.remove("relax_s2.h5")
    text.write(dataframe=text.sequence_df, outfile="relax_s2.h5")
    assert h5_cmp("relax_s2.h5", "data/gb3/relax_s2.h5")
def test_rmsd():
    """Read and write overall RMSD time series in cpptraj/text/hdf5 formats."""
    # Read cpptraj
    cpptraj = TimeSeriesDataset(infile="data/p53/rmsd.cpptraj", dt=0.1,
                                toffset=-0.1)
    # Read text
    text = TimeSeriesDataset(infile="data/p53/rmsd.dat")
    assert_frame_equal(cpptraj.timeseries_df, text.timeseries_df)
    # Read hdf5
    hdf5 = TimeSeriesDataset(infile="data/p53/rmsd.h5")
    assert_frame_equal(cpptraj.timeseries_df, hdf5.timeseries_df)
    # Write text; cmp()/h5_cmp() return bool, so no "== True" needed
    cpptraj.write(dataframe=text.timeseries_df, outfile="rmsd.dat")
    assert cmp("rmsd.dat", "data/p53/rmsd.dat")
    # Write hdf5
    if os.path.exists("rmsd.h5"):
        os.remove("rmsd.h5")
    cpptraj.write(dataframe=text.timeseries_df, outfile="rmsd.h5")
    assert h5_cmp("rmsd.h5", "data/p53/rmsd.h5")
def test_radgyr():
    """Read and write radius-of-gyration time series across formats."""
    # Read cpptraj
    cpptraj = TimeSeriesDataset(infile="data/p53/radgyr.cpptraj", dt=0.1,
                                toffset=-0.1)
    # Read text
    text = TimeSeriesDataset(infile="data/p53/radgyr.dat")
    assert_frame_equal(cpptraj.timeseries_df, text.timeseries_df)
    # Read hdf5
    hdf5 = TimeSeriesDataset(infile="data/p53/radgyr.h5")
    assert_frame_equal(cpptraj.timeseries_df, hdf5.timeseries_df)
    # Write text; cmp()/h5_cmp() return bool, so no "== True" needed
    cpptraj.write(dataframe=text.timeseries_df, outfile="radgyr.dat")
    assert cmp("radgyr.dat", "data/p53/radgyr.dat")
    # Write hdf5
    if os.path.exists("radgyr.h5"):
        os.remove("radgyr.h5")
    cpptraj.write(dataframe=text.timeseries_df, outfile="radgyr.h5")
    assert h5_cmp("radgyr.h5", "data/p53/radgyr.h5")
def test_perresrmsd():
    """Read and write per-residue RMSD time series, including legacy hdf5."""
    # Read cpptraj
    cpptraj = TimeSeriesDataset(infile="data/p53/perresrmsd.cpptraj", dt=0.1,
                                toffset=-0.1, dtype=np.float32)
    # Read text
    text = TimeSeriesDataset(infile="data/p53/perresrmsd.dat",
                             dtype=np.float32)
    assert_frame_equal(cpptraj.timeseries_df, text.timeseries_df)
    # Read hdf5
    hdf5 = TimeSeriesDataset(infile="data/p53/perresrmsd.h5")
    assert_frame_equal(cpptraj.timeseries_df, hdf5.timeseries_df)
    # Read legacy hdf5
    legacy = TimeSeriesDataset(infile="data/p53/perresrmsd_legacy.h5", dt=0.1)
    assert_frame_equal(cpptraj.timeseries_df, legacy.timeseries_df)
    # Write text; cmp()/h5_cmp() return bool, so no "== True" needed
    cpptraj.write(dataframe=text.timeseries_df, outfile="perresrmsd.dat")
    assert cmp("perresrmsd.dat", "data/p53/perresrmsd.dat")
    # Write hdf5
    if os.path.exists("perresrmsd.h5"):
        os.remove("perresrmsd.h5")
    cpptraj.write(dataframe=text.timeseries_df, outfile="perresrmsd.h5")
    assert h5_cmp("perresrmsd.h5", "data/p53/perresrmsd.h5")
def test_dssp():
    """Read and write DSSP secondary-structure time series across formats."""
    # Read cpptraj
    cpptraj = TimeSeriesDataset(infile="data/p53/dssp.cpptraj", dt=0.1,
                                toffset=-0.1, dtype=np.uint8)
    # Read text
    text = TimeSeriesDataset(infile="data/p53/dssp.dat", dtype=np.uint8)
    assert_frame_equal(cpptraj.timeseries_df, text.timeseries_df)
    # Read hdf5
    hdf5 = TimeSeriesDataset(infile="data/p53/dssp.h5")
    assert_frame_equal(cpptraj.timeseries_df, hdf5.timeseries_df)
    # Read legacy hdf5
    legacy = TimeSeriesDataset(infile="data/p53/dssp_legacy.h5", dt=0.1)
    assert_frame_equal(cpptraj.timeseries_df, legacy.timeseries_df)
    # Write text; cmp()/h5_cmp() return bool, so no "== True" needed
    cpptraj.write(dataframe=text.timeseries_df, outfile="dssp.dat")
    assert cmp("dssp.dat", "data/p53/dssp.dat")
    # Write hdf5
    if os.path.exists("dssp.h5"):
        os.remove("dssp.h5")
    cpptraj.write(dataframe=text.timeseries_df, outfile="dssp.h5")
    assert h5_cmp("dssp.h5", "data/p53/dssp.h5")
if __name__ == "__main__":
    # Allow running the full test suite directly, without pytest
    test_sequence()
    test_rmsd()
    test_radgyr()
    test_perresrmsd()
    test_dssp()
    test_hsqc()
| {
"content_hash": "95c0d3965aecce89019180debe7de444",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 79,
"avg_line_length": 31.76,
"alnum_prop": 0.6511335012594458,
"repo_name": "KarlTDebiec/Moldynplot",
"id": "4763f20bfe1a59753da167ca38e3387d3fba24e9",
"size": "6689",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moldynplot/test/test_Dataset.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "170000"
}
],
"symlink_target": ""
} |
import pytest
from _pytest.config import ExitCode
def test_version_verbose(testdir, pytestconfig):
    """Doubled --version prints the import location and registered plugins."""
    testdir.monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
    result = testdir.runpytest("--version", "--version")
    assert result.ret == 0
    result.stderr.fnmatch_lines(
        ["*pytest*{}*imported from*".format(pytest.__version__)]
    )
    # The plugin listing only appears when setuptools-registered plugins exist
    if pytestconfig.pluginmanager.list_plugin_distinfo():
        result.stderr.fnmatch_lines(["*setuptools registered plugins:", "*at*"])
def test_version_less_verbose(testdir, pytestconfig):
    """A single --version prints only "pytest <version>" to stderr."""
    testdir.monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
    result = testdir.runpytest("--version")
    assert result.ret == 0
    result.stderr.fnmatch_lines(["pytest {}".format(pytest.__version__)])
def test_help(testdir):
    """--help exits 0 and shows option help, ini settings, and hint lines."""
    result = testdir.runpytest("--help")
    assert result.ret == 0
    # fnmatch template: leading/trailing whitespace per line is significant
    result.stdout.fnmatch_lines(
        """
          -m MARKEXPR           only run tests matching given mark expression.
                                For example: -m 'mark1 and not mark2'.
        reporting:
          --durations=N *
        *setup.cfg*
        *minversion*
        *to see*markers*pytest --markers*
        *to see*fixtures*pytest --fixtures*
        """
    )
def test_none_help_param_raises_exception(testdir):
    """Tests a None help param raises a TypeError.
    """
    testdir.makeconftest(
        """
        def pytest_addoption(parser):
            parser.addini("test_ini", None, default=True, type="bool")
        """
    )
    result = testdir.runpytest("--help")
    # The error surfaces on stderr while rendering the ini-option help
    result.stderr.fnmatch_lines(
        ["*TypeError: help argument cannot be None for test_ini*"]
    )
def test_empty_help_param(testdir):
    """Tests an empty help param is displayed correctly.
    """
    testdir.makeconftest(
        """
        def pytest_addoption(parser):
            parser.addini("test_ini", "", default=True, type="bool")
        """
    )
    result = testdir.runpytest("--help")
    assert result.ret == 0
    # The custom ini option must be listed between the built-in entries
    lines = [
        "  required_plugins (args):",
        "                        plugins that must be present for pytest to run*",
        "  test_ini (bool):*",
        "environment variables:",
    ]
    result.stdout.fnmatch_lines(lines, consecutive=True)
def test_hookvalidation_unknown(testdir):
    """A conftest defining an unknown pytest_* hook fails hook validation."""
    testdir.makeconftest(
        """
        def pytest_hello(xyz):
            pass
        """
    )
    result = testdir.runpytest()
    assert result.ret != 0
    result.stdout.fnmatch_lines(["*unknown hook*pytest_hello*"])
def test_hookvalidation_optional(testdir):
    """optionalhook=True suppresses the unknown-hook validation error."""
    testdir.makeconftest(
        """
        import pytest
        @pytest.hookimpl(optionalhook=True)
        def pytest_hello(xyz):
            pass
        """
    )
    result = testdir.runpytest()
    assert result.ret == ExitCode.NO_TESTS_COLLECTED
def test_traceconfig(testdir):
    """--traceconfig reports the pytest/py versions and active plugins."""
    result = testdir.runpytest("--traceconfig")
    result.stdout.fnmatch_lines(["*using*pytest*py*", "*active plugins*"])
def test_debug(testdir):
    """--debug writes hook-call tracing into pytestdebug.log."""
    result = testdir.runpytest_subprocess("--debug")
    assert result.ret == ExitCode.NO_TESTS_COLLECTED
    p = testdir.tmpdir.join("pytestdebug.log")
    assert "pytest_sessionstart" in p.read()
def test_PYTEST_DEBUG(testdir, monkeypatch):
    """PYTEST_DEBUG=1 in the environment traces plugin-manager activity."""
    monkeypatch.setenv("PYTEST_DEBUG", "1")
    result = testdir.runpytest_subprocess()
    assert result.ret == ExitCode.NO_TESTS_COLLECTED
    result.stderr.fnmatch_lines(
        ["*pytest_plugin_registered*", "*manager*PluginManager*"]
    )
| {
"content_hash": "5c68c905876e92f7e3e93cfb605f522e",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 82,
"avg_line_length": 29.705882352941178,
"alnum_prop": 0.6212164073550213,
"repo_name": "JoelMarcey/buck",
"id": "a33273a2c1daf4c5e6fa6e99f652eda69a388fe5",
"size": "3535",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "third-party/py/pytest/testing/test_helpconfig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "579"
},
{
"name": "Batchfile",
"bytes": "2093"
},
{
"name": "C",
"bytes": "255521"
},
{
"name": "C#",
"bytes": "237"
},
{
"name": "C++",
"bytes": "10992"
},
{
"name": "CSS",
"bytes": "54863"
},
{
"name": "D",
"bytes": "1017"
},
{
"name": "Go",
"bytes": "16819"
},
{
"name": "Groovy",
"bytes": "3362"
},
{
"name": "HTML",
"bytes": "6115"
},
{
"name": "Haskell",
"bytes": "895"
},
{
"name": "IDL",
"bytes": "385"
},
{
"name": "Java",
"bytes": "19430296"
},
{
"name": "JavaScript",
"bytes": "932672"
},
{
"name": "Kotlin",
"bytes": "2079"
},
{
"name": "Lex",
"bytes": "2731"
},
{
"name": "Makefile",
"bytes": "1816"
},
{
"name": "Matlab",
"bytes": "47"
},
{
"name": "OCaml",
"bytes": "4384"
},
{
"name": "Objective-C",
"bytes": "138150"
},
{
"name": "Objective-C++",
"bytes": "34"
},
{
"name": "PowerShell",
"bytes": "244"
},
{
"name": "Prolog",
"bytes": "858"
},
{
"name": "Python",
"bytes": "1786899"
},
{
"name": "Roff",
"bytes": "1109"
},
{
"name": "Rust",
"bytes": "3618"
},
{
"name": "Scala",
"bytes": "4906"
},
{
"name": "Shell",
"bytes": "49876"
},
{
"name": "Smalltalk",
"bytes": "3355"
},
{
"name": "Standard ML",
"bytes": "15"
},
{
"name": "Swift",
"bytes": "6897"
},
{
"name": "Thrift",
"bytes": "26256"
},
{
"name": "Yacc",
"bytes": "323"
}
],
"symlink_target": ""
} |
from kmip.core.enums import KeyFormatType as KeyFormatTypeEnum
from kmip.core.enums import Tags
from kmip.core.enums import QueryFunction as QueryFunctionEnum
from kmip.core.primitives import ByteString
from kmip.core.primitives import Enumeration
from kmip.core.primitives import Interval
from kmip.core.primitives import Struct
from kmip.core.primitives import TextString
from kmip.core.utils import BytearrayStream
class CertificateValue(ByteString):
    """
    The raw bytes of a DER-encoded X.509 public key certificate.

    The Certificate Managed Object uses this type to hold the certificate
    payload; see Section 2.2.1 of the KMIP 1.1 specification for details.
    """

    def __init__(self, value=b''):
        """
        Construct a CertificateValue byte string.

        Args:
            value (bytes): The raw certificate bytes to store. Optional,
                defaults to an empty byte string.
        """
        super(CertificateValue, self).__init__(value, Tags.CERTIFICATE_VALUE)
class Offset(Interval):
    """
    An integer measuring a positive change in time.

    Rekey and Recertify requests use this value to express the interval
    between the InitializationDate and the ActivationDate of the
    replacement item being created; see Sections 4.4, 4.5, and 4.8 of the
    KMIP 1.1 specification.
    """

    def __init__(self, value=None):
        """
        Construct an Offset object.

        Args:
            value (int): An integer representing a positive change in
                time. Optional, defaults to None.
        """
        super(Offset, self).__init__(value, Tags.OFFSET)
class QueryFunction(Enumeration):
    """
    An encodeable wrapper around the QueryFunction enumeration.

    Query requests use this value to select which pieces of information
    to retrieve from a KMIP server; see Sections 4.25 and 9.1.3.2.24 of
    the KMIP 1.1 specification.
    """
    ENUM_TYPE = QueryFunctionEnum

    def __init__(self, value=None):
        """
        Construct a QueryFunction object.

        Args:
            value (QueryFunction enum): One of the QueryFunction
                enumeration values (e.g., QueryFunction.QUERY_OPERATIONS).
                Optional, defaults to None.
        """
        super(QueryFunction, self).__init__(value, Tags.QUERY_FUNCTION)
class VendorIdentification(TextString):
    """
    A text string that uniquely identifies a KMIP vendor.

    KMIP servers return this value when answering a Query request for
    server information; see Section 4.25 of the KMIP 1.1 specification.
    """

    def __init__(self, value=None):
        """
        Construct a VendorIdentification object.

        Args:
            value (str): A string describing a KMIP vendor. Optional,
                defaults to None.
        """
        super(VendorIdentification, self).__init__(
            value, Tags.VENDOR_IDENTIFICATION)
class ServerInformation(Struct):
    """
    A structure containing vendor-specific fields and/or substructures.
    Returned by KMIP servers upon receipt of a Query request for server
    information. See Section 4.25 of the KMIP 1.1 specification for more
    information.
    Note:
    There are no example structures nor data encodings in the KMIP
    documentation of this object. Therefore this class handles encoding and
    decoding its data in a generic way, using a BytearrayStream for primary
    storage. The intent is for vendor-specific subclasses to decide how to
    decode this data from the stream attribute. Likewise, these subclasses
    must decide how to encode their data into the stream attribute. There
    are no arguments to the constructor and therefore no means by which to
    validate the object's contents.
    """
    def __init__(self):
        """
        Construct a ServerInformation object.
        """
        super(ServerInformation, self).__init__(Tags.SERVER_INFORMATION)
        # Raw, uninterpreted vendor payload; subclasses decide its meaning
        self.data = BytearrayStream()
        self.validate()
    def read(self, istream):
        """
        Read the data encoding the ServerInformation object and decode it into
        its constituent parts.
        Args:
            istream (Stream): A data stream containing encoded object data,
                supporting a read method; usually a BytearrayStream object.
        """
        super(ServerInformation, self).read(istream)
        # Slice exactly self.length bytes (set by the Struct header read)
        # off the input, then capture them verbatim as the opaque payload
        tstream = BytearrayStream(istream.read(self.length))
        self.data = BytearrayStream(tstream.read())
        self.is_oversized(tstream)
        self.validate()
    def write(self, ostream):
        """
        Write the data encoding the ServerInformation object to a stream.
        Args:
            ostream (Stream): A data stream in which to encode object data,
                supporting a write method; usually a BytearrayStream object.
        """
        # Stage the payload first so self.length is known before the
        # Struct header is written
        tstream = BytearrayStream()
        tstream.write(self.data.buffer)
        self.length = tstream.length()
        super(ServerInformation, self).write(ostream)
        ostream.write(tstream.buffer)
    def validate(self):
        """
        Error check the types of the different parts of the ServerInformation
        object.
        """
        self.__validate()
    def __validate(self):
        # NOTE (peter-hamilton): Intentional pass, no way to validate data.
        pass
    def __eq__(self, other):
        if isinstance(other, ServerInformation):
            # Cheap length check first, then the payload comparison
            if len(self.data) != len(other.data):
                return False
            elif self.data != other.data:
                return False
            else:
                return True
        else:
            return NotImplemented
    def __ne__(self, other):
        if isinstance(other, ServerInformation):
            return not (self == other)
        else:
            return NotImplemented
    def __repr__(self):
        return "ServerInformation()"
    def __str__(self):
        return str(self.data)
class KeyFormatType(Enumeration):
    """
    An encodeable wrapper around the KeyFormatType enumeration.

    Identifies the format of keys carried in KeyBlock and Digest objects,
    and may also select the format in which a key is returned from a Get
    operation; see Sections 2.1.3, 2.1.7, 3.17, 4.11, and 9.1.3.2.3 of
    the KMIP 1.1 specification.
    """
    ENUM_TYPE = KeyFormatTypeEnum

    def __init__(self, value=KeyFormatTypeEnum.RAW):
        """
        Construct a KeyFormatType object.

        Args:
            value (KeyFormatType): One of the KeyFormatType enumeration
                values (e.g., KeyFormatType.PKCS_1). Optional, defaults
                to KeyFormatType.RAW.
        """
        super(KeyFormatType, self).__init__(value, Tags.KEY_FORMAT_TYPE)
| {
"content_hash": "a86a80b25820ad7f4a879431bbf977af",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 78,
"avg_line_length": 32.093023255813954,
"alnum_prop": 0.6440579710144928,
"repo_name": "callidus/PyKMIP",
"id": "05b43554a188d45424a62a08b786d0671ad4e268",
"size": "7546",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kmip/core/misc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "945669"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
} |
from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import DataCodec
from hazelcast.protocol.builtin import CodecUtil
# hex: 0x014600
_REQUEST_MESSAGE_TYPE = 83456
# hex: 0x014601
_RESPONSE_MESSAGE_TYPE = 83457
# Byte offsets of the fixed-size request fields within the initial frame;
# each long field occupies LONG_SIZE_IN_BYTES after the request header
_REQUEST_THREAD_ID_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_TTL_OFFSET = _REQUEST_THREAD_ID_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_MAX_IDLE_OFFSET = _REQUEST_TTL_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_MAX_IDLE_OFFSET + LONG_SIZE_IN_BYTES
def encode_request(name, key, value, thread_id, ttl, max_idle):
    """Encode a map.putIfAbsentWithMaxIdle request frame.

    Auto-generated protocol codec: the field order and offsets must match
    the Hazelcast client protocol, so do not reorder these calls.
    """
    buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
    FixSizedTypesCodec.encode_long(buf, _REQUEST_THREAD_ID_OFFSET, thread_id)
    FixSizedTypesCodec.encode_long(buf, _REQUEST_TTL_OFFSET, ttl)
    FixSizedTypesCodec.encode_long(buf, _REQUEST_MAX_IDLE_OFFSET, max_idle)
    StringCodec.encode(buf, name)
    DataCodec.encode(buf, key)
    # value is the final variable-size frame (is_final=True)
    DataCodec.encode(buf, value, True)
    return OutboundMessage(buf, False)
def decode_response(msg):
    """Decode the response: a nullable Data value (the previous mapping)."""
    # Skip the initial (fixed-size) response frame
    msg.next_frame()
    return CodecUtil.decode_nullable(msg, DataCodec.decode)
| {
"content_hash": "841f73730dc0c8d2fca0a27a7691d255",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 105,
"avg_line_length": 41.6875,
"alnum_prop": 0.7758620689655172,
"repo_name": "hazelcast/hazelcast-python-client",
"id": "871e1885258edac2ff7a5a59c28959f686f08887",
"size": "1334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hazelcast/protocol/codec/map_put_if_absent_with_max_idle_codec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2300326"
},
{
"name": "Shell",
"bytes": "1900"
}
],
"symlink_target": ""
} |
"""
Run grep, ignoring irrelevant dirs.
Note on parameters:
* it is easier to control colors using command's option
* if switches need to be passed to grep, one can use `--`
"""
import argparse
import subprocess
import sys
import cmdutil
# Directories pruned from plain-grep searches (git grep honors .gitignore)
EXCLUDED_DIRS = [".git", ".idea"]
class Grep(cmdutil.Subcommand):
    """Subcommand that runs ``git grep`` (or plain ``grep -r``) with sane defaults."""

    def configure_parser(self, parser):
        """Register color/pager/grep options; unknown args pass through to grep."""
        parser.add_argument(
            "-c",
            "--color",
            "--colour",
            choices=["a", "always", "n", "never"],
            default=None,
            help="use ANSI colors in output",
        )
        parser.add_argument(
            "-g",
            "--grep",
            action="store_true",
            help="use grep directly (instead of git grep)",
        )
        parser.add_argument(
            "-p", "--pager", action="store_true", help="enable paging of results"
        )
        parser.unknown_args_name = "grep_args"

    def execute(self):
        """Build the grep command line, run it, and return its exit code."""
        arguments = self.arguments
        if arguments.verbose:
            print("grep args:", arguments.grep_args, flush=True)
        if arguments.grep:
            command = ["grep", "-r"]
        else:
            command = ["git"]
            if not arguments.pager:
                command.append("--no-pager")
            command.append("grep")
        if arguments.color:
            # elif: the choices are mutually exclusive, emit exactly one flag
            if arguments.color[:1] == "a":
                command.append("--color=always")
            elif arguments.color[:1] == "n":
                command.append("--color=never")
        else:
            command.append("--color=auto")
        command += arguments.grep_args
        if arguments.grep:
            # "excluded" instead of "dir", which shadows the builtin
            command += [f"--exclude-dir={excluded}" for excluded in EXCLUDED_DIRS]
        completed_process = subprocess.run(command)
        return completed_process.returncode
if __name__ == "__main__":
    # Entry point when invoked as a standalone command script
    Grep().run(sys.argv)
| {
"content_hash": "5136489bdbe4e561c2ac7c040b0223c3",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 81,
"avg_line_length": 28.49230769230769,
"alnum_prop": 0.5340172786177105,
"repo_name": "vadim-ex/subcommand",
"id": "97e62a2befc3d8178e7c1973c385e44e218281c5",
"size": "1875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmd-lib/cmd-grep.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23192"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.rename_doc import get_link_fields, dynamic_link_queries
from frappe.permissions import reset_perms
def execute():
	"""Patch entry point: fold the Comment and Feed doctypes into Communication."""
	frappe.reload_doctype("DocType")
	frappe.reload_doctype("Communication")
	reset_perms("Communication")
	migrate_comments()
	frappe.delete_doc("DocType", "Comment")
	# frappe.db.sql_ddl("drop table `tabComment`")
	migrate_feed()
	frappe.delete_doc("DocType", "Feed")
	# frappe.db.sql_ddl("drop table `tabFeed`")
	update_timeline_doc_for("Blogger")
def migrate_comments():
	"""Copy legacy `tabComment` rows into `tabCommunication`.

	Document comments become Communication rows of type 'Comment'; chat
	messages and assignment notices (comment_doctype 'Message' /
	'My Company') become 'Chat' / 'Notification' rows addressed to a User.
	"""
	# comments
	frappe.db.sql("""insert ignore into `tabCommunication` (
		subject,
		content,
		sender,
		sender_full_name,
		comment_type,
		communication_date,
		reference_doctype,
		reference_name,
		link_doctype,
		link_name,
		name,
		user,
		owner,
		creation,
		modified_by,
		modified,
		status,
		sent_or_received,
		communication_type,
		seen
	)
	select
		substring(comment, 1, 100) as subject,
		comment as content,
		comment_by as sender,
		comment_by_fullname as sender_full_name,
		comment_type,
		ifnull(timestamp(comment_date, comment_time), creation) as communication_date,
		comment_doctype as reference_doctype,
		comment_docname as reference_name,
		reference_doctype as link_doctype,
		reference_name as link_name,
		name,
		owner as user,
		owner,
		creation,
		modified_by,
		modified,
		'Linked' as status,
		'Sent' as sent_or_received,
		'Comment' as communication_type,
		1 as seen
	from `tabComment` where comment_doctype is not null and comment_doctype not in ('Message', 'My Company')""")
	# chat and assignment notifications
	frappe.db.sql("""insert ignore into `tabCommunication` (
		subject,
		content,
		sender,
		sender_full_name,
		comment_type,
		communication_date,
		reference_doctype,
		reference_name,
		link_doctype,
		link_name,
		name,
		user,
		owner,
		creation,
		modified_by,
		modified,
		status,
		sent_or_received,
		communication_type,
		seen
	)
	select
		case
			when parenttype='Assignment' then %(assignment)s
			else substring(comment, 1, 100)
		end
		as subject,
		comment as content,
		comment_by as sender,
		comment_by_fullname as sender_full_name,
		comment_type,
		ifnull(timestamp(comment_date, comment_time), creation) as communication_date,
		'User' as reference_doctype,
		comment_docname as reference_name,
		reference_doctype as link_doctype,
		reference_name as link_name,
		name,
		owner as user,
		owner,
		creation,
		modified_by,
		modified,
		'Linked' as status,
		'Sent' as sent_or_received,
		case
			when parenttype='Assignment' then 'Notification'
			else 'Chat'
		end
		as communication_type,
		1 as seen
	from `tabComment` where comment_doctype in ('Message', 'My Company')""", {"assignment": _("Assignment")})
def migrate_feed():
	"""Copy legacy `tabFeed` rows into `tabCommunication`.

	Deletion feeds become 'Deleted' comments per doctype; login and
	untyped feeds become 'Info' / 'Updated' comments.
	"""
	# migrate delete feed
	# NOTE(review): frappe.db.sql without as_dict yields row tuples, so
	# `doctype` below is a 1-tuple; confirm the driver coerces it as intended
	for doctype in frappe.db.sql("""select distinct doc_type from `tabFeed` where subject=%(deleted)s""", {"deleted": _("Deleted")}):
		frappe.db.sql("""insert ignore into `tabCommunication` (
			subject,
			sender,
			sender_full_name,
			comment_type,
			communication_date,
			reference_doctype,
			name,
			user,
			owner,
			creation,
			modified_by,
			modified,
			status,
			sent_or_received,
			communication_type,
			seen
		)
		select
			concat_ws(" ", %(_doctype)s, doc_name) as subject,
			owner as sender,
			full_name as sender_full_name,
			'Deleted' as comment_type,
			creation as communication_date,
			doc_type as reference_doctype,
			name,
			owner as user,
			owner,
			creation,
			modified_by,
			modified,
			'Linked' as status,
			'Sent' as sent_or_received,
			'Comment' as communication_type,
			1 as seen
		from `tabFeed` where subject=%(deleted)s and doc_type=%(doctype)s""", {
			"deleted": _("Deleted"),
			"doctype": doctype,
			"_doctype": _(doctype)
		})
	# migrate feed type login or empty
	frappe.db.sql("""insert ignore into `tabCommunication` (
		subject,
		sender,
		sender_full_name,
		comment_type,
		communication_date,
		reference_doctype,
		reference_name,
		name,
		user,
		owner,
		creation,
		modified_by,
		modified,
		status,
		sent_or_received,
		communication_type,
		seen
	)
	select
		subject,
		owner as sender,
		full_name as sender_full_name,
		case
			when feed_type='Login' then 'Info'
			else 'Updated'
		end as comment_type,
		creation as communication_date,
		doc_type as reference_doctype,
		doc_name as reference_name,
		name,
		owner as user,
		owner,
		creation,
		modified_by,
		modified,
		'Linked' as status,
		'Sent' as sent_or_received,
		'Comment' as communication_type,
		1 as seen
	from `tabFeed` where (feed_type in ('Login', '') or feed_type is null)""")
def update_timeline_doc_for(timeline_doctype):
	"""NOTE: This method may be used by other apps for patching. It also has COMMIT after each update.

	Fills timeline_doctype/timeline_name on Communications whose reference
	documents link to *timeline_doctype*, via static and dynamic links.
	"""
	# find linked doctypes
	# link fields
	update_for_linked_docs(timeline_doctype)
	# dynamic link fields
	update_for_dynamically_linked_docs(timeline_doctype)
def update_for_linked_docs(timeline_doctype):
	"""Stamp timeline fields on Communications for doctypes with a Link field to *timeline_doctype*."""
	for df in get_link_fields(timeline_doctype):
		if df.issingle:
			# single doctypes have no table of rows to walk
			continue
		reference_doctype = df.parent
		if not is_valid_timeline_doctype(reference_doctype, timeline_doctype):
			continue
		for doc in frappe.get_all(reference_doctype, fields=["name", df.fieldname]):
			timeline_name = doc.get(df.fieldname)
			update_communication(timeline_doctype, timeline_name, reference_doctype, doc.name)
def update_for_dynamically_linked_docs(timeline_doctype):
	"""Stamp timeline fields on Communications for doctypes whose Dynamic Link fields point at *timeline_doctype*."""
	# collect every Dynamic Link field definition in the system
	dynamic_link_fields = []
	for query in dynamic_link_queries:
		for df in frappe.db.sql(query, as_dict=True):
			dynamic_link_fields.append(df)
	for df in dynamic_link_fields:
		reference_doctype = df.parent
		if not is_valid_timeline_doctype(reference_doctype, timeline_doctype):
			continue
		try:
			docs = frappe.get_all(reference_doctype, fields=["name", df.fieldname],
				filters={ df.options: timeline_doctype })
		# "except X as e" replaces the Python-2-only "except X, e" and
		# works on Python 2.6+ as well as Python 3
		except frappe.SQLError as e:
			if e.args and e.args[0]==1146:
				# single doctype: no table exists, nothing to migrate
				continue
			else:
				raise
		for doc in docs:
			timeline_name = doc.get(df.fieldname)
			update_communication(timeline_doctype, timeline_name, reference_doctype, doc.name)
def update_communication(timeline_doctype, timeline_name, reference_doctype, reference_name):
	"""Stamp empty timeline fields on Communications referencing the given document."""
	if not timeline_name:
		return
	# only rows whose timeline fields are still unset are touched
	frappe.db.sql("""update `tabCommunication` set timeline_doctype=%(timeline_doctype)s, timeline_name=%(timeline_name)s
		where (reference_doctype=%(reference_doctype)s and reference_name=%(reference_name)s)
		and (timeline_doctype is null or timeline_doctype='')
		and (timeline_name is null or timeline_name='')""", {
			"timeline_doctype": timeline_doctype,
			"timeline_name": timeline_name,
			"reference_doctype": reference_doctype,
			"reference_name": reference_name
		})
	# commit per update so a long-running patch makes incremental progress
	frappe.db.commit()
def is_valid_timeline_doctype(reference_doctype, timeline_doctype):
	"""Return True if *reference_doctype* has a timeline field that links to *timeline_doctype*."""
	# for reloading timeline_field
	frappe.reload_doctype(reference_doctype)
	# make sure the timeline field's doctype is same as timeline doctype
	meta = frappe.get_meta(reference_doctype)
	if not meta.timeline_field:
		return False
	# return the comparison directly instead of an if/return True/False ladder
	return meta.get_link_doctype(meta.timeline_field) == timeline_doctype
| {
"content_hash": "17b9b84a1cf3ec3128c8e2a0264e49c0",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 130,
"avg_line_length": 24.695652173913043,
"alnum_prop": 0.6920368364030336,
"repo_name": "vCentre/vFRP-6233",
"id": "09eeaa63fc8a87f40a4f7b60139cab9a73970201",
"size": "7384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frappe/patches/v6_19/comment_feed_communication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "285476"
},
{
"name": "HTML",
"bytes": "162157"
},
{
"name": "JavaScript",
"bytes": "1098717"
},
{
"name": "Python",
"bytes": "1275798"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
import numpy as np
from numpy.testing import assert_allclose
from .... import units
from ....tests.helper import pytest, assert_quantity_allclose
from .. import LombScargle
# All registered periodogram implementations, plus useful subsets of them
ALL_METHODS = LombScargle.available_methods
ALL_METHODS_NO_AUTO = [method for method in ALL_METHODS if method != 'auto']
FAST_METHODS = [method for method in ALL_METHODS if 'fast' in method]
NTERMS_METHODS = [method for method in ALL_METHODS if 'chi2' in method]
# Normalization modes accepted by LombScargle.power
NORMALIZATIONS = ['standard', 'psd', 'log', 'model']
@pytest.fixture
def data(N=100, period=1, theta=(10, 2, 3), dy=1, rseed=0):
    """Generate some data for testing"""
    # theta is a tuple to avoid the shared-mutable-default pitfall; it is
    # only indexed, never mutated, so behavior is unchanged
    rng = np.random.RandomState(rseed)
    t = 20 * period * rng.rand(N)
    omega = 2 * np.pi / period
    y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
    dy = dy * (0.5 + rng.rand(N))
    y += dy * rng.randn(N)
    return t, y, dy
@pytest.mark.parametrize('minimum_frequency', [None, 1.0])
@pytest.mark.parametrize('maximum_frequency', [None, 5.0])
@pytest.mark.parametrize('nyquist_factor', [1, 10])
@pytest.mark.parametrize('samples_per_peak', [1, 5])
def test_autofrequency(data, minimum_frequency, maximum_frequency,
                       nyquist_factor, samples_per_peak):
    """Check the spacing and limits of the autofrequency grid."""
    t, y, dy = data
    baseline = t.max() - t.min()
    freq = LombScargle(t, y, dy).autofrequency(samples_per_peak,
                                               nyquist_factor,
                                               minimum_frequency,
                                               maximum_frequency)
    df = freq[1] - freq[0]
    # Check sample spacing
    assert_allclose(df, 1. / baseline / samples_per_peak)
    # Check minimum frequency
    if minimum_frequency is None:
        assert_allclose(freq[0], 0.5 * df)
    else:
        assert_allclose(freq[0], minimum_frequency)
    if maximum_frequency is None:
        # default upper limit: nyquist_factor times the average Nyquist rate
        avg_nyquist = 0.5 * len(t) / baseline
        assert_allclose(freq[-1], avg_nyquist * nyquist_factor, atol=0.5*df)
    else:
        assert_allclose(freq[-1], maximum_frequency, atol=0.5*df)
@pytest.mark.parametrize('method', ALL_METHODS_NO_AUTO)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('with_units', [True, False])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_all_methods(data, method, center_data, fit_mean,
                     with_errors, with_units, normalization):
    """Each concrete method should reproduce the default power spectrum."""
    if method == 'scipy' and (fit_mean or with_errors):
        # the scipy method supports neither a fitted mean nor errors
        return
    t, y, dy = data
    frequency = 0.8 + 0.01 * np.arange(40)
    if with_units:
        t = t * units.day
        y = y * units.mag
        dy = dy * units.mag
        frequency = frequency / t.unit
    if not with_errors:
        dy = None
    kwds = dict(normalization=normalization)
    ls = LombScargle(t, y, dy, center_data=center_data, fit_mean=fit_mean)
    P_expected = ls.power(frequency, **kwds)
    # don't use the fft approximation here; we'll test this elsewhere
    if method in FAST_METHODS:
        kwds['method_kwds'] = dict(use_fft=False)
    P_method = ls.power(frequency, method=method, **kwds)
    if with_units:
        if normalization == 'psd' and not with_errors:
            # unnormalized psd without errors carries the data's units squared
            assert P_method.unit == y.unit ** 2
        else:
            assert P_method.unit == units.dimensionless_unscaled
    else:
        assert not hasattr(P_method, 'unit')
    assert_quantity_allclose(P_expected, P_method)
@pytest.mark.parametrize('method', ALL_METHODS_NO_AUTO)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_integer_inputs(data, method, center_data, fit_mean, with_errors,
                        normalization):
    """Integer-typed inputs should give the same power as float inputs."""
    if method == 'scipy' and (fit_mean or with_errors):
        return
    t, y, dy = data
    # floor to whole numbers so the int casts are exact
    t = np.floor(100 * t)
    t_int = t.astype(int)
    y = np.floor(100 * y)
    y_int = y.astype(int)
    dy = np.floor(100 * dy)
    dy_int = dy.astype('int32')
    frequency = 1E-2 * (0.8 + 0.01 * np.arange(40))
    if not with_errors:
        dy = None
        dy_int = None
    kwds = dict(center_data=center_data,
                fit_mean=fit_mean)
    P_float = LombScargle(t, y, dy, **kwds).power(frequency,
                                                  method=method,
                                                  normalization=normalization)
    P_int = LombScargle(t_int, y_int, dy_int,
                        **kwds).power(frequency,
                                      method=method,
                                      normalization=normalization)
    assert_allclose(P_float, P_int)
@pytest.mark.parametrize('method', NTERMS_METHODS)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('nterms', [0, 2, 4])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_nterms_methods(method, center_data, fit_mean, with_errors,
                        nterms, normalization, data):
    """Multi-term (chi2) methods should match the default implementation."""
    t, y, dy = data
    frequency = 0.8 + 0.01 * np.arange(40)
    if not with_errors:
        dy = None
    ls = LombScargle(t, y, dy, center_data=center_data,
                     fit_mean=fit_mean, nterms=nterms)
    kwds = dict(normalization=normalization)
    if nterms == 0 and not fit_mean:
        # a model with zero terms and no bias is degenerate and must raise
        with pytest.raises(ValueError) as err:
            ls.power(frequency, method=method, **kwds)
        assert 'nterms' in str(err.value) and 'bias' in str(err.value)
    else:
        P_expected = ls.power(frequency, **kwds)
        # don't use fast fft approximations here
        if 'fast' in method:
            kwds['method_kwds'] = dict(use_fft=False)
        P_method = ls.power(frequency, method=method, **kwds)
        assert_allclose(P_expected, P_method, rtol=1E-7, atol=1E-25)
@pytest.mark.parametrize('method', FAST_METHODS)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('nterms', [0, 1, 2])
def test_fast_approximations(method, center_data, fit_mean,
                             with_errors, nterms, data):
    """FFT-accelerated evaluation should approximate the exact computation."""
    t, y, dy = data
    frequency = 0.8 + 0.01 * np.arange(40)
    if not with_errors:
        dy = None
    ls = LombScargle(t, y, dy, center_data=center_data,
                     fit_mean=fit_mean, nterms=nterms)
    # use only standard normalization because we compare via absolute tolerance
    kwds = dict(method=method, normalization='standard')
    if method == 'fast' and nterms != 1:
        # the single-term 'fast' method only supports nterms == 1
        with pytest.raises(ValueError) as err:
            ls.power(frequency, **kwds)
        assert 'nterms' in str(err.value)
    elif nterms == 0 and not fit_mean:
        # a model with zero terms and no bias is degenerate and must raise
        with pytest.raises(ValueError) as err:
            ls.power(frequency, **kwds)
        assert 'nterms' in str(err.value) and 'bias' in str(err.value)
    else:
        P_fast = ls.power(frequency, **kwds)
        kwds['method_kwds'] = dict(use_fft=False)
        P_slow = ls.power(frequency, **kwds)
        assert_allclose(P_fast, P_slow, atol=0.008)
@pytest.mark.parametrize('method', LombScargle.available_methods)
@pytest.mark.parametrize('shape', [(), (1,), (2,), (3,), (2, 3)])
def test_output_shapes(method, shape, data):
    """power() must preserve the shape of the input frequency array."""
    t, y, dy = data
    freq = np.asarray(np.zeros(shape))
    freq.flat = np.arange(1, freq.size + 1)  # fill with nonzero frequencies
    PLS = LombScargle(t, y, fit_mean=False).power(freq, method=method)
    assert PLS.shape == shape
@pytest.mark.parametrize('method', LombScargle.available_methods)
def test_errors_on_unit_mismatch(method, data):
    """Incompatible Quantity units on inputs must raise informative errors."""
    t, y, dy = data
    t = t * units.second
    y = y * units.mag
    frequency = np.linspace(0.5, 1.5, 10)
    # this should fail because frequency and 1/t units do not match
    with pytest.raises(ValueError) as err:
        LombScargle(t, y, fit_mean=False).power(frequency, method=method)
    assert str(err.value).startswith('Units of frequency not equivalent')
    # this should fail because dy and y units do not match
    with pytest.raises(ValueError) as err:
        LombScargle(t, y, dy, fit_mean=False).power(frequency / t.unit)
    assert str(err.value).startswith('Units of dy not equivalent')
# we don't test all normalizations here because they are tested above
# only test method='auto' because unit handling does not depend on method
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('normalization', ['standard', 'psd'])
@pytest.mark.parametrize('with_error', [True, False])
def test_unit_conversions(data, fit_mean, center_data,
                          normalization, with_error):
    """Results must be invariant under unit rescalings of t, y, and frequency.

    Fix: previously the parametrized ``fit_mean``, ``center_data`` and
    ``normalization`` arguments were never passed to LombScargle, so all
    parametrized cases exercised identical default behavior.
    """
    t, y, dy = data
    t_day = t * units.day
    t_hour = units.Quantity(t_day, 'hour')
    y_meter = y * units.meter
    y_millimeter = units.Quantity(y_meter, 'millimeter')
    # sanity check on inputs
    assert_quantity_allclose(t_day, t_hour)
    assert_quantity_allclose(y_meter, y_millimeter)
    if with_error:
        dy = dy * units.meter
    else:
        dy = None
    # actually thread the parametrized options through to the implementation
    kwds = dict(fit_mean=fit_mean, center_data=center_data)
    freq_day, P1 = LombScargle(t_day, y_meter, dy,
                               **kwds).autopower(normalization=normalization)
    freq_hour, P2 = LombScargle(t_hour, y_millimeter, dy,
                                **kwds).autopower(normalization=normalization)
    # Check units of frequency
    assert freq_day.unit == 1. / units.day
    assert freq_hour.unit == 1. / units.hour
    # Check that results match
    assert_quantity_allclose(freq_day, freq_hour)
    assert_quantity_allclose(P1, P2)
    # Check that switching frequency units doesn't change things
    P3 = LombScargle(t_day, y_meter, dy,
                     **kwds).power(freq_hour, normalization=normalization)
    P4 = LombScargle(t_hour, y_meter, dy,
                     **kwds).power(freq_day, normalization=normalization)
    assert_quantity_allclose(P3, P4)
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_units', [True, False])
@pytest.mark.parametrize('freq', [1.0, 2.0])
def test_model(fit_mean, with_units, freq):
    """model() of a pure sinusoid should reproduce the input exactly."""
    rand = np.random.RandomState(0)
    t = 10 * rand.rand(40)
    params = 10 * rand.rand(3)  # [offset, amplitude, phase]
    y = np.zeros_like(t)
    if fit_mean:
        y += params[0]
    y += params[1] * np.sin(2 * np.pi * freq * (t - params[2]))
    if with_units:
        t = t * units.day
        y = y * units.mag
        freq = freq / units.day
    ls = LombScargle(t, y, center_data=False, fit_mean=fit_mean)
    y_fit = ls.model(t, freq)
    assert_quantity_allclose(y_fit, y)
@pytest.mark.parametrize('t_unit', [units.second, units.day])
@pytest.mark.parametrize('frequency_unit', [units.Hz, 1. / units.second])
@pytest.mark.parametrize('y_unit', [units.mag, units.jansky])
def test_model_units_match(data, t_unit, frequency_unit, y_unit):
    """model() output must carry the same unit as the input y values."""
    t, y, dy = data
    t_fit = t[:5]
    frequency = 1.0
    t = t * t_unit
    t_fit = t_fit * t_unit
    y = y * y_unit
    dy = dy * y_unit
    frequency = frequency * frequency_unit
    ls = LombScargle(t, y, dy)
    y_fit = ls.model(t_fit, frequency)
    assert y_fit.unit == y_unit
def test_model_units_mismatch(data):
    """model() must reject unit-incompatible frequency, t_fit, or dy inputs."""
    t, y, dy = data
    frequency = 1.0
    t_fit = t[:5]
    t = t * units.second
    t_fit = t_fit * units.second
    y = y * units.mag
    frequency = 1.0 / t.unit
    # this should fail because frequency and 1/t units do not match
    with pytest.raises(ValueError) as err:
        LombScargle(t, y).model(t_fit, frequency=1.0)
    assert str(err.value).startswith('Units of frequency not equivalent')
    # this should fail because t and t_fit units do not match
    with pytest.raises(ValueError) as err:
        LombScargle(t, y).model([1, 2], frequency)
    assert str(err.value).startswith('Units of t not equivalent')
    # this should fail because dy and y units do not match
    with pytest.raises(ValueError) as err:
        LombScargle(t, y, dy).model(t_fit, frequency)
    assert str(err.value).startswith('Units of dy not equivalent')
def test_autopower(data):
    """autopower() must equal autofrequency() followed by power()."""
    t, y, dy = data
    ls = LombScargle(t, y, dy)
    kwargs = dict(samples_per_peak=6, nyquist_factor=2,
                  minimum_frequency=2, maximum_frequency=None)
    freq1 = ls.autofrequency(**kwargs)
    power1 = ls.power(freq1)
    freq2, power2 = ls.autopower(**kwargs)
    assert_allclose(freq1, freq2)
    assert_allclose(power1, power2)
@pytest.fixture
def null_data(N=1000, dy=1, rseed=0):
    """Generate null hypothesis data"""
    rng = np.random.RandomState(rseed)
    t = 100 * rng.rand(N)
    dy = 0.5 * dy * (1 + rng.rand(N))  # heteroscedastic errors in [0.5, 1) * dy
    y = dy * rng.randn(N)  # pure Gaussian noise scaled by the errors
    return t, y, dy
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_distribution(null_data, normalization):
    """Check periodogram statistics against the theoretical distributions."""
    t, y, dy = null_data
    N = len(t)
    ls = LombScargle(t, y, dy)
    freq, power = ls.autopower(normalization=normalization,
                               maximum_frequency=40)
    z = np.linspace(0, power.max(), 1000)
    # Test that pdf and cdf are consistent
    dz = z[1] - z[0]
    z_mid = z[:-1] + 0.5 * dz
    pdf = _lombscargle_pdf(z_mid, N, normalization=normalization)
    cdf = _lombscargle_cdf(z, N, normalization=normalization)
    assert_allclose(pdf, np.diff(cdf) / dz, rtol=1E-5, atol=1E-8)
    # Test that observed power is distributed according to the theoretical pdf
    # Fix: np.histogram's `normed` argument was deprecated and later removed
    # from NumPy; `density=True` is the supported spelling of the same thing.
    hist, bins = np.histogram(power, 30, density=True)
    midpoints = 0.5 * (bins[1:] + bins[:-1])
    pdf = _lombscargle_pdf(midpoints, N, normalization=normalization)
    assert_allclose(hist, pdf, rtol=0.05, atol=0.05 * pdf[0])
# The following are convenience functions used to compute statistics of the
# periodogram under various normalizations; they are used in the preceding
# test.
def _lombscargle_pdf(z, N, normalization, dH=1, dK=3):
"""Probability density function for Lomb-Scargle periodogram
Compute the expected probability density function of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
the periodogram value
N : int
the number of data points from which the periodogram was computed
normalization : string
The periodogram normalization. Must be one of
['standard', 'model', 'log', 'psd']
dH, dK : integers (optional)
The number of parameters in the null hypothesis and the model
Returns
-------
pdf : np.ndarray
The expected probability density function
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
if normalization == 'psd':
return np.exp(-z)
elif normalization == 'standard':
return 0.5 * Nk * (1 + z) ** (-0.5 * Nk - 1)
elif normalization == 'model':
return 0.5 * Nk * (1 - z) ** (0.5 * Nk - 1)
elif normalization == 'log':
return 0.5 * Nk * np.exp(-0.5 * Nk * z)
else:
raise ValueError("normalization='{0}' is not recognized"
"".format(normalization))
def _lombscargle_cdf(z, N, normalization, dH=1, dK=3):
"""Cumulative distribution for the Lomb-Scargle periodogram
Compute the expected cumulative distribution of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
the periodogram value
N : int
the number of data points from which the periodogram was computed
normalization : string
The periodogram normalization. Must be one of
['standard', 'model', 'log', 'psd']
dH, dK : integers (optional)
The number of parameters in the null hypothesis and the model
Returns
-------
cdf : np.ndarray
The expected cumulative distribution function
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
if normalization == 'psd':
return 1 - np.exp(-z)
elif normalization == 'standard':
return 1 - (1 + z) ** (-0.5 * Nk)
elif normalization == 'model':
return 1 - (1 - z) ** (0.5 * Nk)
elif normalization == 'log':
return 1 - np.exp(-0.5 * Nk * z)
else:
raise ValueError("normalization='{0}' is not recognized"
"".format(normalization))
| {
"content_hash": "c9b07e3cbfcb37fb1cf1224d37d8d9a0",
"timestamp": "",
"source": "github",
"line_count": 496,
"max_line_length": 79,
"avg_line_length": 34.131048387096776,
"alnum_prop": 0.6218914289089728,
"repo_name": "joergdietrich/astropy",
"id": "509247c12001955872c2f18cb0d873c6916fbcf5",
"size": "16929",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astropy/stats/lombscargle/tests/test_lombscargle.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "366874"
},
{
"name": "C++",
"bytes": "1825"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Jupyter Notebook",
"bytes": "62553"
},
{
"name": "Python",
"bytes": "7616749"
},
{
"name": "Shell",
"bytes": "425"
},
{
"name": "TeX",
"bytes": "778"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import sys
from datetime import timedelta
from billiard.einfo import ExceptionInfo
from celery import result
from celery import states
from celery.utils import gen_unique_id
from djcelery.app import app
from djcelery.backends.cache import CacheBackend
from djcelery.tests.utils import unittest
class SomeClass(object):
    # Minimal picklable object used to verify that arbitrary instances
    # survive result-backend serialization (see test_is_pickled).
    def __init__(self, data):
        self.data = data
class test_CacheBackend(unittest.TestCase):
    """End-to-end tests for the Django cache result backend."""

    def test_mark_as_done(self):
        """Storing a result flips PENDING -> SUCCESS and round-trips the value."""
        cb = CacheBackend(app=app)
        tid = gen_unique_id()
        self.assertEqual(cb.get_status(tid), states.PENDING)
        self.assertIsNone(cb.get_result(tid))
        cb.mark_as_done(tid, 42)
        self.assertEqual(cb.get_status(tid), states.SUCCESS)
        self.assertEqual(cb.get_result(tid), 42)
        # NOTE: a trailing `self.assertTrue(cb.get_result(tid), 42)` was
        # removed: assertTrue takes a failure *message* as its second
        # argument, so the 42 was silently ignored and the line only
        # duplicated the truthiness of the assertEqual above.

    def test_forget(self):
        """forget() must drop both the cached entry and the stored result."""
        b = CacheBackend(app=app)
        tid = gen_unique_id()
        b.mark_as_done(tid, {'foo': 'bar'})
        self.assertEqual(b.get_result(tid).get('foo'), 'bar')
        b.forget(tid)
        self.assertNotIn(tid, b._cache)
        self.assertIsNone(b.get_result(tid))

    def test_save_restore_delete_group(self):
        """GroupResult can be saved, restored, and deleted via the backend."""
        backend = CacheBackend(app=app)
        group_id = gen_unique_id()
        subtask_ids = [gen_unique_id() for _ in range(10)]
        subtasks = list(map(result.AsyncResult, subtask_ids))
        res = result.GroupResult(group_id, subtasks)
        res.save(backend=backend)
        saved = result.GroupResult.restore(group_id, backend=backend)
        self.assertListEqual(saved.subtasks, subtasks)
        self.assertEqual(saved.id, group_id)
        saved.delete(backend=backend)
        self.assertIsNone(result.GroupResult.restore(group_id,
                                                     backend=backend))

    def test_is_pickled(self):
        """Arbitrary picklable objects survive the store/retrieve round-trip."""
        cb = CacheBackend(app=app)
        tid2 = gen_unique_id()
        # renamed from `result` to avoid shadowing the celery.result module
        stored = {'foo': 'baz', 'bar': SomeClass(12345)}
        cb.mark_as_done(tid2, stored)
        # is serialized properly.
        rindb = cb.get_result(tid2)
        self.assertEqual(rindb.get('foo'), 'baz')
        self.assertEqual(rindb.get('bar').data, 12345)

    def test_mark_as_failure(self):
        """Failures store the exception, FAILURE status, and the traceback."""
        cb = CacheBackend(app=app)
        einfo = None
        tid3 = gen_unique_id()
        try:
            raise KeyError('foo')
        except KeyError as exception:
            einfo = ExceptionInfo(sys.exc_info())
            cb.mark_as_failure(tid3, exception, traceback=einfo.traceback)
        self.assertEqual(cb.get_status(tid3), states.FAILURE)
        self.assertIsInstance(cb.get_result(tid3), KeyError)
        self.assertEqual(cb.get_traceback(tid3), einfo.traceback)

    def test_process_cleanup(self):
        """process_cleanup() must be a safe no-op for the cache backend."""
        cb = CacheBackend(app=app)
        cb.process_cleanup()

    def test_set_expires(self):
        """`expires` accepts either a timedelta or a plain number of seconds."""
        cb1 = CacheBackend(app=app, expires=timedelta(seconds=16))
        self.assertEqual(cb1.expires, 16)
        cb2 = CacheBackend(app=app, expires=32)
        self.assertEqual(cb2.expires, 32)
class test_custom_CacheBackend(unittest.TestCase):
    def test_custom_cache_backend(self):
        """Changing CELERY_CACHE_BACKEND selects a different Django cache class."""
        from celery import current_app
        # save the global config and module object so the forced re-import
        # below can be fully undone in the finally block
        prev_backend = current_app.conf.CELERY_CACHE_BACKEND
        prev_module = sys.modules['djcelery.backends.cache']
        current_app.conf.CELERY_CACHE_BACKEND = 'dummy'
        # evicting the module forces the import below to re-resolve the cache
        sys.modules.pop('djcelery.backends.cache')
        try:
            from djcelery.backends.cache import cache
            from django.core.cache import cache as django_cache
            self.assertEqual(cache.__class__.__module__,
                             'django.core.cache.backends.dummy')
            self.assertIsNot(cache, django_cache)
        finally:
            # restore the original backend setting and module object
            current_app.conf.CELERY_CACHE_BACKEND = prev_backend
            sys.modules['djcelery.backends.cache'] = prev_module
| {
"content_hash": "4298a4ab2cbf77c57a43d4d4c5d0ffc8",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 74,
"avg_line_length": 33.93043478260869,
"alnum_prop": 0.6335212711430036,
"repo_name": "kanemra/django-celery",
"id": "7bda97d2ad78ddd2eb9209a5f18631a9c5bdf7b2",
"size": "3902",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "djcelery/tests/test_backends/test_cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82"
},
{
"name": "HTML",
"bytes": "1560"
},
{
"name": "Python",
"bytes": "166173"
},
{
"name": "Shell",
"bytes": "1905"
}
],
"symlink_target": ""
} |
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A Simple Random Fractal Tree
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import numpy as np
try:
import cairocffi as cairo
except ImportError:
import cairo
# ---- Tunable drawing parameters ----
ITERATIONS = 16  # total number of iterations
ROOT_COLOR = np.array([0.0, 0.0, 0.0])  # root branch color
LEAF_COLOR = np.array([1.0, 1.0, 0.2])  # leaf color
TRUNK_LEN = 200  # initial length of the trunk
TRUNK_RAD = 3.0  # initial radius of the trunk
THETA = np.pi / 2  # initial angle of the branch
ANGLE = np.pi / 4.5  # angle between branches in the same level
PERTURB = 6.0  # perturb the angle a little to make the tree look random
RATIO = 0.8  # contraction factor between successive trunks
WIDTH = 600  # image width
HEIGHT = 600  # image height
ROOT = (WIDTH / 2.0, HEIGHT + 50)  # pixel position of the root
def get_color(level):
    """Blend between ``ROOT_COLOR`` and ``LEAF_COLOR`` by tree depth.

    ``level`` counts down from ``ITERATIONS`` at the root to 1 at the
    leaves, so deeper branches are pulled toward the leaf color.
    """
    frac = float(level) / ITERATIONS
    return frac * ROOT_COLOR + (1 - frac) * LEAF_COLOR
def get_line_width(level):
    """Return the stroke width for a branch at ``level``, clamped below at 1."""
    width = TRUNK_RAD * level / ITERATIONS
    return width if width > 1 else 1
def fractal_tree(
    ctx,  # a cairo context to draw on
    level,  # current level in the iterations
    start,  # (x, y) coordinates of the start of this trunk
    t,  # current trunk length
    r,  # factor to contract the trunk in each iteration
    theta,  # orientation of current trunk
    angle,  # angle between branches in the same level
    perturb,  # perturb the angle
):
    """Recursively draw one branch and its two children onto ``ctx``."""
    if level == 0:
        return
    x0, y0 = start
    # randomize the length
    randt = np.random.random() * t
    # screen y grows downward, hence the minus sign on the sine term
    x, y = x0 + randt * np.cos(theta), y0 - randt * np.sin(theta)
    color = get_color(level)
    ctx.move_to(x0, y0)
    ctx.line_to(x, y)
    ctx.set_line_width(get_line_width(level))
    ctx.set_source_rgb(*color)
    ctx.stroke()
    # randomly spread the two child branches; the perturbation grows as
    # level shrinks, so outer branches look more irregular than the trunk
    theta1 = theta + np.random.random() * (perturb / level) * angle
    theta2 = theta - np.random.random() * (perturb / level) * angle
    # recursively draw the next branches
    fractal_tree(ctx, level - 1, (x, y), t * r, r, theta1, angle, perturb)
    fractal_tree(ctx, level - 1, (x, y), t * r, r, theta2, angle, perturb)
def main():
    """Render the tree to a PNG file named ``random_fractal_tree.png``."""
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, WIDTH, HEIGHT)
    ctx = cairo.Context(surface)
    # round caps/joins hide the seams between successive branch segments
    ctx.set_line_cap(cairo.LINE_CAP_ROUND)
    ctx.set_line_join(cairo.LINE_JOIN_ROUND)
    ctx.set_source_rgb(1, 1, 1)
    ctx.paint()  # white background
    fractal_tree(ctx, ITERATIONS, ROOT, TRUNK_LEN, RATIO, THETA, ANGLE, PERTURB)
    surface.write_to_png("random_fractal_tree.png")
if __name__ == "__main__":
    main()
| {
"content_hash": "5294d208a68415bb7dba00e90a787e53",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 80,
"avg_line_length": 30.941176470588236,
"alnum_prop": 0.6315589353612168,
"repo_name": "neozhaoliang/pywonderland",
"id": "8f190fa66dc9dc758ea4e4af32c1fc0ac8f752ed",
"size": "2630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/misc/fractaltree.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "201018"
},
{
"name": "Jupyter Notebook",
"bytes": "13368"
},
{
"name": "POV-Ray SDL",
"bytes": "15071"
},
{
"name": "Python",
"bytes": "271784"
},
{
"name": "Shell",
"bytes": "200"
}
],
"symlink_target": ""
} |
"""
NSQ base reader class.
This receives messages from nsqd and calls task methods to process that message
It handles the logic for backing off on retries and giving up on a message
ex.
import nsq
def task1(message):
print message
return True
def task2(message):
print message
return True
all_tasks = {"task1": task1, "task2": task2}
r = nsq.Reader(all_tasks, lookupd_http_addresses=['127.0.0.1:4161'],
topic="nsq_reader", channel="asdf", lookupd_poll_interval=15)
nsq.run()
"""
import logging
import os
import ujson as json
import time
import signal
import socket
import functools
import urllib
import tornado.options
import tornado.ioloop
import tornado.httpclient
import BackoffTimer
import nsq
import async
# Optional liveness file: touched on every nsqd heartbeat (see Reader.update_heartbeat)
tornado.options.define('heartbeat_file', type=str, default=None, help="path to a file to touch for heartbeats")
class RequeueWithoutBackoff(Exception):
    """exception for requeueing a message without incrementing backoff"""
    pass
class Reader(object):
    def __init__(self, all_tasks, topic, channel,
                 nsqd_tcp_addresses=None, lookupd_http_addresses=None,
                 max_tries=5, max_in_flight=1, requeue_delay=90, lookupd_poll_interval=120,
                 preprocess_method=None, validate_method=None, async=False):
        """
        Reader provides a loop that calls each task provided by ``all_tasks`` up to ``max_tries``
        requeueing on any failures with increasing multiples of ``requeue_delay`` between subsequent
        tries of each message.
        ``preprocess_method`` defines an optional method that can alter the message data before
        other task functions are called.
        ``validate_method`` defines an optional method that returns a boolean as to whether or not
        this message should be processed.
        ``all_tasks`` defines a mapping of tasks and functions that individually will be called
        with the message data.
        ``async`` determines whether handlers will do asynchronous processing. If set to True, handlers
        must accept a keyword argument called "finisher" that will be a callable used to signal message
        completion, with a boolean argument indicating success
        """
        assert isinstance(all_tasks, dict)
        for key, method in all_tasks.items():
            assert callable(method), "key %s must have a callable value" % key
        if preprocess_method:
            assert callable(preprocess_method)
        if validate_method:
            assert callable(validate_method)
        assert isinstance(topic, (str, unicode)) and len(topic) > 0
        assert isinstance(channel, (str, unicode)) and len(channel) > 0
        assert isinstance(max_in_flight, int) and 0 < max_in_flight < 2500
        # normalize a single address string into a one-element list
        if nsqd_tcp_addresses:
            if not isinstance(nsqd_tcp_addresses, (list, set, tuple)):
                assert isinstance(nsqd_tcp_addresses, (str, unicode))
                nsqd_tcp_addresses = [nsqd_tcp_addresses]
        else:
            nsqd_tcp_addresses = []
        if lookupd_http_addresses:
            if not isinstance(lookupd_http_addresses, (list, set, tuple)):
                assert isinstance(lookupd_http_addresses, (str, unicode))
                lookupd_http_addresses = [lookupd_http_addresses]
        else:
            lookupd_http_addresses = []
        # at least one discovery mechanism is required
        assert nsqd_tcp_addresses or lookupd_http_addresses
        self.topic = topic
        self.channel = channel
        self.nsqd_tcp_addresses = nsqd_tcp_addresses
        self.lookupd_http_addresses = lookupd_http_addresses
        self.requeue_delay = int(requeue_delay * 1000)  # stored in milliseconds
        self.max_tries = max_tries
        self.max_in_flight = max_in_flight
        self.lookupd_poll_interval = lookupd_poll_interval
        self.async=async
        self.task_lookup = all_tasks
        self.preprocess_method = preprocess_method
        self.validate_method = validate_method
        # one backoff timer per task, bounded between 0 and 120 seconds
        self.backoff_timer = dict((k, BackoffTimer.BackoffTimer(0, 120)) for k in self.task_lookup.keys())
        self.hostname = socket.gethostname()
        self.short_hostname = self.hostname.split('.')[0]
        # maps "address:port:task" -> AsyncConn
        self.conns = {}
        self.http_client = tornado.httpclient.AsyncHTTPClient()
        logging.info("starting reader for topic '%s'..." % self.topic)
        # open a connection per (task, nsqd address) pair
        for task in self.task_lookup:
            for addr in self.nsqd_tcp_addresses:
                address, port = addr.split(':')
                self.connect_to_nsqd(address, int(port), task)
        # trigger the first one manually
        self.query_lookupd()
        tornado.ioloop.PeriodicCallback(self.query_lookupd, self.lookupd_poll_interval * 1000).start()
    def callback(self, conn, task, message):
        """Run preprocess/validate, then dispatch ``message`` to the handler for ``task``."""
        body = message.body
        try:
            if self.preprocess_method:
                body = self.preprocess_method(body)
            # a validate_method returning falsy silently finishes the message
            if self.validate_method and not self.validate_method(body):
                return self.finish(conn, message.id)
        except Exception:
            logging.exception('[%s] caught exception while preprocessing' % conn)
            return self.requeue(conn, message)
        method_callback = self.task_lookup[task]
        try:
            if self.async:
                # this handler accepts the finisher callable as a keyword arg
                finisher = functools.partial(self._client_callback, message=message, task=task, conn=conn)
                return method_callback(body, finisher=finisher)
            else:
                # this is an old-school sync handler, give it just the message
                if method_callback(body):
                    self.backoff_timer[task].success()
                    return self.finish(conn, message.id)
                self.backoff_timer[task].failure()
        except RequeueWithoutBackoff:
            # handler explicitly asked for a requeue without a backoff penalty
            logging.info('RequeueWithoutBackoff')
        except Exception:
            logging.exception('[%s] caught exception while handling %s' % (conn, task))
            self.backoff_timer[task].failure()
        return self.requeue(conn, message)
    def _client_callback(self, success, message=None, task=None, conn=None):
        '''
        This is the method that an asynchronous nsqreader should call to indicate
        async completion of a message. This will most likely be exposed as the finisher
        callable created in `callback` above with some functools voodoo
        '''
        if success:
            self.backoff_timer[task].success()
            self.finish(conn, message.id)
        else:
            self.backoff_timer[task].failure()
            self.requeue(conn, message)
    def requeue(self, conn, message, delay=True):
        """Send a REQ for ``message``, or FIN it once max_tries is exceeded."""
        if message.attempts > self.max_tries:
            logging.warning('[%s] giving up on message after max tries %s' % (conn, str(message.body)))
            return self.finish(conn, message.id)
        try:
            # ms
            requeue_delay = self.requeue_delay * message.attempts if delay else 0
            conn.send(nsq.requeue(message.id, str(requeue_delay)))
        except Exception:
            conn.close()
            logging.exception('[%s] failed to send requeue %s @ %d' % (conn, message.id, requeue_delay))
    def finish(self, conn, message_id):
        '''
        This is an internal method for NSQReader
        '''
        try:
            conn.send(nsq.finish(message_id))
        except Exception:
            conn.close()
            logging.exception('[%s] failed to send finish %s' % (conn, message_id))
    def connection_max_in_flight(self):
        # spread max_in_flight evenly across open connections, minimum 1
        return max(1, self.max_in_flight / max(1, len(self.conns)))
    def handle_message(self, conn, task, message):
        """Decode a message body as JSON and hand it to ``callback``, managing RDY counts."""
        conn.ready -= 1
        # update ready count if necessary...
        # if we're in a backoff state for this task
        # set a timer to actually send the ready update
        per_conn = self.connection_max_in_flight()
        if not conn.is_sending_ready and (conn.ready <= 1 or conn.ready < int(per_conn * 0.25)):
            backoff_interval = self.backoff_timer[task].get_interval()
            if backoff_interval > 0:
                # delay the RDY update so the connection idles during backoff
                conn.is_sending_ready = True
                logging.info('[%s] backing off for %0.2f seconds' % (conn, backoff_interval))
                send_ready_callback = functools.partial(self.send_ready, conn, per_conn)
                tornado.ioloop.IOLoop.instance().add_timeout(time.time() + backoff_interval, send_ready_callback)
            else:
                self.send_ready(conn, per_conn)
        try:
            message.body = json.loads(message.body)
        except Exception:
            # non-JSON messages are dropped (neither finished nor requeued here)
            logging.warning('[%s] invalid JSON: %s' % (conn, str(message.body)))
            return
        logging.info('[%s] handling %s for %s' % (conn, task, str(message.body)))
        self.callback(conn, task, message)
    def send_ready(self, conn, value):
        """Send an RDY command for ``value`` messages on ``conn``."""
        try:
            conn.send(nsq.ready(value))
            conn.ready = value
        except Exception:
            conn.close()
            logging.exception('[%s] failed to send ready' % conn)
        conn.is_sending_ready = False
    def update_heartbeat(self):
        """Touch the configured heartbeat file, if any, as a liveness signal."""
        heartbeat_file = tornado.options.options.heartbeat_file
        if not heartbeat_file:
            return
        try:
            open(heartbeat_file, 'a').close()
            os.utime(heartbeat_file, None)
        except Exception:
            logging.exception('failed touching heartbeat file')
    def _data_callback(self, conn, raw_data, task):
        """Demultiplex a raw frame from nsqd into message handling or heartbeat reply."""
        frame, data = nsq.unpack_response(raw_data)
        if frame == nsq.FRAME_TYPE_MESSAGE:
            message = nsq.decode_message(data)
            try:
                self.handle_message(conn, task, message)
            except Exception:
                logging.exception('[%s] failed to handle_message() %r' % (conn, message))
        elif frame == nsq.FRAME_TYPE_RESPONSE and data == "_heartbeat_":
            self.update_heartbeat()
            conn.send(nsq.nop())  # NOP acknowledges the heartbeat
    def connect_to_nsqd(self, address, port, task):
        """Open (or reuse) a connection to one nsqd for one task."""
        assert isinstance(address, (str, unicode))
        assert isinstance(port, int)
        conn_id = address + ':' + str(port) + ':' + task
        if conn_id in self.conns:
            return
        logging.info("[%s] connecting to nsqd for '%s'", address + ':' + str(port), task)
        connect_callback = functools.partial(self._connect_callback, task=task)
        data_callback = functools.partial(self._data_callback, task=task)
        close_callback = functools.partial(self._close_callback, task=task)
        conn = async.AsyncConn(address, port, connect_callback, data_callback, close_callback)
        conn.connect()
        self.conns[conn_id] = conn
    def _connect_callback(self, conn, task):
        """On connect: SUB to the topic/channel and send the initial RDY count."""
        # with multiple tasks each one gets its own per-task channel suffix
        if len(self.task_lookup) > 1:
            channel = self.channel + '.' + task
        else:
            channel = self.channel
        initial_ready = self.connection_max_in_flight()
        try:
            conn.send(nsq.subscribe(self.topic, channel, self.short_hostname, self.hostname))
            conn.send(nsq.ready(initial_ready))
            conn.ready = initial_ready
            conn.is_sending_ready = False
        except Exception:
            conn.close()
            logging.exception('[%s] failed to bootstrap connection' % conn)
    def _close_callback(self, conn, task):
        """Forget a closed connection; stop the loop if nothing can reconnect us."""
        conn_id = str(conn) + ':' + task
        if conn_id in self.conns:
            del self.conns[conn_id]
        # without lookupd there is no way to rediscover producers, so exit
        if len(self.conns) == 0 and len(self.lookupd_http_addresses) == 0:
            logging.warning("all connections closed and no lookupds... exiting")
            tornado.ioloop.IOLoop.instance().stop()
    def query_lookupd(self):
        """Poll every configured lookupd for producers of our topic (async)."""
        for endpoint in self.lookupd_http_addresses:
            lookupd_url = endpoint + "/lookup?topic=" + urllib.quote(self.topic)
            req = tornado.httpclient.HTTPRequest(lookupd_url, method="GET",
                        connect_timeout=1, request_timeout=2)
            callback = functools.partial(self._finish_query_lookupd, endpoint=endpoint)
            self.http_client.fetch(req, callback=callback)
    def _finish_query_lookupd(self, response, endpoint):
        """Parse a lookupd response and connect to any newly discovered producers."""
        if response.error:
            logging.warning("[%s] lookupd error %s", endpoint, response.error)
            return
        try:
            lookup_data = json.loads(response.body)
        except json.JSONDecodeError:
            logging.warning("[%s] failed to parse JSON from lookupd: %r", endpoint, response.body)
            return
        if lookup_data['status_code'] != 200:
            logging.warning("[%s] lookupd responded with %d", endpoint, lookup_data['status_code'])
            return
        # connect_to_nsqd() is idempotent, so duplicates are harmless
        for task in self.task_lookup:
            for producer in lookup_data['data']['producers']:
                self.connect_to_nsqd(producer['address'], producer['tcp_port'], task)
def _handle_term_signal(sig_num, frame):
    """SIGTERM handler: stop the tornado IOLoop so run() returns cleanly."""
    logging.info('TERM Signal handler called with signal %r' % sig_num)
    tornado.ioloop.IOLoop.instance().stop()
def run():
    """Install the SIGTERM handler and start the tornado IOLoop (blocks)."""
    signal.signal(signal.SIGTERM, _handle_term_signal)
    tornado.ioloop.IOLoop.instance().start()
| {
"content_hash": "b28b15e43ff7f45e2868ed0c7c36aca4",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 113,
"avg_line_length": 40.570570570570574,
"alnum_prop": 0.6015544041450778,
"repo_name": "davegardnerisme/nsq",
"id": "273205be6ed86e3fb0eebd75dd71d735e99c42b9",
"size": "13510",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pynsq/nsq/NSQReader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "212597"
},
{
"name": "Python",
"bytes": "23094"
},
{
"name": "Shell",
"bytes": "2175"
}
],
"symlink_target": ""
} |
"""
support for presenting detailed information in failing assertions.
"""
import sys
from typing import Any
from typing import Generator
from typing import List
from typing import Optional
from _pytest.assertion import rewrite
from _pytest.assertion import truncate
from _pytest.assertion import util
from _pytest.assertion.rewrite import assertstate_key
from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.nodes import Item
if TYPE_CHECKING:
from _pytest.main import Session
def pytest_addoption(parser: Parser) -> None:
    """Register the --assert option and the enable_assertion_pass_hook ini flag."""
    group = parser.getgroup("debugconfig")
    group.addoption(
        "--assert",
        action="store",
        dest="assertmode",
        choices=("rewrite", "plain"),
        default="rewrite",
        metavar="MODE",
        help=(
            "Control assertion debugging tools.\n"
            "'plain' performs no assertion debugging.\n"
            "'rewrite' (the default) rewrites assert statements in test modules"
            " on import to provide assert expression information."
        ),
    )
    parser.addini(
        "enable_assertion_pass_hook",
        type="bool",
        default=False,
        help="Enables the pytest_assertion_pass hook."
        "Make sure to delete any previously generated pyc cache files.",
    )
def register_assert_rewrite(*names: str) -> None:
    """Register one or more module names to be rewritten on import.
    This function will make sure that this module or all modules inside
    the package will get their assert statements rewritten.
    Thus you should make sure to call this before the module is
    actually imported, usually in your __init__.py if you are a plugin
    using a package.
    :raise TypeError: if the given module names are not strings.
    """
    for name in names:
        if not isinstance(name, str):
            msg = "expected module names as *args, got {0} instead"
            raise TypeError(msg.format(repr(names)))
    # find the live rewriting hook on sys.meta_path, if one was installed
    for hook in sys.meta_path:
        if isinstance(hook, rewrite.AssertionRewritingHook):
            importhook = hook
            break
    else:
        # TODO(typing): Add a protocol for mark_rewrite() and use it
        # for importhook and for PytestPluginManager.rewrite_hook.
        importhook = DummyRewriteHook()  # type: ignore
    importhook.mark_rewrite(*names)
class DummyRewriteHook:
    """Stand-in for the import hook used when assertion rewriting is off.

    Exposes the same ``mark_rewrite`` entry point as
    ``AssertionRewritingHook`` but ignores every request.
    """

    def mark_rewrite(self, *names: str) -> None:
        """Accept and discard module names; rewriting is disabled."""
class AssertionState:
    """State for the assertion plugin."""
    def __init__(self, config: Config, mode) -> None:
        # mode is the --assert option value: "rewrite" or "plain"
        self.mode = mode
        self.trace = config.trace.root.get("assertion")
        # populated by install_importhook() when rewriting is enabled
        self.hook = None  # type: Optional[rewrite.AssertionRewritingHook]
def install_importhook(config: Config) -> rewrite.AssertionRewritingHook:
    """Try to install the rewrite hook, raise SystemError if it fails."""
    config._store[assertstate_key] = AssertionState(config, "rewrite")
    config._store[assertstate_key].hook = hook = rewrite.AssertionRewritingHook(config)
    # front of meta_path so the rewriting hook sees imports first
    sys.meta_path.insert(0, hook)
    config._store[assertstate_key].trace("installed rewrite import hook")
    def undo() -> None:
        # remove the hook at config teardown so it cannot leak across sessions
        hook = config._store[assertstate_key].hook
        if hook is not None and hook in sys.meta_path:
            sys.meta_path.remove(hook)
    config.add_cleanup(undo)
    return hook
def pytest_collection(session: "Session") -> None:
    """Attach the current session to the rewrite hook before collection."""
    # this hook is only called when test modules are collected
    # so for example not in the master process of pytest-xdist
    # (which does not collect test modules)
    assertstate = session.config._store.get(assertstate_key, None)
    if assertstate:
        if assertstate.hook is not None:
            assertstate.hook.set_session(session)
@hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]:
    """Setup the pytest_assertrepr_compare and pytest_assertion_pass hooks
    The rewrite module will use util._reprcompare if
    it exists to use custom reporting via the
    pytest_assertrepr_compare hook. This sets up this custom
    comparison for the test.
    """
    ihook = item.ihook
    def callbinrepr(op, left: object, right: object) -> Optional[str]:
        """Call the pytest_assertrepr_compare hook and prepare the result
        This uses the first result from the hook and then ensures the
        following:
        * Overly verbose explanations are truncated unless configured otherwise
          (eg. if running in verbose mode).
        * Embedded newlines are escaped to help util.format_explanation()
          later.
        * If the rewrite mode is used embedded %-characters are replaced
          to protect later % formatting.
        The result can be formatted by util.format_explanation() for
        pretty printing.
        """
        hook_result = ihook.pytest_assertrepr_compare(
            config=item.config, op=op, left=left, right=right
        )
        # Use the first non-empty explanation any hook implementation returned.
        for new_expl in hook_result:
            if new_expl:
                new_expl = truncate.truncate_if_required(new_expl, item)
                # Escape real newlines so each explanation stays on one line.
                new_expl = [line.replace("\n", "\\n") for line in new_expl]
                res = "\n~".join(new_expl)
                if item.config.getvalue("assertmode") == "rewrite":
                    # Protect literal % characters from later %-formatting.
                    res = res.replace("%", "%%")
                return res
        return None
    # Save the current global hooks so they can be restored after the test.
    saved_assert_hooks = util._reprcompare, util._assertion_pass
    util._reprcompare = callbinrepr
    # Only install the assertion-pass callback if some plugin implements the
    # pytest_assertion_pass hook; otherwise leave the (cheaper) default.
    if ihook.pytest_assertion_pass.get_hookimpls():
        def call_assertion_pass_hook(lineno: int, orig: str, expl: str) -> None:
            ihook.pytest_assertion_pass(item=item, lineno=lineno, orig=orig, expl=expl)
        util._assertion_pass = call_assertion_pass_hook
    # hookwrapper: the test protocol runs here with the hooks above active.
    yield
    # Restore the previous global hooks regardless of the test outcome.
    util._reprcompare, util._assertion_pass = saved_assert_hooks
def pytest_sessionfinish(session: "Session") -> None:
    """Detach the finished session from the rewrite hook, if installed."""
    assertstate = session.config._store.get(assertstate_key, None)
    if assertstate and assertstate.hook is not None:
        assertstate.hook.set_session(None)
def pytest_assertrepr_compare(
    config: Config, op: str, left: Any, right: Any
) -> Optional[List[str]]:
    # Thin wrapper: delegate to the shared comparison-explanation helper.
    explanation = util.assertrepr_compare(config=config, op=op, left=left, right=right)
    return explanation
| {
"content_hash": "92ff3671b1be6ea0c01ee28b2ff918de",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 87,
"avg_line_length": 35.23626373626374,
"alnum_prop": 0.6650553563075003,
"repo_name": "JoelMarcey/buck",
"id": "64d2267e70af4b89015a02d186f653e0a9f37a9e",
"size": "6413",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "third-party/py/pytest/src/_pytest/assertion/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "579"
},
{
"name": "Batchfile",
"bytes": "2093"
},
{
"name": "C",
"bytes": "255521"
},
{
"name": "C#",
"bytes": "237"
},
{
"name": "C++",
"bytes": "10992"
},
{
"name": "CSS",
"bytes": "54863"
},
{
"name": "D",
"bytes": "1017"
},
{
"name": "Go",
"bytes": "16819"
},
{
"name": "Groovy",
"bytes": "3362"
},
{
"name": "HTML",
"bytes": "6115"
},
{
"name": "Haskell",
"bytes": "895"
},
{
"name": "IDL",
"bytes": "385"
},
{
"name": "Java",
"bytes": "19430296"
},
{
"name": "JavaScript",
"bytes": "932672"
},
{
"name": "Kotlin",
"bytes": "2079"
},
{
"name": "Lex",
"bytes": "2731"
},
{
"name": "Makefile",
"bytes": "1816"
},
{
"name": "Matlab",
"bytes": "47"
},
{
"name": "OCaml",
"bytes": "4384"
},
{
"name": "Objective-C",
"bytes": "138150"
},
{
"name": "Objective-C++",
"bytes": "34"
},
{
"name": "PowerShell",
"bytes": "244"
},
{
"name": "Prolog",
"bytes": "858"
},
{
"name": "Python",
"bytes": "1786899"
},
{
"name": "Roff",
"bytes": "1109"
},
{
"name": "Rust",
"bytes": "3618"
},
{
"name": "Scala",
"bytes": "4906"
},
{
"name": "Shell",
"bytes": "49876"
},
{
"name": "Smalltalk",
"bytes": "3355"
},
{
"name": "Standard ML",
"bytes": "15"
},
{
"name": "Swift",
"bytes": "6897"
},
{
"name": "Thrift",
"bytes": "26256"
},
{
"name": "Yacc",
"bytes": "323"
}
],
"symlink_target": ""
} |
"""pylint packaging information"""
from __future__ import absolute_import
import sys
from os.path import join
modname = distname = 'pylint'
numversion = (1, 5, 5)
version = '.'.join([str(num) for num in numversion])
install_requires = [
'astroid>=1.4.5,<1.5.0',
'six',
]
if sys.platform == 'win32':
install_requires.append('colorama')
license = 'GPL'
description = "python code static checker"
web = 'http://www.pylint.org'
mailinglist = "mailto://code-quality@python.org"
author = 'Logilab'
author_email = 'python-projects@lists.logilab.org'
classifiers = ['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Debuggers',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing'
]
long_desc = """\
Pylint is a Python source code analyzer which looks for programming
errors, helps enforcing a coding standard and sniffs for some code
smells (as defined in Martin Fowler's Refactoring book)
.
Pylint can be seen as another PyChecker since nearly all tests you
can do with PyChecker can also be done with Pylint. However, Pylint
offers some more features, like checking length of lines of code,
checking if variable names are well-formed according to your coding
standard, or checking if declared interfaces are truly implemented,
and much more.
.
Additionally, it is possible to write plugins to add your own checks.
.
Pylint is shipped with "pylint-gui", "pyreverse" (UML diagram generator)
and "symilar" (an independent similarities checker)."""
scripts = [join('bin', filename)
for filename in ('pylint', 'pylint-gui', "symilar", "epylint",
"pyreverse")]
include_dirs = [join('pylint', 'test')]
| {
"content_hash": "871b40f8f31a44d42741ca7c13b7c912",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 77,
"avg_line_length": 33.890625,
"alnum_prop": 0.6542185338865837,
"repo_name": "mith1979/ansible_automation",
"id": "f5eea15ed2d1a44e8ed63a3d9ead38a5c57e26fd",
"size": "3012",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "applied_python/applied_python/lib/python2.7/site-packages/pylint/__pkginfo__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1005"
},
{
"name": "C",
"bytes": "84868"
},
{
"name": "CSS",
"bytes": "50289"
},
{
"name": "HTML",
"bytes": "70428"
},
{
"name": "JavaScript",
"bytes": "105262"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "19073705"
},
{
"name": "Shell",
"bytes": "3747"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
"""Volume V2 Volume action implementations"""
import copy
import six
from openstackclient.common import command
from openstackclient.common import parseractions
from openstackclient.common import utils
from openstackclient.i18n import _
from openstackclient.identity import common as identity_common
class CreateVolume(command.ShowOne):
    """Create new volume"""
    def get_parser(self, prog_name):
        # Declare every option accepted by "volume create"; the _() help
        # strings double as the user-facing documentation.
        parser = super(CreateVolume, self).get_parser(prog_name)
        parser.add_argument(
            "name",
            metavar="<name>",
            help=_("Volume name"),
        )
        parser.add_argument(
            "--size",
            metavar="<size>",
            type=int,
            required=True,
            help=_("Volume size in GB"),
        )
        parser.add_argument(
            "--type",
            metavar="<volume-type>",
            help=_("Set the type of volume"),
        )
        parser.add_argument(
            "--image",
            metavar="<image>",
            help=_("Use <image> as source of volume (name or ID)"),
        )
        parser.add_argument(
            "--snapshot",
            metavar="<snapshot>",
            help=_("Use <snapshot> as source of volume (name or ID)"),
        )
        parser.add_argument(
            "--source",
            metavar="<volume>",
            help=_("Volume to clone (name or ID)"),
        )
        parser.add_argument(
            "--description",
            metavar="<description>",
            help=_("Volume description"),
        )
        parser.add_argument(
            '--user',
            metavar='<user>',
            help=_('Specify an alternate user (name or ID)'),
        )
        parser.add_argument(
            '--project',
            metavar='<project>',
            help=_('Specify an alternate project (name or ID)'),
        )
        parser.add_argument(
            "--availability-zone",
            metavar="<availability-zone>",
            help=_("Create volume in <availability-zone>"),
        )
        parser.add_argument(
            "--property",
            metavar="<key=value>",
            action=parseractions.KeyValueAction,
            help=_("Set a property to this volume "
                   "(repeat option to set multiple properties)"),
        )
        return parser
    def take_action(self, parsed_args):
        # Clients for the three services this command may consult.
        identity_client = self.app.client_manager.identity
        volume_client = self.app.client_manager.volume
        image_client = self.app.client_manager.image
        # Resolve each optional name-or-ID reference to the concrete ID the
        # volume API expects; options left unset stay None.
        source_volume = None
        if parsed_args.source:
            source_volume = utils.find_resource(
                volume_client.volumes,
                parsed_args.source).id
        image = None
        if parsed_args.image:
            image = utils.find_resource(
                image_client.images,
                parsed_args.image).id
        snapshot = None
        if parsed_args.snapshot:
            snapshot = utils.find_resource(
                volume_client.volume_snapshots,
                parsed_args.snapshot).id
        project = None
        if parsed_args.project:
            project = utils.find_resource(
                identity_client.projects,
                parsed_args.project).id
        user = None
        if parsed_args.user:
            user = utils.find_resource(
                identity_client.users,
                parsed_args.user).id
        volume = volume_client.volumes.create(
            size=parsed_args.size,
            snapshot_id=snapshot,
            name=parsed_args.name,
            description=parsed_args.description,
            volume_type=parsed_args.type,
            user_id=user,
            project_id=project,
            availability_zone=parsed_args.availability_zone,
            metadata=parsed_args.property,
            imageRef=image,
            source_volid=source_volume
        )
        # Rename server-side fields for display:
        # 'metadata' -> 'properties', 'volume_type' -> 'type'.
        # Remove key links from being displayed
        volume._info.update(
            {
                'properties': utils.format_dict(volume._info.pop('metadata')),
                'type': volume._info.pop('volume_type')
            }
        )
        volume._info.pop("links", None)
        return zip(*sorted(six.iteritems(volume._info)))
class DeleteVolume(command.Command):
    """Delete volume(s)"""

    def get_parser(self, prog_name):
        # One or more volume references plus an optional --force flag.
        parser = super(DeleteVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volumes',
            metavar='<volume>',
            nargs='+',
            help=_("Volume(s) to delete (name or ID)"),
        )
        parser.add_argument(
            '--force',
            dest='force',
            action='store_true',
            default=False,
            help=_("Attempt forced removal of volume(s), regardless of state "
                   "(defaults to False)"),
        )
        return parser

    def take_action(self, parsed_args):
        volume_client = self.app.client_manager.volume
        # Pick the deletion call once: forced removal ignores the volume state.
        remove = (volume_client.volumes.force_delete if parsed_args.force
                  else volume_client.volumes.delete)
        for reference in parsed_args.volumes:
            target = utils.find_resource(volume_client.volumes, reference)
            remove(target.id)
class ListVolume(command.Lister):
    """List volumes"""
    def get_parser(self, prog_name):
        # Filters for project/user (admin only), name, and status, plus
        # output controls (--all-projects, --long).
        parser = super(ListVolume, self).get_parser(prog_name)
        parser.add_argument(
            '--project',
            metavar='<project>',
            help=_('Filter results by project (name or ID) (admin only)')
        )
        identity_common.add_project_domain_option_to_parser(parser)
        parser.add_argument(
            '--user',
            metavar='<user>',
            help=_('Filter results by user (name or ID) (admin only)')
        )
        identity_common.add_user_domain_option_to_parser(parser)
        parser.add_argument(
            '--name',
            metavar='<name>',
            help=_('Filter results by volume name'),
        )
        parser.add_argument(
            '--status',
            metavar='<status>',
            help=_('Filter results by status'),
        )
        parser.add_argument(
            '--all-projects',
            action='store_true',
            default=False,
            help=_('Include all projects (admin only)'),
        )
        parser.add_argument(
            '--long',
            action='store_true',
            default=False,
            help=_('List additional fields in output'),
        )
        return parser
    def take_action(self, parsed_args):
        volume_client = self.app.client_manager.volume
        compute_client = self.app.client_manager.compute
        identity_client = self.app.client_manager.identity
        # NOTE: this closure reads server_cache, which is assigned below
        # (before the formatter is actually invoked by the Lister output).
        def _format_attach(attachments):
            """Return a formatted string of a volume's attached instances
            :param attachments: a volume.attachments field
            :rtype: a string of formatted instances
            """
            msg = ''
            for attachment in attachments:
                server = attachment['server_id']
                if server in server_cache:
                    # Show the server name instead of its ID when known.
                    server = server_cache[server].name
                device = attachment['device']
                msg += 'Attached to %s on %s ' % (server, device)
            return msg
        # Column selection: --long adds type, bootable, and property columns.
        # Headers are renamed copies of the API field names.
        if parsed_args.long:
            columns = [
                'ID',
                'Name',
                'Status',
                'Size',
                'Volume Type',
                'Bootable',
                'Attachments',
                'Metadata',
            ]
            column_headers = copy.deepcopy(columns)
            column_headers[1] = 'Display Name'
            column_headers[4] = 'Type'
            column_headers[6] = 'Attached to'
            column_headers[7] = 'Properties'
        else:
            columns = [
                'ID',
                'Name',
                'Status',
                'Size',
                'Attachments',
            ]
            column_headers = copy.deepcopy(columns)
            column_headers[1] = 'Display Name'
            column_headers[4] = 'Attached to'
        # Cache the server list
        server_cache = {}
        try:
            for s in compute_client.servers.list():
                server_cache[s.id] = s
        except Exception:
            # Just forget it if there's any trouble
            pass
        # Resolve optional project/user filters to IDs for the search.
        project_id = None
        if parsed_args.project:
            project_id = identity_common.find_project(
                identity_client,
                parsed_args.project,
                parsed_args.project_domain)
        user_id = None
        if parsed_args.user:
            user_id = identity_common.find_user(identity_client,
                                                parsed_args.user,
                                                parsed_args.user_domain)
        search_opts = {
            'all_tenants': parsed_args.all_projects,
            'project_id': project_id,
            'user_id': user_id,
            'display_name': parsed_args.name,
            'status': parsed_args.status,
        }
        data = volume_client.volumes.list(search_opts=search_opts)
        return (column_headers,
                (utils.get_item_properties(
                    s, columns,
                    formatters={'Metadata': utils.format_dict,
                                'Attachments': _format_attach},
                ) for s in data))
class SetVolume(command.Command):
    """Set volume properties"""

    def get_parser(self, prog_name):
        # Options to rename, resize, re-describe, or tag a volume.
        parser = super(SetVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volume',
            metavar='<volume>',
            help=_('Volume to modify (name or ID)'),
        )
        parser.add_argument(
            '--name',
            metavar='<name>',
            help=_('New volume name'),
        )
        parser.add_argument(
            '--size',
            metavar='<size>',
            type=int,
            help=_('Extend volume size in GB'),
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help=_('New volume description'),
        )
        parser.add_argument(
            '--property',
            metavar='<key=value>',
            action=parseractions.KeyValueAction,
            help=_('Set a property on this volume '
                   '(repeat option to set multiple properties)'),
        )
        parser.add_argument(
            '--image-property',
            metavar='<key=value>',
            action=parseractions.KeyValueAction,
            help=_('Set an image property on this volume '
                   '(repeat option to set multiple image properties)'),
        )
        return parser

    def take_action(self, parsed_args):
        volume_client = self.app.client_manager.volume
        volume = utils.find_resource(volume_client.volumes, parsed_args.volume)
        if parsed_args.size:
            # Extending is only valid on available volumes and must grow the
            # volume; failures are reported and abort the remaining updates.
            if volume.status != 'available':
                self.app.log.error(_("Volume is in %s state, it must be "
                                     "available before size can be extended") %
                                   volume.status)
                return
            if parsed_args.size <= volume.size:
                self.app.log.error(_("New size must be greater than %s GB") %
                                   volume.size)
                return
            volume_client.volumes.extend(volume.id, parsed_args.size)
        if parsed_args.property:
            volume_client.volumes.set_metadata(volume.id, parsed_args.property)
        if parsed_args.image_property:
            volume_client.volumes.set_image_metadata(
                volume.id, parsed_args.image_property)
        kwargs = {}
        if parsed_args.name:
            kwargs['display_name'] = parsed_args.name
        if parsed_args.description:
            kwargs['display_description'] = parsed_args.description
        if kwargs:
            volume_client.volumes.update(volume.id, **kwargs)
        # Consistency with UnsetVolume: tell the user when the command
        # requested no change at all instead of silently doing nothing.
        if not any((parsed_args.size, parsed_args.property,
                    parsed_args.image_property, kwargs)):
            self.app.log.error(_("No changes requested\n"))
class ShowVolume(command.ShowOne):
    """Display volume details"""

    def get_parser(self, prog_name):
        # Single positional argument: the volume to look up.
        parser = super(ShowVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volume',
            metavar="<volume-id>",
            help=_("Volume to display (name or ID)")
        )
        return parser

    def take_action(self, parsed_args):
        volume_client = self.app.client_manager.volume
        volume = utils.find_resource(volume_client.volumes, parsed_args.volume)
        # Rename server-side fields for readability:
        #   'metadata'    -> 'properties'
        #   'volume_type' -> 'type'
        # and drop the 'links' entry, which is not useful output.
        info = volume._info
        info['properties'] = utils.format_dict(info.pop('metadata'))
        info['type'] = info.pop('volume_type')
        info.pop("links", None)
        return zip(*sorted(six.iteritems(info)))
class UnsetVolume(command.Command):
    """Unset volume properties"""

    def get_parser(self, prog_name):
        # Repeated --property/--image-property options collect into lists.
        parser = super(UnsetVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volume',
            metavar='<volume>',
            help=_('Volume to modify (name or ID)'),
        )
        parser.add_argument(
            '--property',
            metavar='<key>',
            action='append',
            help=_('Remove a property from volume '
                   '(repeat option to remove multiple properties)'),
        )
        parser.add_argument(
            '--image-property',
            metavar='<key>',
            action='append',
            help=_('Remove an image property from volume '
                   '(repeat option to remove multiple image properties)'),
        )
        return parser

    def take_action(self, parsed_args):
        volume_client = self.app.client_manager.volume
        volume = utils.find_resource(
            volume_client.volumes, parsed_args.volume)
        # Warn when the command was invoked with nothing to remove.
        if not parsed_args.property and not parsed_args.image_property:
            self.app.log.error(_("No changes requested\n"))
        if parsed_args.property:
            volume_client.volumes.delete_metadata(
                volume.id, parsed_args.property)
        if parsed_args.image_property:
            volume_client.volumes.delete_image_metadata(
                volume.id, parsed_args.image_property)
| {
"content_hash": "0c25150b1c3d12032e1d16d2efcfd210",
"timestamp": "",
"source": "github",
"line_count": 445,
"max_line_length": 79,
"avg_line_length": 32.95056179775281,
"alnum_prop": 0.52090295301098,
"repo_name": "redhat-openstack/python-openstackclient",
"id": "18473da3c26fb041e0eb7e51bfc6595571d257ed",
"size": "15231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master-patches",
"path": "openstackclient/volume/v2/volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2229284"
},
{
"name": "Shell",
"bytes": "591"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class ApplicationGatewayBackendHealthPool(Model):
    """Application gateway BackendHealth pool.
    :param backend_address_pool: Reference of an
     ApplicationGatewayBackendAddressPool resource.
    :type backend_address_pool:
     ~azure.mgmt.network.v2017_06_01.models.ApplicationGatewayBackendAddressPool
    :param backend_http_settings_collection: List of
     ApplicationGatewayBackendHealthHttpSettings resources.
    :type backend_http_settings_collection:
     list[~azure.mgmt.network.v2017_06_01.models.ApplicationGatewayBackendHealthHttpSettings]
    """

    # Maps Python attribute names to their REST wire keys and msrest types.
    _attribute_map = {
        'backend_address_pool': {
            'key': 'backendAddressPool',
            'type': 'ApplicationGatewayBackendAddressPool',
        },
        'backend_http_settings_collection': {
            'key': 'backendHttpSettingsCollection',
            'type': '[ApplicationGatewayBackendHealthHttpSettings]',
        },
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayBackendHealthPool, self).__init__(**kwargs)
        # Fields not supplied as keyword arguments default to None.
        self.backend_address_pool = kwargs.get('backend_address_pool')
        self.backend_http_settings_collection = kwargs.get(
            'backend_http_settings_collection')
| {
"content_hash": "1ec290f00c4952b728423ea905c6de07",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 142,
"avg_line_length": 47.4,
"alnum_prop": 0.7459915611814346,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "e40a1bab64c37291d72ba7426b400d1649750588",
"size": "1659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/application_gateway_backend_health_pool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
from heat.engine import translation
class NovaQuota(resource.Resource):
    """A resource for creating nova quotas.
    Nova Quota is used to manage operational limits for projects. Currently,
    this resource can manage Nova's quotas for:
    - cores
    - fixed_ips
    - floating_ips
    - instances
    - injected_files
    - injected_file_content_bytes
    - injected_file_path_bytes
    - key_pairs
    - metadata_items
    - ram
    - security_groups
    - security_group_rules
    - server_groups
    - server_group_members
    Note that default nova security policy usage of this resource
    is limited to being used by administrators only. Administrators should be
    careful to create only one Nova Quota resource per project, otherwise
    it will be hard for them to manage the quota properly.
    """
    # Heat resource metadata: available since Heat 8.0.0, backed by the
    # nova client's quota-sets API extension.
    support_status = support.SupportStatus(version='8.0.0')
    default_client_name = 'nova'
    entity = 'quotas'
    required_service_extension = 'os-quota-sets'
    # Property names; each integer quota accepts -1 (or 0 where noted) to
    # mean "unlimited".
    PROPERTIES = (
        PROJECT, CORES, FIXED_IPS, FLOATING_IPS, INSTANCES,
        INJECTED_FILES, INJECTED_FILE_CONTENT_BYTES, INJECTED_FILE_PATH_BYTES,
        KEYPAIRS, METADATA_ITEMS, RAM, SECURITY_GROUPS, SECURITY_GROUP_RULES,
        SERVER_GROUPS, SERVER_GROUP_MEMBERS
    ) = (
        'project', 'cores', 'fixed_ips', 'floating_ips', 'instances',
        'injected_files', 'injected_file_content_bytes',
        'injected_file_path_bytes', 'key_pairs', 'metadata_items', 'ram',
        'security_groups', 'security_group_rules', 'server_groups',
        'server_group_members'
    )
    properties_schema = {
        PROJECT: properties.Schema(
            properties.Schema.STRING,
            _('Name or id of the project to set the quota for.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('keystone.project')
            ]
        ),
        CORES: properties.Schema(
            properties.Schema.INTEGER,
            _('Quota for the number of cores. '
              'Setting the value to -1 removes the limit.'),
            constraints=[
                constraints.Range(min=-1),
            ],
            update_allowed=True
        ),
        FIXED_IPS: properties.Schema(
            properties.Schema.INTEGER,
            _('Quota for the number of fixed IPs. '
              'Setting the value to -1 removes the limit.'),
            constraints=[
                constraints.Range(min=-1),
            ],
            update_allowed=True
        ),
        FLOATING_IPS: properties.Schema(
            properties.Schema.INTEGER,
            _('Quota for the number of floating IPs. '
              'Setting the value to -1 removes the limit.'),
            constraints=[
                constraints.Range(min=-1),
            ],
            update_allowed=True
        ),
        INSTANCES: properties.Schema(
            properties.Schema.INTEGER,
            _('Quota for the number of instances. '
              'Setting the value to -1 removes the limit.'),
            constraints=[
                constraints.Range(min=-1),
            ],
            update_allowed=True
        ),
        INJECTED_FILES: properties.Schema(
            properties.Schema.INTEGER,
            _('Quota for the number of injected files. '
              'Setting the value to -1 removes the limit.'),
            constraints=[
                constraints.Range(min=-1),
            ],
            update_allowed=True
        ),
        INJECTED_FILE_CONTENT_BYTES: properties.Schema(
            properties.Schema.INTEGER,
            _('Quota for the number of injected file content bytes. '
              'Setting the value to -1 removes the limit.'),
            constraints=[
                constraints.Range(min=-1),
            ],
            update_allowed=True
        ),
        INJECTED_FILE_PATH_BYTES: properties.Schema(
            properties.Schema.INTEGER,
            _('Quota for the number of injected file path bytes. '
              'Setting the value to -1 removes the limit.'),
            constraints=[
                constraints.Range(min=-1),
            ],
            update_allowed=True
        ),
        KEYPAIRS: properties.Schema(
            properties.Schema.INTEGER,
            _('Quota for the number of key pairs. '
              'Setting the value to -1 removes the limit.'),
            constraints=[
                constraints.Range(min=-1),
            ],
            update_allowed=True
        ),
        METADATA_ITEMS: properties.Schema(
            properties.Schema.INTEGER,
            _('Quota for the number of metadata items. '
              'Setting the value to -1 removes the limit.'),
            constraints=[
                constraints.Range(min=-1),
            ],
            update_allowed=True
        ),
        RAM: properties.Schema(
            properties.Schema.INTEGER,
            _('Quota for the amount of ram (in megabytes). '
              'Setting the value to -1 removes the limit.'),
            constraints=[
                constraints.Range(min=-1),
            ],
            update_allowed=True
        ),
        SECURITY_GROUPS: properties.Schema(
            properties.Schema.INTEGER,
            _('Quota for the number of security groups. '
              'Setting the value to -1 removes the limit.'),
            constraints=[
                constraints.Range(min=-1),
            ],
            update_allowed=True
        ),
        SECURITY_GROUP_RULES: properties.Schema(
            properties.Schema.INTEGER,
            _('Quota for the number of security group rules. '
              'Setting the value to -1 removes the limit.'),
            constraints=[
                constraints.Range(min=-1),
            ],
            update_allowed=True
        ),
        SERVER_GROUPS: properties.Schema(
            properties.Schema.INTEGER,
            _('Quota for the number of server groups. '
              'Setting the value to -1 removes the limit.'),
            constraints=[
                constraints.Range(min=-1),
            ],
            update_allowed=True
        ),
        SERVER_GROUP_MEMBERS: properties.Schema(
            properties.Schema.INTEGER,
            _('Quota for the number of server group members. '
              'Setting the value to -1 removes the limit.'),
            constraints=[
                constraints.Range(min=-1),
            ],
            update_allowed=True
        )
    }
    def translation_rules(self, props):
        # Resolve the project name to its keystone ID before the quota
        # API is called.
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.PROJECT],
                client_plugin=self.client_plugin('keystone'),
                finder='get_project_id')
        ]
    def handle_create(self):
        # Apply the quota, then record a generated name as the resource ID
        # (there is no server-side ID for a quota set).
        self._set_quota()
        self.resource_id_set(self.physical_resource_name())
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        # Re-apply the quota using the updated template's property values.
        self._set_quota(json_snippet.properties(self.properties_schema,
                                                self.context))
    def _set_quota(self, props=None):
        # Update the project's quota set from the given (or current)
        # properties, skipping the project itself and any unset values.
        if props is None:
            props = self.properties
        kwargs = dict((k, v) for k, v in props.items()
                      if k != self.PROJECT and v is not None)
        self.client().quotas.update(props.get(self.PROJECT), **kwargs)
    def handle_delete(self):
        # NOTE(review): presumably this reverts the project to the default
        # quotas rather than removing limits entirely — confirm against the
        # nova quota-sets API.
        self.client().quotas.delete(self.properties[self.PROJECT])
    def validate(self):
        # Require at least one quota value besides the (required) project,
        # otherwise the resource would be a no-op.
        super(NovaQuota, self).validate()
        if sum(1 for p in self.properties.values() if p is not None) <= 1:
            raise exception.PropertyUnspecifiedError(
                *sorted(set(self.PROPERTIES) - {self.PROJECT}))
def resource_mapping():
    """Map the Heat resource type name to its implementing class."""
    return {'OS::Nova::Quota': NovaQuota}
| {
"content_hash": "272f79c7f19a036659bea59fd3385efd",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 78,
"avg_line_length": 34.675213675213676,
"alnum_prop": 0.5522553611042642,
"repo_name": "noironetworks/heat",
"id": "269fcdaa72a73863ac25d426080b32162475109d",
"size": "8689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/engine/resources/openstack/nova/quota.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8804896"
},
{
"name": "Shell",
"bytes": "64533"
}
],
"symlink_target": ""
} |
# Re-export the token implementations at package level; the names are
# imported only to be re-exported, so silence pylint's unused-import check.
# NOTE: the original pragma used "ignore=", which is not valid pylint
# message-control syntax and had no effect; "disable=" is the correct form.
from token import Token  # pylint: disable=unused-import
from simple import Simple  # pylint: disable=unused-import
from jwt import JWT  # pylint: disable=unused-import
| {
"content_hash": "136cfd00efc58cef0b6b8decbc377c42",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 57,
"avg_line_length": 55.333333333333336,
"alnum_prop": 0.7831325301204819,
"repo_name": "KujiraProject/Flask-PAM",
"id": "981420a6614321445a0a34dcdadab7db51b6c0b1",
"size": "166",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flask_pam/token/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15853"
},
{
"name": "Shell",
"bytes": "159"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_tenant_ep_retention_policy
short_description: Manage End Point (EP) retention protocol policies (fv:EpRetPol)
description:
- Manage End Point (EP) retention protocol policies on Cisco ACI fabrics.
version_added: '2.4'
options:
tenant:
description:
- The name of an existing tenant.
type: str
aliases: [ tenant_name ]
epr_policy:
description:
- The name of the end point retention policy.
type: str
aliases: [ epr_name, name ]
bounce_age:
description:
- Bounce entry aging interval in seconds.
- Accepted values range between C(150) and C(65535); 0 is used for infinite.
- The APIC defaults to C(630) when unset during creation.
type: int
bounce_trigger:
description:
- Determines if the bounce entries are installed by RARP Flood or COOP Protocol.
- The APIC defaults to C(coop) when unset during creation.
type: str
choices: [ coop, flood ]
hold_interval:
description:
- Hold interval in seconds.
- Accepted values range between C(5) and C(65535).
- The APIC defaults to C(300) when unset during creation.
type: int
local_ep_interval:
description:
- Local end point aging interval in seconds.
- Accepted values range between C(120) and C(65535); 0 is used for infinite.
- The APIC defaults to C(900) when unset during creation.
type: int
remote_ep_interval:
description:
- Remote end point aging interval in seconds.
- Accepted values range between C(120) and C(65535); 0 is used for infinite.
- The APIC defaults to C(300) when unset during creation.
type: int
move_frequency:
description:
- Move frequency per second.
- Accepted values range between C(0) and C(65535); 0 is used for none.
- The APIC defaults to C(256) when unset during creation.
type: int
description:
description:
- Description for the End point retention policy.
type: str
aliases: [ descr ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
seealso:
- module: aci_tenant
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(fv:EpRetPol).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Swetha Chunduri (@schunduri)
'''
EXAMPLES = r'''
- name: Add a new EPR policy
aci_tenant_ep_retention_policy:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
epr_policy: EPRPol1
bounce_age: 630
hold_interval: 300
local_ep_interval: 900
remote_ep_interval: 300
move_frequency: 256
description: test
state: present
delegate_to: localhost
- name: Remove an EPR policy
aci_tenant_ep_retention_policy:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
epr_policy: EPRPol1
state: absent
delegate_to: localhost
- name: Query an EPR policy
aci_tenant_ep_retention_policy:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
epr_policy: EPRPol1
state: query
delegate_to: localhost
register: query_result
- name: Query all EPR policies
aci_tenant_ep_retention_policy:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
# Map the module's bounce_trigger choices onto the APIC's bounceTrig values.
# FIX: the key must be 'flood' (the documented module choice); it was 'rarp',
# which made bounce_trigger=flood raise a KeyError in main().
BOUNCE_TRIG_MAPPING = dict(
    coop='protocol',
    flood='rarp-flood',
)
def main():
    """Ansible entry point: manage endpoint retention policies (fvEpRetPol) on an APIC."""
    argument_spec = aci_argument_spec()
    argument_spec.update(
        tenant=dict(type='str', aliases=['tenant_name']),  # Not required for querying all objects
        epr_policy=dict(type='str', aliases=['epr_name', 'name']),  # Not required for querying all objects
        bounce_age=dict(type='int'),
        bounce_trigger=dict(type='str', choices=['coop', 'flood']),
        hold_interval=dict(type='int'),
        local_ep_interval=dict(type='int'),
        remote_ep_interval=dict(type='int'),
        description=dict(type='str', aliases=['descr']),
        move_frequency=dict(type='int'),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'absent', ['epr_policy', 'tenant']],
            ['state', 'present', ['epr_policy', 'tenant']],
        ],
    )
    epr_policy = module.params['epr_policy']
    # bounceAgeIntvl accepts 0 (mapped to 'infinite') or 150-65535.
    bounce_age = module.params['bounce_age']
    if bounce_age is not None and bounce_age != 0 and bounce_age not in range(150, 65536):
        module.fail_json(msg="The bounce_age must be a value of 0 or between 150 and 65535")
    if bounce_age == 0:
        bounce_age = 'infinite'
    # Translate the user-facing choice into the APIC bounceTrig constant.
    bounce_trigger = module.params['bounce_trigger']
    if bounce_trigger is not None:
        bounce_trigger = BOUNCE_TRIG_MAPPING[bounce_trigger]
    description = module.params['description']
    hold_interval = module.params['hold_interval']
    if hold_interval is not None and hold_interval not in range(5, 65536):
        module.fail_json(msg="The hold_interval must be a value between 5 and 65535")
    local_ep_interval = module.params['local_ep_interval']
    if local_ep_interval is not None and local_ep_interval != 0 and local_ep_interval not in range(120, 65536):
        module.fail_json(msg="The local_ep_interval must be a value of 0 or between 120 and 65535")
    if local_ep_interval == 0:
        local_ep_interval = "infinite"
    move_frequency = module.params['move_frequency']
    if move_frequency is not None and move_frequency not in range(65536):
        module.fail_json(msg="The move_frequency must be a value between 0 and 65535")
    if move_frequency == 0:
        move_frequency = "none"
    remote_ep_interval = module.params['remote_ep_interval']
    # BUGFIX: 0 is documented as valid (maps to 'infinite') but was previously
    # rejected because the range check did not exempt it, unlike the
    # equivalent local_ep_interval check above.
    if remote_ep_interval is not None and remote_ep_interval != 0 and remote_ep_interval not in range(120, 65536):
        module.fail_json(msg="The remote_ep_interval must be a value of 0 or between 120 and 65535")
    if remote_ep_interval == 0:
        remote_ep_interval = "infinite"
    state = module.params['state']
    tenant = module.params['tenant']
    aci = ACIModule(module)
    # fvEpRetPol lives under the tenant: uni/tn-<tenant>/epRPol-<policy>.
    aci.construct_url(
        root_class=dict(
            aci_class='fvTenant',
            aci_rn='tn-{0}'.format(tenant),
            module_object=tenant,
            target_filter={'name': tenant},
        ),
        subclass_1=dict(
            aci_class='fvEpRetPol',
            aci_rn='epRPol-{0}'.format(epr_policy),
            module_object=epr_policy,
            target_filter={'name': epr_policy},
        ),
    )
    aci.get_existing()
    if state == 'present':
        aci.payload(
            aci_class='fvEpRetPol',
            class_config=dict(
                name=epr_policy,
                descr=description,
                bounceAgeIntvl=bounce_age,
                bounceTrig=bounce_trigger,
                holdIntvl=hold_interval,
                localEpAgeIntvl=local_ep_interval,
                remoteEpAgeIntvl=remote_ep_interval,
                moveFreq=move_frequency,
            ),
        )
        aci.get_diff(aci_class='fvEpRetPol')
        # Idempotent: no-op in check mode or when the diff is empty.
        aci.post_config()
    elif state == 'absent':
        aci.delete_config()
    aci.exit_json()
# Standard entry-point guard: run the module only when executed directly.
if __name__ == "__main__":
    main()
| {
"content_hash": "6d08e7e6a5745cafecec03eead93686c",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 141,
"avg_line_length": 30.775862068965516,
"alnum_prop": 0.6241830065359477,
"repo_name": "thaim/ansible",
"id": "afcce7250aa375d1b5b0aba3725989ab3ef6de79",
"size": "10847",
"binary": false,
"copies": "8",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/network/aci/aci_tenant_ep_retention_policy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
from django.conf.urls import url, include
from . import views
# URL namespace, so routes can be reversed as e.g. reverse('accounts:register').
app_name = 'accounts'
# Route table for the accounts app; intended to be mounted elsewhere via
# include() (imported above but unused in this file).
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^update/$', views.update, name='update'),
    url(r'^register/$', views.register, name='register'),
]
| {
"content_hash": "3273647d34f76369bc64b488eeb486a4",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 57,
"avg_line_length": 25.7,
"alnum_prop": 0.6459143968871596,
"repo_name": "yayoiukai/signalserver",
"id": "2a21cf98729c4e1b944140003e43e2f02ec066ed",
"size": "257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "147567"
},
{
"name": "HTML",
"bytes": "92442"
},
{
"name": "JavaScript",
"bytes": "11354"
},
{
"name": "Python",
"bytes": "178984"
},
{
"name": "Shell",
"bytes": "1246"
}
],
"symlink_target": ""
} |
from pycoin.key.BIP32Node import *
from . import config
from .interfaces.counterwalletseed import mnemonicToEntropy
import time
import json
import os
from .errors import PyPayWalletError
# pycoin network codes keyed by pypayd network name (XTN is Bitcoin testnet).
NETCODES = {"mainnet": "BTC", "testnet": "XTN"}
#To Do: enable autosweep, privkey_mode only
#sweep address function
#sweep branch function
class PyPayWallet(BIP32Node):
    """
    Wallet wrapper around the Pycoin BIP32 implementation.

    (Pycoin is a little heavier of a dependency than we need, but it already
    supports python3 and keypath-address handling.)

    The toFile()/toEncryptedFile() methods save the public and/or private key
    available to this wallet, optionally encrypted with simplecrypt.  Note:
    you do not need the private key to generate new addresses.

    The master key or the public key for the branch specified in the config
    file must be loaded at startup; branches take default numbers.  Hardened
    branches are currently not supported (they would require the root key to
    be a private key, which should not be used here).
    """

    @classmethod
    def _getNetcode(cls):
        """Return the pycoin netcode ('BTC' or 'XTN') for the configured network."""
        return NETCODES['testnet' if config.TESTNET else 'mainnet']

    @classmethod
    def fromEntropy(cls, seed, netcode=None):
        """Create a wallet from master-seed entropy bytes."""
        if not netcode:
            netcode = cls._getNetcode()
        # Build via cls so callers get a PyPayWallet (with keypath support),
        # consistent with fromMnemonic(); previously this returned a bare
        # BIP32Node.
        return cls.from_master_secret(seed, netcode=netcode)

    @classmethod
    def fromMnemonic(cls, mnemonic, mnemonic_type=None, netcode=None):
        """Create a wallet from a mnemonic phrase.

        mnemonic_type names a module under .interfaces that provides a
        mnemonicToEntropy() function.
        """
        import importlib
        if not netcode:
            netcode = cls._getNetcode()
        if not mnemonic_type:
            mnemonic_type = config.DEFAULT_MNEMONIC_TYPE
        # BUGFIX: exec("from .interfaces.%s import mnemonicToEntropy") cannot
        # rebind a function-local name in Python 3, so every mnemonic_type
        # silently fell back to the module-level counterwalletseed import.
        interface = importlib.import_module(
            '.interfaces.%s' % mnemonic_type, package=__package__)
        seed = interface.mnemonicToEntropy(mnemonic)
        return cls.from_master_secret(seed, netcode=netcode)

    @classmethod
    def fromFile(cls, password=None, file_dir=None, file_name=None, netcode=None):
        """Load a wallet from a JSON file, decrypting with `password` if needed."""
        if file_dir is None:
            file_dir = config.DATA_DIR
        if file_name is None:
            file_name = config.DEFAULT_WALLET_FILE
        if netcode is None:
            netcode = cls._getNetcode()
        with open(os.path.join(file_dir, file_name), 'rb') as rfile:
            data = rfile.read()
        try:
            if isinstance(data, bytes):
                data = data.decode('utf-8')
            wallet = json.loads(data)
        except (TypeError, UnicodeDecodeError):
            # Not plain JSON -- assume a simplecrypt-encrypted blob.
            data = cls._decryptFile(password, data)
            wallet = json.loads(data)
        # Prefer the private key when present; fall back to the public key.
        return cls.fromHwif((wallet.get('privkey') or wallet.get('pubkey')),
                            keypath=wallet.get('keypath'), netcode=netcode)

    # Backwards-compatible alias.
    fromEncryptedFile = fromFile

    @classmethod
    def _decryptFile(cls, password, data):
        """Decrypt a simplecrypt blob and return the UTF-8 decoded text."""
        import simplecrypt
        return simplecrypt.decrypt(password, data).decode('utf-8')

    @classmethod
    def fromHwif(cls, b58_str, keypath=None, netcode=None):
        """Create a wallet from a BIP32 wallet-import-format (hwif) string."""
        node = BIP32Node.from_hwif(b58_str)
        return cls.fromBIP32Node(node, keypath, netcode)

    # Go figure why BIP32Node won't instantiate from an instance of itself...
    @classmethod
    def fromBIP32Node(cls, W, keypath=None, netcode=None):
        """Re-wrap a plain BIP32Node instance as a PyPayWallet."""
        secret_exponent = (W._secret_exponent or None)
        # pycoin keeps either a secret exponent (private) or a public pair;
        # pass only whichever one applies.
        public_pair = (W._public_pair if not W._secret_exponent else None)
        if not netcode:
            netcode = cls._getNetcode() or W._netcode
        return PyPayWallet(
            netcode,
            W._chain_code,
            W._depth,
            W._parent_fingerprint,
            W._child_index,
            secret_exponent=secret_exponent,
            public_pair=public_pair,
            keypath=(keypath or W.__dict__.get('keypath'))
        )

    def _toFile(self, data, file_dir=None, file_name=None, force=False):
        """Write raw bytes to the wallet file; refuse to overwrite unless force.

        (A leftover debug print of the target path was removed.)
        """
        if file_dir is None:
            file_dir = config.DATA_DIR
        if file_name is None:
            file_name = config.DEFAULT_WALLET_FILE
        target = os.path.join(file_dir, file_name)
        if os.path.isfile(target) and not force:
            raise PyPayWalletError("Could not save to file because file already exists and force=True was not specified")
        with open(target, 'wb') as wfile:
            result = wfile.write(data)
        assert(len(data) == result)
        return result

    def jsonForWallet(self, store_private=False):
        """Serialize keypath/pubkey (and optionally privkey) to UTF-8 JSON bytes."""
        return json.dumps({
            "keypath": self.keypath,
            "pubkey": self.hwif(),
            "privkey": (self.hwif(True) if (self.is_private() and store_private) else None)
        }).encode('utf-8')

    def toFile(self, password=None, store_private=False, **kwargs):
        """Save the wallet to disk, encrypting with `password` when given.

        kwargs (file_dir, file_name, force) are passed through to _toFile().
        """
        payload = self.jsonForWallet(store_private)
        if password:
            import simplecrypt
            payload = simplecrypt.encrypt(password, payload)
        self._toFile(payload, **kwargs)

    # Backwards-compatible alias; identical to toFile().
    def toEncryptedFile(self, password=None, store_private=False, **kwargs):
        self.toFile(password, store_private, **kwargs)

    def getCurrentAddress(self):
        '''return the public address for the current path'''
        return self.subkey_for_path(str(self.keypath)).address()

    def getNewAddress(self):
        '''return public address after incrementing path by 1'''
        self.keypath.incr()
        return self.getCurrentAddress()

    def __init__(self, *args, keypath=None, **kwargs):
        """Initialize like BIP32Node, plus a mutable derivation KeyPath."""
        if not keypath:
            keypath = config.KEYPATH or config.DEFAULT_KEYPATH
        self.keypath = KeyPath(keypath)
        BIP32Node.__init__(self, *args, **kwargs)
class KeyPath(list):
    """A derivation keypath held as a list of ints, with in-place helpers.

    Accepts a slash-separated string ('0/1/2'), any iterable of ints, or a
    falsy value (treated as the empty path).
    """

    def __init__(self, path, *args):
        if type(path) is str:
            path = (int(part) for part in path.split('/'))
        elif not path:
            path = []
        list.__init__(self, path, *args)

    def __repr__(self):
        return "KeyPath('%s')" % self

    def __str__(self):
        return '/'.join(str(part) for part in self)

    def incr(self, x=1, pos=-1):
        """Step the component at `pos` (rightmost by default) by x, with the
        step taking the sign of the current component."""
        step = x if self[pos] >= 0 else -x
        self[pos] += step

    def set_pos(self, x, pos):
        """Overwrite the component at `pos` with int(x)."""
        self[pos] = int(x)
# def dmc(x, y):
# x.__dict__[y.__name__] = y.__get__(x, x.__class__)
| {
"content_hash": "e247cfaa336fe956690da4ffb62cf17f",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 274,
"avg_line_length": 38.06172839506173,
"alnum_prop": 0.6312033733376581,
"repo_name": "pik/pypayd",
"id": "7669ce16e2dbe0da142bf48823a38afed8b8873d",
"size": "6166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pypayd/wallet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90816"
}
],
"symlink_target": ""
} |
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropInputGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
return [
None,
nn_ops.conv2d_backprop_filter(
grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format")),
nn_ops.conv2d(
grad,
op.inputs[1],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("Conv2DBackpropFilter")
def _Conv2DBackpropFilterGrad(op, grad):
return [
nn_ops.conv2d_backprop_input(
array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format")), None,
nn_ops.conv2d(
op.inputs[0],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("Conv3D")
def _Conv3DGrad(op, grad):
data_format = op.get_attr("data_format")
return [
nn_ops.conv3d_backprop_input_v2(
array_ops.shape(op.inputs[0]),
op.inputs[1],
grad,
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format),
nn_ops.conv3d_backprop_filter_v2(
op.inputs[0],
array_ops.shape(op.inputs[1]),
grad,
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)
]
@ops.RegisterGradient("Conv3DBackpropInputV2")
def _Conv3DBackpropInputGrad(op, grad):
data_format = op.get_attr("data_format")
return [
None,
nn_ops.conv3d_backprop_filter_v2(
grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format),
nn_ops.conv3d(
grad,
op.inputs[1],
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)
]
@ops.RegisterGradient("Conv3DBackpropFilterV2")
def _Conv3DBackpropFilterGrad(op, grad):
data_format = op.get_attr("data_format")
return [
nn_ops.conv3d_backprop_input_v2(
array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format), None,
nn_ops.conv3d(
op.inputs[0],
grad,
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)
]
@ops.RegisterGradient("AvgPool3D")
def _AvgPool3DGrad(op, grad):
return gen_nn_ops.avg_pool3d_grad(
array_ops.shape(op.inputs[0]),
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("AvgPool3DGrad")
def _AvgPool3DGradGrad(op, grad):
return (array_ops.stop_gradient(op.inputs[0]),
gen_nn_ops.avg_pool3d(
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPool3D")
def _MaxPool3DGrad(op, grad):
return gen_nn_ops.max_pool3d_grad(
op.inputs[0],
op.outputs[0],
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPool3DGrad")
def _MaxPool3DGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool3d_grad_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPool3DGradGrad")
def _MaxPool3DGradGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool3d_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
"""The derivative of the softmax nonlinearity.
We assume that probs is of shape [batch_size * dim]
The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax').
This matrix is diagonal minus a rank one matrix, so it is easy to implement
as follows:
grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
Args:
op: the Softmax op.
grad_softmax: the tensor representing the gradient w.r.t. the
softmax output.
Returns:
gradient w.r.t the input to the softmax
"""
# TODO(ilyasu): assert that the tensor has two dimensions at
# graph-construction time? Alternatively: do different things
# depending on the dimensionality of the input tensors.
softmax = op.outputs[0]
grad_x = ((grad_softmax - array_ops.reshape(
math_ops.reduce_sum(grad_softmax * softmax, [1]), [-1, 1])) * softmax)
return grad_x
@ops.RegisterGradient("LogSoftmax")
def _LogSoftmaxGrad(op, grad):
"""The gradient for log_softmax.
log_softmax = input - log(sum(exp(input))
dlog_softmax/dinput = diag - softmax(input)
Args:
op: The log softmax op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
softmax = math_ops.exp(op.outputs[0])
return grad - math_ops.reduce_sum(grad, 1, keepdims=True) * softmax
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
The second input of unused_bias_op is the bias vector which has one fewer
dimension than "received_grad" (the batch dimension.) Its gradient is the
received gradient Summed on the batch dimension, which is the first dimension.
Args:
op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
return (received_grad,
gen_nn_ops.bias_add_grad(
out_backprop=received_grad, data_format=data_format))
@ops.RegisterGradient("BiasAddGrad")
def _BiasAddGradGrad(op, received_grad):
"""Gradient for the BiasAddGrad op.
Args:
op: BiasAddGrad op for which we are calculating gradients.
received_grad: The gradients passed to the BiasAddGrad op.
Returns:
A single gradient Tensor for the input to BiasAddGrad (which
is the gradient of the bias term in BiasAdd)
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
shape = array_ops.shape(op.inputs[0])
rank = array_ops.rank(op.inputs[0])
bias_shape = array_ops.shape(received_grad)
if data_format == b"NCHW":
expanded_shape = array_ops.concat([
array_ops.ones_like(shape[:-3]), bias_shape,
array_ops.ones_like(shape[-2:])
], 0)
tile_mults = array_ops.concat([shape[:-3], [1], shape[-2:]], 0)
else:
expanded_shape = array_ops.concat(
[array_ops.ones_like(shape[:-1]), bias_shape], 0)
tile_mults = array_ops.concat([shape[:-1], [1]], 0)
expanded_grad = array_ops.reshape(received_grad, expanded_shape)
return array_ops.tile(expanded_grad, tile_mults)
@ops.RegisterGradient("BiasAddV1")
def _BiasAddGradV1(unused_bias_op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
The second input of unused_bias_op is the bias vector which has one fewer
dimension than "received_grad" (the batch dimension.) Its gradient is the
received gradient Summed on the batch dimension, which is the first dimension.
Args:
unused_bias_op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
return (received_grad, math_ops.reduce_sum(received_grad,
reduction_dim_tensor))
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
return gen_nn_ops.relu_grad(grad, op.outputs[0])
@ops.RegisterGradient("EluGrad")
def _EluGradGrad(op, grad):
elu_x = op.inputs[1]
return (gen_nn_ops.elu_grad(grad, op.outputs[0]),
array_ops.where(elu_x < 0, grad * op.inputs[0],
array_ops.zeros(
shape=array_ops.shape(elu_x), dtype=elu_x.dtype)))
@ops.RegisterGradient("SeluGrad")
def _SeluGradGrad(op, grad):
x = op.inputs[1]
scale_alpha = 1.7580993408473768599402175208123
return (gen_nn_ops.elu_grad(grad, op.outputs[0]),
array_ops.where(x < 0.,
gen_nn_ops.elu_grad(grad,
op.outputs[0] + scale_alpha),
array_ops.zeros(
shape=array_ops.shape(x), dtype=x.dtype)))
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
return gen_nn_ops.relu6_grad(grad, op.outputs[0])
@ops.RegisterGradient("Relu6Grad")
def _Relu6GradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops.relu6_grad(grad, x),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
return gen_nn_ops.elu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Selu")
def _SeluGrad(op, grad):
return gen_nn_ops.selu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
return gen_nn_ops.softplus_grad(grad, op.inputs[0])
@ops.RegisterGradient("SoftplusGrad")
def _SoftplusGradGrad(op, grad):
# Let:
# y = tf.nn.softplus(x)
# dx = gen_nn_ops.softplus_grad(dy, x) = dy / (1 + exp(-x))
# This op computes (ddy, d2x) from op.inputs == [dy, x] and grad == ddx.
dy, x = op.inputs
with ops.control_dependencies([grad]):
ddy = gen_nn_ops.softplus_grad(grad, x)
d2x = grad * dy / (math_ops.exp(-x) + 2.0 + math_ops.exp(x))
return (ddy, d2x)
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
return gen_nn_ops.softsign_grad(grad, op.inputs[0])
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops.relu_grad(grad, x),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
  """Multiply after broadcasting vec to match dimensions of mat.

  Args:
    vec: A 1-D tensor of dimension [D0]
    mat: A 2-D tensor of dimension [D0, D1]

  Returns:
    A tensor of dimension [D0, D1], the result of vec * mat
  """
  # Turn vec into a [D0, 1] column so it broadcasts across mat's columns.
  return array_ops.expand_dims(vec, -1) * mat
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad):
"""Gradient function for SoftmaxCrossEntropyWithLogits."""
# grad_loss is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# grad_grad is the backprop for softmax gradient.
#
# Second derivative is just softmax derivative w.r.t. logits.
softmax_grad = op.outputs[1]
grad = _BroadcastMul(grad_loss, softmax_grad)
def IsZero(g):
# Some introspection to check if the gradient is feeding zeros
if context.executing_eagerly():
# TODO(apassos) add an efficient way to detect eager zeros here.
return False
if g.op.type in ("ZerosLike", "Zeros"):
return True
const_fill_value = tensor_util.constant_value(g)
return const_fill_value is not None and (const_fill_value == 0).all()
logits = op.inputs[0]
if grad_grad is not None and not IsZero(grad_grad):
softmax = nn_ops.softmax(logits)
grad += ((grad_grad - array_ops.squeeze(
math_ops.matmul(grad_grad[:, None, :], softmax[:, :, None]), axis=1)) *
softmax)
return grad, _BroadcastMul(grad_loss, -nn_ops.log_softmax(logits))
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
"""Gradient function for SparseSoftmaxCrossEntropyWithLogits."""
# grad_0 is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# There is no gradient for the labels
#
# Currently there is no way to take the second derivative of this op
# due to the fused implementation's interaction with tf.gradients(),
# so we make sure we prevent silently incorrect results by raising
# an error if the second derivative is requested via prevent_gradient.
sparse_softmax_grad_without_gradient = array_ops.prevent_gradient(
op.outputs[1],
message="Currently there is no way to take the second "
"derivative of sparse_softmax_cross_entropy_with_logits due to the fused "
"implementation's interaction with tf.gradients()")
return _BroadcastMul(grad_0, sparse_softmax_grad_without_gradient), None
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
dilations = op.get_attr("dilations")
strides = op.get_attr("strides")
padding = op.get_attr("padding")
use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
data_format = op.get_attr("data_format")
shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])
return [
nn_ops.conv2d_backprop_input(
shape_0,
op.inputs[1],
grad,
dilations=dilations,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format),
nn_ops.conv2d_backprop_filter(
op.inputs[0],
shape_1,
grad,
dilations=dilations,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format)
]
@ops.RegisterGradient("DepthwiseConv2dNative")
def _DepthwiseConv2dNativeGrad(op, grad):
return [
nn_ops.depthwise_conv2d_native_backprop_input(
array_ops.shape(op.inputs[0]),
op.inputs[1],
grad,
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format")),
nn_ops.depthwise_conv2d_native_backprop_filter(
op.inputs[0],
array_ops.shape(op.inputs[1]),
grad,
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("Dilation2D")
def _Dilation2DGrad(op, grad):
return [
nn_ops.dilation2d_backprop_input(op.inputs[0], op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding")),
nn_ops.dilation2d_backprop_filter(op.inputs[0], op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding"))
]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
depth_radius = op.get_attr("depth_radius")
bias = op.get_attr("bias")
alpha = op.get_attr("alpha")
beta = op.get_attr("beta")
return [
gen_nn_ops.lrn_grad(grad, op.inputs[0], op.outputs[0], depth_radius, bias,
alpha, beta)
]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
return gen_nn_ops.avg_pool_grad(
array_ops.shape(op.inputs[0]),
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("AvgPoolGrad")
def _AvgPoolGradGrad(op, grad):
return (array_ops.stop_gradient(op.inputs[0]),
gen_nn_ops.avg_pool(
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
return gen_nn_ops.max_pool_grad(
op.inputs[0],
op.outputs[0],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPoolV2")
def _MaxPoolGradV2(op, grad):
ksize = op.inputs[1]
strides = op.inputs[2]
return gen_nn_ops.max_pool_grad_v2(
op.inputs[0],
op.outputs[0],
grad,
ksize,
strides,
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")), None, None
@ops.RegisterGradient("MaxPoolWithArgmax")
def _MaxPoolGradWithArgmax(op, grad, unused_argmax_grad):
return gen_nn_ops.max_pool_grad_with_argmax(
op.inputs[0],
grad,
op.outputs[1],
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"))
@ops.RegisterGradient("MaxPoolGrad")
def _MaxPoolGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool_grad_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPoolGradV2")
def _MaxPoolGradGradV2(op, grad):
ksize = op.inputs[3]
strides = op.inputs[4]
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool_grad_grad_v2(
op.inputs[0],
op.inputs[1],
grad,
ksize,
strides,
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")), None, None)
@ops.RegisterGradient("MaxPoolGradGrad")
def _MaxPoolGradGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("FractionalMaxPool")
def _FractionalMaxPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
"""Returns gradient for FractionalMaxPool.
Since FractionalMaxPool has three outputs, there are three gradients passed in
for each of the outputs. Only the first one is useful, the other two gradients
are empty.
Args:
op: The FractionalMaxPoolOp.
grad_0: Gradient with respect to op.outputs[0]
unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.
Returns:
Input backprop for FractionalMaxPool op.
"""
return gen_nn_ops.fractional_max_pool_grad(
op.inputs[0], op.outputs[0], grad_0, op.outputs[1], op.outputs[2],
op.get_attr("overlapping"))
@ops.RegisterGradient("FractionalAvgPool")
def _FractionalAvgPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
"""Returns gradient for FractionalAvgPool.
Since FractionalAvgPool has three outputs, there are three gradients passed in
for each of the outputs. Only the first one is useful, the other two gradients
are empty.
Args:
op: The FractionalAvgPoolOp.
grad_0: Gradient with respect to op.outputs[0]
unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.
Returns:
Input backprop for FractionalAvgPool op.
"""
return gen_nn_ops.fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0,
op.outputs[1], op.outputs[2],
op.get_attr("overlapping"))
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
"""Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.
We do not backprop anything for the mean and var intentionally as they are
not being trained with backprop in the operation.
Args:
op: The BatchNormOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the BatchNormOp.
Returns:
dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
dm: Backprop for mean, which is
sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
dv: Backprop for variance, which is
sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
db: Backprop for beta, which is grad reduced in all except the
last dimension.
dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
"""
dx, dm, dv, db, dg = gen_nn_ops.batch_norm_with_global_normalization_grad(
op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
return dx, dm, dv, db, dg
def _BaseFusedBatchNormGrad(op, use_v2, *grad):
  """Return the gradients for the 3 inputs of BatchNorm.

  Args:
    op: The BatchNormOp for which we need to compute gradients.
    use_v2: Boolean indicating whether to use the V2 version of the fused batch
      norm gradient.
    *grad: An argument list for tensors of gradients wrt the outputs
      with grad[0] as grad_y.

  Returns:
    grad_x: gradient for x, which is scale * rsqrt(variance + epsilon) *
            [grad_y - mean(grad_y) - (x - mean(x)) *
            mean(grad_y * (x - mean(x))) / (variance + epsilon)]
            in training mode; grad_y * scale * rsqrt(pop_variance + epsilon)
            in freeze mode.
    grad_scale: gradient for scale, which is sum(grad_y * (x - mean(x)) *
                rsqrt(variance + epsilon)) in training mode;
                sum(grad_y * (x - pop_mean) * rsqrt(pop_variance + epsilon))
                in freeze mode.
    grad_offset: gradient for offset, which is sum(grad_y) in both modes.
  """
  x = op.inputs[0]
  scale = op.inputs[1]
  grad_y = grad[0]
  epsilon = op.get_attr("epsilon")
  data_format = op.get_attr("data_format")
  is_training = op.get_attr("is_training")
  if use_v2:
    grad_fun = gen_nn_ops.fused_batch_norm_grad_v2
  else:
    grad_fun = gen_nn_ops.fused_batch_norm_grad

  if is_training:
    # In training mode the kernel consumes the batch statistics saved as
    # extra outputs of the forward op.
    return grad_fun(
        grad_y,
        x,
        scale,
        op.outputs[3],
        op.outputs[4],
        epsilon=epsilon,
        data_format=data_format,
        is_training=is_training)

  # Freeze (inference) mode: use the population statistics from the inputs.
  pop_mean = op.inputs[3]
  pop_var = op.inputs[4]
  needs_transpose = data_format == b"NCHW"
  if needs_transpose:
    # The freeze-mode gradient kernel is invoked with NHWC data, so NCHW
    # tensors are transposed on the way in and back on the way out.
    x = array_ops.transpose(x, [0, 2, 3, 1])
    grad_y = array_ops.transpose(grad_y, [0, 2, 3, 1])
  dx, dscale, doffset, _, _ = grad_fun(
      grad_y,
      x,
      scale,
      pop_mean,
      pop_var,
      epsilon=epsilon,
      data_format="NHWC",
      is_training=is_training)
  if needs_transpose:
    dx = array_ops.transpose(dx, [0, 3, 1, 2])
  return dx, dscale, doffset, None, None
@ops.RegisterGradient("FusedBatchNorm")
def _FusedBatchNormGrad(op, *grad):
  """Gradient for FusedBatchNorm: delegates with use_v2=False (V1 kernel)."""
  return _BaseFusedBatchNormGrad(op, False, *grad)
@ops.RegisterGradient("FusedBatchNormV2")
def _FusedBatchNormV2Grad(op, *grad):
  """Gradient for FusedBatchNormV2: delegates with use_v2=True (V2 kernel)."""
  return _BaseFusedBatchNormGrad(op, True, *grad)
def _BatchNormGrad(grad_y,
                   x,
                   scale,
                   pop_mean,
                   pop_var,
                   epsilon,
                   data_format,
                   is_training=True):
  """Returns the gradients for the 3 inputs of BatchNorm.

  Args:
    grad_y: A `Tensor` of 4 dimensions for gradient for y.
    x: A `Tensor` of 4 dimensions for x.
    scale: A `Tensor` of 1 dimension for scaling.
    pop_mean: A `Tensor` of 1 dimension for the population mean. Only used when
      is_training=False.
    pop_var: A `Tensor` of 1 dimension for the population variance. Only used
      when is_training=False.
    epsilon: A small float number added to the variance of x.
    data_format: The data format for input. Either b"NHWC" or b"NCHW".
    is_training: A bool value to indicate the operation is for training
      (default)
      or inference.

  Returns:
    A tuple (grad_x, grad_scale, grad_offset), where grad_x is the gradient
    for x, grad_scale the gradient for scale, and grad_offset the gradient
    for offset.
  """
  x_dtype = x.dtype.base_dtype
  if x_dtype == dtypes.float16:
    # float16 math is too imprecise, so we do the batch norm gradient
    # computations in float32.
    x = math_ops.cast(x, dtypes.float32)
    grad_y = math_ops.cast(grad_y, dtypes.float32)
  if is_training:
    if data_format == b"NHWC":
      keepdims = False
      reduce_axis = [0, 1, 2]
    else:
      # NCHW: keep the reduced dims so the reshaped per-channel scale and the
      # statistics broadcast against the 4-D tensors along the channel axis.
      keepdims = True
      reduce_axis = [0, 2, 3]
      shape = [1, array_ops.size(scale), 1, 1]
      scale = array_ops.reshape(scale, shape)
    mean_grad_y = math_ops.reduce_mean(grad_y, reduce_axis, keepdims=keepdims)
    mean_x = math_ops.reduce_mean(x, reduce_axis, keepdims=keepdims)
    # stop_gradient: the mean is treated as a constant inside the variance
    # term so only the squared-difference path is differentiated.
    var_x = math_ops.reduce_mean(
        math_ops.squared_difference(x, array_ops.stop_gradient(mean_x)),
        reduce_axis,
        keepdims=keepdims)
    grad_y_offset = grad_y - mean_grad_y
    x_offset = x - mean_x
    mean = math_ops.reduce_mean(
        grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)
    grad_x = scale * math_ops.rsqrt(var_x + epsilon) * (
        grad_y_offset - math_ops.reciprocal(var_x + epsilon) * mean * x_offset)
    grad_scale = math_ops.rsqrt(var_x + epsilon) * math_ops.reduce_sum(
        grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)
    if data_format == b"NCHW":
      # Collapse the [1, C, 1, 1] result back to a rank-1 per-channel tensor.
      grad_scale = array_ops.squeeze(grad_scale)
    grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
    return math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset
  else:
    # Freeze mode: normalize with the fixed population statistics, so the
    # gradients have no dependence on the batch statistics of x.
    if data_format == b"NHWC":
      reduce_axis = [0, 1, 2]
    else:
      reduce_axis = [0, 2, 3]
      shape = [1, array_ops.size(pop_mean), 1, 1]
      pop_mean = array_ops.reshape(pop_mean, shape)
      pop_var = array_ops.reshape(pop_var, shape)
      scale = array_ops.reshape(scale, shape)
    grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
    var_rsqrt = math_ops.rsqrt(pop_var + epsilon)
    grad_scale = math_ops.reduce_sum(
        grad_y * (x - pop_mean) * var_rsqrt, axis=reduce_axis)
    grad_x = grad_y * scale * var_rsqrt
    return math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset
@ops.RegisterGradient("FusedBatchNormGrad")
def _FusedBatchNormGradGrad(op, *grad):
  """Returns the gradients for the 3 inputs of FusedBatchNormGrad.

  Args:
    op: The FusedBatchNormGradOp for which we need to compute gradients.
    *grad: An argument list for tensors of gradients wrt the outputs
      with grad[0] as grad_grad_x, grad[1] as grad_grad_scale,
      grad[2] as grad_grad_offset.

  Returns:
    A tuple (grad_grad_y, grad_x, grad_scale, None, None), where grad_grad_y
    is the gradient for grad_y, grad_x the gradient for x, grad_scale the
    gradient for scale.
  """
  data_format = op.get_attr("data_format")
  epsilon = op.get_attr("epsilon")
  is_training = op.get_attr("is_training")
  grad_y = op.inputs[0]
  x = op.inputs[1]
  scale = op.inputs[2]
  pop_mean = op.inputs[3]
  pop_var = op.inputs[4]
  grad_grad_x = grad[0]
  grad_grad_scale = grad[1]
  grad_grad_offset = grad[2]
  # Rebuild the first-order gradients with ordinary differentiable ops
  # (_BatchNormGrad), then differentiate that graph to obtain the
  # second-order gradients of the fused kernel.
  grad_x, grad_scale, grad_offset = _BatchNormGrad(
      grad_y, x, scale, pop_mean, pop_var, epsilon, data_format, is_training)
  grad_initial = [grad_grad_x, grad_grad_scale, grad_grad_offset]
  grad_grad_y, grad_x, grad_scale = gradients_impl.gradients(
      [grad_x, grad_scale, grad_offset], [grad_y, x, scale], grad_initial)
  return grad_grad_y, grad_x, grad_scale, None, None
@ops.RegisterGradient("FusedBatchNormGradV2")
def _FusedBatchNormGradGradV2(op, *grad):
  """Second-order gradient for FusedBatchNormGradV2; same math as V1."""
  return _FusedBatchNormGradGrad(op, *grad)
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
  """Return the gradients for L2Loss.

  Args:
    op: The L2LossOp for which we need to generate gradients.
    grad: Tensor containing a single number.

  Returns:
    The gradient, which is (x * grad).
  """
  x = op.inputs[0]
  return x * grad
@ops.RegisterGradient("TopK")
@ops.RegisterGradient("TopKV2")
def _TopKGrad(op, grad, _):
  """Return the gradients for TopK.

  Args:
    op: The TopKOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the TopKOp.

  Returns:
    A list of two tensors, the first being the gradient w.r.t to the input and
    TopK, and the second being the gradient w.r.t. to the indices (all zero).
  """
  in_shape = array_ops.shape(op.inputs[0])
  ind_shape = array_ops.shape(op.outputs[1])
  # Size of the last (k) dimension of the returned indices.
  ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
  # Flatten indices to 2D.
  ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim]))
  in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
  outerdim = array_ops.shape(ind_2d)[0]
  # Compute linear indices (flattened to 1D): row i of ind_2d is offset by
  # i * in_lastdim so the indices address the flattened input tensor.
  ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
      math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])
  # Substitute grad to appropriate locations and fill the rest with zeros,
  # finally reshaping it to the original input shape.
  return [
      array_ops.reshape(
          sparse_ops.sparse_to_dense(
              ind,
              array_ops.reshape(math_ops.reduce_prod(in_shape), [1]),
              array_ops.reshape(grad, [-1]),
              validate_indices=False), in_shape),
      array_ops.zeros([], dtype=dtypes.int32)
  ]
@ops.RegisterGradient("NthElement")
def _NthElementGrad(op, grad):
  """Return the gradients for NthElement.

  Args:
    op: The NthElementOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the NthElementOp

  Returns:
    A list of two tensors, the first being the gradient w.r.t. the input,
    the second being the gradient w.r.t. the N (None).
  """
  input = op.inputs[0]  # pylint: disable=redefined-builtin
  output = op.outputs[0]
  # Compute the number of elements which equal to output in each reduction
  # dimension. If there are multiple elements then the gradient will be
  # divided between them.
  # indicators is 1.0 wherever an input element equals the selected value.
  indicators = math_ops.cast(
      math_ops.equal(array_ops.expand_dims(output, -1), input), grad.dtype)
  grad = array_ops.expand_dims(grad, -1)
  num_selected = array_ops.expand_dims(math_ops.reduce_sum(indicators, -1), -1)
  # Split the incoming gradient evenly among all tied elements.
  return [math_ops.div(indicators, num_selected) * grad, None]
| {
"content_hash": "779a91e670951daa479f2d5726102d55",
"timestamp": "",
"source": "github",
"line_count": 1009,
"max_line_length": 80,
"avg_line_length": 33.52824578790882,
"alnum_prop": 0.6314809340821755,
"repo_name": "ryfeus/lambda-packs",
"id": "4af5bd26dd80b984b1c898411c2a23827bed1b4b",
"size": "34519",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/nn_grad.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
from django import forms
from us_ignite.testbeds.models import Testbed, NetworkSpeed
EMPTY_LABEL = u'- Select one -'
def get_experimentation_choices():
    """Return the experimentation choices prefixed with an empty option."""
    choices = [('', EMPTY_LABEL)]
    choices.extend(Testbed.EXPERIMENTATION_CHOICES)
    return choices
class TestbedFilterForm(forms.Form):
    """Filter form for the testbed listing.

    All fields are optional individually, but ``clean`` requires that at
    least one of them is filled in.
    """
    network_speed = forms.ModelChoiceField(
        queryset=NetworkSpeed.objects.all(), required=False,
        empty_label=EMPTY_LABEL)
    experimentation = forms.ChoiceField(
        choices=get_experimentation_choices(), required=False)
    passes_homes = forms.IntegerField(required=False, min_value=0)
    passes_business = forms.IntegerField(required=False, min_value=0)
    passes_anchor = forms.IntegerField(required=False, min_value=0)

    def clean(self):
        """Make sure at least one of the values is selected.

        Bug fix: the original used a truthiness test (``any(values)``),
        which rejected a legitimate value of 0 for the integer fields even
        though ``min_value=0`` allows it. Emptiness is now detected
        explicitly as ``None`` (unfilled choice/int) or ``''`` (unfilled
        ChoiceField).
        """
        filled = any(value is not None and value != ''
                     for value in self.cleaned_data.values())
        if not filled:
            raise forms.ValidationError(u'Select at least one of the fields.')
        return self.cleaned_data
| {
"content_hash": "4c585d7b09c8f309dd77927820f3c521",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 78,
"avg_line_length": 36.88461538461539,
"alnum_prop": 0.7090719499478624,
"repo_name": "us-ignite/us_ignite",
"id": "b3ffe191428640414aed3d43fe0d7cb6b461a5f6",
"size": "959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "us_ignite/testbeds/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "590320"
},
{
"name": "HTML",
"bytes": "920235"
},
{
"name": "JavaScript",
"bytes": "109759"
},
{
"name": "Nginx",
"bytes": "3047"
},
{
"name": "Pascal",
"bytes": "48"
},
{
"name": "Puppet",
"bytes": "53455"
},
{
"name": "Python",
"bytes": "1321882"
},
{
"name": "Ruby",
"bytes": "370509"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
	"""Build the honey-carafe relic loot tangible for the Geonosian bunker."""
	result = Tangible()
	result.attribute_template_id = -1
	result.template = "object/tangible/loot/dungeon/geonosian_mad_bunker/shared_relic_gbb_honey_carafe.iff"
	result.stfName("item_n","relic_gbb_honey_carafe")

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return result
"content_hash": "ba9eaf88ace14c3bbaea4ef39d34e686",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 104,
"avg_line_length": 26.46153846153846,
"alnum_prop": 0.7093023255813954,
"repo_name": "obi-two/Rebelion",
"id": "e76f389561c6ab41bd92cec5a81e5b230539ec0b",
"size": "489",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/loot/dungeon/geonosian_mad_bunker/shared_relic_gbb_honey_carafe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from custom_exception import NoResultFound
def get_result_list(target_jsonld):
    """Return the list of result items from a knowledge-graph JSON-LD dict."""
    return target_jsonld['itemListElement']
def get_first_result_item(target_jsonld):
    """Return the first result item of the response, or {} when empty."""
    result_list = target_jsonld['itemListElement']
    if result_list:
        return result_list[0]
    return {}
def get_first_result(target_jsonld):
    """Return the 'result' payload of the first item in the response.

    Raises:
        NoResultFound: when the first item has no 'result' key (i.e. the
        response contained no usable result).
    """
    first_item = get_first_result_item(target_jsonld)
    if 'result' not in first_item:
        raise NoResultFound()
    return first_item['result']
def get_article_body_from_result(target_result):
    """Return the article body text from a result's detailed description."""
    return target_result['detailedDescription']['articleBody']
| {
"content_hash": "3719b1265cc8cccbe2620c61a8d4cd37",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 58,
"avg_line_length": 27.818181818181817,
"alnum_prop": 0.7450980392156863,
"repo_name": "lahsivjar/jarvis-kube",
"id": "d833e4c8c57976cfd7eb756a96ba223f0faab3cd",
"size": "654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/python/google-knowledge-graph/src/utility.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1068"
},
{
"name": "Python",
"bytes": "25319"
}
],
"symlink_target": ""
} |
import ezdxf
from ezdxf.math import Vec3
from ezdxf.enums import MTextEntityAlignment
# This is the only way to create MTEXT entities where the "width" attribute
# is missing or equals 0.
# MTEXT entities created by AutoCAD or BricsCAD ALWAYS have a
# "width" attribute > 0.
# "\P" (escaped below) is the MTEXT control code for a paragraph break.
CONTENT = "This is a long MTEXT line without line wrapping!\\PThe second line."
doc = ezdxf.new(setup=True)
doc.layers.new("MTEXT", dxfattribs={"color": ezdxf.const.RED})
msp = doc.modelspace()
# DXF attributes shared by every MTEXT entity created below.
attribs = {
    "char_height": 0.7,
    "style": "OpenSans",
    "layer": "MTEXT",
}
def add_mtext(
    location: Vec3, attachment_point: MTextEntityAlignment, size: float = 2
):
    """Add an MTEXT entity at *location* plus a cross marking the point.

    The cross (two lines of half-length *size*) makes the attachment point
    visible when the resulting DXF is inspected in a viewer.
    """
    horizontal = (size, 0)
    vertical = (0, size)
    msp.add_line(location - horizontal, location + horizontal)
    msp.add_line(location - vertical, location + vertical)
    entity = msp.add_mtext(CONTENT, attribs)
    entity.set_location(location, attachment_point=attachment_point)
# (location, attachment point) pairs laid out on a 3x3 grid so every
# alignment variant can be compared side by side in the output file.
params = [
    ((0, 0), MTextEntityAlignment.BOTTOM_LEFT),
    ((100, 0), MTextEntityAlignment.BOTTOM_CENTER),
    ((200, 0), MTextEntityAlignment.BOTTOM_RIGHT),
    ((0, 100), MTextEntityAlignment.MIDDLE_LEFT),
    ((100, 100), MTextEntityAlignment.MIDDLE_CENTER),
    ((200, 100), MTextEntityAlignment.MIDDLE_RIGHT),
    ((0, 200), MTextEntityAlignment.TOP_LEFT),
    ((100, 200), MTextEntityAlignment.TOP_CENTER),
    ((200, 200), MTextEntityAlignment.TOP_RIGHT),
]
for location, attachment_point in params:
    add_mtext(Vec3(location), attachment_point)
# Zoom the modelspace viewport onto the grid and write the example file.
doc.set_modelspace_vport(300, (100, 100))
doc.saveas("mtext_simple_all_alignments.dxf")
| {
"content_hash": "1dd44a162b12e4b8d5a7afcd4a492ce2",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 79,
"avg_line_length": 32.723404255319146,
"alnum_prop": 0.6996098829648895,
"repo_name": "mozman/ezdxf",
"id": "ebba6d82d79844163ede28bc47159f7dbcec2331",
"size": "1600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples_dxf/create_mtext_simple_all_alignments.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5745"
},
{
"name": "CSS",
"bytes": "3565"
},
{
"name": "Common Lisp",
"bytes": "727"
},
{
"name": "Cython",
"bytes": "111923"
},
{
"name": "HTML",
"bytes": "1417"
},
{
"name": "JavaScript",
"bytes": "11132"
},
{
"name": "Python",
"bytes": "6336553"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class HoverdistanceValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for the ``layout.hoverdistance`` property."""

    def __init__(self, plotly_name="hoverdistance", parent_name="layout", **kwargs):
        # Resolve the validator options up front, falling back to the
        # defaults for this property when the caller did not override them.
        edit_type = kwargs.pop("edit_type", "none")
        minimum = kwargs.pop("min", -1)
        role = kwargs.pop("role", "info")
        super(HoverdistanceValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            role=role,
            **kwargs
        )
| {
"content_hash": "d619c734ae20a6e4b2e117871298edbe",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 84,
"avg_line_length": 38.30769230769231,
"alnum_prop": 0.606425702811245,
"repo_name": "plotly/python-api",
"id": "b2a13ceb442085955675703a43f5f38c63d48438",
"size": "498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/_hoverdistance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Team, TeamMembership
class TeamMembershipInline(admin.StackedInline):
    # Edit TeamMembership rows inline on the Team admin page.
    model = TeamMembership
    # raw_id widgets avoid rendering every user/team as a select dropdown.
    raw_id_fields = ('user', 'team',)
class TeamAdmin(admin.ModelAdmin):
    # NOTE(review): member_count is presumably a Team model method/property;
    # confirm it exists on the model for the changelist to render.
    list_display = ('name', 'created', 'member_count')
    inlines = [TeamMembershipInline]


admin.site.register(Team, TeamAdmin)
| {
"content_hash": "2ab8f784c3379f2daa535706351cd5f9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 54,
"avg_line_length": 22.4375,
"alnum_prop": 0.7298050139275766,
"repo_name": "stefanw/froide",
"id": "bd9346dc479e03302643ee0b3e01d265517a39e5",
"size": "359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "froide/team/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17807"
},
{
"name": "HTML",
"bytes": "161162"
},
{
"name": "Java",
"bytes": "287939"
},
{
"name": "JavaScript",
"bytes": "1325034"
},
{
"name": "Makefile",
"bytes": "329"
},
{
"name": "Python",
"bytes": "1642783"
},
{
"name": "Shell",
"bytes": "1621"
}
],
"symlink_target": ""
} |
# SECURITY NOTE: DEBUG must be set to False on any production deployment.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = ''           # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = ''             # Or path to database file if using sqlite3.
DATABASE_USER = ''             # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
# SECURITY NOTE: a SECRET_KEY committed to source control is public;
# regenerate it and load it from the environment for real deployments.
SECRET_KEY = '5!x61m1lwmhjjwg!_u!ez=64-5q%bch4=%nlb4f_hbf4esq27%'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
    # 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'dj_apache.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    # NOTE(review): django.contrib.auth is disabled here while the
    # AuthenticationMiddleware above is still enabled -- confirm that this
    # combination is intentional.
    #'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'dj_apache.logview',
)
| {
"content_hash": "32479f27bcb9f0bb63a88755338092b6",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 101,
"avg_line_length": 35.53164556962025,
"alnum_prop": 0.7060919130744567,
"repo_name": "lluxury/P_U_S_A",
"id": "c1851f00b9090f20cc173fa7a929b783bee24b4c",
"size": "2849",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "11_gui/code/dj_apache/settings.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "78"
},
{
"name": "HTML",
"bytes": "6274"
},
{
"name": "PLpgSQL",
"bytes": "1421"
},
{
"name": "Perl",
"bytes": "999"
},
{
"name": "Python",
"bytes": "1322191"
},
{
"name": "Roff",
"bytes": "6"
},
{
"name": "Shell",
"bytes": "1055"
}
],
"symlink_target": ""
} |
import feedparser
import sqlite3, psycopg2
import datetime, time
import os
import string
from random import choice, randint
import lxml.html
import uuid
import random
def fill_start_data_news():
    """Insert a single placeholder row into the ``news`` table.

    Fixes: removed the unused local ``id`` (which also shadowed the
    builtin), and the connection is now closed even when the insert fails.

    NOTE(review): database credentials are hard-coded here (and throughout
    this module); they should come from configuration.
    """
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    try:
        cursor = db.cursor()
        news_title = "Test news"
        news_category_id = 1
        news_post_date = datetime.datetime.now()
        news_post_text = "Test news text"
        news_post_text_translate = "Test news text translate"
        news_portal_name_id = 1
        news_company_owner_id = 1
        news_author_id = 1
        news_main_cover = ""
        news_likes = 0
        news_dislikes = 0
        query = """INSERT INTO news(news_title, news_category_id, news_post_date, news_post_text, news_post_text_translate, news_portal_name_id, news_company_owner_id, news_author_id, news_main_cover, news_likes, news_dislikes) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
        data_query = (news_title, news_category_id, news_post_date, news_post_text, news_post_text_translate, news_portal_name_id, news_company_owner_id, news_author_id, news_main_cover, news_likes, news_dislikes)
        cursor.execute(query, data_query)
        db.commit()
    finally:
        db.close()
def get_feed_urls():
    """Return all RSS feed links stored in the ``rss_channels`` table.

    Fixes: replaced the index-based loop (with dead commented-out code and
    per-row debug prints) by a comprehension, and the connection is closed
    even when the query fails.
    """
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    try:
        cursor = db.cursor()
        cursor.execute("SELECT link FROM rss_channels")
        rows = cursor.fetchall()
    finally:
        db.close()
    # Each row is a 1-tuple; unwrap to a flat list of URL strings.
    return [row[0] for row in rows]
def parse_current_url(url=''):
    """Parse *url* as an RSS/Atom feed and return its entries as a list."""
    parsed_feed = feedparser.parse(url)
    return list(parsed_feed.entries)
def last_element(feed):
    """Normalize the newest entry of a parsed feed into a flat dict.

    Args:
        feed: list of feedparser entries; only ``feed[0]`` is used.

    Returns:
        dict with keys ``title``, ``link``, ``main_cover``, ``author``,
        ``content``, ``date`` and ``description``; optional fields that the
        entry does not provide default to ``""``.

    Bug fix: when the entry carried no date field at all, the original
    fell through to ``feed[0]["updated"]`` and raised KeyError; it now
    defaults to the empty string.
    """
    entry = feed[0]
    keys = entry.keys()
    args = {"title": entry.title, "link": entry.link, "main_cover": ""}
    # AUTHOR
    args["author"] = entry.author if "author" in keys else ""
    # CONTENT -- feedparser stores content as a list of value dicts.
    args["content"] = entry.content[0]["value"] if "content" in keys else ""
    # DATE -- try the common field names in order of preference.
    if "date" in keys:
        args["date"] = entry.published
    elif "published" in keys:
        args["date"] = entry["published"]
    elif "updated" in keys:
        args["date"] = entry["updated"]
    else:
        args["date"] = ""
    # DESCRIPTION
    if "description" in keys:
        args["description"] = entry.description
    elif "summary_detail" in keys:
        args["description"] = entry["summary_detail"]["value"]
    else:
        args["description"] = entry["summary"]
    return args
def set_user_rss_read(user_id, rss_news_id, rss_portal_id):
    """Create an unread marker row linking *user_id* to a new RSS item.

    Bug fix: the original never closed the connection, leaking one
    connection per call; the close now happens even on failure.
    """
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    try:
        cursor = db.cursor()
        query = "INSERT INTO user_rss_news_read(user_id, rss_news_id, rss_portal_id, read) VALUES(%s,%s,%s,%s)"
        data_query = (user_id, rss_news_id, rss_portal_id, False)
        cursor.execute(query, data_query)
        db.commit()
    finally:
        db.close()
    # Kept for backward compatibility with any caller checking the result.
    return 0
def get_amount_of_user_readers(portal_id):
    """Return ``[count, rows]`` of users subscribed to *portal_id*.

    ``rows`` is the raw fetchall() result (1-tuples of user ids).

    Bug fix: the original never closed the connection; it is now closed
    even when the query fails.
    """
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    try:
        cursor = db.cursor()
        query = "SELECT user_id FROM user_rss_news UR WHERE portal_id=%s AND UR.check=TRUE"
        cursor.execute(query, [portal_id])
        amount = cursor.fetchall()
    finally:
        db.close()
    return [len(amount), amount]
def parse_img(url):
    """Return all <img> elements of the page at *url*, or False on fetch error."""
    import urllib.request as r
    from urllib import error
    try:
        return lxml.html.parse(r.urlopen(url)).xpath('//img')
    except error.URLError:
        # Bug fix: URLError also covers HTTPError (its subclass); the
        # original caught only HTTPError, so DNS/connection failures
        # crashed the whole feed run instead of returning False.
        return False
def result(url):
    """Return ``[{'size': str(w*h), 'src': ...}]`` for each sized <img> at *url*.

    Returns False when the page could not be fetched (see ``parse_img``).

    Bug fix: the original called ``parse_img(url)`` again for the False
    check and once more for the loop -- up to three network fetches per
    URL. The page is now fetched exactly once.
    """
    images = parse_img(url)
    if images is False:
        return False
    sized = []
    for img in images:
        width = img.get('width')
        height = img.get('height')
        # Only images that declare both dimensions can be ranked by area.
        if width and height:
            sized.append({'size': str(int(width) * int(height)),
                          'src': img.get('src')})
    return sized
def connect_to_db(urls):
    """Fetch each feed in *urls*, normalize its newest entry and store it.

    For every URL: parse the feed, clean up the title/description/content
    HTML, extract a cover image, and insert a news_rss row (plus cover and
    per-user unread markers) unless the link is already stored.

    NOTE(review): if no row of rss_portals matches the entry link,
    ``current_rss_news_id`` is never assigned and the insert below raises
    NameError -- confirm every feed has a matching portal row.
    """
    # db = sqlite3.connect(BASE_DIR+"\\db.sqlite3")
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    import uuid
    cursor = db.cursor()
    num = 0
    for url in urls:
        num += 1
        print("#%s Current url: %s" % (num, url))
        data = last_element(parse_current_url(url=url))
        # print(data["date"])
        # Convert an RFC-822 style date ("Mon, 01 Jan 2018 00:00:00 GMT")
        # into a datetime; on any other format keep the raw string.
        try:
            new_date = data["date"].split()
            time = new_date[4].split(":")
            if len(new_date[1]) > len(new_date[2]):
                # Day and month tokens are swapped in some feeds.
                tmp = new_date[1][:3]
                new_date[1] = new_date[2][:2]
                new_date[2] = tmp
            months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
            mon = months.index(new_date[2])+1
            date_posted = datetime.datetime(int(new_date[3][:4]), mon, int(new_date[1]), int(time[0]), int(time[1]), int(time[2]))
        except IndexError:
            date_posted = data["date"]
        # Deduplicate by link: skip the insert if the item is already stored.
        query_0 = "SELECT ID FROM news_rss WHERE link=%s"
        data_query_0 = [data["link"]]
        cursor.execute(query_0, data_query_0)
        count = cursor.fetchall()
        import re
        # Cover image: first try the content HTML, then the description.
        match_2 = re.findall(r'src=\"(.*?)\"\s.*/>', data["content"])
        if len(match_2) >= 1:
            # a = re.findall(r'([=\-_.:](\d+x\d+)+)', str(match_2[0]))[0]
            data["main_cover"] = str(match_2[0])#.replace(a, '')
        else:
            # a = re.findall(r'([=\-_.:](\d+x\d+)+)', str(match_2))[0]
            data["main_cover"] = str(match_2)#.replace(a, '')
        if len(match_2) == 0:
            match_3 = re.findall(r'src=\"(.*?)\"\s.*/>', data["description"])
            a = str(match_3)
            if len(match_3) >= 1:
                data["main_cover"] = str(match_3[0])#.replace(a, '')
            else:
                # a = re.findall(r'([=\-_.:](\d+x\d+)+)', str(match_2))[0]
                data["main_cover"] = str(match_3)#.replace(a, '')
        # Normalize whitespace/entities and escape '%' for the SQL driver.
        data["content"] = data["content"].replace("\xa0", " ").replace("%", "%%").replace("> ", "> ").replace(" </", "</").replace(" <", " <").replace("\n<", "<").replace("\n", " ").replace("'", "’")
        data["title"] = data["title"].replace('"', '').replace("\xa0", " ").replace("%", "%%").replace("> ", "> ").replace(" </", "</").replace(" <", " <").replace("\n<", "<").replace("\n", " ").replace("'", "’")
        data["description"] = data["description"].replace("\xa0", "").replace("%", "%%").replace("> ", "> ").replace(" </", "</").replace(" <", " <").replace("\n<", "<").replace("\n", " ").replace("'", "’")
        # TEST
        # match = re.findall(r'<.*?>', data["description"])
        # for i in match:
        #     data["description"] = data["description"].replace(i, "")
        ############## Parse all images from current url #######################
        # def parse_img(url):
        #     return lxml.html.parse(url).xpath('//img')
        # def print_matches(url):
        #     for i in parse_img(url):
        #         print(i.get('width'), i.get('height'), i.get('src'))
        # def result(url):
        #     array = []
        #     for i in parse_img(url):
        #         if i.get('width') and i.get('height'):
        #             array.append({'size':str(int(i.get('width'))*int(i.get('height'))), 'src': i.get('src')})
        #         else:
        #             pass
        #     return array
        #
        # No <img> found in the feed HTML: fetch the article page itself and
        # pick the largest declared image as the cover.
        if data["main_cover"] == '[]':
            end = result(data['link'])
            if end != False:
                max_item = 0
                for i in range(len(end)):
                    if int(end[i]['size']) > max_item:
                        max_item = int(end[i]['size'])
                for i in range(len(end)):
                    if int(end[i]['size']) == max_item:
                        current_cover = end[i]['src']
                data["main_cover"] = current_cover
        ############################################################################
        # Collapse runs of whitespace in the description.
        match_tabs = re.findall(r'[\s]{2,}', data["description"])
        for i in match_tabs:
            data["description"] = data["description"].replace(i, " ")
        data["description"] = data["description"].replace("\n", "").replace("\t", "")
        # Match the entry to its portal by base link substring.
        query_for_rss = "SELECT * FROM rss_portals"
        cursor.execute(query_for_rss)
        portals_list = cursor.fetchall()
        for current_portal in portals_list:
            if current_portal[2] in data["link"]:
                current_rss_news_id = current_portal[0] # CURRENT PORTAL ID
                current_rss_news_cat_id = current_portal[7]
        if len(count) == 0:
            #cursor.execute("""INSERT INTO news_rss(title, date_posted, post_text, link, portal_name_id, category_id, content_value, author) VALUES(?, ?, ?, ?, ?, ?, ?, ?)""",(data["title"], datetime.datetime(int(new_date[3]), 11, int(new_date[1]), int(time[0]), int(time[1]), int(time[2])), data["description"], data["link"], 1, 1, data["content"], data["author"]))
            query = """INSERT INTO news_rss(title, date_posted, post_text, link, portal_name_id, category_id, content_value, author, nuid) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)"""
            data_query = (data["title"],
                          date_posted,
                          data["description"],
                          data["link"],
                          current_rss_news_id,
                          current_rss_news_cat_id,
                          data["content"],
                          data["author"],
                          ''.join(choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _
                                  in range(33)))
            cursor.execute(query, data_query)
            # Read back the new row id to attach the cover and unread rows.
            query_2 = "SELECT ID FROM news_rss WHERE title=%s"
            data_query_2 = [data["title"]]
            cursor.execute(query_2, data_query_2)
            current_rss_id = cursor.fetchone()[0]
            query_3 = "INSERT INTO rss_news_covers(rss_news_id, main_cover) VALUES (%s, %s)"
            data_query_3 = (int(current_rss_id), data["main_cover"])
            cursor.execute(query_3, data_query_3)
            query_rss_portal = "UPDATE rss_portals SET cover=%s WHERE id=%s"
            query_rss_portal_data=(data["main_cover"], int(current_rss_news_id))
            cursor.execute(query_rss_portal, query_rss_portal_data)
            db.commit()
            # Mark the item unread for every subscriber of the portal.
            instance = get_amount_of_user_readers(current_rss_news_id)
            user_amount = instance[0]
            users = [i[0] for i in instance[1]]
            for i in range(len(users)):
                set_user_rss_read(users[i], current_rss_id, current_rss_news_id)
            print("Inserted from: ", url)
        else:
            print("Already exists: ", url)
        print("================END ONE MORE LOOP====================")
    db.close()
def fill_rss_table():
    """Re-link every news_rss row to its portal by base-link matching.

    For each (news row, portal row) pair, if the portal's base link is a
    substring of the news link, set the row's portal_name_id.

    Fixes: the original read ``dictionary_portals.json`` twice into
    variables that were never used (dead file I/O, removed along with the
    then-unneeded ``json`` import), and leaked the connection on failure.
    """
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    try:
        cursor = db.cursor()
        cursor.execute("SELECT * FROM rss_portals")
        list_cur = cursor.fetchall()
        cursor.execute("SELECT * FROM news_rss")
        rss = cursor.fetchall()
        end = len(rss) * len(list_cur)
        cur_iter = 0
        for news_row in rss:
            for portal_row in list_cur:
                cur_iter += 1
                # portal_row[2] is the portal base link; news_row[6] the item link.
                if str(portal_row[2]) in str(news_row[6]):
                    query = "UPDATE news_rss SET portal_name_id=%s WHERE id=%s"
                    cursor.execute(query, (str(portal_row[0]), str(news_row[0])))
                    db.commit()
                    print("Iter #", cur_iter, "Complete..........", cur_iter/end*100, "%", "When total end is ", end)
    finally:
        db.close()
def fill_rss_portals():
    """Populate rss_portals from dictionary_portals.json and register feeds.

    New portals (not yet present by name) are inserted together with their
    feed URL; portals that already exist only get their feed URL registered
    in rss_channels.
    """
    import json

    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    cursor = db.cursor()

    def add_feed(lookup_query, lookup_value, feed_link):
        # Resolve the portal's primary key, then attach its feed link.
        cursor.execute(lookup_query, [lookup_value])
        rss_id = cursor.fetchall()
        # Fix: fetchall() yields rows as tuples -- use the scalar id. The
        # original passed the whole tuple as portal_id.
        cursor.execute(
            "INSERT INTO rss_channels(portal_id, link) VALUES(%s, %s)",
            (rss_id[0][0], feed_link),
        )
        db.commit()

    # Category labels as stored in the JSON, mapped to category table ids.
    categories = {"Technology": 1, "Entertainment": 2, "Auto": 3, "Space": 4, "BIO": 5}
    with open("dictionary_portals.json", encoding="utf-8-sig") as file_list:
        file_list = json.load(file_list)
    with open("dictionary_portals.json", encoding="utf-8-sig") as file:
        portals = json.load(file)
        end = len(portals)
        print(end)
        cur_iter = 0
        # NOTE(review): this walks keys 'object-2' .. 'object-N', skipping
        # 'object-1' exactly as the original code did -- confirm intended.
        for i in range(1, len(file_list)):
            i = i + 1
            obj = file_list['object-%s' % i]
            cursor.execute("SELECT ID FROM rss_portals WHERE portal=%s", [obj["name"]])
            if len(cursor.fetchall()) == 0:
                cur_iter += 1
                cursor.execute(
                    "INSERT INTO rss_portals(portal, portal_base_link, follows, description, cover, favicon, verbose_name, category_id, puid) VALUES(%s, %s, %s, %s, %s,%s,%s,%s, %s)",
                    (obj["name"],
                     obj["base_link"],
                     0,
                     obj["description"],
                     obj["cover"],
                     obj["favicon"],
                     obj["verbose"],
                     categories[obj["category"]],
                     str(uuid.uuid4()),
                     ))
                db.commit()
                # Newly inserted portals are looked up by base link, existing
                # ones by name -- same as the original behaviour.
                add_feed("SELECT DISTINCT ON (ID) ID FROM rss_portals WHERE portal_base_link=%s",
                         obj['base_link'], obj["feed"])
                print("Iter #", cur_iter, "Complete..........", cur_iter / end * 100,
                      "%", "When total end is ", end)
            else:
                add_feed("SELECT DISTINCT ON (ID) ID FROM rss_portals WHERE portal=%s",
                         obj['name'], obj["feed"])
    db.close()
def fill_companies():
    """Insert companies from c_2.json into the companies table.

    Companies whose site is already present are skipped. Each company's
    category label is mapped to a numeric category id.
    """
    import json

    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    cursor = db.cursor()
    # JSON labels -> category ids ("bit" handled the same as "bio", as in
    # the original chain of if-tests).
    category_ids = {"technology": 1, "entertainment": 2, "auto": 3,
                    "space": 4, "bio": 5, "bit": 5}
    with open("c_2.json", encoding="utf-8-sig") as file_list:
        file_list = list(json.load(file_list))
    with open("c_2.json", encoding="utf-8-sig") as file:
        companies = json.load(file)
        end = len(companies)
        cur_iter = 0
        for key in file_list:
            company = companies[key]
            # Fix: resolve the category for every row. The original set
            # current_category_id once outside the loop, so a company with an
            # unknown label silently inherited the previous company's id.
            current_category_id = category_ids.get(company['category'], 0)
            description = ""
            cur_iter += 1
            cursor.execute("SELECT * FROM companies WHERE site=%s", [company['site']])
            check = cursor.fetchall()
            print(check)
            print(len(check))
            if len(check) == 0:
                cursor.execute(
                    "INSERT INTO companies(name, verbose_name, site, category_id, logo, description) VALUES(%s, %s, %s, %s, %s, %s)",
                    (company['name'], company['verbose'],
                     company['site'], current_category_id,
                     company['logo'], description))
                db.commit()
                print("Iter #", cur_iter, "Complete..........", cur_iter / end * 100,
                      "%", "Dealed with ", company['name'])
    db.close()
def fill_portals():
    """Seed the news_portal table with the single Appleinsider portal."""
    import json

    connection = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    cur = connection.cursor()
    cur.execute(
        "INSERT INTO news_portal(portal_name, portal_base_link) VALUES(%s,%s)",
        ("Appleinsider", "appleinsider.ru"),
    )
    connection.commit()
    connection.close()
def fill_news():
    """Duplicate every entry of news_zaharov_2.json into the news table once
    per category id (1-5), with placeholder teasers and default owner ids.

    Entries missing an expected key are printed and skipped (KeyError).
    """
    import json

    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    cursor = db.cursor()
    insert_sql = ("INSERT INTO news(news_title_english, news_title_russian, news_title_chinese, news_category_id, news_post_date, news_post_text_english, "
                  "teaser_english, teaser_russian, teaser_chinese, news_post_text_russian, news_post_text_chinese, news_portal_name_id, news_company_owner_id, news_author_id, "
                  "news_main_cover, photo, news_likes, news_dislikes, news_tags, slug) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
    with open("news_zaharov_2.json", encoding="utf-8-sig") as file_list:
        file_list = list(json.load(file_list))
    with open("news_zaharov_2.json", encoding="utf-8-sig") as file:
        news = json.load(file)
        end = len(news)
        cur_iter = 0
        for i in range(end):
            try:
                item = news[file_list[i]]
                for j in (1, 2, 3, 4, 5):
                    cur_iter += 1
                    title_english = "[eng]" + item["title"]
                    row = (title_english,
                           "[rus]" + item["title"],
                           "[ch]" + item["title"],
                           j,                        # category id
                           item["date"],
                           item["text"],             # english text
                           "[eng] Teaser",
                           "[rus] Teaser",
                           "[ch] Teaser",
                           item["text"],             # russian text (same source)
                           item["text"],             # chinese text (same source)
                           1,                        # portal: Insydia
                           1,                        # company owner: Insydia
                           1,                        # author: Saqel
                           "",                       # main cover: none
                           "",                       # photo: none
                           0,                        # likes
                           0,                        # dislikes
                           "{}",                     # tags
                           "%s-b-a-a-%s" % (j, i))   # slug, unique per copy
                    cursor.execute(insert_sql, row)
                    db.commit()
                    print("Iter #", cur_iter, "Complete..........", cur_iter / end * 100,
                          "%", "Dealed with ", title_english)
            except KeyError:
                print(news[file_list[i]])
    db.close()
def save_rss_news():
    """Export every news_rss row to save_rss.json.

    Fixes the original exporter, which produced invalid JSON (the same
    "object" key repeated for every row plus a trailing comma) and opened the
    file in append mode so re-runs corrupted it. Rows are now written as
    {"object-1": {...}, "object-2": {...}, ...} in a single json.dump, and
    the file is overwritten.
    """
    import json

    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    cursor = db.cursor()
    cursor.execute("SELECT * FROM news_rss")
    db.commit()
    data = cursor.fetchall()
    end = len(data)
    count = 0
    dump = {}
    for row in data:
        count += 1
        dump["object-%s" % count] = {
            "title": row[1],
            # timestamps are serialized as ISO-8601 strings
            "date_posted": row[2].isoformat(),
            "post_text": row[3],
            "portal_name": row[4],
            "category": row[5],
            "link": row[6],
            "author": row[7],
            "content_value": row[8],
        }
        print("Saving RSS # ", count, " success. In total - ", end, " items")
    with open("save_rss.json", "w", encoding="utf-8") as file:
        json.dump(dump, file)
    db.close()
def create_categories():
    """Insert the five fixed category names into news_category."""
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    cursor = db.cursor()
    for name in ("Technology", "Entertainment", "Auto", "Space", "BIO"):
        cursor.execute("INSERT into news_category(category_name) VALUES(%s)", [name])
        db.commit()
        print("Category ", name, "added")
    db.close()
def work_func():
    """Interactive menu: ask for a task number and run the matching job.

    Option 2 syndicates forever (never returns); an unknown number exits the
    process.
    """
    urls_of_portals = get_feed_urls()
    print("1. Fill Rss Portals\n2. Syndicate news\n3. Fill Companies\n4. Fill news\n5. Save RSS\n6.User readers\n7. Create categories\n8. Fill portals")
    choice = int(input("What can I help you? Enter number: "))
    if choice == 2:
        # Syndication loops forever, polling the feeds once per second.
        while True:
            # try:
            connect_to_db(urls=urls_of_portals)
            time.sleep(1)
            # except IndexError:
            #     pass
    actions = {
        1: fill_rss_portals,
        3: fill_companies,
        4: fill_news,
        5: save_rss_news,
        6: lambda: get_amount_of_user_readers(3),
        7: create_categories,
        8: fill_portals,
    }
    if choice in actions:
        actions[choice]()
    else:
        import sys
        print("Good bye!")
        sys.exit(0)
#fill_start_data_news()
# Entry point: keep prompting for tasks until the user enters an unknown
# number (work_func then terminates the process via sys.exit).
while True:
    work_func()
| {
"content_hash": "87e65bee93d334f40ce62820366cfc54",
"timestamp": "",
"source": "github",
"line_count": 586,
"max_line_length": 366,
"avg_line_length": 40.2901023890785,
"alnum_prop": 0.5232952138924185,
"repo_name": "eprivalov/sendec",
"id": "c17f19e6e69500cb817e38e75274361fd1b2b376",
"size": "23610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_rss/rss.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "453604"
},
{
"name": "HTML",
"bytes": "3542936"
},
{
"name": "JavaScript",
"bytes": "1202382"
},
{
"name": "Python",
"bytes": "361326"
}
],
"symlink_target": ""
} |
"""tests for the astng builder and rebuilder module"""
import unittest
import sys
from os.path import join, abspath, dirname
from logilab.common.testlib import TestCase, unittest_main
from pprint import pprint
from logilab.astng import builder, nodes, InferenceError, NotFoundError
from logilab.astng.nodes import Module
from logilab.astng.bases import YES, BUILTINS
from logilab.astng.manager import ASTNGManager
MANAGER = ASTNGManager()
from unittest_inference import get_name_node
import data
from data import module as test_module
DATA = join(dirname(abspath(__file__)), 'data')
class FromToLineNoTC(TestCase):
    """Check the source line ranges (fromlineno/tolineno/blockstart_tolineno)
    the builder computes for each statement kind, against data/format.py and
    small inline source snippets."""

    astng = builder.ASTNGBuilder().file_build(join(DATA, 'format.py'))

    def test_callfunc_lineno(self):
        stmts = self.astng.body
        # on line 4:
        #    function('aeozrijz\
        #    earzer', hop)
        discard = stmts[0]
        self.assertIsInstance(discard, nodes.Discard)
        self.assertEqual(discard.fromlineno, 4)
        self.assertEqual(discard.tolineno, 5)
        callfunc = discard.value
        self.assertIsInstance(callfunc, nodes.CallFunc)
        self.assertEqual(callfunc.fromlineno, 4)
        self.assertEqual(callfunc.tolineno, 5)
        name = callfunc.func
        self.assertIsInstance(name, nodes.Name)
        self.assertEqual(name.fromlineno, 4)
        self.assertEqual(name.tolineno, 4)
        strarg = callfunc.args[0]
        self.assertIsInstance(strarg, nodes.Const)
        self.assertEqual(strarg.fromlineno, 5) # no way for this one (is 4 actually)
        self.assertEqual(strarg.tolineno, 5)
        namearg = callfunc.args[1]
        self.assertIsInstance(namearg, nodes.Name)
        self.assertEqual(namearg.fromlineno, 5)
        self.assertEqual(namearg.tolineno, 5)
        # on line 10:
        #    fonction(1,
        #             2,
        #             3,
        #             4)
        discard = stmts[2]
        self.assertIsInstance(discard, nodes.Discard)
        self.assertEqual(discard.fromlineno, 10)
        self.assertEqual(discard.tolineno, 13)
        callfunc = discard.value
        self.assertIsInstance(callfunc, nodes.CallFunc)
        self.assertEqual(callfunc.fromlineno, 10)
        self.assertEqual(callfunc.tolineno, 13)
        name = callfunc.func
        self.assertIsInstance(name, nodes.Name)
        self.assertEqual(name.fromlineno, 10)
        self.assertEqual(name.tolineno, 10)
        for i, arg in enumerate(callfunc.args):
            self.assertIsInstance(arg, nodes.Const)
            self.assertEqual(arg.fromlineno, 10+i)
            self.assertEqual(arg.tolineno, 10+i)

    def test_function_lineno(self):
        stmts = self.astng.body
        # on line 15:
        #    def definition(a,
        #                   b,
        #                   c):
        #        return a + b + c
        function = stmts[3]
        self.assertIsInstance(function, nodes.Function)
        self.assertEqual(function.fromlineno, 15)
        self.assertEqual(function.tolineno, 18)
        return_ = function.body[0]
        self.assertIsInstance(return_, nodes.Return)
        self.assertEqual(return_.fromlineno, 18)
        self.assertEqual(return_.tolineno, 18)
        if sys.version_info < (3, 0):
            self.assertEqual(function.blockstart_tolineno, 17)
        else:
            self.skipTest('FIXME  http://bugs.python.org/issue10445 '
                          '(no line number on function args)')

    def test_decorated_function_lineno(self):
        astng = builder.ASTNGBuilder().string_build('''
@decorator
def function(
    arg):
    print (arg)
''', __name__, __file__)
        function = astng['function']
        self.assertEqual(function.fromlineno, 3) # XXX discussable, but that's what is expected by pylint right now
        self.assertEqual(function.tolineno, 5)
        self.assertEqual(function.decorators.fromlineno, 2)
        self.assertEqual(function.decorators.tolineno, 2)
        if sys.version_info < (3, 0):
            self.assertEqual(function.blockstart_tolineno, 4)
        else:
            self.skipTest('FIXME  http://bugs.python.org/issue10445 '
                          '(no line number on function args)')

    def test_class_lineno(self):
        stmts = self.astng.body
        # on line 20:
        #    class debile(dict,
        #                 object):
        #        pass
        class_ = stmts[4]
        self.assertIsInstance(class_, nodes.Class)
        self.assertEqual(class_.fromlineno, 20)
        self.assertEqual(class_.tolineno, 22)
        self.assertEqual(class_.blockstart_tolineno, 21)
        pass_ = class_.body[0]
        self.assertIsInstance(pass_, nodes.Pass)
        self.assertEqual(pass_.fromlineno, 22)
        self.assertEqual(pass_.tolineno, 22)

    def test_if_lineno(self):
        stmts = self.astng.body
        # on line 20:
        #    if aaaa: pass
        #    else:
        #        aaaa,bbbb = 1,2
        #        aaaa,bbbb = bbbb,aaaa
        if_ = stmts[5]
        self.assertIsInstance(if_, nodes.If)
        self.assertEqual(if_.fromlineno, 24)
        self.assertEqual(if_.tolineno, 27)
        self.assertEqual(if_.blockstart_tolineno, 24)
        self.assertEqual(if_.orelse[0].fromlineno, 26)
        self.assertEqual(if_.orelse[1].tolineno, 27)

    def test_for_while_lineno(self):
        # both loop kinds share the same expected line numbers
        for code in ('''
for a in range(4):
    print (a)
    break
else:
    print ("bouh")
''', '''
while a:
    print (a)
    break
else:
    print ("bouh")
''',
                     ):
            astng = builder.ASTNGBuilder().string_build(code, __name__, __file__)
            stmt = astng.body[0]
            self.assertEqual(stmt.fromlineno, 2)
            self.assertEqual(stmt.tolineno, 6)
            self.assertEqual(stmt.blockstart_tolineno, 2)
            self.assertEqual(stmt.orelse[0].fromlineno, 6) # XXX
            self.assertEqual(stmt.orelse[0].tolineno, 6)

    def test_try_except_lineno(self):
        astng = builder.ASTNGBuilder().string_build('''
try:
    print (a)
except:
    pass
else:
    print ("bouh")
''', __name__, __file__)
        try_ = astng.body[0]
        self.assertEqual(try_.fromlineno, 2)
        self.assertEqual(try_.tolineno, 7)
        self.assertEqual(try_.blockstart_tolineno, 2)
        self.assertEqual(try_.orelse[0].fromlineno, 7) # XXX
        self.assertEqual(try_.orelse[0].tolineno, 7)
        hdlr = try_.handlers[0]
        self.assertEqual(hdlr.fromlineno, 4)
        self.assertEqual(hdlr.tolineno, 5)
        self.assertEqual(hdlr.blockstart_tolineno, 4)

    def test_try_finally_lineno(self):
        astng = builder.ASTNGBuilder().string_build('''
try:
    print (a)
finally:
    print ("bouh")
''', __name__, __file__)
        try_ = astng.body[0]
        self.assertEqual(try_.fromlineno, 2)
        self.assertEqual(try_.tolineno, 5)
        self.assertEqual(try_.blockstart_tolineno, 2)
        self.assertEqual(try_.finalbody[0].fromlineno, 5) # XXX
        self.assertEqual(try_.finalbody[0].tolineno, 5)

    def test_try_finally_25_lineno(self):
        # try/except/finally in a single statement (2.5+ syntax)
        astng = builder.ASTNGBuilder().string_build('''
try:
    print (a)
except:
    pass
finally:
    print ("bouh")
''', __name__, __file__)
        try_ = astng.body[0]
        self.assertEqual(try_.fromlineno, 2)
        self.assertEqual(try_.tolineno, 7)
        self.assertEqual(try_.blockstart_tolineno, 2)
        self.assertEqual(try_.finalbody[0].fromlineno, 7) # XXX
        self.assertEqual(try_.finalbody[0].tolineno, 7)

    def test_with_lineno(self):
        astng = builder.ASTNGBuilder().string_build('''
from __future__ import with_statement
with file("/tmp/pouet") as f:
    print (f)
''', __name__, __file__)
        with_ = astng.body[1]
        self.assertEqual(with_.fromlineno, 3)
        self.assertEqual(with_.tolineno, 4)
        self.assertEqual(with_.blockstart_tolineno, 3)
class BuilderTC(TestCase):
    """Tests for ASTNGBuilder itself: building trees from source files,
    living objects (inspect_build/module_build) and source strings."""

    def setUp(self):
        self.builder = builder.ASTNGBuilder()

    def test_border_cases(self):
        """check that a file with no trailing new line is parseable"""
        self.builder.file_build(join(DATA, 'noendingnewline.py'), 'data.noendingnewline')
        self.assertRaises(builder.ASTNGBuildingException,
                          self.builder.file_build, join(DATA, 'inexistant.py'), 'whatever')

    def test_inspect_build0(self):
        """test astng tree build from a living object"""
        builtin_astng = MANAGER.astng_from_module_name(BUILTINS)
        if sys.version_info < (3, 0):
            fclass = builtin_astng['file']
            self.assertIn('name', fclass)
            self.assertIn('mode', fclass)
            self.assertIn('read', fclass)
            self.assertTrue(fclass.newstyle)
            self.assertTrue(fclass.pytype(), '%s.type' % BUILTINS)
            self.assertIsInstance(fclass['read'], nodes.Function)
            # check builtin function has args.args == None
            dclass = builtin_astng['dict']
            self.assertIsNone(dclass['has_key'].args.args)
        # just check type and object are there
        builtin_astng.getattr('type')
        objectastng = builtin_astng.getattr('object')[0]
        self.assertIsInstance(objectastng.getattr('__new__')[0], nodes.Function)
        # check open file alias
        builtin_astng.getattr('open')
        # check 'help' is there (defined dynamically by site.py)
        builtin_astng.getattr('help')
        # check property has __init__
        pclass = builtin_astng['property']
        self.assertIn('__init__', pclass)
        self.assertIsInstance(builtin_astng['None'], nodes.Const)
        self.assertIsInstance(builtin_astng['True'], nodes.Const)
        self.assertIsInstance(builtin_astng['False'], nodes.Const)
        if sys.version_info < (3, 0):
            self.assertIsInstance(builtin_astng['Exception'], nodes.From)
            self.assertIsInstance(builtin_astng['NotImplementedError'], nodes.From)
        else:
            self.assertIsInstance(builtin_astng['Exception'], nodes.Class)
            self.assertIsInstance(builtin_astng['NotImplementedError'], nodes.Class)

    def test_inspect_build1(self):
        time_astng = MANAGER.astng_from_module_name('time')
        self.assertTrue(time_astng)
        self.assertEqual(time_astng['time'].args.defaults, [])

    def test_inspect_build2(self):
        """test astng tree build from a living object"""
        try:
            from mx import DateTime
        except ImportError:
            self.skipTest('test skipped: mxDateTime is not available')
        else:
            dt_astng = self.builder.inspect_build(DateTime)
            dt_astng.getattr('DateTime')
            # this one is failing since DateTimeType.__module__ = 'builtins' !
            #dt_astng.getattr('DateTimeType')

    def test_inspect_build3(self):
        # no-crash test on a large stdlib package
        self.builder.inspect_build(unittest)

    def test_inspect_build_instance(self):
        """test astng tree build from a living object"""
        if sys.version_info >= (3, 0):
            self.skipTest('The module "exceptions" is gone in py3.x')
        import exceptions
        builtin_astng = self.builder.inspect_build(exceptions)
        fclass = builtin_astng['OSError']
        # things like OSError.strerror are now (2.5) data descriptors on the
        # class instead of entries in the __dict__ of an instance
        container = fclass
        self.assertIn('errno', container)
        self.assertIn('strerror', container)
        self.assertIn('filename', container)

    def test_inspect_build_type_object(self):
        builtin_astng = MANAGER.astng_from_module_name(BUILTINS)
        infered = list(builtin_astng.igetattr('object'))
        self.assertEqual(len(infered), 1)
        infered = infered[0]
        self.assertEqual(infered.name, 'object')
        infered.as_string() # no crash test
        infered = list(builtin_astng.igetattr('type'))
        self.assertEqual(len(infered), 1)
        infered = infered[0]
        self.assertEqual(infered.name, 'type')
        infered.as_string() # no crash test

    def test_package_name(self):
        """test base properties and method of a astng module"""
        datap = self.builder.file_build(join(DATA, '__init__.py'), 'data')
        self.assertEqual(datap.name, 'data')
        self.assertEqual(datap.package, 1)
        datap = self.builder.file_build(join(DATA, '__init__.py'), 'data.__init__')
        self.assertEqual(datap.name, 'data')
        self.assertEqual(datap.package, 1)

    def test_yield_parent(self):
        """check if we added discard nodes as yield parent (w/ compiler)"""
        data = """
def yiell():
    yield 0
    if noe:
        yield more
"""
        func = self.builder.string_build(data).body[0]
        self.assertIsInstance(func, nodes.Function)
        stmt = func.body[0]
        self.assertIsInstance(stmt, nodes.Discard)
        self.assertIsInstance(stmt.value, nodes.Yield)
        self.assertIsInstance(func.body[1].body[0], nodes.Discard)
        self.assertIsInstance(func.body[1].body[0].value, nodes.Yield)

    def test_object(self):
        obj_astng = self.builder.inspect_build(object)
        self.assertIn('__setattr__', obj_astng)

    def test_newstyle_detection(self):
        # the module-level __metaclass__ below makes following no-base
        # classes new-style
        data = '''
class A:
    "old style"

class B(A):
    "old style"

class C(object):
    "new style"

class D(C):
    "new style"

__metaclass__ = type

class E(A):
    "old style"

class F:
    "new style"
'''
        mod_astng = self.builder.string_build(data, __name__, __file__)
        self.assertFalse(mod_astng['A'].newstyle)
        self.assertFalse(mod_astng['B'].newstyle)
        self.assertTrue(mod_astng['C'].newstyle)
        self.assertTrue(mod_astng['D'].newstyle)
        self.assertFalse(mod_astng['E'].newstyle)
        self.assertTrue(mod_astng['F'].newstyle)

    def test_globals(self):
        data = '''
CSTE = 1

def update_global():
    global CSTE
    CSTE += 1

def global_no_effect():
    global CSTE2
    print (CSTE)
'''
        astng = self.builder.string_build(data, __name__, __file__)
        self.assertEqual(len(astng.getattr('CSTE')), 2)
        self.assertIsInstance(astng.getattr('CSTE')[0], nodes.AssName)
        self.assertEqual(astng.getattr('CSTE')[0].fromlineno, 2)
        self.assertEqual(astng.getattr('CSTE')[1].fromlineno, 6)
        self.assertRaises(NotFoundError,
                          astng.getattr, 'CSTE2')
        self.assertRaises(InferenceError,
                          astng['global_no_effect'].ilookup('CSTE2').__next__)

    def test_socket_build(self):
        import socket
        astng = self.builder.module_build(socket)
        # XXX just check the first one. Actually 3 objects are inferred (look at
        # the socket module) but the last one as those attributes dynamically
        # set and astng is missing this.
        for fclass in astng.igetattr('socket'):
            #print fclass.root().name, fclass.name, fclass.lineno
            self.assertIn('connect', fclass)
            self.assertIn('send', fclass)
            self.assertIn('close', fclass)
            break

    def test_gen_expr_var_scope(self):
        data = 'l = list(n for n in range(10))\n'
        astng = self.builder.string_build(data, __name__, __file__)
        # n unavailable outside gen expr scope
        self.assertNotIn('n', astng)
        # test n is inferable anyway
        n = get_name_node(astng, 'n')
        self.assertIsNot(n.scope(), astng)
        self.assertEqual([i.__class__ for i in n.infer()],
                         [YES.__class__])
class FileBuildTC(TestCase):
    """Check the tree built from data/module.py: module, function, class and
    method properties, locals and instance attributes."""

    module = builder.ASTNGBuilder().file_build(join(DATA, 'module.py'), 'data.module')

    def test_module_base_props(self):
        """test base properties and method of a astng module"""
        module = self.module
        self.assertEqual(module.name, 'data.module')
        self.assertEqual(module.doc, "test module for astng\n")
        self.assertEqual(module.fromlineno, 0)
        self.assertIsNone(module.parent)
        self.assertEqual(module.frame(), module)
        self.assertEqual(module.root(), module)
        self.assertEqual(module.file, join(abspath(data.__path__[0]), 'module.py'))
        self.assertEqual(module.pure_python, 1)
        self.assertEqual(module.package, 0)
        self.assertFalse(module.is_statement)
        self.assertEqual(module.statement(), module)
        self.assertEqual(module.statement(), module)

    def test_module_locals(self):
        """test the 'locals' dictionary of a astng module"""
        module = self.module
        _locals = module.locals
        self.assertIs(_locals, module.globals)
        keys = sorted(_locals.keys())
        should = ['MY_DICT', 'YO', 'YOUPI',
                  '__revision__', 'global_access', 'modutils', 'four_args',
                  'os', 'redirect', 'spawn', 'LocalsVisitor', 'ASTWalker']
        should.sort()
        self.assertEqual(keys, should)

    def test_function_base_props(self):
        """test base properties and method of a astng function"""
        module = self.module
        function = module['global_access']
        self.assertEqual(function.name, 'global_access')
        self.assertEqual(function.doc, 'function test')
        self.assertEqual(function.fromlineno, 11)
        self.assertTrue(function.parent)
        self.assertEqual(function.frame(), function)
        self.assertEqual(function.parent.frame(), module)
        self.assertEqual(function.root(), module)
        self.assertEqual([n.name for n in function.args.args], ['key', 'val'])
        self.assertEqual(function.type, 'function')

    def test_function_locals(self):
        """test the 'locals' dictionary of a astng function"""
        _locals = self.module['global_access'].locals
        self.assertEqual(len(_locals), 4)
        keys = sorted(_locals.keys())
        self.assertEqual(keys, ['i', 'key', 'local', 'val'])

    def test_class_base_props(self):
        """test base properties and method of a astng class"""
        module = self.module
        klass = module['YO']
        self.assertEqual(klass.name, 'YO')
        self.assertEqual(klass.doc, 'hehe')
        self.assertEqual(klass.fromlineno, 25)
        self.assertTrue(klass.parent)
        self.assertEqual(klass.frame(), klass)
        self.assertEqual(klass.parent.frame(), module)
        self.assertEqual(klass.root(), module)
        self.assertEqual(klass.basenames, [])
        self.assertEqual(klass.newstyle, False)

    def test_class_locals(self):
        """test the 'locals' dictionary of a astng class"""
        module = self.module
        klass1 = module['YO']
        locals1 = klass1.locals
        keys = sorted(locals1.keys())
        self.assertEqual(keys, ['__init__', 'a'])
        klass2 = module['YOUPI']
        locals2 = klass2.locals
        keys = list(locals2.keys())
        keys.sort()
        self.assertEqual(keys, ['__init__', 'class_attr', 'class_method',
                                'method', 'static_method'])

    def test_class_instance_attrs(self):
        module = self.module
        klass1 = module['YO']
        klass2 = module['YOUPI']
        self.assertEqual(list(klass1.instance_attrs.keys()), ['yo'])
        self.assertEqual(list(klass2.instance_attrs.keys()), ['member'])

    def test_class_basenames(self):
        module = self.module
        klass1 = module['YO']
        klass2 = module['YOUPI']
        self.assertEqual(klass1.basenames, [])
        self.assertEqual(klass2.basenames, ['YO'])

    def test_method_base_props(self):
        """test base properties and method of a astng method"""
        klass2 = self.module['YOUPI']
        # "normal" method
        method = klass2['method']
        self.assertEqual(method.name, 'method')
        self.assertEqual([n.name for n in method.args.args], ['self'])
        self.assertEqual(method.doc, 'method test')
        self.assertEqual(method.fromlineno, 47)
        self.assertEqual(method.type, 'method')
        # class method
        method = klass2['class_method']
        self.assertEqual([n.name for n in method.args.args], ['cls'])
        self.assertEqual(method.type, 'classmethod')
        # static method
        method = klass2['static_method']
        self.assertEqual(method.args.args, [])
        self.assertEqual(method.type, 'staticmethod')

    def test_method_locals(self):
        """test the 'locals' dictionary of a astng method"""
        method = self.module['YOUPI']['method']
        _locals = method.locals
        keys = sorted(_locals)
        if sys.version_info < (3, 0):
            self.assertEqual(len(_locals), 5)
            self.assertEqual(keys, ['a', 'autre', 'b', 'local', 'self'])
        else:# ListComp variables are no more accessible outside
            self.assertEqual(len(_locals), 3)
            self.assertEqual(keys, ['autre', 'local', 'self'])
class ModuleBuildTC(FileBuildTC):
    """Re-run every FileBuildTC assertion on a tree built from the imported
    module object (module_build) instead of its source file."""

    def setUp(self):
        abuilder = builder.ASTNGBuilder()
        self.module = abuilder.module_build(test_module)
class MoreTC(TestCase):
    """Assorted builder regression tests on small source strings."""

    def setUp(self):
        self.builder = builder.ASTNGBuilder()

    def test_infered_build(self):
        # attributes assigned on a class after its definition must show up
        # in the class locals
        code = '''class A: pass
A.type = "class"

def A_ass_type(self):
    print (self)

A.ass_type = A_ass_type
'''
        astng = self.builder.string_build(code)
        lclass = list(astng.igetattr('A'))
        self.assertEqual(len(lclass), 1)
        lclass = lclass[0]
        self.assertIn('ass_type', lclass.locals)
        self.assertIn('type', lclass.locals)

    def test_augassign_attr(self):
        astng = self.builder.string_build("""class Counter:
    v = 0
    def inc(self):
        self.v += 1
""", __name__, __file__)
        # Check self.v += 1 generate AugAssign(AssAttr(...)), not AugAssign(GetAttr(AssName...))

    def test_dumb_module(self):
        # no-crash test on a module containing a single expression
        astng = self.builder.string_build("pouet")

    def test_infered_dont_pollute(self):
        code = '''
def func(a=None):
    a.custom_attr = 0

def func2(a={}):
    a.custom_attr = 0
'''
        astng = self.builder.string_build(code)
        # inference on default argument values must not attach attributes to
        # the shared constant nodes
        nonetype = nodes.const_factory(None)
        self.assertNotIn('custom_attr', nonetype.locals)
        self.assertNotIn('custom_attr', nonetype.instance_attrs)
        nonetype = nodes.const_factory({})
        self.assertNotIn('custom_attr', nonetype.locals)
        self.assertNotIn('custom_attr', nonetype.instance_attrs)

    def test_asstuple(self):
        code = 'a, b = range(2)'
        astng = self.builder.string_build(code)
        self.assertIn('b', astng.locals)
        code = '''
def visit_if(self, node):
    node.test, body = node.tests[0]
'''
        astng = self.builder.string_build(code)
        self.assertIn('body', astng['visit_if'].locals)

    def test_build_constants(self):
        '''test expected values of constants after rebuilding'''
        code = '''
def func():
    return None
    return
    return 'None'
'''
        astng = self.builder.string_build(code)
        none, nothing, chain = [ret.value for ret in astng.body[0].body]
        self.assertIsInstance(none, nodes.Const)
        self.assertIsNone(none.value)
        self.assertIsNone(nothing)
        self.assertIsInstance(chain, nodes.Const)
        self.assertEqual(chain.value, 'None')

    def test_lgc_classproperty(self):
        '''test expected values of constants after rebuilding'''
        code = '''
from logilab.common.decorators import classproperty

class A(object):
    @classproperty
    def hop(cls):
        return None
'''
        astng = self.builder.string_build(code)
        self.assertEqual(astng['A']['hop'].type, 'classmethod')
if sys.version_info < (3, 0):
    # _guess_encoding only exists on the python 2 builder
    guess_encoding = builder._guess_encoding

    class TestGuessEncoding(TestCase):
        """Tests for the source-encoding sniffer (python 2 only)."""

        def testEmacs(self):
            e = guess_encoding('# -*- coding: UTF-8 -*-')
            self.assertEqual(e, 'UTF-8')
            e = guess_encoding('# -*- coding:UTF-8 -*-')
            self.assertEqual(e, 'UTF-8')
            e = guess_encoding('''
### -*- coding: ISO-8859-1 -*-
''')
            self.assertEqual(e, 'ISO-8859-1')
            # NOTE(review): presumably this literal differs from the previous
            # one by extra leading blank lines pushing the cookie out of the
            # sniffed header -- the flattened source lost the distinguishing
            # whitespace; confirm against upstream.
            e = guess_encoding('''


### -*- coding: ISO-8859-1 -*-
''')
            self.assertIsNone(e)

        def testVim(self):
            e = guess_encoding('# vim:fileencoding=UTF-8')
            self.assertEqual(e, 'UTF-8')
            e = guess_encoding('''
### vim:fileencoding=ISO-8859-1
''')
            self.assertEqual(e, 'ISO-8859-1')
            # the space after '=' must make the declaration unrecognized
            e = guess_encoding('''
### vim:fileencoding= ISO-8859-1
''')
            self.assertIsNone(e)

        def test_wrong_coding(self):
            # setting "coding" variable
            e = guess_encoding("coding = UTF-8")
            self.assertIsNone(e)
            # setting a dictionary entry
            e = guess_encoding("coding:UTF-8")
            self.assertIsNone(e)
            # setting an argument
            e = guess_encoding("def do_something(a_word_with_coding=None):")
            self.assertIsNone(e)

        def testUTF8(self):
            # a UTF-8 BOM at the very start of the data
            e = guess_encoding('\xef\xbb\xbf any UTF-8 data')
            self.assertEqual(e, 'UTF-8')
            # the BOM anywhere else must not be recognized
            e = guess_encoding(' any UTF-8 data \xef\xbb\xbf')
            self.assertIsNone(e)
# Run the whole test suite when executed as a script.
if __name__ == '__main__':
    unittest_main()
| {
"content_hash": "87a819cd533b83a1bd6a9a6d148815cc",
"timestamp": "",
"source": "github",
"line_count": 712,
"max_line_length": 115,
"avg_line_length": 35.57303370786517,
"alnum_prop": 0.6027321541377132,
"repo_name": "tlksio/tlksio",
"id": "d2b17a654c2aed7d5cf77dd854f449b766475091",
"size": "26164",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "env/lib/python3.4/site-packages/logilab/astng/test/unittest_builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1668"
},
{
"name": "HTML",
"bytes": "65037"
},
{
"name": "JavaScript",
"bytes": "450"
},
{
"name": "Makefile",
"bytes": "1075"
},
{
"name": "Python",
"bytes": "42727"
}
],
"symlink_target": ""
} |
import numpy as np
import nibabel as nib
def save2nifti(fpath, data, header=None):
    """
    Save to a nifti file.

    Parameters
    ----------
    fpath : string
        The file path to output
    data : numpy array
    header : Nifti2Header
    """
    # No affine is supplied (None); the header, if given, carries the metadata.
    image = nib.Nifti2Image(data, None, header=header)
    nib.nifti2.save(image, fpath)
class GiftiReader(object):
    """Reader for GIFTI files.

    Surface geometry files (``*.surf.gii``) expose ``coords`` and ``faces``;
    any other GIFTI file exposes ``scalar_data`` instead. The non-applicable
    properties return None.
    """

    def __init__(self, file_path):
        self._fpath = file_path
        self.full_data = nib.load(file_path)

    @property
    def coords(self):
        # Vertex coordinates; only present in surface geometry files.
        if not self._fpath.endswith('.surf.gii'):
            return None
        return self.full_data.get_arrays_from_intent('NIFTI_INTENT_POINTSET')[0].data

    @property
    def faces(self):
        # Triangle indices; only present in surface geometry files.
        if not self._fpath.endswith('.surf.gii'):
            return None
        return self.full_data.get_arrays_from_intent('NIFTI_INTENT_TRIANGLE')[0].data

    @property
    def scalar_data(self):
        # Per-vertex values from the first data array of non-geometry files.
        if self._fpath.endswith('.surf.gii'):
            return None
        return self.full_data.darrays[0].data
class CiftiReader(object):
    """Wrapper around a CIFTI-2 file exposing its header structures and data."""

    def __init__(self, file_path):
        # Load the CIFTI-2 image once; every accessor reads from it.
        self.full_data = nib.cifti2.cifti2.load(file_path)
    @property
    def header(self):
        # Header of the loaded CIFTI-2 image.
        return self.full_data.header
    @property
    def brain_structures(self):
        # Brain-structure name of each brain model, in index-map order.
        return [_.brain_structure for _ in self.header.get_index_map(1).brain_models]
    @property
    def volume(self):
        # Volume element of index map 1.
        return self.header.get_index_map(1).volume
    def brain_models(self, structures=None):
        """
        get brain model from cifti file

        Parameter:
        ---------
        structures: list of str
            Each structure corresponds to a brain model.
            If None, get all brain models.

        Return:
        ------
        brain_models: list of Cifti2BrainModel
        """
        brain_models = list(self.header.get_index_map(1).brain_models)
        if structures is not None:
            if not isinstance(structures, list):
                raise TypeError("The parameter 'structures' must be a list")
            # keep only the models whose structure name was requested,
            # in the order given by the caller
            brain_models = [brain_models[self.brain_structures.index(s)] for s in structures]
        return brain_models
def map_names(self, rows=None):
"""
get map names
Parameters:
----------
rows: sequence of integer
Specify which map names should be got.
If None, get all map names
Return:
------
map_names: list of str
"""
named_maps = list(self.header.get_index_map(0).named_maps)
if named_maps:
if rows is None:
map_names = [named_map.map_name for named_map in named_maps]
else:
map_names = [named_maps[i].map_name for i in rows]
else:
map_names = []
return map_names
def label_tables(self, rows=None):
"""
get label tables
Parameters:
----------
rows: sequence of integer
Specify which label tables should be got.
If None, get all label tables.
Return:
------
label_tables: list of Cifti2LableTable
"""
named_maps = list(self.header.get_index_map(0).named_maps)
if named_maps:
if rows is None:
label_tables = [named_map.label_table for named_map in named_maps]
else:
label_tables = [named_maps[i].label_table for i in rows]
else:
label_tables = []
return label_tables
def get_data(self, structure=None, zeroize=False):
"""
get data from cifti file
Parameters:
----------
structure: str
One structure corresponds to one brain model.
specify which brain structure's data should be extracted
If None, get all structures, meanwhile ignore parameter 'zeroize'.
zeroize: bool
If true, get data after filling zeros for the missing vertices.
Return:
------
data: numpy array
If zeroize is False, the data is (maps, values).
If zeroize is True and brain model type is SURFACE, the data is (maps, values) with filled zeros.
If zeroize is True and brain model type is VOXELS, the data is (maps, volumes) with filled zeros.
"""
_data = np.array(self.full_data.get_data())
if structure is not None:
brain_model = self.brain_models([structure])[0]
offset = brain_model.index_offset
count = brain_model.index_count
if zeroize:
if brain_model.model_type == 'CIFTI_MODEL_TYPE_SURFACE':
n_vtx = brain_model.surface_number_of_vertices
data = np.zeros((_data.shape[0], n_vtx), _data.dtype)
data[:, list(brain_model.vertex_indices)] = _data[:, offset:offset+count]
elif brain_model.model_type == 'CIFTI_MODEL_TYPE_VOXELS':
# This function have not been verified visually.
vol_shape = self.header.get_index_map(1).volume.volume_dimensions
data_shape = (_data.shape[0],) + vol_shape
data_ijk = np.array(list(brain_model.voxel_indices_ijk))
data = np.zeros(data_shape, _data.dtype)
data[:, data_ijk[:, 0], data_ijk[:, 1], data_ijk[:, 2]] = _data[:, offset:offset+count]
else:
raise RuntimeError("The function can't support the brain model: {}".format(brain_model.model_type))
else:
data = _data[:, offset:offset+count]
else:
data = _data
return data
| {
"content_hash": "7213ba507fc68752cdfa3041ec544c50",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 119,
"avg_line_length": 32.324175824175825,
"alnum_prop": 0.554479007309196,
"repo_name": "BNUCNL/FreeROI",
"id": "963e131935192ee49efb4f003b92a2bb29bdd517",
"size": "5883",
"binary": false,
"copies": "2",
"ref": "refs/heads/surface_lab",
"path": "froi/io/io.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "827149"
},
{
"name": "Shell",
"bytes": "302"
}
],
"symlink_target": ""
} |
"""
Base utilities to build API operation managers and objects on top of.
"""
# E1102: %s is not callable
# pylint: disable=E1102
import abc
import copy
from http import client as http_client
from urllib import parse as urlparse
from oslo_utils import strutils
from ironicclient.common.apiclient import exceptions
from ironicclient.common.i18n import _
def getid(obj):
    """Return id if argument is a Resource.

    Abstracts the common pattern of allowing both an object or an
    object's ID (UUID) as a parameter when dealing with relationships.
    """
    # A truthy uuid wins; a missing or falsy uuid falls through to id.
    uuid = getattr(obj, 'uuid', None)
    if uuid:
        return uuid
    # Plain values (already an ID) are returned unchanged.
    return getattr(obj, 'id', obj)
# TODO(aababilov): call run_hooks() in HookableMixin's child classes
class HookableMixin(object):
    """Mixin so classes can register and run hooks."""

    # Shared registry mapping hook type -> list of callables.
    _hooks_map = {}

    @classmethod
    def add_hook(cls, hook_type, hook_func):
        """Add a new hook of specified type.

        :param cls: class that registers hooks
        :param hook_type: hook type, e.g., '__pre_parse_args__'
        :param hook_func: hook function
        """
        cls._hooks_map.setdefault(hook_type, []).append(hook_func)

    @classmethod
    def run_hooks(cls, hook_type, *args, **kwargs):
        """Run all hooks of specified type.

        :param cls: class that registers hooks
        :param hook_type: hook type, e.g., '__pre_parse_args__'
        :param args: args to be passed to every hook function
        :param kwargs: kwargs to be passed to every hook function
        """
        for hook_func in cls._hooks_map.get(hook_type, []):
            hook_func(*args, **kwargs)
class BaseManager(HookableMixin):
    """Basic manager type providing common operations.

    Managers interact with a particular type of API (servers, flavors,
    images, etc.) and provide CRUD operations for them.
    """

    # Class used to wrap raw API response bodies; set by subclasses.
    resource_class = None

    def __init__(self, client):
        """Initializes BaseManager with `client`.

        :param client: instance of BaseClient descendant for HTTP requests
        """
        super(BaseManager, self).__init__()
        self.client = client

    def _list(self, url, response_key=None, obj_class=None, json=None):
        """List the collection.

        :param url: a partial URL, e.g., '/servers'
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'. If response_key is None - all response body
            will be used.
        :param obj_class: class for constructing the returned objects
            (self.resource_class will be used by default)
        :param json: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        """
        response = self.client.post(url, json=json) if json else self.client.get(url)
        body = response.json()
        if obj_class is None:
            obj_class = self.resource_class
        data = body if response_key is None else body[response_key]
        # NOTE(ja): keystone returns values as list as {'values': [ ... ]}
        # unlike other services which just return the list...
        try:
            data = data['values']
        except (KeyError, TypeError):
            pass
        return [obj_class(self, item, loaded=True) for item in data if item]

    def _get(self, url, response_key=None):
        """Get an object from collection.

        :param url: a partial URL, e.g., '/servers'
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None - all response body
            will be used.
        """
        body = self.client.get(url).json()
        data = body if response_key is None else body[response_key]
        return self.resource_class(self, data, loaded=True)

    def _head(self, url):
        """Retrieve request headers for an object.

        :param url: a partial URL, e.g., '/servers'
        :return: True when the server answered 204 No Content
        """
        return self.client.head(url).status_code == http_client.NO_CONTENT

    def _post(self, url, json, response_key=None, return_raw=False):
        """Create an object.

        :param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None - all response body
            will be used.
        :param return_raw: flag to force returning raw JSON instead of
            Python object of self.resource_class
        """
        body = self.client.post(url, json=json).json()
        data = body if response_key is None else body[response_key]
        if return_raw:
            return data
        return self.resource_class(self, data)

    def _put(self, url, json=None, response_key=None):
        """Update an object with PUT method.

        :param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in PUT
            request
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'. If response_key is None - all response body
            will be used.
        """
        resp = self.client.put(url, json=json)
        # PUT requests may not return a body; in that case return None.
        if not resp.content:
            return None
        body = resp.json()
        data = body if response_key is None else body[response_key]
        return self.resource_class(self, data)

    def _patch(self, url, json=None, response_key=None):
        """Update an object with PATCH method.

        :param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in PATCH
            request
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'. If response_key is None - all response body
            will be used.
        """
        body = self.client.patch(url, json=json).json()
        data = body if response_key is None else body[response_key]
        return self.resource_class(self, data)

    def _delete(self, url):
        """Delete an object.

        :param url: a partial URL, e.g., '/servers/my-server'
        """
        return self.client.delete(url)
class ManagerWithFind(BaseManager, metaclass=abc.ABCMeta):
    """Manager with additional `find()`/`findall()` methods."""

    @abc.abstractmethod
    def list(self):
        pass

    def find(self, **kwargs):
        """Find a single item with attributes matching ``**kwargs``.

        This isn't very efficient: it loads the entire list then filters on
        the Python side.

        :raises exceptions.NotFound: when nothing matches
        :raises exceptions.NoUniqueMatch: when more than one item matches
        """
        matches = self.findall(**kwargs)
        num_matches = len(matches)
        if num_matches == 0:
            msg = _("No %(name)s matching %(args)s.") % {
                'name': self.resource_class.__name__,
                'args': kwargs
            }
            raise exceptions.NotFound(msg)
        if num_matches > 1:
            raise exceptions.NoUniqueMatch()
        return matches[0]

    def findall(self, **kwargs):
        """Find all items with attributes matching ``**kwargs``.

        This isn't very efficient: it loads the entire list then filters on
        the Python side.
        """
        matches = []
        wanted = kwargs.items()
        for candidate in self.list():
            try:
                if all(getattr(candidate, attr) == value
                       for (attr, value) in wanted):
                    matches.append(candidate)
            except AttributeError:
                # Items missing one of the queried attributes never match.
                continue
        return matches
class CrudManager(BaseManager):
    """Base manager class for manipulating entities.

    Children of this class are expected to define a `collection_key` and
    `key`.

    - `collection_key`: Usually a plural noun by convention (e.g. `entities`);
      used to refer collections in both URL's (e.g. `/v3/entities`) and JSON
      objects containing a list of member resources (e.g. `{'entities': [{},
      {}, {}]}`).
    - `key`: Usually a singular noun by convention (e.g. `entity`); used to
      refer to an individual member of the collection.
    """
    collection_key = None
    key = None

    def build_url(self, base_url=None, **kwargs):
        """Builds a resource URL for the given kwargs.

        Given an example collection where `collection_key = 'entities'` and
        `key = 'entity'`, the following URL's could be generated.

        By default, the URL will represent a collection of entities, e.g.::

            /entities

        If kwargs contains an `entity_id`, then the URL will represent a
        specific member, e.g.::

            /entities/{entity_id}

        :param base_url: if provided, the generated URL will be appended to it
        """
        url = base_url if base_url is not None else ''
        url += '/%s' % self.collection_key
        # do we have a specific entity?
        entity_id = kwargs.get('%s_id' % self.key)
        if entity_id is not None:
            url += '/%s' % entity_id
        return url

    def _collection_url_with_query(self, base_url=None, **kwargs):
        """Return the collection URL with `kwargs` urlencoded as the query.

        Shared by :meth:`list` and :meth:`find`, which previously duplicated
        this string construction.
        """
        return '%(base_url)s%(query)s' % {
            'base_url': self.build_url(base_url=base_url, **kwargs),
            'query': '?%s' % urlparse.urlencode(kwargs) if kwargs else '',
        }

    def _filter_kwargs(self, kwargs):
        """Drop null values and handle ids."""
        for key, ref in kwargs.copy().items():
            if ref is None:
                kwargs.pop(key)
            else:
                if isinstance(ref, Resource):
                    # Replace a Resource object with its '<key>_id' value.
                    kwargs.pop(key)
                    kwargs['%s_id' % key] = getid(ref)
        return kwargs

    def create(self, **kwargs):
        """Create a new entity built from `kwargs`."""
        kwargs = self._filter_kwargs(kwargs)
        return self._post(
            self.build_url(**kwargs),
            {self.key: kwargs},
            self.key)

    def get(self, **kwargs):
        """Fetch a single entity identified by `kwargs`."""
        kwargs = self._filter_kwargs(kwargs)
        return self._get(
            self.build_url(**kwargs),
            self.key)

    def head(self, **kwargs):
        """Check an entity's existence with a HEAD request."""
        kwargs = self._filter_kwargs(kwargs)
        return self._head(self.build_url(**kwargs))

    def list(self, base_url=None, **kwargs):
        """List the collection.

        :param base_url: if provided, the generated URL will be appended to it
        """
        kwargs = self._filter_kwargs(kwargs)
        return self._list(
            self._collection_url_with_query(base_url=base_url, **kwargs),
            self.collection_key)

    def put(self, base_url=None, **kwargs):
        """Update an element.

        :param base_url: if provided, the generated URL will be appended to it
        """
        kwargs = self._filter_kwargs(kwargs)
        return self._put(self.build_url(base_url=base_url, **kwargs))

    def update(self, **kwargs):
        """Update an entity with a PATCH request built from `kwargs`."""
        kwargs = self._filter_kwargs(kwargs)
        params = kwargs.copy()
        # The id selects the URL; it must not appear in the request body.
        params.pop('%s_id' % self.key)
        return self._patch(
            self.build_url(**kwargs),
            {self.key: params},
            self.key)

    def delete(self, **kwargs):
        """Delete the entity identified by `kwargs`."""
        kwargs = self._filter_kwargs(kwargs)
        return self._delete(
            self.build_url(**kwargs))

    def find(self, base_url=None, **kwargs):
        """Find a single item with attributes matching ``**kwargs``.

        :param base_url: if provided, the generated URL will be appended to it
        :raises exceptions.NotFound: when nothing matches
        :raises exceptions.NoUniqueMatch: when more than one item matches
        """
        kwargs = self._filter_kwargs(kwargs)
        rl = self._list(
            self._collection_url_with_query(base_url=base_url, **kwargs),
            self.collection_key)
        num = len(rl)
        if num == 0:
            msg = _("No %(name)s matching %(args)s.") % {
                'name': self.resource_class.__name__,
                'args': kwargs
            }
            raise exceptions.NotFound(msg)
        elif num > 1:
            raise exceptions.NoUniqueMatch
        else:
            return rl[0]
class Extension(HookableMixin):
    """Extension descriptor."""

    SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
    manager_class = None

    def __init__(self, name, module):
        super(Extension, self).__init__()
        self.name = name
        self.module = module
        self._parse_extension_module()

    def _parse_extension_module(self):
        """Register the module's hooks and remember its manager class."""
        self.manager_class = None
        for attr_name, attr_value in self.module.__dict__.items():
            if attr_name in self.SUPPORTED_HOOKS:
                self.add_hook(attr_name, attr_value)
                continue
            try:
                is_manager = issubclass(attr_value, BaseManager)
            except TypeError:
                # Non-class attributes cannot be manager classes.
                continue
            if is_manager:
                self.manager_class = attr_value

    def __repr__(self):
        return "<Extension '%s'>" % self.name
class Resource(object):
    """Base class for OpenStack resources (tenant, user, etc.).

    This is pretty much just a bag for attributes.
    """

    HUMAN_ID = False
    NAME_ATTR = 'name'

    def __init__(self, manager, info, loaded=False):
        """Populate and bind to a manager.

        :param manager: BaseManager object
        :param info: dictionary representing resource attributes
        :param loaded: prevent lazy-loading if set to True
        """
        self.manager = manager
        self._info = info
        self._add_details(info)
        self._loaded = loaded

    def __repr__(self):
        public = sorted(k for k in self.__dict__
                        if not k.startswith('_') and k != 'manager')
        described = ", ".join("%s=%s" % (k, getattr(self, k)) for k in public)
        return "<%s %s>" % (self.__class__.__name__, described)

    @property
    def human_id(self):
        """Human-readable ID which can be used for bash completion."""
        if not self.HUMAN_ID:
            return None
        name = getattr(self, self.NAME_ATTR, None)
        if name is None:
            return None
        return strutils.to_slug(name)

    def _add_details(self, info):
        # Mirror each info item both as an instance attribute and in _info;
        # names shadowed by read-only class attributes are skipped.
        for (key, value) in info.items():
            try:
                setattr(self, key, value)
                self._info[key] = value
            except AttributeError:
                # In this case we already defined the attribute on the class
                pass

    def __getattr__(self, k):
        if k in self.__dict__:
            return self.__dict__[k]
        # NOTE(bcwaldon): disallow lazy-loading if already loaded once
        if self.is_loaded():
            raise AttributeError(k)
        self.get()
        return self.__getattr__(k)

    def get(self):
        """Support for lazy loading details.

        Some clients, such as novaclient have the option to lazy load the
        details, details which can be loaded with this function.
        """
        # set_loaded() first ... so if we have to bail, we know we tried.
        self.set_loaded(True)
        if not hasattr(self.manager, 'get'):
            return
        new = self.manager.get(self.id)
        if new:
            self._add_details(new._info)
            self._add_details(
                {'x_request_id': self.manager.client.last_request_id})

    def __eq__(self, other):
        if not isinstance(other, Resource):
            return NotImplemented
        # two resources of different types are not equal
        if not isinstance(other, self.__class__):
            return False
        return self._info == other._info

    def is_loaded(self):
        return self._loaded

    def set_loaded(self, val):
        self._loaded = val

    def to_dict(self):
        # Deep copy so callers cannot mutate our internal state.
        return copy.deepcopy(self._info)
| {
"content_hash": "3d58a262d29bb2765d57b2168b75d064",
"timestamp": "",
"source": "github",
"line_count": 497,
"max_line_length": 79,
"avg_line_length": 32.57142857142857,
"alnum_prop": 0.5673338275265629,
"repo_name": "openstack/python-ironicclient",
"id": "a1a13999ae8610ac8be030c8665ee2252e581284",
"size": "16928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironicclient/common/apiclient/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1240609"
},
{
"name": "Shell",
"bytes": "218"
}
],
"symlink_target": ""
} |
"""A QR and BWM Find SCU application.
For sending Query/Retrieve (QR) and Basic Worklist Modality (BWM) C-FIND
requests to a QR/BWM - Find SCP.
"""
import argparse
import sys
from pydicom.dataset import Dataset
from pydicom.uid import generate_uid
from pynetdicom import (
AE,
BasicWorklistManagementPresentationContexts,
QueryRetrievePresentationContexts,
PYNETDICOM_IMPLEMENTATION_UID,
PYNETDICOM_IMPLEMENTATION_VERSION,
PYNETDICOM_UID_PREFIX,
)
from pynetdicom.apps.common import create_dataset, setup_logging
from pynetdicom._globals import DEFAULT_MAX_LENGTH
from pynetdicom.pdu_primitives import SOPClassExtendedNegotiation
from pynetdicom.sop_class import (
ModalityWorklistInformationFind,
PatientRootQueryRetrieveInformationModelFind,
StudyRootQueryRetrieveInformationModelFind,
PatientStudyOnlyQueryRetrieveInformationModelFind,
)
__version__ = "0.2.0"
def _setup_argparser():
    """Set up and parse the command line arguments.

    :return: the parsed ``argparse`` namespace. ``parser.error`` exits the
        process when neither ``-f`` nor ``-k`` is supplied (unless
        ``--version`` was requested).
    """
    # Description
    parser = argparse.ArgumentParser(
        description=(
            "The findscu application implements a Service Class User "
            "(SCU) for the Query/Retrieve (QR) and Basic Worklist Management "
            "(BWM) Service Classes. findscu only supports query functionality "
            "using the C-FIND message. It sends query keys to an SCP and "
            "waits for a response. The application can be used to test SCPs "
            "of the QR and BWM Service Classes."
        ),
        usage="findscu [options] addr port",
    )
    # Parameters
    req_opts = parser.add_argument_group("Parameters")
    req_opts.add_argument(
        "addr", help="TCP/IP address or hostname of DICOM peer", type=str
    )
    req_opts.add_argument("port", help="TCP/IP port number of peer", type=int)
    # General Options
    gen_opts = parser.add_argument_group("General Options")
    gen_opts.add_argument(
        "--version", help="print version information and exit", action="store_true"
    )
    # The verbosity flags are mutually exclusive and share dest='log_type'.
    output = gen_opts.add_mutually_exclusive_group()
    output.add_argument(
        "-q",
        "--quiet",
        help="quiet mode, print no warnings and errors",
        action="store_const",
        dest="log_type",
        const="q",
    )
    output.add_argument(
        "-v",
        "--verbose",
        help="verbose mode, print processing details",
        action="store_const",
        dest="log_type",
        const="v",
    )
    output.add_argument(
        "-d",
        "--debug",
        help="debug mode, print debug information",
        action="store_const",
        dest="log_type",
        const="d",
    )
    gen_opts.add_argument(
        "-ll",
        "--log-level",
        metavar="[l]",
        help=("use level l for the logger (fatal, error, warn, info, debug, trace)"),
        type=str,
        choices=["fatal", "error", "warn", "info", "debug", "trace"],
    )
    # Default to verbose output when no explicit verbosity flag is given.
    parser.set_defaults(log_type="v")
    # Network Options
    net_opts = parser.add_argument_group("Network Options")
    net_opts.add_argument(
        "-aet",
        "--calling-aet",
        metavar="[a]etitle",
        help="set my calling AE title (default: FINDSCU)",
        type=str,
        default="FINDSCU",
    )
    net_opts.add_argument(
        "-aec",
        "--called-aet",
        metavar="[a]etitle",
        help="set called AE title of peer (default: ANY-SCP)",
        type=str,
        default="ANY-SCP",
    )
    net_opts.add_argument(
        "-ta",
        "--acse-timeout",
        metavar="[s]econds",
        help="timeout for ACSE messages (default: 30 s)",
        type=float,
        default=30,
    )
    net_opts.add_argument(
        "-td",
        "--dimse-timeout",
        metavar="[s]econds",
        help="timeout for DIMSE messages (default: 30 s)",
        type=float,
        default=30,
    )
    net_opts.add_argument(
        "-tn",
        "--network-timeout",
        metavar="[s]econds",
        help="timeout for the network (default: 30 s)",
        type=float,
        default=30,
    )
    net_opts.add_argument(
        "-pdu",
        "--max-pdu",
        metavar="[n]umber of bytes",
        help=(
            f"set max receive pdu to n bytes (0 for unlimited, "
            f"default: {DEFAULT_MAX_LENGTH})"
        ),
        type=int,
        default=DEFAULT_MAX_LENGTH,
    )
    # Query information model choices
    # Exactly one information model may be selected; Patient Root is the
    # fallback when none is given (see main()).
    qr_group = parser.add_argument_group("Query Information Model Options")
    qr_model = qr_group.add_mutually_exclusive_group()
    qr_model.add_argument(
        "-P",
        "--patient",
        help="use patient root information model (default)",
        action="store_true",
    )
    qr_model.add_argument(
        "-S", "--study", help="use study root information model", action="store_true"
    )
    qr_model.add_argument(
        "-O",
        "--psonly",
        help="use patient/study only information model",
        action="store_true",
    )
    qr_model.add_argument(
        "-W",
        "--worklist",
        help="use modality worklist information model",
        action="store_true",
    )
    qr_query = parser.add_argument_group("Query Options")
    qr_query.add_argument(
        "-k",
        "--keyword",
        metavar="[k]eyword: (gggg,eeee)=str, keyword=str",
        help=(
            "add or override a query element using either an element tag as "
            "(group,element) or the element's keyword (such as PatientName)"
        ),
        type=str,
        action="append",
    )
    qr_query.add_argument(
        "-f",
        "--file",
        metavar="path to [f]ile",
        help=(
            "use a DICOM file as the query dataset, if "
            "used with -k then the elements will be added to or overwrite "
            "those present in the file"
        ),
        type=str,
    )
    out_opts = parser.add_argument_group("Output Options")
    out_opts.add_argument(
        "-w",
        "--write",
        help=("write the responses to file as rsp000001.dcm, rsp000002.dcm, ..."),
        action="store_true",
    )
    ext_neg = parser.add_argument_group("Extended Negotiation Options")
    ext_neg.add_argument(
        "--relational-query",
        help="request the use of relational queries",
        action="store_true",
    )
    ext_neg.add_argument(
        "--dt-matching",
        help="request the use of date-time matching",
        action="store_true",
    )
    ext_neg.add_argument(
        "--fuzzy-names",
        help="request the use of fuzzy semantic matching of person names",
        action="store_true",
    )
    ext_neg.add_argument(
        "--timezone-adj",
        help="request the use of timezone query adjustment",
        action="store_true",
    )
    ext_neg.add_argument(
        "--enhanced-conversion",
        help="request the use of enhanced multi-frame image conversion",
        action="store_true",
    )
    ns = parser.parse_args()
    # A query source (-f and/or -k) is mandatory unless the user only
    # asked for the version string.
    if ns.version:
        pass
    elif not bool(ns.file) and not bool(ns.keyword):
        parser.error("-f and/or -k must be specified")
    return ns
def get_file_meta(assoc, query_model):
    """Return a Dataset containing sufficient File Meta elements
    for conformance.

    :param assoc: the established association; supplies the accepted
        transfer syntax for *query_model*
    :param query_model: the SOP Class UID of the query model in use
    """
    context = assoc._get_valid_context(query_model, "", "scu")
    meta = Dataset()
    meta.TransferSyntaxUID = context.transfer_syntax[0]
    meta.MediaStorageSOPClassUID = query_model
    meta.MediaStorageSOPInstanceUID = generate_uid(prefix=PYNETDICOM_UID_PREFIX)
    meta.ImplementationClassUID = PYNETDICOM_IMPLEMENTATION_UID
    meta.ImplementationVersionName = PYNETDICOM_IMPLEMENTATION_VERSION
    return meta
def generate_filename():
    """Yield sequential filenames for saved C-FIND responses.

    Produces ``rsp000001.dcm``, ``rsp000002.dcm``, ... indefinitely.
    """
    counter = 1
    while True:
        yield f"rsp{counter:06d}.dcm"
        counter += 1
def main(args=None):
    """Run the application.

    :param args: optional replacement for ``sys.argv`` when invoked
        programmatically (the first element is treated as the program name)
    """
    if args is not None:
        sys.argv = args
    args = _setup_argparser()
    if args.version:
        print(f"findscu.py v{__version__}")
        sys.exit()
    APP_LOGGER = setup_logging(args, "findscu")
    APP_LOGGER.debug(f"findscu.py v{__version__}")
    APP_LOGGER.debug("")
    # Create query (identifier) dataset
    try:
        # If you're looking at this to see how QR Find works then `identifer`
        # is a pydicom Dataset instance with your query keys, e.g.:
        # identifier = Dataset()
        # identifier.QueryRetrieveLevel = 'PATIENT'
        # identifier.PatientName = ''
        identifier = create_dataset(args, APP_LOGGER)
    except Exception as exc:
        APP_LOGGER.exception(exc)
        sys.exit(1)
    # Create application entity
    # Binding to port 0 lets the OS pick an available port
    ae = AE(ae_title=args.calling_aet)
    # Set timeouts
    ae.acse_timeout = args.acse_timeout
    ae.dimse_timeout = args.dimse_timeout
    ae.network_timeout = args.network_timeout
    # Set the Presentation Contexts we are requesting the Find SCP support
    ae.requested_contexts = (
        QueryRetrievePresentationContexts + BasicWorklistManagementPresentationContexts
    )
    # Query/Retrieve Information Models
    # Default to Patient Root when no model flag was given.
    if args.worklist:
        query_model = ModalityWorklistInformationFind
    elif args.study:
        query_model = StudyRootQueryRetrieveInformationModelFind
    elif args.psonly:
        query_model = PatientStudyOnlyQueryRetrieveInformationModelFind
    else:
        query_model = PatientRootQueryRetrieveInformationModelFind
    # Extended Negotiation
    ext_neg = []
    ext_opts = [
        args.relational_query,
        args.dt_matching,
        args.fuzzy_names,
        args.timezone_adj,
        args.enhanced_conversion,
    ]
    if not args.worklist and any(ext_opts):
        # QR models: one byte per option (0x01 = requested), in the
        # order of the ext_opts list above.
        app_info = b""
        for option in ext_opts:
            app_info += b"\x01" if option else b"\x00"
        item = SOPClassExtendedNegotiation()
        item.sop_class_uid = query_model
        item.service_class_application_information = app_info
        ext_neg = [item]
    elif args.worklist and any([args.fuzzy_names, args.timezone_adj]):
        # Worklist model: leading b"\x01\x01", then one byte each for the
        # fuzzy-names and timezone-adjustment options.
        app_info = b"\x01\x01"
        for option in [args.fuzzy_names, args.timezone_adj]:
            app_info += b"\x01" if option else b"\x00"
        item = SOPClassExtendedNegotiation()
        item.sop_class_uid = query_model
        item.service_class_application_information = app_info
        ext_neg = [item]
    # Request association with (QR/BWM) Find SCP
    assoc = ae.associate(
        args.addr,
        args.port,
        ae_title=args.called_aet,
        max_pdu=args.max_pdu,
        ext_neg=ext_neg,
    )
    if assoc.is_established:
        # Send C-FIND request, `responses` is a generator
        responses = assoc.send_c_find(identifier, query_model)
        # Used to generate filenames if args.write used
        fname = generate_filename()
        for (status, rsp_identifier) in responses:
            # If `status.Status` is one of the 'Pending' statuses then
            # `rsp_identifier` is the C-FIND response's Identifier dataset
            if status and status.Status in [0xFF00, 0xFF01]:
                if args.write:
                    rsp_identifier.file_meta = get_file_meta(assoc, query_model)
                    rsp_identifier.save_as(next(fname), write_like_original=False)
        # Release the association
        assoc.release()
    else:
        # Association rejected, aborted or failed to connect.
        sys.exit(1)
# Script entry point: run the findscu CLI when executed directly.
if __name__ == "__main__":
    main()
| {
"content_hash": "2384b409922aa96a45f9106ebdb2e22e",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 87,
"avg_line_length": 30.668449197860962,
"alnum_prop": 0.6056669572798605,
"repo_name": "scaramallion/pynetdicom",
"id": "6fd033d324f5d3ca45ccafd8070cbec05c1cf378",
"size": "11492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pynetdicom/apps/findscu/findscu.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3618716"
},
{
"name": "Shell",
"bytes": "6607"
}
],
"symlink_target": ""
} |
import argparse
import os
import shutil
import subprocess
import sys
# Root directory of this gpm installation; packages are cloned beside it.
GPM_HOME = os.path.dirname(os.path.realpath(__file__))
# Names of the officially supported packages.
PACKAGES = ["ide", "pyGP", "pyGP-computervision", "pyGP-slam", "pyGP-ros", "luaGP", "juliaGP", "jGP"]
def get_path(file, context=None):
    """Resolve *file* against GPM_HOME (optionally inside *context*).

    Falls back to the literal path as given; returns None when neither
    location exists.
    """
    if context is None:
        candidate = os.path.join(GPM_HOME, file)
    else:
        candidate = os.path.join(GPM_HOME, context, file)
    if os.path.exists(candidate):
        return candidate
    if os.path.exists(file):
        return file
    return None
def parse_args():
    """Parse the gpm command line.

    :return: the parsed argparse namespace; when no action option at all
        was supplied, the help text is printed before returning
    """
    parser = argparse.ArgumentParser(description='Graph Programming Manager')
    parser.add_argument("--install", help="Install a package")
    parser.add_argument("--url", help="Url used for cloning (on install)")
    parser.add_argument("--upgrade", help="Upgrade a package")
    parser.add_argument("--uninstall", help="Completely uninstall a package removing all data")
    # NOTE: --list still expects a (dummy) value for backward compatibility;
    # switching it to action="store_true" would break existing invocations.
    parser.add_argument("--list", help="Print a list of all available packages")
    args = parser.parse_args()
    # With no action requested, show usage instead of silently doing nothing.
    if args.install is None and args.upgrade is None and args.uninstall is None and args.list is None:
        parser.print_help()
    return args
def get_package_path(package):
    """Return the filesystem path where *package* is (or would be) installed.

    A hyphenated name "parent-child" maps to GPM_HOME/parent/extlib/child;
    any other name sits directly under GPM_HOME.
    """
    parts = package.split("-")
    if len(parts) > 1:
        return os.path.join(GPM_HOME, parts[0], "extlib", parts[1])
    return os.path.join(GPM_HOME, package)
def install(package, url=None):
    """Install *package* by cloning its git repository.

    :param package: package name; "parent-child" installs into parent/extlib
    :param url: clone URL; defaults to the official GraphProgramming repo
    """
    if url is None:
        url = "https://github.com/GraphProgramming/{}.git".format(package)
    path = get_package_path(package)
    if os.path.exists(path):
        print("Package {} already installed try upgrading.".format(package))
        return
    # List-form argv avoids shell interpretation of spaces/metacharacters
    # in the URL or target path (os.system passed them through a shell).
    subprocess.run(["git", "clone", url, path])
def uninstall(package):
    """Remove an installed package directory and all of its data."""
    path = get_package_path(package)
    if not os.path.exists(path):
        print("Package {} not installed.".format(package))
        return
    shutil.rmtree(path)
def upgrade(package):
    """Upgrade an installed package by running ``git pull`` in its directory.

    :param package: package name previously installed via :func:`install`
    """
    path = get_package_path(package)
    if os.path.exists(path):
        # Run git with cwd= instead of os.chdir(), which permanently
        # changed the working directory of the whole process.
        subprocess.run(["git", "pull"], cwd=path)
    else:
        print("Package {} not installed.".format(package))
def list_packages():
    """Print the names of all officially available packages."""
    print("Official Packages:")
    print("------------------")
    for name in PACKAGES:
        print(name)
def main():
    """Parse the command line and run every requested action in order."""
    args = parse_args()
    actions = [
        (args.install, lambda name: install(name, url=args.url)),
        (args.upgrade, upgrade),
        (args.uninstall, uninstall),
        (args.list, lambda _unused: list_packages()),
    ]
    for value, handler in actions:
        if value is not None:
            handler(value)
# Script entry point: run the package manager when executed directly.
if __name__ == "__main__":
    main()
| {
"content_hash": "b032e163aa2395f23a9216f3e02aee7f",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 102,
"avg_line_length": 29.347826086956523,
"alnum_prop": 0.6303703703703704,
"repo_name": "GraphProgramming/GraphProgramming",
"id": "901e24be174226ebcc7411770d98a05f5faf20f7",
"size": "2700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpm/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4019"
},
{
"name": "Shell",
"bytes": "62"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from mock import patch, MagicMock
import frontend.helpers.sessions as module
from lib.irma.common.exceptions import IrmaDatabaseError
class TestSessions(TestCase):
    """Tests for the session_transaction/session_query context managers."""

    @patch("frontend.helpers.sessions.db_session")
    def test001_transaction(self, mock_session):
        # A clean transaction commits and closes, never rolls back.
        with module.session_transaction():
            pass
        mock_session.commit.assert_called()
        mock_session.rollback.assert_not_called()
        mock_session.close.assert_called()

    @patch("frontend.helpers.sessions.db_session")
    def test002_transaction_error(self, mock_session):
        # A failing transaction rolls back and closes, never commits.
        with self.assertRaises(IrmaDatabaseError):
            with module.session_transaction():
                raise IrmaDatabaseError
        mock_session.commit.assert_not_called()
        mock_session.rollback.assert_called()
        mock_session.close.assert_called()

    @patch("frontend.helpers.sessions.db_session")
    def test003_query(self, mock_session):
        # A read-only query session touches the session not at all.
        with module.session_query():
            pass
        mock_session.commit.assert_not_called()
        mock_session.rollback.assert_not_called()
        mock_session.close.assert_not_called()

    @patch("frontend.helpers.sessions.db_session")
    def test004_query_error(self, mock_session):
        # Even a failing query session performs no session operations.
        with self.assertRaises(IrmaDatabaseError):
            with module.session_query():
                raise IrmaDatabaseError
        mock_session.commit.assert_not_called()
        mock_session.rollback.assert_not_called()
        mock_session.close.assert_not_called()
| {
"content_hash": "7f85638c4ebbae31cdf709cb0adac192",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 56,
"avg_line_length": 36.20454545454545,
"alnum_prop": 0.6666666666666666,
"repo_name": "deloittem/irma-frontend",
"id": "2055646624731789f385c708d95439eace52b1a7",
"size": "1593",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/helpers/test_sessions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "229845"
},
{
"name": "DIGITAL Command Language",
"bytes": "68"
},
{
"name": "HTML",
"bytes": "24102"
},
{
"name": "JavaScript",
"bytes": "1773453"
},
{
"name": "Makefile",
"bytes": "92"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "261983"
},
{
"name": "Shell",
"bytes": "16816"
}
],
"symlink_target": ""
} |
import itertools
from collections import defaultdict
from networkx.utils.union_find import UnionFind
def calculate_hamming_clusters(numbers, bit_width=24):
    """Count clusters of values whose pairwise Hamming distance is <= 2.

    Args:
        numbers: iterable of non-negative integers (interpreted as
            ``bit_width``-bit values; duplicates allowed).
        bit_width: number of significant bits per value (default 24,
            matching the original hard-coded assignment input).

    Returns:
        int: number of connected components when values at Hamming
        distance 0, 1 or 2 are joined.
    """
    buckets = defaultdict(list)
    for index, value in enumerate(numbers):
        buckets[value].append(index)

    # One union-find node per *distinct* value: duplicates (distance 0)
    # already share a bucket, so they collapse into a single node.
    union_find = UnionFind(buckets)

    one_bit_masks = [1 << i for i in range(bit_width)]
    two_bit_masks = [1 << i ^ 1 << j
                     for i, j in itertools.combinations(range(bit_width), 2)]

    keys = list(buckets)
    for mask in [*one_bit_masks, *two_bit_masks]:
        for key in keys:
            neighbor = key ^ mask
            # Membership test instead of `buckets[neighbor]`: indexing a
            # defaultdict inserts an empty list for every miss, bloating the
            # map with O(len(keys) * len(masks)) useless entries.
            if neighbor in buckets:
                union_find.union(key, neighbor)

    return len(list(union_find.to_sets()))
| {
"content_hash": "0e10c9b2cdc6a45307350a20cecdcf31",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 70,
"avg_line_length": 31.555555555555557,
"alnum_prop": 0.6408450704225352,
"repo_name": "manoldonev/algo1-assignments",
"id": "3c87e458cac1abf8d995ce3142a6ad0da8368222",
"size": "852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/course3/week2/hamming.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "137"
},
{
"name": "Python",
"bytes": "15419"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import upload.models
class Migration(migrations.Migration):
    """Change ``Upload.photo`` to compute its upload path via the
    ``setFilePath`` callable instead of a static path."""

    # Must be applied after the previous auto-generated migration.
    dependencies = [
        ('upload', '0002_auto_20150804_2100'),
    ]

    operations = [
        migrations.AlterField(
            model_name='upload',
            name='photo',
            # upload_to now delegates path construction to upload.models.setFilePath
            field=models.ImageField(upload_to=upload.models.setFilePath),
        ),
    ]
| {
"content_hash": "f7001233523cee2a24fac2db780696db",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 73,
"avg_line_length": 22.105263157894736,
"alnum_prop": 0.6190476190476191,
"repo_name": "dtekcth/DNollK.se",
"id": "9db077ab1a51d63d3f672fcba7f46ddc988735af",
"size": "492",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "upload/migrations/0003_auto_20160723_1252.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "135297"
},
{
"name": "HTML",
"bytes": "219200"
},
{
"name": "JavaScript",
"bytes": "950971"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "66374"
}
],
"symlink_target": ""
} |
import regex as re
from datetime import datetime, timedelta, tzinfo
from .timezones import timezone_info_list
class StaticTzInfo(tzinfo):
    """A ``tzinfo`` with a constant UTC offset and fixed name (no DST)."""

    def __init__(self, name, offset):
        self.__name = name
        self.__offset = offset

    def utcoffset(self, dt):
        return self.__offset

    def tzname(self, dt):
        return self.__name

    def dst(self, dt):
        # A static zone never shifts for daylight saving.
        return timedelta(0)

    def __repr__(self):
        return "<%s '%s'>" % (self.__class__.__name__, self.__name)

    def localize(self, dt, is_dst=False):
        """Attach this timezone to a naive datetime.

        Raises ValueError when *dt* already carries a tzinfo.
        """
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)
def pop_tz_offset_from_string(date_string, as_offset=True):
    """Strip the first recognized timezone token from *date_string*.

    Returns ``(cleaned_string, tz)`` where ``tz`` is a ``StaticTzInfo``
    (or the bare zone name when *as_offset* is false); when no known
    timezone matches, returns ``(date_string, None)`` unchanged.
    """
    for name, info in _tz_offsets:
        pattern = info['regex']
        if not pattern.search(date_string):
            continue
        cleaned = pattern.sub(r'\1', date_string)
        tz = StaticTzInfo(name, info['offset']) if as_offset else name
        return cleaned, tz
    return date_string, None
def convert_to_local_tz(datetime_obj, datetime_tz_offset):
    """Shift *datetime_obj* from its own UTC offset into the local offset."""
    correction = local_tz_offset - datetime_tz_offset
    return datetime_obj + correction
def get_tz_offsets():
    """Yield ``(name, {'regex', 'offset'})`` pairs for every known timezone.

    Expands ``timezone_info_list``: each timezone name is substituted into
    each regex pattern template, plus any alternate spellings produced by
    the entry's optional 'replace' rules.
    """
    def get_offset(tz_obj, regex, repl='', replw=''):
        # tz_obj is a (name, offset_in_seconds) pair.  With the defaults
        # repl=''/replw='' the re.sub() below leaves the pattern unchanged;
        # otherwise it rewrites the pattern to an alternate spelling.
        return (
            tz_obj[0],
            {
                'regex': re.compile(re.sub(repl, replw, regex % tz_obj[0]), re.IGNORECASE),
                'offset': timedelta(seconds=tz_obj[1])
            }
        )

    for tz_info in timezone_info_list:
        for regex in tz_info['regex_patterns']:
            for tz_obj in tz_info['timezones']:
                yield get_offset(tz_obj, regex)

            # alternate patterns derived by pattern-level replacements
            for replace, replacewith in tz_info.get('replace', []):
                for tz_obj in tz_info['timezones']:
                    yield get_offset(tz_obj, regex, repl=replace, replw=replacewith)
def get_local_tz_offset():
    """Return the local UTC offset, seconds rounded to the nearest ten."""
    delta = datetime.now() - datetime.utcnow()
    # Rounding hides the sub-second skew between the two clock reads.
    rounded_seconds = round(delta.seconds, -1)
    return timedelta(days=delta.days, seconds=rounded_seconds)
# Build the timezone lookup table and local offset once at import time;
# pop_tz_offset_from_string and convert_to_local_tz read these globals.
_tz_offsets = list(get_tz_offsets())
local_tz_offset = get_local_tz_offset()
| {
"content_hash": "a17e5f083b50d37534f0586605f9181c",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 91,
"avg_line_length": 29.18918918918919,
"alnum_prop": 0.5949074074074074,
"repo_name": "Samuel789/MediPi",
"id": "5f5e3da93a46d71f4d613fa5af344742c85d66ba",
"size": "2184",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "MedManagementWeb/env/lib/python3.5/site-packages/dateparser/timezone_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10012"
},
{
"name": "CSS",
"bytes": "847678"
},
{
"name": "HTML",
"bytes": "4238145"
},
{
"name": "Java",
"bytes": "1942198"
},
{
"name": "JavaScript",
"bytes": "2308166"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "66091"
},
{
"name": "Ruby",
"bytes": "1183"
},
{
"name": "Shell",
"bytes": "17053"
}
],
"symlink_target": ""
} |
"""Regions used in Relay."""
from ...runtime import Object
from . import _ffi_api
class AnnotatedRegionSet(Object):
    """Class to represent a relay expression split into regions."""

    def __init__(self, expr, region_begin_op, region_end_op):
        """Construct regions from an expression.

        Parameters
        ----------
        expr : tvm.relay.Expr
            The expression from which to construct the regions.
        region_begin_op : tvm.ir.Op
            The region begin annotation.
        region_end_op : tvm.ir.Op
            The region end annotation.
        """
        # The partitioning happens on the C++ side; this just wires the
        # object handle to the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.AnnotatedRegionSet, expr, region_begin_op, region_end_op
        )

    def __len__(self):
        # Number of regions; `regions` is a member field exposed by the
        # underlying FFI object.
        return len(self.regions)

    def get_region(self, expr):
        """Get the region an expression belongs to.

        Parameters
        ----------
        expr : tvm.relay.Expr
            The expression.

        Returns
        -------
        region
            The region containing the expression.
            None if not found.
        """
        return _ffi_api.GetRegion(self, expr)
| {
"content_hash": "263d591b7b3c98233f914cad2f210fe6",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 77,
"avg_line_length": 26.227272727272727,
"alnum_prop": 0.5606585788561526,
"repo_name": "Laurawly/tvm-1",
"id": "a18ccb97836bb2b77a5f57853a9f996aaf99fc06",
"size": "2024",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/tvm/relay/analysis/annotated_regions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4093"
},
{
"name": "C",
"bytes": "351611"
},
{
"name": "C++",
"bytes": "11660999"
},
{
"name": "CMake",
"bytes": "228510"
},
{
"name": "Cuda",
"bytes": "16902"
},
{
"name": "Cython",
"bytes": "28979"
},
{
"name": "Go",
"bytes": "111527"
},
{
"name": "HTML",
"bytes": "2664"
},
{
"name": "Java",
"bytes": "199950"
},
{
"name": "JavaScript",
"bytes": "15305"
},
{
"name": "Makefile",
"bytes": "67149"
},
{
"name": "Objective-C",
"bytes": "24259"
},
{
"name": "Objective-C++",
"bytes": "87655"
},
{
"name": "Python",
"bytes": "16256580"
},
{
"name": "RenderScript",
"bytes": "1895"
},
{
"name": "Rust",
"bytes": "391076"
},
{
"name": "Shell",
"bytes": "228674"
},
{
"name": "TypeScript",
"bytes": "94385"
}
],
"symlink_target": ""
} |
"""manifest module for linux runtime
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import shlex
from treadmill import subproc
from treadmill.appcfg import manifest as app_manifest
_LOGGER = logging.getLogger(__name__)
TREADMILL_BIND_PATH = '/opt/treadmill-bind'
def add_runtime(tm_env, manifest):
    """Adds linux (docker) runtime specific details to the manifest.

    Rewrites every service entry into its docker-aware form (see
    ``_transform_services``) and then appends the standard linux system
    and user services.
    """
    _transform_services(manifest)

    app_manifest.add_linux_system_services(tm_env, manifest)
    app_manifest.add_linux_services(manifest)
def _get_docker_run_cmd(name, image,
                        uidgid=None,
                        commands=None,
                        use_shell=True):
    """Build the ``treadmill sproc docker`` command line for one service.

    The command is assembled as a template first (``{name}``, ``{image}``,
    ``{entrypoint}``, ``{cmds}`` placeholders) and formatted once at the
    end, with image/command arguments shell-quoted.
    """
    parts = [
        'exec $TREADMILL/bin/treadmill sproc docker'
        ' --name {name}'
        ' --envdirs /env,/docker/env,/services/{name}/env'
    ]

    # FIXME: hardcode volumes for now
    treadmill_bind = subproc.resolve('treadmill_bind_distro')
    volumes = [
        ('/var/log', '/var/log', 'rw'),
        ('/var/spool', '/var/spool', 'rw'),
        ('/var/tmp', '/var/tmp', 'rw'),
        ('/docker/etc/hosts', '/etc/hosts', 'ro'),
        ('/docker/etc/passwd', '/etc/passwd', 'ro'),
        ('/docker/etc/group', '/etc/group', 'ro'),
        ('/env', '/env', 'ro'),
        (treadmill_bind, TREADMILL_BIND_PATH, 'ro'),
    ]
    parts.extend(
        ' --volume {source}:{dest}:{mode}'.format(source=src, dest=dst, mode=mode)
        for src, dst, mode in volumes
    )

    if uidgid is not None:
        parts.append(' --user {uidgid}'.format(uidgid=uidgid))

    parts.append(' --image {image}')

    # entrypoint and remaining command arguments always come last
    entrypoint = None
    if commands is None:
        commands = []
    else:
        commands = shlex.split(commands)
        if not use_shell:
            # First token becomes the container entrypoint.
            parts.append(' --entrypoint {entrypoint}')
            entrypoint = commands.pop(0)
        if commands:
            parts.append(' -- {cmds}')

    template = ''.join(parts)
    return template.format(
        name=name,
        image=shlex.quote(image),
        entrypoint=entrypoint,
        cmds=' '.join(shlex.quote(cmd) for cmd in commands)
    )
def _transform_services(manifest):
    """Rewrite ``manifest['services']`` into linux-runtime service entries.

    Docker services (those with an ``image`` key) get their command replaced
    by a ``treadmill sproc docker`` invocation; plain services keep their
    command as-is.  Restart counts are normalized to ints and root services
    run under the 'root' proid.  Mutates *manifest* in place.
    """
    transformed = []
    for svc in manifest.get('services', []):
        if 'image' in svc:
            command = _get_docker_run_cmd(name=svc['name'],
                                          image=svc['image'],
                                          commands=svc.get('command', None),
                                          use_shell=svc.get('useshell', False))
        else:
            # TODO: Implement use_shell=False for standard commands.
            command = svc['command']

        run_as_root = svc.get('root', False)
        transformed.append(
            {
                'name': svc['name'],
                'command': command,
                'restart': {
                    'limit': int(svc['restart']['limit']),
                    'interval': int(svc['restart']['interval']),
                },
                'root': run_as_root,
                'proid': 'root' if run_as_root else manifest['proid'],
                'environ': svc.get('environ', []),
                'config': None,
                'downed': svc.get('downed', False),
                'trace': True,
                'logger': svc.get('logger', 's6.app-logger.run'),
            }
        )
    manifest['services'] = transformed
| {
"content_hash": "971389fee6094bc1e86cbe752b331d8c",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 79,
"avg_line_length": 30.776,
"alnum_prop": 0.5204055107876268,
"repo_name": "ceache/treadmill",
"id": "669ef0c6d2d522ea977348f31e3a1bfb0cd3700f",
"size": "3847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python/treadmill/runtime/linux/_manifest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3750"
},
{
"name": "Python",
"bytes": "3362298"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "51646"
}
],
"symlink_target": ""
} |
import sublime
from . import st_utils
class Session:
    """A named snapshot of all open Sublime windows."""

    def __init__(self, name, window_sessions):
        self.name = name
        self.windows = window_sessions

    @classmethod
    def save(cls, name, st_windows):
        """Snapshot every given Sublime window into a new Session."""
        window_sessions = [Window.save(st_window) for st_window in st_windows]
        return cls(name, window_sessions)

    def load(self):
        """Restore every recorded window."""
        for window_session in self.windows:
            window_session.load()
class Window:
    """Snapshot of one Sublime window: project info plus its views."""

    def __init__(self, project, project_path, view_sessions):
        self.project = project
        self.project_path = project_path
        self.views = view_sessions

    @classmethod
    def save(cls, st_window):
        """Capture project data, project file path and all open views."""
        return cls(
            st_window.project_data(),
            st_window.project_file_name(),
            [View.save(st_view) for st_view in st_window.views()],
        )

    def load(self):
        """Open a fresh window and restore the project and views into it."""
        st_window = st_utils.open_window()
        self._load_project(st_window)
        self._load_views(st_window)

    def _load_project(self, st_window):
        resolved = st_utils.resolve_project_paths(self.project_path, self.project)
        st_window.set_project_data(resolved)

    def _load_views(self, st_window):
        # Workaround: Sublime focus bug on new views (issue #39)
        sublime.set_timeout(lambda: self._load_views_intern(st_window), 0)

    def _load_views_intern(self, st_window):
        for view_session in self.views:
            view_session.load(st_window)
class View:
    """Snapshot of one view: file, focus flag, selection and scroll position."""

    def __init__(self, file_path, active, sel_regions, visible_region):
        self.file_path = file_path
        self.active = active
        self.sel_regions = sel_regions
        self.visible_region = visible_region

    @classmethod
    def save(cls, st_view):
        """Capture the state of an open Sublime view."""
        is_active = st_view.id() == st_view.window().active_view().id()
        regions = list(st_view.sel())
        return cls(st_view.file_name(), is_active, regions,
                   st_view.visible_region())

    def load(self, st_window):
        """Open the file in *st_window* and restore state once loaded."""
        st_view = st_window.open_file(self.file_path)
        sublime.set_timeout(lambda: self._init_view(st_view), 50)

    def _init_view(self, view):
        # Re-poll until Sublime has finished loading the file.
        if view.is_loading():
            sublime.set_timeout(lambda: self._init_view(view), 50)
            return
        selection = view.sel()
        selection.clear()
        selection.add_all(self.sel_regions)
        view.show_at_center(self.visible_region)
        if self.active:
            view.window().focus_view(view)
| {
"content_hash": "41a24426d7feb16a85f71d4ee82e8030",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 88,
"avg_line_length": 28.772727272727273,
"alnum_prop": 0.6042654028436019,
"repo_name": "Zeeker/sublime-SessionManager",
"id": "16b563276909683170d5c49337694037f76425ac",
"size": "2532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15080"
}
],
"symlink_target": ""
} |
from __future__ import division
import os
import time
from imposm.geom import (
PolygonBuilder,
LineStringBuilder,
InvalidGeometryError,
IncompletePolygonError,
)
from imposm.merge import merge
import imposm.base
import imposm.geom
import imposm.config
import shapely.geometry
import shapely.ops
import shapely.geos
import shapely.prepared
import logging
log = logging.getLogger(__name__)
def RelationBuilder(*args, **kw):
    """Factory: instantiate the relation builder selected by imposm.config."""
    builder = imposm.config.relation_builder
    if builder == 'contains':
        return ContainsRelationBuilder(*args, **kw)
    if builder == 'union':
        return UnionRelationBuilder(*args, **kw)
    raise ValueError('unknown relation_builder "%s"' % (builder, ))
class RelationBuilderBase(object):
    """Shared machinery for turning an OSM multipolygon relation into a
    geometry: fetch member ways, assemble them into closed rings, then
    delegate ring->polygon assembly to a subclass.

    NOTE: this file is Python 2 (``except E, e`` syntax).
    """
    # subclasses may disable per-ring validation (ContainsRelationBuilder does)
    validate_rings = True
    def __init__(self, relation, ways_cache, coords_cache):
        self.relation = relation
        self.polygon_builder = PolygonBuilder()
        self.linestring_builder = LineStringBuilder()
        self.ways_cache = ways_cache
        self.coords_cache = coords_cache
    def fetch_ways(self):
        """Resolve the relation's 'way' members to way objects with coords.

        Missing ways/coords either skip the member (when
        import_partial_relations is on) or abort with
        IncompletePolygonError.
        """
        ways = []
        for member in self.relation.members:
            # skip label nodes, relations of relations, etc
            if member[1] != 'way': continue
            way = self.ways_cache.get(member[0])
            if way is None:
                log.debug('way not found %s:%s', self.relation.osm_id, member[0])
                if imposm.config.import_partial_relations:
                    continue
                else:
                    raise IncompletePolygonError('way not found %s:%s' % (self.relation.osm_id, member[0]))
            if way.partial_refs:
                log.warn('multiple linestrings in way %s (relation %s)',
                    member[0], self.relation.osm_id)
                raise IncompletePolygonError()
            way.coords = self.fetch_way_coords(way)
            if way.coords is None:
                # coords missing: skip way when partial imports are allowed
                if not imposm.config.import_partial_relations:
                    raise IncompletePolygonError()
            else:
                ways.append(way)
        return ways
    def build_rings(self, ways):
        """Wrap each way in a Ring; closed rings get their geometry built
        immediately, open ones are merged end-to-end first."""
        rings = []
        incomplete_rings = []
        for ring in (Ring(w) for w in ways):
            if ring.is_closed():
                ring.geom = self.polygon_builder.build_checked_geom(ring, validate=self.validate_rings)
                rings.append(ring)
            else:
                incomplete_rings.append(ring)
        merged_rings = self.build_ring_from_incomplete(incomplete_rings)
        if len(rings) + len(merged_rings) == 0:
            raise IncompletePolygonError('linestrings from relation %s have no rings' % (self.relation.osm_id, ))
        return rings + merged_rings
    def build_ring_from_incomplete(self, incomplete_rings):
        """Join open rings at shared endpoints; drop (or reject) any that
        still do not close."""
        rings = merge_rings(incomplete_rings)
        # iterate over a copy: rings may be removed while looping
        for ring in rings[:]:
            if not ring.is_closed():
                if imposm.config.import_partial_relations:
                    rings.remove(ring)
                    continue
                else:
                    raise InvalidGeometryError('linestrings from relation %s do not form a ring' %
                        self.relation.osm_id)
            ring.geom = self.polygon_builder.build_checked_geom(ring, validate=self.validate_rings)
        return rings
    def fetch_way_coords(self, way):
        """
        Fetch all coordinates of way.refs.
        """
        coords = self.coords_cache.get_coords(way.refs)
        if coords is None:
            log.debug('missing coord from way %s in relation %s',
                way.osm_id, self.relation.osm_id)
            return None
        return coords
    def build_relation_geometry(self, rings):
        """
        Build relation geometry from rings.

        Implemented by subclasses (union vs. containment strategy).
        """
        raise NotImplementedError()
    def build(self):
        """Full pipeline: fetch ways, build rings, build the geometry.

        Per-phase timings are logged when the total exceeds
        imposm_multipolygon_report; oversized relations are skipped.
        All failures are normalized to IncompletePolygonError.
        """
        try:
            time_start = time.time()
            ways = self.fetch_ways()
            time_ways = time.time() - time_start
            if not ways:
                raise IncompletePolygonError('no ways found')
            time_start = time.time()
            rings = self.build_rings(ways)
            time_rings = time.time() - time_start
            if (imposm.config.imposm_multipolygon_max_ring
                and len(rings) > imposm.config.imposm_multipolygon_max_ring):
                log.warn('skipping relation %d with %d ways (%.1fms) and %d rings (%.1fms): too many rings',
                    self.relation.osm_id, len(ways), time_ways*1000, len(rings), time_rings*1000)
                raise IncompletePolygonError('skipping too large multipolygon')
            time_start = time.time()
            self.build_relation_geometry(rings)
            time_relations = time.time() - time_start
            if time_ways + time_rings + time_relations > imposm.config.imposm_multipolygon_report:
                log.warn('building relation %d with %d ways (%.1fms) and %d rings (%.1fms) took %.1fms',
                    self.relation.osm_id, len(ways), time_ways*1000, len(rings), time_rings*1000, time_relations*1000)
        except InvalidGeometryError, ex:
            log.debug(ex)
            raise IncompletePolygonError(ex)
        except IncompletePolygonError:
            raise
        except Exception, ex:
            log.warn('error while building multipolygon:')
            log.exception(ex)
            raise IncompletePolygonError(ex)
class UnionRelationBuilder(RelationBuilderBase):
    """Builds the relation geometry by iteratively unioning/differencing
    rings against the largest ring."""
    def build_relation_geometry(self, rings):
        """
        Build relation geometry from rings.
        """
        # largest ring first: treated as the primary outer shell
        rings.sort(key=lambda x: x.geom.area, reverse=True)
        # add/subtract all rings from largest
        polygon = rings[0]
        rel_tags = relation_tags(self.relation.tags, polygon.tags)
        polygon.mark_as_inserted(rel_tags)
        geom = polygon.geom
        for r in rings[1:]:
            if geom.contains(r.geom):
                # inside -> hole -> subtract
                geom = geom.difference(r.geom)
                r.mark_as_inserted(rel_tags)
            else:
                # outside or overlap -> merge(union) to multipolygon or to polygon
                try:
                    geom = geom.union(r.geom)
                except shapely.geos.TopologicalError:
                    raise InvalidGeometryError('multipolygon relation (%s) result is invalid'
                        ' (topological error)' % self.relation.osm_id)
                r.mark_as_inserted(rel_tags)
        if not geom.is_valid:
            raise InvalidGeometryError('multipolygon relation (%s) result is invalid' %
                self.relation.osm_id)
        self.relation.geom = geom
        self.relation.tags = rel_tags
        # collect the ways of every ring onto the relation
        # NOTE(review): all_ways aliases polygon.ways and `rings` still
        # contains `polygon`, so rings[0]'s ways end up duplicated in the
        # list — presumably harmless downstream, but verify.
        all_ways = polygon.ways
        for r in rings:
            all_ways.extend(r.ways)
        self.relation.ways = all_ways
class ContainsRelationBuilder(RelationBuilderBase):
    """Builds the relation geometry via pairwise containment tests instead
    of geometric union — rings are classified as shells or holes by their
    nesting depth."""
    # validation happens once on the assembled geometry instead
    validate_rings = False
    def _ring_is_hole(self, rings, idx):
        """
        Returns True if rings[idx] is a hole, False if it is a
        shell (also if hole in a hole, etc)
        """
        # walk up the containment chain; odd depth == hole
        contained_counter = 0
        while True:
            idx = rings[idx].contained_by
            if idx is None:
                break
            contained_counter += 1
        return contained_counter % 2 == 1
    def build_relation_geometry(self, rings):
        """
        Build relation geometry from rings.
        """
        # sort by area so a ring can only be contained by an earlier ring
        rings.sort(key=lambda x: x.geom.area, reverse=True)
        total_rings = len(rings)
        shells = set([rings[0]])
        for i in xrange(total_rings):
            # prepared geometry speeds up the repeated contains() tests
            test_geom = shapely.prepared.prep(rings[i].geom)
            for j in xrange(i+1, total_rings):
                if test_geom.contains(rings[j].geom):
                    # j in inside of i
                    if rings[j].contained_by is not None:
                        # j is inside a larger ring, remove that relationship
                        # e.g. j is hole inside a hole (i)
                        rings[rings[j].contained_by].holes.discard(rings[j])
                        shells.discard(rings[j])
                    # remember parent
                    rings[j].contained_by = i
                    # add ring as hole or shell
                    if self._ring_is_hole(rings, j):
                        rings[i].holes.add(rings[j])
                    else:
                        shells.add(rings[j])
            if rings[i].contained_by is None:
                # add as shell if it is not a hole
                shells.add(rings[i])
        rel_tags = relation_tags(self.relation.tags, rings[0].tags)
        # build polygons from rings
        polygons = []
        for shell in shells:
            shell.mark_as_inserted(rel_tags)
            exterior = shell.geom.exterior
            interiors = []
            for hole in shell.holes:
                hole.mark_as_inserted(rel_tags)
                interiors.append(hole.geom.exterior)
            polygons.append(shapely.geometry.Polygon(exterior, interiors))
        if len(polygons) == 1:
            geom = polygons[0]
        else:
            geom = shapely.geometry.MultiPolygon(polygons)
        geom = imposm.geom.validate_and_simplify(geom)
        if not geom.is_valid:
            raise InvalidGeometryError('multipolygon relation (%s) result is invalid' %
                self.relation.osm_id)
        self.relation.geom = geom
        self.relation.tags = rel_tags
        all_ways = []
        for r in rings:
            all_ways.extend(r.ways)
        self.relation.ways = all_ways
def relation_tags(rel_tags, way_tags):
    """Derive the effective tags for a relation geometry.

    Drops 'type' and 'name' from the relation tags; when the relation has
    further tags of its own they win (with the name restored), otherwise
    the way tags are used as-is.
    """
    result = dict(rel_tags)
    result.pop('type', None)
    result.pop('name', None)

    if result:
        # relation carries real tags of its own; just restore its name
        if 'name' in rel_tags:
            result['name'] = rel_tags['name']
    else:
        # relation only had type/name: fall back to the way's tags
        result.update(way_tags)
    return result
def tags_differ(a, b):
    """True when *a* and *b* differ in anything other than their 'name'."""
    def _without_name(tags):
        stripped = dict(tags)
        stripped.pop('name', None)
        return stripped

    return _without_name(a) != _without_name(b)
def tags_same_or_empty(a, b):
    """True when *b* is empty, or equals *a* apart from the 'name' tag."""
    if not b:
        return True
    return not tags_differ(a, b)
def merge_rings(rings):
    """
    Merge rings at the endpoints.

    Maintains a map from endpoint ref -> ring; whenever a new ring shares
    an endpoint with a mapped ring they are concatenated (reversing one
    side if needed so the shared ref lines up).  Returns the de-duplicated
    merged rings.
    """
    endpoints = {}
    for ring in rings:
        if len(ring.refs) < 2:
            continue
        left = ring.refs[0]
        right = ring.refs[-1]
        orig_ring = None
        if left in endpoints:
            # our left end continues an existing ring
            orig_ring = endpoints.pop(left)
            if left == orig_ring.refs[-1]:
                # append directly; skip the shared ref
                orig_ring.refs = orig_ring.refs + ring.refs[1:]
                orig_ring.coords = orig_ring.coords + ring.coords[1:]
            else:
                # shared ref is at orig's start: reverse orig first
                orig_ring.refs = orig_ring.refs[::-1] + ring.refs[1:]
                orig_ring.coords = orig_ring.coords[::-1] + ring.coords[1:]
            orig_ring.ways.extend(ring.ways)
            orig_ring.tags.update(ring.tags)
            if right in endpoints and endpoints[right] is not orig_ring:
                # close gap
                ring = endpoints.pop(right)
                if right == ring.refs[0]:
                    orig_ring.refs = orig_ring.refs + ring.refs[1:]
                    orig_ring.coords = orig_ring.coords + ring.coords[1:]
                else:
                    orig_ring.refs = orig_ring.refs[:-1] + ring.refs[::-1]
                    orig_ring.coords = orig_ring.coords[:-1] + ring.coords[::-1]
                orig_ring.ways.extend(ring.ways)
                orig_ring.tags.update(ring.tags)
                right = orig_ring.refs[-1]
                endpoints[right] = orig_ring
            else:
                endpoints[right] = orig_ring
        elif right in endpoints:
            # our right end continues an existing ring: prepend ourselves
            orig_ring = endpoints.pop(right)
            if right == orig_ring.refs[0]:
                orig_ring.refs = ring.refs[:-1] + orig_ring.refs
                orig_ring.coords = ring.coords[:-1] + orig_ring.coords
            else:
                orig_ring.refs = orig_ring.refs[:-1] + ring.refs[::-1]
                orig_ring.coords = orig_ring.coords[:-1] + ring.coords[::-1]
            orig_ring.ways.extend(ring.ways)
            orig_ring.tags.update(ring.tags)
            endpoints[left] = orig_ring
        else:
            # no shared endpoint yet: register both ends of this ring
            endpoints[left] = ring
            endpoints[right] = ring
    # a ring is registered under multiple endpoints -> de-duplicate
    return list(set(endpoints.values()))
class Ring(object):
    """
    Represents a ring (i.e. polygon without holes) build from one
    or more ways. Stores references to the building ways.
    """
    def __init__(self, way):
        self.ways = [way]
        self.osm_id = way.osm_id
        self.refs = way.refs
        self.coords = way.coords
        self.tags = dict(way.tags)
        self.inserted = way.inserted
        # containment bookkeeping, used by ContainsRelationBuilder:
        # index of the ring containing this one, and this ring's holes
        self.contained_by = None
        self.holes = set()
    def __repr__(self):
        return 'Ring(%r, %r, %r)' % (self.osm_id, self.tags, self.ways)
    def merge(self, ring, without_refs=False):
        """
        Try to merge `ring.refs` with this ring.
        Returns `self` on success, else `None`.
        """
        if without_refs:
            result = None
        else:
            # NOTE(review): `result` from imposm.merge.merge is wrapped in a
            # list below (`self.refs = [result]`); whether that intentionally
            # nests the merged ref list is not visible here — verify against
            # imposm.merge.
            result = merge(self.refs, ring.refs)
        if result is None:
            return None
        self.ways.extend(ring.ways)
        self.refs = [result]
        self.tags.update(ring.tags)
        return self
    def is_closed(self):
        # at least a triangle (4 refs, first == last)
        return len(self.refs) >= 4 and self.refs[0] == self.refs[-1]
    def mark_as_inserted(self, tags):
        # a way/ring counts as inserted once the relation's tags subsume it
        for w in self.ways:
            if tags_same_or_empty(tags, w.tags):
                w.inserted = True
        if tags_same_or_empty(tags, self.tags):
            self.inserted = True
| {
"content_hash": "f96ceef2ad3b458a95f569dc89fe2870",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 118,
"avg_line_length": 35.44358974358974,
"alnum_prop": 0.5487231425884396,
"repo_name": "omniscale/imposm",
"id": "11b72757716af4ece5c5b8b088ec7761896049f4",
"size": "14420",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "imposm/multipolygon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Protocol Buffer",
"bytes": "257"
},
{
"name": "Python",
"bytes": "199218"
}
],
"symlink_target": ""
} |
import json
import time
from time import gmtime, strftime
import datetime
import sys
from dateutil import parser
import calendar
from TrendAnalyser import TrendAnalyser
# Crude smoke test: measure how long it takes to construct a TrendAnalyser
# with both the external API client and the database connection disabled.
start_time = time.time()
TA = TrendAnalyser(load_api=False, load_db=False)
end_time = time.time()
# NOTE: Python 2 print statement — this script predates Python 3.
print "Time Taken:", end_time - start_time
| {
"content_hash": "93222fc1ee62f8a512cab0ec32fed689",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 49,
"avg_line_length": 20.866666666666667,
"alnum_prop": 0.7827476038338658,
"repo_name": "chewett/TrendAnalyser",
"id": "99423ac737be9815ce9eeb98e7103a343d440556",
"size": "313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_init_no_api_no_db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "697"
},
{
"name": "JavaScript",
"bytes": "165"
},
{
"name": "Python",
"bytes": "36946"
},
{
"name": "Shell",
"bytes": "390"
}
],
"symlink_target": ""
} |
"""Support for monitoring an SABnzbd NZB client."""
from datetime import timedelta
import logging
from pysabnzbd import SabnzbdApi, SabnzbdApiException
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_SABNZBD
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_NAME,
CONF_PATH,
CONF_PORT,
CONF_SENSORS,
CONF_SSL,
DATA_GIGABYTES,
DATA_MEGABYTES,
DATA_RATE_MEGABYTES_PER_SECOND,
)
from homeassistant.core import callback
from homeassistant.helpers import discovery
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util.json import load_json, save_json
_LOGGER = logging.getLogger(__name__)

DOMAIN = "sabnzbd"
# Key under which the shared SabnzbdApiData instance is stored in hass.data.
# NOTE(review): value "sabznbd" looks like a typo for "sabnzbd" — harmless as
# long as every lookup goes through this constant, but confirm before fixing.
DATA_SABNZBD = "sabznbd"

# Hosts with a configurator flow currently in progress (host -> request id).
_CONFIGURING = {}

ATTR_SPEED = "speed"
BASE_URL_FORMAT = "{}://{}:{}/"
# JSON file remembering API keys per base URL between restarts.
CONFIG_FILE = "sabnzbd.conf"
DEFAULT_HOST = "localhost"
DEFAULT_NAME = "SABnzbd"
DEFAULT_PORT = 8080
DEFAULT_SPEED_LIMIT = "100"
DEFAULT_SSL = False

UPDATE_INTERVAL = timedelta(seconds=30)

SERVICE_PAUSE = "pause"
SERVICE_RESUME = "resume"
SERVICE_SET_SPEED = "set_speed"

SIGNAL_SABNZBD_UPDATED = "sabnzbd_updated"

# sensor key -> [friendly name, unit of measurement, SABnzbd queue field]
SENSOR_TYPES = {
    "current_status": ["Status", None, "status"],
    "speed": ["Speed", DATA_RATE_MEGABYTES_PER_SECOND, "kbpersec"],
    "queue_size": ["Queue", DATA_MEGABYTES, "mb"],
    "queue_remaining": ["Left", DATA_MEGABYTES, "mbleft"],
    "disk_size": ["Disk", DATA_GIGABYTES, "diskspacetotal1"],
    "disk_free": ["Disk Free", DATA_GIGABYTES, "diskspace1"],
    "queue_count": ["Queue Count", None, "noofslots_total"],
    "day_size": ["Daily Total", DATA_GIGABYTES, "day_size"],
    "week_size": ["Weekly Total", DATA_GIGABYTES, "week_size"],
    "month_size": ["Monthly Total", DATA_GIGABYTES, "month_size"],
    "total_size": ["Total", DATA_GIGABYTES, "total_size"],
}

# Payload schema for the set_speed service.
SPEED_LIMIT_SCHEMA = vol.Schema(
    {vol.Optional(ATTR_SPEED, default=DEFAULT_SPEED_LIMIT): cv.string}
)

# YAML configuration schema for the sabnzbd: block.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_API_KEY): cv.string,
                vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
                vol.Optional(CONF_PATH): cv.string,
                vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
                vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
                vol.Optional(CONF_SENSORS): vol.All(
                    cv.ensure_list, [vol.In(SENSOR_TYPES)]
                ),
                vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_check_sabnzbd(sab_api):
    """Return True when the SABnzbd API is reachable, False otherwise."""
    try:
        await sab_api.check_available()
    except SabnzbdApiException:
        _LOGGER.error("Connection to SABnzbd API failed")
        return False
    return True
async def async_configure_sabnzbd(
    hass, config, use_ssl, name=DEFAULT_NAME, api_key=None
):
    """Try to configure Sabnzbd and request api key if configuration fails."""
    host = config[CONF_HOST]
    port = config[CONF_PORT]
    web_root = config.get(CONF_PATH)
    uri_scheme = "https" if use_ssl else "http"
    base_url = BASE_URL_FORMAT.format(uri_scheme, host, port)
    if api_key is None:
        # Fall back to an api key saved by a previous configurator run.
        conf = await hass.async_add_job(load_json, hass.config.path(CONFIG_FILE))
        api_key = conf.get(base_url, {}).get(CONF_API_KEY, "")

    sab_api = SabnzbdApi(
        base_url, api_key, web_root=web_root, session=async_get_clientsession(hass)
    )
    if await async_check_sabnzbd(sab_api):
        async_setup_sabnzbd(hass, sab_api, config, name)
    else:
        # Unreachable or wrong api key: ask the user via the configurator.
        async_request_configuration(hass, config, base_url, web_root)
async def async_setup(hass, config):
    """Set up the SABnzbd component.

    Registers a discovery listener and, when a sabnzbd: block is present
    in the YAML config, configures that instance immediately.
    """

    async def sabnzbd_discovered(service, info):
        """Handle service discovery."""
        # The discovery payload advertises https via the "https" property.
        ssl = info.get("properties", {}).get("https", "0") == "1"
        await async_configure_sabnzbd(hass, info, ssl)

    discovery.async_listen(hass, SERVICE_SABNZBD, sabnzbd_discovered)

    conf = config.get(DOMAIN)
    if conf is not None:
        use_ssl = conf.get(CONF_SSL)
        name = conf.get(CONF_NAME)
        api_key = conf.get(CONF_API_KEY)
        await async_configure_sabnzbd(hass, conf, use_ssl, name, api_key)
    return True
@callback
def async_setup_sabnzbd(hass, sab_api, config, name):
    """Set up SABnzbd sensors and services.

    Wires the pause/resume/set_speed services and schedules a periodic
    queue refresh that fans updates out via the dispatcher.
    """
    sab_api_data = SabnzbdApiData(sab_api, name, config.get(CONF_SENSORS, {}))

    if config.get(CONF_SENSORS):
        # Sensors are only loaded when explicitly configured.
        hass.data[DATA_SABNZBD] = sab_api_data
        hass.async_create_task(
            discovery.async_load_platform(hass, "sensor", DOMAIN, {}, config)
        )

    async def async_service_handler(service):
        """Handle service calls."""
        if service.service == SERVICE_PAUSE:
            await sab_api_data.async_pause_queue()
        elif service.service == SERVICE_RESUME:
            await sab_api_data.async_resume_queue()
        elif service.service == SERVICE_SET_SPEED:
            speed = service.data.get(ATTR_SPEED)
            await sab_api_data.async_set_queue_speed(speed)

    hass.services.async_register(
        DOMAIN, SERVICE_PAUSE, async_service_handler, schema=vol.Schema({})
    )
    hass.services.async_register(
        DOMAIN, SERVICE_RESUME, async_service_handler, schema=vol.Schema({})
    )
    hass.services.async_register(
        DOMAIN, SERVICE_SET_SPEED, async_service_handler, schema=SPEED_LIMIT_SCHEMA
    )

    async def async_update_sabnzbd(now):
        """Refresh SABnzbd queue data."""
        try:
            await sab_api.refresh_data()
            # Notify all sensor entities that fresh queue data is available.
            async_dispatcher_send(hass, SIGNAL_SABNZBD_UPDATED, None)
        except SabnzbdApiException as err:
            _LOGGER.error(err)

    async_track_time_interval(hass, async_update_sabnzbd, UPDATE_INTERVAL)
@callback
def async_request_configuration(hass, config, host, web_root):
    """Request configuration steps from the user.

    Shows a configurator card asking for the API key; on success the key
    is persisted to CONFIG_FILE and setup continues.
    """
    configurator = hass.components.configurator
    # Show an error instead of a second card if this host is already
    # mid-configuration.
    if host in _CONFIGURING:
        configurator.async_notify_errors(
            _CONFIGURING[host], "Failed to register, please try again."
        )
        return

    async def async_configuration_callback(data):
        """Handle configuration changes."""
        api_key = data.get(CONF_API_KEY)
        sab_api = SabnzbdApi(
            host, api_key, web_root=web_root, session=async_get_clientsession(hass)
        )
        # Silently keep the card open if the supplied key does not work.
        if not await async_check_sabnzbd(sab_api):
            return

        def success():
            """Signal successful setup."""
            # Remember the working api key for future restarts.
            conf = load_json(hass.config.path(CONFIG_FILE))
            conf[host] = {CONF_API_KEY: api_key}
            save_json(hass.config.path(CONFIG_FILE), conf)
            req_config = _CONFIGURING.pop(host)
            configurator.request_done(req_config)

        hass.async_add_job(success)
        async_setup_sabnzbd(hass, sab_api, config, config.get(CONF_NAME, DEFAULT_NAME))

    _CONFIGURING[host] = configurator.async_request_config(
        DEFAULT_NAME,
        async_configuration_callback,
        description="Enter the API Key",
        submit_caption="Confirm",
        fields=[{"id": CONF_API_KEY, "name": "API Key", "type": ""}],
    )
class SabnzbdApiData:
    """Wraps a SabnzbdApi client with queue-control helpers and sensor
    configuration."""

    def __init__(self, sab_api, name, sensors):
        """Store the api client, the configured name and the sensor config."""
        self.sab_api = sab_api
        self.name = name
        self.sensors = sensors

    async def async_pause_queue(self):
        """Pause the download queue; returns False on API failure."""
        try:
            result = await self.sab_api.pause_queue()
        except SabnzbdApiException as exc:
            _LOGGER.error(exc)
            return False
        return result

    async def async_resume_queue(self):
        """Resume the download queue; returns False on API failure."""
        try:
            result = await self.sab_api.resume_queue()
        except SabnzbdApiException as exc:
            _LOGGER.error(exc)
            return False
        return result

    async def async_set_queue_speed(self, limit):
        """Apply a speed limit to the queue; returns False on API failure."""
        try:
            result = await self.sab_api.set_speed_limit(limit)
        except SabnzbdApiException as exc:
            _LOGGER.error(exc)
            return False
        return result

    def get_queue_field(self, field):
        """Return *field* from the latest queue snapshot (None if absent)."""
        return self.sab_api.queue.get(field)
| {
"content_hash": "d0aa8ad89c683bede3d04c2ca047ec98",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 87,
"avg_line_length": 32.486988847583646,
"alnum_prop": 0.6368005492619293,
"repo_name": "postlund/home-assistant",
"id": "b36abbedb487387fd856e84d55225ebcbdf26008",
"size": "8739",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sabnzbd/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
} |
"""Tests for Adamax."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import combinations
from tensorflow.python.keras.optimizer_v2 import adamax
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adamax_update_numpy(param,
                        g_t,
                        t,
                        m,
                        v,
                        alpha=0.001,
                        beta1=0.9,
                        beta2=0.999,
                        epsilon=1e-8):
  """Reference NumPy implementation of one dense Adamax step.

  Returns a tuple of (updated params, updated first moment,
  updated infinity-norm accumulator) for 0-based step `t`.
  """
  # Exponential moving average of the gradient (first moment).
  new_m = beta1 * m + (1.0 - beta1) * g_t
  # Infinity-norm accumulator: decayed previous value vs. |gradient|.
  new_v = np.maximum(beta2 * v, np.abs(g_t))
  # Bias-corrected step size for this iteration.
  step = alpha / (1.0 - beta1**(t + 1))
  new_param = param - step * (new_m / (new_v + epsilon))
  return new_param, new_m, new_v
def adamax_sparse_update_numpy(param,
                               indices,
                               g_t,
                               t,
                               m,
                               v,
                               alpha=0.001,
                               beta1=0.9,
                               beta2=0.999,
                               epsilon=1e-8):
  """Reference NumPy implementation of one sparse Adamax step.

  Only the rows selected by `indices` are updated; every other row of
  the parameter and of both accumulators is returned unchanged.  All
  outputs are copies — the inputs are never mutated.
  """
  out_param = np.copy(param)
  out_m = np.copy(m)
  out_v = np.copy(v)
  # Compute the update only for the touched slices.
  new_m = beta1 * m[indices] + (1 - beta1) * g_t
  new_v = np.maximum(beta2 * v[indices], np.abs(g_t))
  step = alpha / (1 - beta1**(t + 1))
  out_m[indices] = new_m
  out_v[indices] = new_v
  out_param[indices] = param[indices] - step * (new_m / (new_v + epsilon))
  return out_param, out_m, out_v
def get_beta_accumulators(opt, dtype):
  """Return beta_1 ** (iterations + 1) as a tensor cast to `dtype`.

  Mirrors the bias-correction power the optimizer applies internally,
  reading `beta_1` and the step count from the optimizer itself.
  """
  step = math_ops.cast(opt.iterations + 1, dtype)
  beta_1 = math_ops.cast(opt._get_hyper("beta_1"), dtype)
  return math_ops.pow(beta_1, step)
class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
  """Checks the Keras v2 Adamax optimizer against the NumPy reference above."""
  def testResourceSparse(self):
    """Sparse (IndexedSlices) gradients on resource variables match NumPy."""
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
        # Initialize variables for numpy implementation.
        zero_slots = lambda: np.zeros((3), dtype=dtype.as_numpy_dtype)  # pylint: disable=cell-var-from-loop
        m0, v0, m1, v1 = zero_slots(), zero_slots(), zero_slots(), zero_slots()
        var0_np = np.array([1.0, 2.0, 3.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([4.0, 5.0, 6.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = resource_variable_ops.ResourceVariable(var0_np)
        var1 = resource_variable_ops.ResourceVariable(var1_np)
        # Gradients touch rows [0, 1] of var0 and rows [2, 1] of var1.
        grads0_np_indices = np.array([0, 1], dtype=np.int32)
        grads0 = ops.IndexedSlices(
            constant_op.constant(grads0_np),
            constant_op.constant(grads0_np_indices), constant_op.constant([3]))
        grads1_np_indices = np.array([2, 1], dtype=np.int32)
        grads1 = ops.IndexedSlices(
            constant_op.constant(grads1_np),
            constant_op.constant(grads1_np_indices), constant_op.constant([3]))
        opt = adamax.Adamax()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0, 3.0], var0.eval())
        self.assertAllClose([4.0, 5.0, 6.0], var1.eval())
        beta1_power = get_beta_accumulators(opt, dtype)
        # Run 3 steps of Adamax
        for t in range(3):
          self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
          update.run()
          var0_np, m0, v0 = adamax_sparse_update_numpy(
              var0_np, grads0_np_indices, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adamax_sparse_update_numpy(
              var1_np, grads1_np_indices, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())
  def testSparseDevicePlacement(self):
    """All sparse-update ops have kernels on the available device."""
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for index_dtype in [dtypes.int32, dtypes.int64]:
      with ops.Graph().as_default(), self.cached_session(
          force_gpu=test.is_gpu_available()):
        # If a GPU is available, tests that all optimizer ops can be placed on
        # it (i.e. they have GPU kernels).
        var = variables.Variable([[1.0], [2.0]])
        indices = constant_op.constant([0, 1], dtype=index_dtype)
        g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices))  # pylint: disable=cell-var-from-loop
        optimizer = adamax.Adamax(3.0)
        minimize_op = optimizer.minimize(g_sum, var_list=[var])
        variables.global_variables_initializer().run()
        minimize_op.run()
  def testSparseRepeatedIndices(self):
    """A repeated index behaves like the pre-aggregated gradient."""
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with ops.Graph().as_default(), self.cached_session():
        repeated_index_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        aggregated_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        # Two 0.1 contributions to row 1 ...
        grad_repeated_index = ops.IndexedSlices(
            constant_op.constant(
                [0.1, 0.1], shape=[2, 1], dtype=dtype),
            constant_op.constant([1, 1]),
            constant_op.constant([2, 1]))
        # ... must equal a single 0.2 contribution to row 1.
        grad_aggregated = ops.IndexedSlices(
            constant_op.constant(
                [0.2], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))
        repeated_update = adamax.Adamax().apply_gradients(
            [(grad_repeated_index, repeated_index_update_var)])
        aggregated_update = adamax.Adamax().apply_gradients(
            [(grad_aggregated, aggregated_update_var)])
        variables.global_variables_initializer().run()
        self.assertAllClose(aggregated_update_var.eval(),
                            repeated_index_update_var.eval())
        for _ in range(3):
          repeated_update.run()
          aggregated_update.run()
          self.assertAllClose(aggregated_update_var.eval(),
                              repeated_index_update_var.eval())
  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
  def testBasic(self):
    """Dense updates match the NumPy reference in graph and eager modes."""
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      with self.session(graph=ops.Graph(), use_gpu=True):
        # Initialize variables for numpy implementation.
        m0 = np.array([0.0, 0.0])
        v0 = np.array([0.0, 0.0])
        m1 = np.array([0.0, 0.0])
        v1 = np.array([0.0, 0.0])
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = resource_variable_ops.ResourceVariable(
            var0_np, name="var0_%d" % i)
        var1 = resource_variable_ops.ResourceVariable(
            var1_np, name="var1_%d" % i)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adamax.Adamax()
        # In graph mode the update op is built once and re-run each step.
        if not context.executing_eagerly():
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        if not context.executing_eagerly():
          self.evaluate(variables.global_variables_initializer())
          # Fetch params to validate initial values
          self.assertAllClose([1.0, 2.0], self.evaluate(var0))
          self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Run 3 steps of Adamax
        for t in range(3):
          beta_1_power = get_beta_accumulators(opt, dtype)
          self.assertAllCloseAccordingToType(0.9**(t + 1),
                                             self.evaluate(beta_1_power))
          if not context.executing_eagerly():
            self.evaluate(update)
          else:
            opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(
              var0_np, self.evaluate(var0), rtol=1e-2)
          self.assertAllCloseAccordingToType(
              var1_np, self.evaluate(var1), rtol=1e-2)
  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
  def testBasicWithLearningRateDecay(self):
    """Inverse-time learning-rate decay matches the NumPy reference."""
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      with self.session(graph=ops.Graph(), use_gpu=True):
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = resource_variable_ops.ResourceVariable(
            var0_np, name="var0_%d" % i)
        var1 = resource_variable_ops.ResourceVariable(
            var1_np, name="var1_%d" % i)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        learning_rate = 0.001
        decay = 0.002
        opt = adamax.Adamax(learning_rate=learning_rate, decay=decay)
        if not context.executing_eagerly():
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        if not context.executing_eagerly():
          self.evaluate(variables.global_variables_initializer())
          # Fetch params to validate initial values
          self.assertAllClose([1.0, 2.0], self.evaluate(var0))
          self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Run 3 steps of Adamax
        for t in range(3):
          beta_1_power = get_beta_accumulators(opt, dtype)
          self.assertAllCloseAccordingToType(0.9**(t + 1),
                                             self.evaluate(beta_1_power))
          if not context.executing_eagerly():
            self.evaluate(update)
          else:
            opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          # Mirror the optimizer's inverse-time decay schedule.
          lr = learning_rate / (1 + decay * t)
          var0_np, m0, v0 = adamax_update_numpy(
              var0_np, grads0_np, t, m0, v0, alpha=lr)
          var1_np, m1, v1 = adamax_update_numpy(
              var1_np, grads1_np, t, m1, v1, alpha=lr)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0),
                                             rtol=1e-2)
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1),
                                             rtol=1e-2)
  def testTensorLearningRate(self):
    """A tensor-valued learning rate behaves like the float default."""
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adamax.Adamax(constant_op.constant(0.001))
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        beta1_power = get_beta_accumulators(opt, dtype)
        # Run 3 steps of Adamax
        for t in range(3):
          self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
          update.run()
          var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())
  def testSharing(self):
    """Two update ops built from one optimizer share its slot state."""
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adamax.Adamax()
        update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        beta1_power = get_beta_accumulators(opt, dtype)
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 3 steps of intertwined Adamax1 and Adamax2.
        for t in range(3):
          self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
          if t % 2 == 0:
            update1.run()
          else:
            update2.run()
          var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())
  def testSlotsUniqueEager(self):
    """Each variable gets its own m and v slots (plus one iteration count)."""
    with context.eager_mode():
      v1 = resource_variable_ops.ResourceVariable(1.)
      v2 = resource_variable_ops.ResourceVariable(1.)
      opt = adamax.Adamax(1.)
      opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
      # There should be iteration, and two unique slot variables for v1 and v2.
      self.assertEqual(5, len({id(v) for v in opt.variables()}))
  def testConstructAdamaxWithLR(self):
    """Legacy `lr` kwarg is accepted and takes precedence over learning_rate."""
    opt = adamax.Adamax(lr=1.0)
    opt_2 = adamax.Adamax(learning_rate=0.1, lr=1.0)
    opt_3 = adamax.Adamax(learning_rate=0.1)
    self.assertIsInstance(opt.lr, variables.Variable)
    self.assertIsInstance(opt_2.lr, variables.Variable)
    self.assertIsInstance(opt_3.lr, variables.Variable)
    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose(self.evaluate(opt.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| {
"content_hash": "f6dfbb199a8b7dfd0025f5789188c06b",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 113,
"avg_line_length": 43.51086956521739,
"alnum_prop": 0.5992380714464152,
"repo_name": "gunan/tensorflow",
"id": "07c7ee96eeb3ce34b4d24effbbd03c5a930be52e",
"size": "16701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/optimizer_v2/adamax_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45924"
},
{
"name": "C",
"bytes": "774953"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "77908225"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "104215"
},
{
"name": "Go",
"bytes": "1841471"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "962443"
},
{
"name": "Jupyter Notebook",
"bytes": "556650"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1479029"
},
{
"name": "Makefile",
"bytes": "58603"
},
{
"name": "Objective-C",
"bytes": "104667"
},
{
"name": "Objective-C++",
"bytes": "297830"
},
{
"name": "PHP",
"bytes": "23994"
},
{
"name": "Pascal",
"bytes": "3739"
},
{
"name": "Pawn",
"bytes": "17039"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "39476740"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "650007"
},
{
"name": "Smarty",
"bytes": "34649"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import operator
import warnings
import weakref
from bson import DBRef, ObjectId, SON
import pymongo
import six
from mongoengine.base.common import UPDATE_OPERATORS
from mongoengine.base.datastructures import (BaseDict, BaseList,
EmbeddedDocumentList)
from mongoengine.common import _import_class
from mongoengine.errors import ValidationError
__all__ = ('BaseField', 'ComplexBaseField', 'ObjectIdField',
'GeoJsonBaseField')
class BaseField(object):
    """A base class for fields in a MongoDB document. Instances of this class
    may be added to subclasses of `Document` to define a document's schema.
    .. versionchanged:: 0.5 - added verbose and help text
    """
    # Attribute name the field is bound to; populated externally (not set here).
    name = None
    # Whether the field participates in a geo index (overridden by geo fields).
    _geo_index = False
    _auto_gen = False  # Call `generate` to generate a value
    _auto_dereference = True
    # These track each time a Field instance is created. Used to retain order.
    # The auto_creation_counter is used for fields that MongoEngine implicitly
    # creates, creation_counter is used for all user-specified fields.
    creation_counter = 0
    auto_creation_counter = -1
    def __init__(self, db_field=None, name=None, required=False, default=None,
                 unique=False, unique_with=None, primary_key=False,
                 validation=None, choices=None, null=False, sparse=False,
                 **kwargs):
        """
        :param db_field: The database field to store this field in
            (defaults to the name of the field)
        :param name: Deprecated - use db_field
        :param required: If the field is required. Whether it has to have a
            value or not. Defaults to False.
        :param default: (optional) The default value for this field if no value
            has been set (or if the value has been unset). It can be a
            callable.
        :param unique: Is the field value unique or not. Defaults to False.
        :param unique_with: (optional) The other field this field should be
            unique with.
        :param primary_key: Mark this field as the primary key. Defaults to False.
        :param validation: (optional) A callable to validate the value of the
            field. Generally this is deprecated in favour of the
            `FIELD.validate` method
        :param choices: (optional) The valid choices
        :param null: (optional) Is the field value can be null. If no and there is a default value
            then the default value is set
        :param sparse: (optional) `sparse=True` combined with `unique=True` and `required=False`
            means that uniqueness won't be enforced for `None` values
        :param **kwargs: (optional) Arbitrary indirection-free metadata for
            this field can be supplied as additional keyword arguments and
            accessed as attributes of the field. Must not conflict with any
            existing attributes. Common metadata includes `verbose_name` and
            `help_text`.
        """
        # A primary key is always stored as MongoDB's reserved '_id' field.
        self.db_field = (db_field or name) if not primary_key else '_id'
        if name:
            msg = 'Field\'s "name" attribute deprecated in favour of "db_field"'
            warnings.warn(msg, DeprecationWarning)
        # A primary key is implicitly required.
        self.required = required or primary_key
        self.default = default
        self.unique = bool(unique or unique_with)
        self.unique_with = unique_with
        self.primary_key = primary_key
        self.validation = validation
        self.choices = choices
        self.null = null
        self.sparse = sparse
        self._owner_document = None
        # Make sure db_field is a string (if it's explicitly defined).
        if (
            self.db_field is not None and
            not isinstance(self.db_field, six.string_types)
        ):
            raise TypeError('db_field should be a string.')
        # Make sure db_field doesn't contain any forbidden characters.
        if isinstance(self.db_field, six.string_types) and (
            '.' in self.db_field or
            '\0' in self.db_field or
            self.db_field.startswith('$')
        ):
            raise ValueError(
                'field names cannot contain dots (".") or null characters '
                '("\\0"), and they must not start with a dollar sign ("$").'
            )
        # Detect and report conflicts between metadata and base properties.
        conflicts = set(dir(self)) & set(kwargs)
        if conflicts:
            raise TypeError('%s already has attribute(s): %s' % (
                self.__class__.__name__, ', '.join(conflicts)))
        # Assign metadata to the instance
        # This efficient method is available because no __slots__ are defined.
        self.__dict__.update(kwargs)
        # Adjust the appropriate creation counter, and save our local copy.
        if self.db_field == '_id':
            self.creation_counter = BaseField.auto_creation_counter
            BaseField.auto_creation_counter -= 1
        else:
            self.creation_counter = BaseField.creation_counter
            BaseField.creation_counter += 1
    def __get__(self, instance, owner):
        """Descriptor for retrieving a value from a field in a document.
        """
        if instance is None:
            # Document class being used rather than a document object
            return self
        # Get value from document instance if available
        return instance._data.get(self.name)
    def __set__(self, instance, value):
        """Descriptor for assigning a value to a field in a document.
        """
        # If setting to None and there is a default
        # Then set the value to the default value
        if value is None:
            if self.null:
                value = None
            elif self.default is not None:
                value = self.default
                if callable(value):
                    value = value()
        if instance._initialised:
            try:
                # Only flag the field as dirty when the value actually changed.
                if (self.name not in instance._data or
                        instance._data[self.name] != value):
                    instance._mark_as_changed(self.name)
            except Exception:
                # Values cant be compared eg: naive and tz datetimes
                # So mark it as changed
                instance._mark_as_changed(self.name)
        EmbeddedDocument = _import_class('EmbeddedDocument')
        if isinstance(value, EmbeddedDocument):
            # Weak back-reference so embedded docs can reach their parent
            # without creating a reference cycle.
            value._instance = weakref.proxy(instance)
        elif isinstance(value, (list, tuple)):
            for v in value:
                if isinstance(v, EmbeddedDocument):
                    v._instance = weakref.proxy(instance)
        instance._data[self.name] = value
    def error(self, message='', errors=None, field_name=None):
        """Raise a ValidationError."""
        field_name = field_name if field_name else self.name
        raise ValidationError(message, errors=errors, field_name=field_name)
    def to_python(self, value):
        """Convert a MongoDB-compatible type to a Python type."""
        return value
    def to_mongo(self, value):
        """Convert a Python type to a MongoDB-compatible type."""
        return self.to_python(value)
    def _to_mongo_safe_call(self, value, use_db_field=True, fields=None):
        """Helper method to call to_mongo with proper inputs."""
        # Inspect the subclass's to_mongo signature and pass only the
        # keyword arguments it actually accepts.
        f_inputs = self.to_mongo.__code__.co_varnames
        ex_vars = {}
        if 'fields' in f_inputs:
            ex_vars['fields'] = fields
        if 'use_db_field' in f_inputs:
            ex_vars['use_db_field'] = use_db_field
        return self.to_mongo(value, **ex_vars)
    def prepare_query_value(self, op, value):
        """Prepare a value that is being used in a query for PyMongo."""
        if op in UPDATE_OPERATORS:
            self.validate(value)
        return value
    def validate(self, value, clean=True):
        """Perform validation on a value."""
        pass
    def _validate_choices(self, value):
        """Ensure *value* is amongst self.choices (documents are matched by type)."""
        Document = _import_class('Document')
        EmbeddedDocument = _import_class('EmbeddedDocument')
        choice_list = self.choices
        if isinstance(next(iter(choice_list)), (list, tuple)):
            # next(iter) is useful for sets
            choice_list = [k for k, _ in choice_list]
        # Choices which are other types of Documents
        if isinstance(value, (Document, EmbeddedDocument)):
            if not any(isinstance(value, c) for c in choice_list):
                self.error(
                    'Value must be an instance of %s' % (
                        six.text_type(choice_list)
                    )
                )
        # Choices which are types other than Documents
        elif value not in choice_list:
            self.error('Value must be one of %s' % six.text_type(choice_list))
    def _validate(self, value, **kwargs):
        """Run choice, custom-callable and subclass validation on *value*."""
        # Check the Choices Constraint
        if self.choices:
            self._validate_choices(value)
        # check validation argument
        if self.validation is not None:
            if callable(self.validation):
                if not self.validation(value):
                    self.error('Value does not match custom validation method')
            else:
                raise ValueError('validation argument for "%s" must be a '
                                 'callable.' % self.name)
        self.validate(value, **kwargs)
    @property
    def owner_document(self):
        """The Document class this field is attached to."""
        return self._owner_document
    def _set_owner_document(self, owner_document):
        # Separate method so subclasses can extend the setter behaviour.
        self._owner_document = owner_document
    @owner_document.setter
    def owner_document(self, owner_document):
        self._set_owner_document(owner_document)
class ComplexBaseField(BaseField):
    """Handles complex fields, such as lists / dictionaries.
    Allows for nesting of embedded documents inside complex types.
    Handles the lazy dereferencing of a queryset by lazily dereferencing all
    items in a list / dict rather than one at a time.
    .. versionadded:: 0.5
    """
    # Optional per-item field used to convert/validate each member.
    field = None
    def __get__(self, instance, owner):
        """Descriptor to automatically dereference references."""
        if instance is None:
            # Document class being used rather than a document object
            return self
        ReferenceField = _import_class('ReferenceField')
        GenericReferenceField = _import_class('GenericReferenceField')
        EmbeddedDocumentListField = _import_class('EmbeddedDocumentListField')
        # Only dereference untyped containers or containers of references.
        dereference = (self._auto_dereference and
                       (self.field is None or isinstance(self.field,
                                                         (GenericReferenceField, ReferenceField))))
        _dereference = _import_class('DeReference')()
        self._auto_dereference = instance._fields[self.name]._auto_dereference
        if instance._initialised and dereference and instance._data.get(self.name):
            instance._data[self.name] = _dereference(
                instance._data.get(self.name), max_depth=1, instance=instance,
                name=self.name
            )
        value = super(ComplexBaseField, self).__get__(instance, owner)
        # Convert lists / values so we can watch for any changes on them
        if isinstance(value, (list, tuple)):
            if (issubclass(type(self), EmbeddedDocumentListField) and
                    not isinstance(value, EmbeddedDocumentList)):
                value = EmbeddedDocumentList(value, instance, self.name)
            elif not isinstance(value, BaseList):
                value = BaseList(value, instance, self.name)
            instance._data[self.name] = value
        elif isinstance(value, dict) and not isinstance(value, BaseDict):
            value = BaseDict(value, instance, self.name)
            instance._data[self.name] = value
        if (self._auto_dereference and instance._initialised and
                isinstance(value, (BaseList, BaseDict)) and
                not value._dereferenced):
            value = _dereference(
                value, max_depth=1, instance=instance, name=self.name
            )
            value._dereferenced = True
            instance._data[self.name] = value
        return value
    def to_python(self, value):
        """Convert a MongoDB-compatible type to a Python type."""
        if isinstance(value, six.string_types):
            return value
        if hasattr(value, 'to_python'):
            return value.to_python()
        is_list = False
        if not hasattr(value, 'items'):
            try:
                # Treat any non-mapping iterable as a list by keying on index.
                is_list = True
                value = {k: v for k, v in enumerate(value)}
            except TypeError:  # Not iterable return the value
                return value
        if self.field:
            self.field._auto_dereference = self._auto_dereference
            value_dict = {key: self.field.to_python(item)
                          for key, item in value.items()}
        else:
            Document = _import_class('Document')
            value_dict = {}
            for k, v in value.items():
                if isinstance(v, Document):
                    # We need the id from the saved object to create the DBRef
                    if v.pk is None:
                        self.error('You can only reference documents once they'
                                   ' have been saved to the database')
                    collection = v._get_collection_name()
                    value_dict[k] = DBRef(collection, v.pk)
                elif hasattr(v, 'to_python'):
                    value_dict[k] = v.to_python()
                else:
                    value_dict[k] = self.to_python(v)
        if is_list:  # Convert back to a list
            return [v for _, v in sorted(value_dict.items(),
                                         key=operator.itemgetter(0))]
        return value_dict
    def to_mongo(self, value, use_db_field=True, fields=None):
        """Convert a Python type to a MongoDB-compatible type."""
        Document = _import_class('Document')
        EmbeddedDocument = _import_class('EmbeddedDocument')
        GenericReferenceField = _import_class('GenericReferenceField')
        if isinstance(value, six.string_types):
            return value
        if hasattr(value, 'to_mongo'):
            if isinstance(value, Document):
                return GenericReferenceField().to_mongo(value)
            cls = value.__class__
            val = value.to_mongo(use_db_field, fields)
            # If it's a document that is not inherited add _cls
            if isinstance(value, EmbeddedDocument):
                val['_cls'] = cls.__name__
            return val
        is_list = False
        if not hasattr(value, 'items'):
            try:
                # Treat any non-mapping iterable as a list by keying on index.
                is_list = True
                value = {k: v for k, v in enumerate(value)}
            except TypeError:  # Not iterable return the value
                return value
        if self.field:
            # BUGFIX: use six.iteritems() instead of value.iteritems() —
            # plain dicts have no iteritems() on Python 3, which made this
            # path raise AttributeError (to_python above already uses items()).
            value_dict = {
                key: self.field._to_mongo_safe_call(item, use_db_field, fields)
                for key, item in six.iteritems(value)
            }
        else:
            value_dict = {}
            for k, v in six.iteritems(value):
                if isinstance(v, Document):
                    # We need the id from the saved object to create the DBRef
                    if v.pk is None:
                        self.error('You can only reference documents once they'
                                   ' have been saved to the database')
                    # If its a document that is not inheritable it won't have
                    # any _cls data so make it a generic reference allows
                    # us to dereference
                    meta = getattr(v, '_meta', {})
                    allow_inheritance = meta.get('allow_inheritance')
                    if not allow_inheritance and not self.field:
                        value_dict[k] = GenericReferenceField().to_mongo(v)
                    else:
                        collection = v._get_collection_name()
                        value_dict[k] = DBRef(collection, v.pk)
                elif hasattr(v, 'to_mongo'):
                    cls = v.__class__
                    val = v.to_mongo(use_db_field, fields)
                    # If it's a document that is not inherited add _cls
                    if isinstance(v, (Document, EmbeddedDocument)):
                        val['_cls'] = cls.__name__
                    value_dict[k] = val
                else:
                    value_dict[k] = self.to_mongo(v, use_db_field, fields)
        if is_list:  # Convert back to a list
            return [v for _, v in sorted(value_dict.items(),
                                         key=operator.itemgetter(0))]
        return value_dict
    def validate(self, value):
        """If field is provided ensure the value is valid."""
        errors = {}
        if self.field:
            if hasattr(value, 'iteritems') or hasattr(value, 'items'):
                # BUGFIX: six.iteritems() works on both Python 2 and 3;
                # value.iteritems() raised AttributeError for py3 dicts.
                sequence = six.iteritems(value)
            else:
                sequence = enumerate(value)
            for k, v in sequence:
                try:
                    self.field._validate(v)
                except ValidationError as error:
                    errors[k] = error.errors or error
                except (ValueError, AssertionError) as error:
                    errors[k] = error
            if errors:
                field_class = self.field.__class__.__name__
                self.error('Invalid %s item (%s)' % (field_class, value),
                           errors=errors)
        # Don't allow empty values if required
        if self.required and not value:
            self.error('Field is required and cannot be empty')
    def prepare_query_value(self, op, value):
        """Queries compare against the mongo representation of the container."""
        return self.to_mongo(value)
    def lookup_member(self, member_name):
        """Delegate member lookup to the per-item field, if any."""
        if self.field:
            return self.field.lookup_member(member_name)
        return None
    def _set_owner_document(self, owner_document):
        # Propagate ownership to the per-item field as well.
        if self.field:
            self.field.owner_document = owner_document
        self._owner_document = owner_document
class ObjectIdField(BaseField):
    """A field wrapper around MongoDB's ObjectIds."""

    def to_python(self, value):
        """Best-effort coercion of *value* to an ObjectId.

        Values that cannot be converted are returned unchanged.
        """
        if isinstance(value, ObjectId):
            return value
        try:
            return ObjectId(value)
        except Exception:
            return value

    def to_mongo(self, value):
        """Convert *value* to an ObjectId, raising a ValidationError on failure."""
        if isinstance(value, ObjectId):
            return value
        try:
            return ObjectId(six.text_type(value))
        except Exception as exc:
            # e.message attribute has been deprecated since Python 2.6
            self.error(six.text_type(exc))

    def prepare_query_value(self, op, value):
        """Queries compare against the mongo (ObjectId) form."""
        return self.to_mongo(value)

    def validate(self, value):
        """Ensure *value* parses as an ObjectId."""
        try:
            ObjectId(six.text_type(value))
        except Exception:
            self.error('Invalid Object ID')
class GeoJsonBaseField(BaseField):
    """A geo json field storing a geojson style object.
    .. versionadded:: 0.8
    """
    _geo_index = pymongo.GEOSPHERE  # index type created when auto_index is on
    _type = 'GeoBase'  # geometry name; overridden by subclasses (e.g. 'Point')
    def __init__(self, auto_index=True, *args, **kwargs):
        """
        :param bool auto_index: Automatically create a '2dsphere' index.\
            Defaults to `True`.
        """
        self._name = '%sField' % self._type
        if not auto_index:
            # Setting the instance attribute shadows the class-level index.
            self._geo_index = False
        super(GeoJsonBaseField, self).__init__(*args, **kwargs)
    def validate(self, value):
        """Validate the GeoJson object based on its type.
        Accepts either a full ``{'type': ..., 'coordinates': ...}`` mapping
        or a bare coordinate list; errors are reported via ``self.error``.
        """
        if isinstance(value, dict):
            if set(value.keys()) == set(['type', 'coordinates']):
                if value['type'] != self._type:
                    self.error('%s type must be "%s"' %
                               (self._name, self._type))
                # Recurse on the bare coordinates list.
                return self.validate(value['coordinates'])
            else:
                self.error('%s can only accept a valid GeoJson dictionary'
                           ' or lists of (x, y)' % self._name)
                return
        elif not isinstance(value, (list, tuple)):
            self.error('%s can only accept lists of [x, y]' % self._name)
            return
        # Dispatch to the geometry-specific checker, e.g. _validate_point.
        validate = getattr(self, '_validate_%s' % self._type.lower())
        error = validate(value)
        if error:
            self.error(error)
    def _validate_polygon(self, value, top_level=True):
        """Validate a polygon; return an error string, or None if valid."""
        if not isinstance(value, (list, tuple)):
            return 'Polygons must contain list of linestrings'
        # Quick and dirty validator
        try:
            value[0][0][0]
        except (TypeError, IndexError):
            return 'Invalid Polygon must contain at least one valid linestring'
        errors = []
        for val in value:
            error = self._validate_linestring(val, False)
            if not error and val[0] != val[-1]:
                # GeoJSON polygon rings must be closed.
                error = 'LineStrings must start and end at the same point'
            if error and error not in errors:
                errors.append(error)
        if errors:
            if top_level:
                return 'Invalid Polygon:\n%s' % ', '.join(errors)
            else:
                return '%s' % ', '.join(errors)
    def _validate_linestring(self, value, top_level=True):
        """Validate a linestring; return an error string, or None if valid."""
        if not isinstance(value, (list, tuple)):
            return 'LineStrings must contain list of coordinate pairs'
        # Quick and dirty validator
        try:
            value[0][0]
        except (TypeError, IndexError):
            return 'Invalid LineString must contain at least one valid point'
        errors = []
        for val in value:
            error = self._validate_point(val)
            if error and error not in errors:
                errors.append(error)
        if errors:
            if top_level:
                return 'Invalid LineString:\n%s' % ', '.join(errors)
            else:
                return '%s' % ', '.join(errors)
    def _validate_point(self, value):
        """Validate each set of coords; return an error string, or None."""
        if not isinstance(value, (list, tuple)):
            return 'Points must be a list of coordinate pairs'
        elif not len(value) == 2:
            return 'Value (%s) must be a two-dimensional point' % repr(value)
        elif (not isinstance(value[0], (float, int)) or
              not isinstance(value[1], (float, int))):
            return 'Both values (%s) in point must be float or int' % repr(value)
    def _validate_multipoint(self, value):
        """Validate a list of points; return an error string, or None."""
        if not isinstance(value, (list, tuple)):
            return 'MultiPoint must be a list of Point'
        # Quick and dirty validator
        try:
            value[0][0]
        except (TypeError, IndexError):
            return 'Invalid MultiPoint must contain at least one valid point'
        errors = []
        for point in value:
            error = self._validate_point(point)
            if error and error not in errors:
                errors.append(error)
        if errors:
            return '%s' % ', '.join(errors)
    def _validate_multilinestring(self, value, top_level=True):
        """Validate a list of linestrings; return an error string, or None."""
        if not isinstance(value, (list, tuple)):
            return 'MultiLineString must be a list of LineString'
        # Quick and dirty validator
        try:
            value[0][0][0]
        except (TypeError, IndexError):
            return 'Invalid MultiLineString must contain at least one valid linestring'
        errors = []
        for linestring in value:
            error = self._validate_linestring(linestring, False)
            if error and error not in errors:
                errors.append(error)
        if errors:
            if top_level:
                return 'Invalid MultiLineString:\n%s' % ', '.join(errors)
            else:
                return '%s' % ', '.join(errors)
    def _validate_multipolygon(self, value):
        """Validate a list of polygons; return an error string, or None."""
        if not isinstance(value, (list, tuple)):
            return 'MultiPolygon must be a list of Polygon'
        # Quick and dirty validator
        try:
            value[0][0][0][0]
        except (TypeError, IndexError):
            return 'Invalid MultiPolygon must contain at least one valid Polygon'
        errors = []
        for polygon in value:
            error = self._validate_polygon(polygon, False)
            if error and error not in errors:
                errors.append(error)
        if errors:
            return 'Invalid MultiPolygon:\n%s' % ', '.join(errors)
    def to_mongo(self, value):
        """Wrap raw coordinates in a GeoJSON SON document for storage."""
        if isinstance(value, dict):
            # Already a full GeoJSON mapping - store as-is.
            return value
        return SON([('type', self._type), ('coordinates', value)])
| {
"content_hash": "1227bf0a4bd2c77cc724bb81f3c276f7",
"timestamp": "",
"source": "github",
"line_count": 639,
"max_line_length": 99,
"avg_line_length": 38.7527386541471,
"alnum_prop": 0.5640269757299197,
"repo_name": "Davidrjx/mongoengine",
"id": "e2b5d3210c248c667121ccc2f6601cbe8d64b715",
"size": "24763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongoengine/base/fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1102912"
},
{
"name": "Shell",
"bytes": "1087"
}
],
"symlink_target": ""
} |
from know_me import serializers
def test_serialize(api_rf, image, km_user_factory):
    """A Know Me user should serialize to its id, absolute image URL and
    name.
    """
    user = km_user_factory(image=image)
    context = {"request": api_rf.get(user.get_absolute_url())}
    serializer = serializers.KMUserInfoSerializer(user, context=context)
    absolute_image_url = api_rf.get(user.image.url).build_absolute_uri()
    assert serializer.data == {
        "id": user.pk,
        "image": absolute_image_url,
        "name": user.name,
    }
| {
"content_hash": "e5c927ff98d2d61e46c637d2ecdd7ca4",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 75,
"avg_line_length": 27.9,
"alnum_prop": 0.6577060931899642,
"repo_name": "knowmetools/km-api",
"id": "34f9c5d891835995dde8eb7812b8da3150bed184",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "km_api/know_me/tests/serializers/test_km_user_info_serializer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "746"
},
{
"name": "HTML",
"bytes": "7830"
},
{
"name": "JavaScript",
"bytes": "7009"
},
{
"name": "Python",
"bytes": "635776"
},
{
"name": "SCSS",
"bytes": "4662"
},
{
"name": "Shell",
"bytes": "1671"
}
],
"symlink_target": ""
} |
from django.db import models
import uuid, os
from django.conf import settings
from mptt.models import MPTTModel, TreeForeignKey
import logging
logger = logging.getLogger(__name__)
class Category(MPTTModel):
    """Hierarchical (MPTT) category.
    Each category owns an on-disk folder named after its slug under
    ``settings.MEDIA_ROOT``; the folder is created on first save.
    """
    name = models.CharField(max_length=150, unique=True)
    # Insertion-order weight used by MPTT (see MPTTMeta.order_insertion_by).
    weight = models.IntegerField(default=0)
    active = models.BooleanField(default=True)
    # Self-referential FK forming the tree; None marks a root category.
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
    # Doubles as the directory name for this category's uploaded files.
    slug = models.SlugField(max_length=150, unique=True)
    class MPTTMeta:
        order_insertion_by = ['weight']
    def save(self, *args, **kwargs):
        """
        Create this category's folder in settings.MEDIA_ROOT (if it does
        not already exist) before saving the category to the database.
        """
        logger.debug('album model save {0}'.format(__name__))
        category_directory = os.path.join(settings.MEDIA_ROOT, self.slug)
        if not os.path.exists(category_directory):
            os.makedirs(category_directory)
        super(Category, self).save(*args, **kwargs)
    def __unicode__(self):
        return self.name
from storage import OverwriteStorage
def file_upload_path(instance, filename):
    """Build the storage path for an uploaded file.
    The file lives under its category's slug and is renamed to the
    instance primary key plus the original extension, e.g.
    ``album_name/f47ac10b-58cc-4372-a567-0e02b2c3d479.jpg``.
    """
    extension = filename.split('.')[-1]
    stored_name = "{}.{}".format(instance.id, extension)
    return os.path.join(instance.category.slug, stored_name)
class UploadFile(models.Model):
    """A file uploaded into a :class:`Category`.
    Stored on disk at ``<category slug>/<uuid>.<ext>`` (see
    ``file_upload_path``); OverwriteStorage replaces any file already
    at that path.
    """
    # UUID string primary key; also used as the stored file's basename.
    id = models.CharField(max_length=64, primary_key=True, verbose_name=u"UUID key",
                          default=uuid.uuid4)
    name = models.CharField(max_length=150)
    category = models.ForeignKey('Category', null=True)
    created = models.DateTimeField(db_index=True, auto_now_add=True)
    upload_file = models.FileField(storage=OverwriteStorage(), upload_to=file_upload_path)
    active = models.BooleanField(default=True)
    def __unicode__(self):
        return self.name
# Receive the pre_delete signal and delete the file associated with the model instance.
from django.db.models.signals import pre_delete, pre_save, post_delete
from django.dispatch.dispatcher import receiver
import shutil
@receiver(post_delete, sender=Category)
def auto_delete_category_on_delete(sender, instance, **kwargs):
    """Remove the category's media directory from disk after the
    corresponding Category row has been deleted.
    """
    directory = os.path.join(settings.MEDIA_ROOT, instance.slug)
    if not os.path.exists(directory):
        return
    logger.debug('category on delete trigger delete catagory {0}'.format(directory))
    shutil.rmtree(directory)
# note: do not change category name and parent unless necessary
# @receiver(pre_save, sender=Category)
# def auto_move_category_on_change(sender, instance, **kwargs):
# """Move sub directory from filesystem
# when corresponding File object is changed.
# """
# logger.debug('triggered off by change category {0}'.format(instance))
# if not instance.id:
# return False
#
# try:
# old_category = Category.objects.get(pk=instance.id)
# except UploadFile.DoesNotExist:
# return False
#
# # get old_parent and past parent
# # build source and dest path
# # update path via os.rename(s, d)
#
# new_file = instance.upload_file
#
# # if parent is different, move the new category under new parent
#
# # if parent is same and slug updated, rename the folder name
# if not old_file == new_file:
# old_file.delete(False)
@receiver(pre_delete, sender=UploadFile)
def auto_delete_file_on_delete(sender, instance, **kwargs):
    """Deletes file from filesystem
    when corresponding UploadFile object is deleted.
    """
    # Pass false so FileField doesn't save the model.
    instance.upload_file.delete(False)
@receiver(pre_save, sender=UploadFile)
def auto_delete_file_on_change(sender, instance, **kwargs):
    """Delete the old file from the filesystem when the corresponding
    UploadFile object is saved with a different file.
    """
    # NOTE: this is the pre_save handler; the old log text said
    # "delete file", which was misleading in the logs.
    logger.debug('triggered off by change file {0}'.format(instance.upload_file))
    if not instance.id:
        # Brand new (unsaved) instance - there is no old file to replace.
        return False
    try:
        old_file = UploadFile.objects.get(pk=instance.id).upload_file
    except UploadFile.DoesNotExist:
        return False
    new_file = instance.upload_file
    if old_file != new_file:
        # Pass False so FileField doesn't save the model.
        old_file.delete(False)
"content_hash": "103422b324cc560a7aca4b75df63a8f6",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 98,
"avg_line_length": 35.2734375,
"alnum_prop": 0.6757475083056479,
"repo_name": "vollov/filemanager",
"id": "64b0f061edbb4452b627c99a1df9e2802b3afa32",
"size": "4515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/node/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17213"
}
],
"symlink_target": ""
} |
"""Event Log Utilities - helper for win32evtlog.pyd
"""
import win32api, win32con, winerror, win32evtlog, string
error = win32api.error # The error the evtlog module raises.
langid = win32api.MAKELANGID(win32con.LANG_NEUTRAL, win32con.SUBLANG_NEUTRAL)
def AddSourceToRegistry(appName, msgDLL = None, eventLogType = "Application", eventLogFlags = None):
    """Add a source of messages to the event log.
    Allows Python program to register a custom source of messages in the
    registry. You must also provide the DLL name that has the message table, so the
    full message text appears in the event log.
    Note that the win32evtlog.pyd file has a number of string entries with just "%1"
    built in, so many Python programs can simply use this DLL. Disadvantages are that
    you do not get language translation, and the full text is stored in the event log,
    blowing the size of the log up.
    Args:
      appName: name of the event source to register.
      msgDLL: path of the DLL holding the message table; defaults to
        win32evtlog.pyd itself.
      eventLogType: which log to register under, e.g. "Application".
      eventLogFlags: bitmask of supported EVENTLOG_*_TYPE flags;
        defaults to error | warning | information.
    """
    # When an application uses the RegisterEventSource or OpenEventLog
    # function to get a handle of an event log, the event loggging service
    # searches for the specified source name in the registry. You can add a
    # new source name to the registry by opening a new registry subkey
    # under the Application key and adding registry values to the new
    # subkey.
    if msgDLL is None:
        msgDLL = win32evtlog.__file__
    # Create a new key for our application
    hkey = win32api.RegCreateKey(win32con.HKEY_LOCAL_MACHINE, \
        "SYSTEM\\CurrentControlSet\\Services\\EventLog\\%s\\%s" % (eventLogType, appName))
    # Add the Event-ID message-file name to the subkey.
    win32api.RegSetValueEx(hkey,
        "EventMessageFile", # value name \
        0, # reserved \
        win32con.REG_EXPAND_SZ,# value type \
        msgDLL)
    # Set the supported types flags and add it to the subkey.
    if eventLogFlags is None:
        eventLogFlags = win32evtlog.EVENTLOG_ERROR_TYPE | win32evtlog.EVENTLOG_WARNING_TYPE | win32evtlog.EVENTLOG_INFORMATION_TYPE
    win32api.RegSetValueEx(hkey, # subkey handle \
        "TypesSupported", # value name \
        0, # reserved \
        win32con.REG_DWORD, # value type \
        eventLogFlags)
    win32api.RegCloseKey(hkey)
def RemoveSourceFromRegistry(appName, eventLogType = "Application"):
    """Removes a source of messages from the event log.
    A missing key is treated as success; any other registry error is
    re-raised.
    """
    # Delete our key
    try:
        win32api.RegDeleteKey(win32con.HKEY_LOCAL_MACHINE, \
         "SYSTEM\\CurrentControlSet\\Services\\EventLog\\%s\\%s" % (eventLogType, appName))
    except win32api.error, (hr, fn, desc):
        if hr != winerror.ERROR_FILE_NOT_FOUND:
            raise
def ReportEvent(appName, eventID, eventCategory = 0, eventType=win32evtlog.EVENTLOG_ERROR_TYPE, strings = None, data = None, sid=None):
    """Report an event for a previously added event source.
    Args:
      appName: name of the registered event source.
      eventID: event ID from the source's message table.
      eventCategory: numeric category for the event.
      eventType: one of the win32evtlog.EVENTLOG_*_TYPE constants.
      strings: optional list of insertion strings.
      data: optional raw binary data to attach to the record.
      sid: optional security identifier to record for the event.
    """
    # Get a handle to the Application event log
    hAppLog = win32evtlog.RegisterEventSource(None, appName)
    try:
        # Now report the event, which will add this event to the event log
        win32evtlog.ReportEvent(hAppLog, eventType, eventCategory, eventID,
                                sid, strings, data)
    finally:
        # Always release the handle - the previous code leaked it if
        # ReportEvent raised.
        win32evtlog.DeregisterEventSource(hAppLog)
def FormatMessage( eventLogRecord, logType="Application" ):
    """Given a tuple from ReadEventLog, and optionally where the event
    record came from, load the message, and process message inserts.
    Note that this function may raise win32api.error. See also the
    function SafeFormatMessage which will return a placeholder message
    if the message can not be processed.
    """
    # From the event log source name, we know the name of the registry
    # key to look under for the name of the message DLL that contains
    # the messages we need to extract with FormatMessage. So first get
    # the event log source name...
    keyName = "SYSTEM\\CurrentControlSet\\Services\\EventLog\\%s\\%s" % (logType, eventLogRecord.SourceName)
    # Now open this key and get the EventMessageFile value, which is
    # the name of the message DLL.
    handle = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, keyName)
    try:
        dllNames = win32api.RegQueryValueEx(handle, "EventMessageFile")[0].split(";")
        # Win2k etc appear to allow multiple DLL names
        data = None
        for dllName in dllNames:
            try:
                # Expand environment variable strings in the message DLL path name,
                # in case any are there.
                dllName = win32api.ExpandEnvironmentStrings(dllName)
                # DONT_RESOLVE_DLL_REFERENCES: we only need the DLL's
                # message resources, not its code.
                dllHandle = win32api.LoadLibraryEx(dllName, 0, win32con.DONT_RESOLVE_DLL_REFERENCES)
                try:
                    data = win32api.FormatMessageW(win32con.FORMAT_MESSAGE_FROM_HMODULE,
                        dllHandle, eventLogRecord.EventID, langid, eventLogRecord.StringInserts)
                finally:
                    win32api.FreeLibrary(dllHandle)
            except win32api.error:
                pass # Not in this DLL - try the next
            if data is not None:
                break
    finally:
        win32api.RegCloseKey(handle)
    return data or u'' # Don't want "None" ever being returned.
def SafeFormatMessage( eventLogRecord, logType=None ):
    """Like FormatMessage, but substitutes a placeholder description
    whenever the real message text cannot be located.
    """
    if logType is None:
        logType = "Application"
    try:
        return FormatMessage(eventLogRecord, logType)
    except win32api.error:
        inserts = eventLogRecord.StringInserts
        # Keep the plain "" (not u"") so %r output matches the original.
        desc = "" if inserts is None else u", ".join(inserts)
        return u"<The description for Event ID ( %d ) in Source ( %r ) could not be found. It contains the following insertion string(s):%r.>" % (winerror.HRESULT_CODE(eventLogRecord.EventID), eventLogRecord.SourceName, desc)
def FeedEventLogRecords(feeder, machineName = None, logName = "Application", readFlags = None):
    """Read every record from an event log, passing each one to `feeder`.
    Args:
      feeder: callable invoked once per event-log record.
      machineName: UNC server name, or None for the local machine.
      logName: name of the log to read (default "Application").
      readFlags: EVENTLOG_* read flags; defaults to backwards sequential.
    """
    if readFlags is None:
        readFlags = win32evtlog.EVENTLOG_BACKWARDS_READ|win32evtlog.EVENTLOG_SEQUENTIAL_READ
    h = win32evtlog.OpenEventLog(machineName, logName)
    try:
        while 1:
            objects = win32evtlog.ReadEventLog(h, readFlags, 0)
            if not objects:
                break
            # Plain loop replaces the old map(lambda ...: apply(...), objects):
            # apply() is Python-2-only, and map() used for side effects is a
            # silent no-op under Python 3, where map is lazy.
            for item in objects:
                feeder(item)
    finally:
        win32evtlog.CloseEventLog(h)
| {
"content_hash": "47251eda80f4177f1f03fec9742814a9",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 225,
"avg_line_length": 43.76315789473684,
"alnum_prop": 0.6518340348767288,
"repo_name": "chvrga/outdoor-explorer",
"id": "48cb820c72dbd555fcaae393f73a537b0a9b1671",
"size": "6652",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "java/play-1.4.4/python/Lib/site-packages/win32/lib/win32evtlogutil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4720"
},
{
"name": "C",
"bytes": "76128"
},
{
"name": "C++",
"bytes": "31284"
},
{
"name": "CSS",
"bytes": "107401"
},
{
"name": "HTML",
"bytes": "1754737"
},
{
"name": "Java",
"bytes": "2441299"
},
{
"name": "JavaScript",
"bytes": "1405163"
},
{
"name": "PLpgSQL",
"bytes": "1377"
},
{
"name": "Python",
"bytes": "8991412"
},
{
"name": "Ruby",
"bytes": "295601"
},
{
"name": "Shell",
"bytes": "7499"
},
{
"name": "XQuery",
"bytes": "544017"
},
{
"name": "XSLT",
"bytes": "1099"
}
],
"symlink_target": ""
} |
from ajax_select import register, LookupChannel
import directory.models as dir_models
@register('fraction')
class FractionLookup(LookupChannel):
    """ajax_select lookup channel for Fractions, registered as 'fraction'."""
    model = dir_models.Fractions
    def get_query(self, q, request):
        # Case-insensitive title match; hidden fractions and fractions whose
        # research entry is hidden are excluded. Results are title-sorted.
        return self.model.objects.filter(title__icontains=q, hide=False, research__hide=False).order_by('title')
| {
"content_hash": "3e40386cef9322f02192877c26228138",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 112,
"avg_line_length": 30.181818181818183,
"alnum_prop": 0.7469879518072289,
"repo_name": "moodpulse/l2",
"id": "d8639fc2938cf1459c838949141415c7976a1193",
"size": "332",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api/lookups.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "38747"
},
{
"name": "Dockerfile",
"bytes": "146"
},
{
"name": "HTML",
"bytes": "238498"
},
{
"name": "JavaScript",
"bytes": "425946"
},
{
"name": "Makefile",
"bytes": "1515"
},
{
"name": "Python",
"bytes": "3710422"
},
{
"name": "SCSS",
"bytes": "48493"
},
{
"name": "Shell",
"bytes": "1815"
},
{
"name": "TypeScript",
"bytes": "98237"
},
{
"name": "Vue",
"bytes": "1980612"
}
],
"symlink_target": ""
} |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
    """Return the woodpecker test-path description.
    The dict holds the initial VM template, the number of checking
    points, and the ordered list of TestAction steps to run.
    """
    return dict(initial_formation="template5", checking_point=8, path_list=[
        [TestAction.create_vm, 'vm1', 'flag=ceph'],
        [TestAction.create_volume, 'volume1', 'flag=ceph,scsi'],
        [TestAction.attach_volume, 'vm1', 'volume1'],
        [TestAction.create_volume, 'volume2', 'flag=ceph,scsi'],
        [TestAction.attach_volume, 'vm1', 'volume2'],
        [TestAction.create_volume, 'volume3', 'flag=ceph,scsi'],
        [TestAction.attach_volume, 'vm1', 'volume3'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
        [TestAction.create_image_from_volume, 'vm1', 'vm1-image1'],
        [TestAction.delete_volume, 'volume2'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
        [TestAction.create_volume_backup, 'vm1-root', 'vm1-root-backup1'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot8'],
        [TestAction.create_vm_backup, 'vm1', 'vm1-backup2'],
        [TestAction.create_volume, 'volume4', 'flag=ceph,scsi'],
        [TestAction.attach_volume, 'vm1', 'volume4'],
        [TestAction.stop_vm, 'vm1'],
        [TestAction.change_vm_image, 'vm1'],
        [TestAction.delete_vm_snapshot, 'vm1-snapshot1'],
    ])
'''
The final status:
Running:[]
Stopped:['vm1']
Enadbled:['vm1-snapshot5', 'volume1-snapshot5', 'volume3-snapshot5', 'vm1-snapshot8', 'volume1-snapshot8', 'volume3-snapshot8', 'vm1-root-backup1', 'vm1-backup2', 'volume1-backup2', 'volume3-backup2', 'vm1-image1']
attached:['volume1', 'volume3', 'volume4']
Detached:[]
Deleted:['volume2', 'vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1']
Expunged:[]
Ha:[]
Group:
vm_snap2:['vm1-snapshot5', 'volume1-snapshot5', 'volume3-snapshot5']---vm1@volume1_volume3
vm_snap3:['vm1-snapshot8', 'volume1-snapshot8', 'volume3-snapshot8']---vm1@volume1_volume3
vm_backup1:['vm1-backup2', 'volume1-backup2', 'volume3-backup2']---vm1@volume1_volume3
'''
| {
"content_hash": "e5c74a9465e7a78bf73d2cad7cea94ff",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 214,
"avg_line_length": 43.36363636363637,
"alnum_prop": 0.7023060796645703,
"repo_name": "zstackio/zstack-woodpecker",
"id": "9d89ad3eb070fe8e5ed6c1731bc2d8d373ba41f3",
"size": "1908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/multihosts/vm_snapshots/paths/xc_path32.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
} |
"""Classes representing Sonos UPnP services.
>>> import soco
>>> device = soco.SoCo('192.168.1.102')
>>> print(RenderingControl(device).GetMute([('InstanceID', 0),
... ('Channel', 'Master')]))
{'CurrentMute': '0'}
>>> r = ContentDirectory(device).Browse([
... ('ObjectID', 'Q:0'),
... ('BrowseFlag', 'BrowseDirectChildren'),
... ('Filter', '*'),
... ('StartingIndex', '0'),
... ('RequestedCount', '100'),
... ('SortCriteria', '')
... ])
>>> print(r['Result'])
<?xml version="1.0" ?><DIDL-Lite xmlns="urn:schemas-upnp-org:metadata ...
>>> for action, in_args, out_args in AlarmClock(device).iter_actions():
... print(action, in_args, out_args)
...
SetFormat [Argument(name='DesiredTimeFormat', vartype='string'), Argument(
name='DesiredDateFormat', vartype='string')] []
GetFormat [] [Argument(name='CurrentTimeFormat', vartype='string'),
Argument(name='CurrentDateFormat', vartype='string')] ...
"""
# UPnP Spec at http://upnp.org/specs/arch/UPnP-arch-DeviceArchitecture-v1.0.pdf
from __future__ import (
absolute_import, unicode_literals
)
import logging
from collections import namedtuple
from xml.sax.saxutils import escape
import requests
from .cache import Cache
from .events import Subscription
from .exceptions import (
SoCoUPnPException, UnknownSoCoException
)
from .utils import prettify
from .xml import XML, illegal_xml_re, PARSEERROR
# UNICODE NOTE
# UPnP requires all XML to be transmitted/received with utf-8 encoding. All
# strings used in this module are unicode. The Requests library should take
# care of all of the necessary encoding (on sending) and decoding (on
# receiving) for us, provided that we specify the correct encoding headers
# (which, hopefully, we do).
# But since ElementTree seems to prefer being fed bytes to unicode, at least
# for Python 2.x, we have to encode strings specifically before using it. see
# http://bugs.python.org/issue11033 TODO: Keep an eye on this when it comes to
# Python 3 compatibility
log = logging.getLogger(__name__) # pylint: disable=C0103
# logging.basicConfig()
# log.setLevel(logging.INFO)
#: A UPnP Action and its arguments.
Action = namedtuple('Action', 'name, in_args, out_args')
#: A UPnP Argument and its type.
Argument = namedtuple('Argument', 'name, vartype')
# A shared cache for ZoneGroupState. Each zone has the same info, so when a
# SoCo instance is asked for group info, we can cache it and return it when
# another instance is asked. To do this we need a cache to be shared between
# instances
zone_group_state_shared_cache = Cache()
# pylint: disable=too-many-instance-attributes
class Service(object):
"""A class representing a UPnP service.
This is the base class for all Sonos Service classes. This class has a
dynamic method dispatcher. Calls to methods which are not explicitly
defined here are dispatched automatically to the service action with the
same name.
"""
# pylint: disable=bad-continuation
soap_body_template = (
'<?xml version="1.0"?>'
'<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
'<s:Body>'
'<u:{action} xmlns:u="urn:schemas-upnp-org:service:'
'{service_type}:{version}">'
'{arguments}'
'</u:{action}>'
'</s:Body>'
'</s:Envelope>') # noqa PEP8
    def __init__(self, soco):
        """
        Args:
            soco (SoCo): A `SoCo` instance to which the UPnP Actions will be
                sent
        """
        #: `SoCo`: The `SoCo` instance to which UPnP Actions are sent
        self.soco = soco
        # Some defaults. Some or all these will need to be overridden
        # specifically in a sub-class. There is other information we could
        # record, but this will do for the moment. Info about a Sonos device is
        # available at <IP_address>/xml/device_description.xml in the
        # <service> tags
        #: str: The UPnP service type.
        self.service_type = self.__class__.__name__
        #: str: The UPnP service version.
        self.version = 1
        #: str: The UPnP service id (defaults to the service type name).
        self.service_id = self.service_type
        #: str: The base URL for sending UPnP Actions.
        self.base_url = 'http://{0}:1400'.format(self.soco.ip_address)
        #: str: The UPnP Control URL.
        self.control_url = '/{0}/Control'.format(self.service_type)
        #: str: The service control protocol description URL.
        self.scpd_url = '/xml/{0}{1}.xml'.format(
            self.service_type, self.version)
        #: str: The service eventing subscription URL.
        self.event_subscription_url = '/{0}/Event'.format(self.service_type)
        #: A cache for storing the result of network calls. By default, this is
        #: a `TimedCache` with a default timeout=0.
        self.cache = Cache(default_timeout=0)
        # From table 3.3 in
        # http://upnp.org/specs/arch/UPnP-arch-DeviceArchitecture-v1.1.pdf
        # This list may not be complete, but should be good enough to be going
        # on with. Error codes between 700-799 are defined for particular
        # services, and may be overriden in subclasses. Error codes >800
        # are generally SONOS specific. NB It may well be that SONOS does not
        # use some of these error codes.
        # pylint: disable=invalid-name
        self.UPNP_ERRORS = {
            400: 'Bad Request',
            401: 'Invalid Action',
            402: 'Invalid Args',
            404: 'Invalid Var',
            412: 'Precondition Failed',
            501: 'Action Failed',
            600: 'Argument Value Invalid',
            601: 'Argument Value Out of Range',
            602: 'Optional Action Not Implemented',
            603: 'Out Of Memory',
            604: 'Human Intervention Required',
            605: 'String Argument Too Long',
            606: 'Action Not Authorized',
            607: 'Signature Failure',
            608: 'Signature Missing',
            609: 'Not Encrypted',
            610: 'Invalid Sequence',
            611: 'Invalid Control URL',
            612: 'No Such Session',
        }
def __getattr__(self, action):
"""Called when a method on the instance cannot be found.
Causes an action to be sent to UPnP server. See also
`object.__getattr__`.
Args:
action (str): The name of the unknown method.
Returns:
callable: The callable to be invoked. .
"""
# Define a function to be invoked as the method, which calls
# send_command.
def _dispatcher(self, *args, **kwargs):
"""Dispatch to send_command."""
return self.send_command(action, *args, **kwargs)
# rename the function so it appears to be the called method. We
# probably don't need this, but it doesn't harm
_dispatcher.__name__ = action
# _dispatcher is now an unbound menthod, but we need a bound method.
# This turns an unbound method into a bound method (i.e. one that
# takes self - an instance of the class - as the first parameter)
# pylint: disable=no-member
method = _dispatcher.__get__(self, self.__class__)
# Now we have a bound method, we cache it on this instance, so that
# next time we don't have to go through this again
setattr(self, action, method)
log.debug("Dispatching method %s", action)
# return our new bound method, which will be called by Python
return method
@staticmethod
def wrap_arguments(args=None):
"""Wrap a list of tuples in xml ready to pass into a SOAP request.
Args:
args (list): a list of (name, value) tuples specifying the
name of each argument and its value, eg
``[('InstanceID', 0), ('Speed', 1)]``. The value
can be a string or something with a string representation. The
arguments are escaped and wrapped in <name> and <value> tags.
Example:
>>> from soco import SoCo
>>> device = SoCo('192.168.1.101')
>>> s = Service(device)
>>> print(s.wrap_arguments([('InstanceID', 0), ('Speed', 1)]))
<InstanceID>0</InstanceID><Speed>1</Speed>'
"""
if args is None:
args = []
tags = []
for name, value in args:
tag = "<{name}>{value}</{name}>".format(
name=name, value=escape("%s" % value, {'"': """}))
# % converts to unicode because we are using unicode literals.
# Avoids use of 'unicode' function which does not exist in python 3
tags.append(tag)
xml = "".join(tags)
return xml
@staticmethod
def unwrap_arguments(xml_response):
"""Extract arguments and their values from a SOAP response.
Args:
xml_response (str): SOAP/xml response text (unicode,
not utf-8).
Returns:
dict: a dict of ``{argument_name, value)}`` items.
"""
# A UPnP SOAP response (including headers) looks like this:
# HTTP/1.1 200 OK
# CONTENT-LENGTH: bytes in body
# CONTENT-TYPE: text/xml; charset="utf-8" DATE: when response was
# generated
# EXT:
# SERVER: OS/version UPnP/1.0 product/version
#
# <?xml version="1.0"?>
# <s:Envelope
# xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
# s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
# <s:Body>
# <u:actionNameResponse
# xmlns:u="urn:schemas-upnp-org:service:serviceType:v">
# <argumentName>out arg value</argumentName>
# ... other out args and their values go here, if any
# </u:actionNameResponse>
# </s:Body>
# </s:Envelope>
# Get all tags in order. Elementree (in python 2.x) seems to prefer to
# be fed bytes, rather than unicode
xml_response = xml_response.encode('utf-8')
try:
tree = XML.fromstring(xml_response)
except PARSEERROR:
# Try to filter illegal xml chars (as unicode), in case that is
# the reason for the parse error
# NOTE: The PARSERROR used here is a Python 2.6 compat trick in
# our xml module. If we ever drop support for Python 2.6 it should
# be replaced with a simple XML.ParseError and the hack in .xml
# removed
filtered = illegal_xml_re.sub('', xml_response.decode('utf-8'))\
.encode('utf-8')
tree = XML.fromstring(filtered)
# Get the first child of the <Body> tag which will be
# <{actionNameResponse}> (depends on what actionName is). Turn the
# children of this into a {tagname, content} dict. XML unescaping
# is carried out for us by elementree.
action_response = tree.find(
"{http://schemas.xmlsoap.org/soap/envelope/}Body")[0]
return dict((i.tag, i.text or "") for i in action_response)
def build_command(self, action, args=None):
"""Build a SOAP request.
Args:
action (str): the name of an action (a string as specified in the
service description XML file) to be sent.
args (list, optional): Relevant arguments as a list of (name,
value) tuples.
Returns:
tuple: a tuple containing the POST headers (as a dict) and a
string containing the relevant SOAP body. Does not set
content-length, or host headers, which are completed upon
sending.
"""
# A complete request should look something like this:
# POST path of control URL HTTP/1.1
# HOST: host of control URL:port of control URL
# CONTENT-LENGTH: bytes in body
# CONTENT-TYPE: text/xml; charset="utf-8"
# SOAPACTION: "urn:schemas-upnp-org:service:serviceType:v#actionName"
#
# <?xml version="1.0"?>
# <s:Envelope
# xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
# s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
# <s:Body>
# <u:actionName
# xmlns:u="urn:schemas-upnp-org:service:serviceType:v">
# <argumentName>in arg value</argumentName>
# ... other in args and their values go here, if any
# </u:actionName>
# </s:Body>
# </s:Envelope>
arguments = self.wrap_arguments(args)
body = self.soap_body_template.format(
arguments=arguments, action=action, service_type=self.service_type,
version=self.version)
soap_action_template = \
"urn:schemas-upnp-org:service:{service_type}:{version}#{action}"
soap_action = soap_action_template.format(
service_type=self.service_type, version=self.version,
action=action)
headers = {'Content-Type': 'text/xml; charset="utf-8"',
'SOAPACTION': soap_action}
# Note that although we set the charset to utf-8 here, in fact the
# body is still unicode. It will only be converted to bytes when it
# is set over the network
return (headers, body)
    def send_command(self, action, args=None, cache=None, cache_timeout=None):
        """Send a command to a Sonos device.
        Args:
            action (str): the name of an action (a string as specified in the
                service description XML file) to be sent.
            args (list, optional): Relevant arguments as a list of (name,
                value) tuples.
            cache (Cache): A cache is operated so that the result will be
                stored for up to ``cache_timeout`` seconds, and a subsequent
                call with the same arguments within that period will be
                returned from the cache, saving a further network call. The
                cache may be invalidated or even primed from another thread
                (for example if a UPnP event is received to indicate that
                the state of the Sonos device has changed). If
                ``cache_timeout`` is missing or `None`, the cache will use a
                default value (which may be 0 - see `cache`). By default,
                the cache identified by the service's `cache` attribute will
                be used, but a different cache object may be specified in
                the `cache` parameter.
            cache_timeout (int, optional): Number of seconds for which the
                result may be cached (see `cache` above).
        Returns:
            dict: a dict of ``{argument_name: value}`` items.
        Raises:
            `SoCoUPnPException`: if a SOAP error occurs.
            `UnknownSoCoException`: if an unknown UPnP error occurs.
            `requests.exceptions.HTTPError`: if an http error.
        """
        if cache is None:
            cache = self.cache
        result = cache.get(action, args)
        if result is not None:
            log.debug("Cache hit")
            return result
        # Cache miss, so go ahead and make a network call
        headers, body = self.build_command(action, args)
        log.info("Sending %s %s to %s", action, args, self.soco.ip_address)
        log.debug("Sending %s, %s", headers, prettify(body))
        # Convert the body to bytes, and send it.
        response = requests.post(
            self.base_url + self.control_url,
            headers=headers,
            data=body.encode('utf-8')
        )
        log.debug("Received %s, %s", response.headers, response.text)
        status = response.status_code
        log.info(
            "Received status %s from %s", status, self.soco.ip_address)
        if status == 200:
            # The response is good. Get the output params, and return them.
            # NB an empty dict is a valid result. It just means that no
            # params are returned. By using response.text, we rely upon
            # the requests library to convert to unicode for us.
            result = self.unwrap_arguments(response.text) or True
            # Store in the cache. There is no need to do this if there was an
            # error, since we would want to try a network call again.
            cache.put(result, action, args, timeout=cache_timeout)
            return result
        elif status == 500:
            # Internal server error. UPnP requires this to be returned if the
            # device does not like the action for some reason. The returned
            # content will be a SOAP Fault. Parse it and raise an error.
            try:
                self.handle_upnp_error(response.text)
            except Exception as exc:
                log.exception(str(exc))
                raise
        else:
            # Something else has gone wrong. Probably a network error. Let
            # Requests handle it
            response.raise_for_status()
def handle_upnp_error(self, xml_error):
"""Disect a UPnP error, and raise an appropriate exception.
Args:
xml_error (str): a unicode string containing the body of the
UPnP/SOAP Fault response. Raises an exception containing the
error code.
"""
# An error code looks something like this:
# HTTP/1.1 500 Internal Server Error
# CONTENT-LENGTH: bytes in body
# CONTENT-TYPE: text/xml; charset="utf-8"
# DATE: when response was generated
# EXT:
# SERVER: OS/version UPnP/1.0 product/version
# <?xml version="1.0"?>
# <s:Envelope
# xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
# s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
# <s:Body>
# <s:Fault>
# <faultcode>s:Client</faultcode>
# <faultstring>UPnPError</faultstring>
# <detail>
# <UPnPError xmlns="urn:schemas-upnp-org:control-1-0">
# <errorCode>error code</errorCode>
# <errorDescription>error string</errorDescription>
# </UPnPError>
# </detail>
# </s:Fault>
# </s:Body>
# </s:Envelope>
#
# All that matters for our purposes is the errorCode.
# errorDescription is not required, and Sonos does not seem to use it.
# NB need to encode unicode strings before passing to ElementTree
xml_error = xml_error.encode('utf-8')
error = XML.fromstring(xml_error)
log.debug("Error %s", xml_error)
error_code = error.findtext(
'.//{urn:schemas-upnp-org:control-1-0}errorCode')
if error_code is not None:
description = self.UPNP_ERRORS.get(int(error_code), '')
raise SoCoUPnPException(
message='UPnP Error {0} received: {1} from {2}'.format(
error_code, description, self.soco.ip_address),
error_code=error_code,
error_description=description,
error_xml=xml_error
)
else:
# Unknown error, so just return the entire response
log.error("Unknown error received from %s", self.soco.ip_address)
raise UnknownSoCoException(xml_error)
def subscribe(
self, requested_timeout=None, auto_renew=False, event_queue=None):
"""Subscribe to the service's events.
Args:
requested_timeout (int, optional): If requested_timeout is
provided, a subscription valid for that
number of seconds will be requested, but not guaranteed. Check
`Subscription.timeout` on return to find out what period of
validity is actually allocated.
auto_renew (bool): If auto_renew is `True`, the subscription will
automatically be renewed just before it expires, if possible.
Default is `False`.
event_queue (:class:`~queue.Queue`): a thread-safe queue object on
which received events will be put. If not specified,
a (:class:`~queue.Queue`) will be created and used.
Returns:
`Subscription`: an insance of `Subscription`, representing
the new subscription.
To unsubscribe, call the `unsubscribe` method on the returned object.
"""
subscription = Subscription(
self, event_queue)
subscription.subscribe(
requested_timeout=requested_timeout, auto_renew=auto_renew)
return subscription
def _update_cache_on_event(self, event):
"""Update the cache when an event is received.
This will be called before an event is put onto the event queue. Events
will often indicate that the Sonos device's state has changed, so this
opportunity is made available for the service to update its cache. The
event will be put onto the event queue once this method returns.
`event` is an Event namedtuple: ('sid', 'seq', 'service', 'variables')
.. warning:: This method will not be called from the main thread but
by one or more threads, which handle the events as they come in.
You *must not* access any class, instance or global variables
without appropriate locks. Treat all parameters passed to this
method as read only.
"""
pass
    def iter_actions(self):
        """Yield the service's actions with their arguments.
        Yields:
            `Action`: the next action.
        Each action is an Action namedtuple, consisting of action_name
        (a string), in_args (a list of Argument namedtuples consisting of name
        and argtype), and out_args (ditto), eg::
            Action(
                name='SetFormat',
                in_args=[
                    Argument(name='DesiredTimeFormat', vartype='string'),
                    Argument(name='DesiredDateFormat', vartype='string')],
                out_args=[]
            )
        """
        # pylint: disable=too-many-locals
        # TODO: Provide for Allowed value list, Allowed value range,
        # default value
        # pylint: disable=invalid-name
        ns = '{urn:schemas-upnp-org:service-1-0}'
        # get the scpd body as bytes, and feed directly to elementtree
        # which likes to receive bytes
        scpd_body = requests.get(self.base_url + self.scpd_url).content
        tree = XML.fromstring(scpd_body)
        # parse the state variables to get the relevant variable types
        # (maps state variable name -> its dataType string)
        vartypes = {}
        srvStateTables = tree.findall('{0}serviceStateTable'.format(ns))
        for srvStateTable in srvStateTables:
            statevars = srvStateTable.findall('{0}stateVariable'.format(ns))
            for state in statevars:
                name = state.findtext('{0}name'.format(ns))
                vartypes[name] = state.findtext('{0}dataType'.format(ns))
        # find all the actions
        actionLists = tree.findall('{0}actionList'.format(ns))
        for actionList in actionLists:
            actions = actionList.findall('{0}action'.format(ns))
            for i in actions:
                action_name = i.findtext('{0}name'.format(ns))
                argLists = i.findall('{0}argumentList'.format(ns))
                # NOTE(review): an action with no <argumentList> element is
                # never yielded at all — confirm this is intended.
                for argList in argLists:
                    args_iter = argList.findall('{0}argument'.format(ns))
                    in_args = []
                    out_args = []
                    for arg in args_iter:
                        arg_name = arg.findtext('{0}name'.format(ns))
                        direction = arg.findtext('{0}direction'.format(ns))
                        related_variable = arg.findtext(
                            '{0}relatedStateVariable'.format(ns))
                        # KeyError here means the SCPD references a state
                        # variable it never declared.
                        vartype = vartypes[related_variable]
                        if direction == "in":
                            in_args.append(Argument(arg_name, vartype))
                        else:
                            # anything other than "in" is treated as "out"
                            out_args.append(Argument(arg_name, vartype))
                    yield Action(action_name, in_args, out_args)
def iter_event_vars(self):
"""Yield the services eventable variables.
Yields:
`tuple`: a tuple of (variable name, data type).
"""
# pylint: disable=invalid-name
ns = '{urn:schemas-upnp-org:service-1-0}'
scpd_body = requests.get(self.base_url + self.scpd_url).text
tree = XML.fromstring(scpd_body.encode('utf-8'))
# parse the state variables to get the relevant variable types
statevars = tree.findall('{0}stateVariable'.format(ns))
for state in statevars:
# We are only interested if 'sendEvents' is 'yes', i.e this
# is an eventable variable
if state.attrib['sendEvents'] == "yes":
name = state.findtext('{0}name'.format(ns))
vartype = state.findtext('{0}dataType'.format(ns))
yield (name, vartype)
class AlarmClock(Service):
    """Sonos alarm service, for setting and getting time and alarms."""
    def __init__(self, soco):
        super(AlarmClock, self).__init__(soco)
        # 801 is an AlarmClock-specific UPnP fault code; merging it into
        # UPNP_ERRORS lets handle_upnp_error render a readable message.
        self.UPNP_ERRORS.update(
            {
                801: 'Already an alarm for this time',
            })
class MusicServices(Service):
    """Sonos music services service, for functions related to 3rd party music
    services."""
    # No service-specific configuration beyond the base class.
    def __init__(self, soco):
        super(MusicServices, self).__init__(soco)
class DeviceProperties(Service):
    """Sonos device properties service, for functions relating to zones, LED
    state, stereo pairs etc."""
    # No service-specific configuration beyond the base class.
    def __init__(self, soco):
        super(DeviceProperties, self).__init__(soco)
class SystemProperties(Service):
    """Sonos system properties service, for functions relating to
    authentication etc."""
    # No service-specific configuration beyond the base class.
    def __init__(self, soco):
        super(SystemProperties, self).__init__(soco)
class ZoneGroupTopology(Service):
    """Sonos zone group topology service, for functions relating to network
    topology, diagnostics and updates."""
    def __init__(self, soco):
        super(ZoneGroupTopology, self).__init__(soco)
    def GetZoneGroupState(self, *args, **kwargs):
        """Overrides default handling to use the global shared zone group state
        cache, unless another cache is specified."""
        # Presumably the zone group state is the same for every zone, hence
        # a module-level shared cache rather than the per-service one —
        # confirm against the cache module.
        kwargs['cache'] = kwargs.get('cache', zone_group_state_shared_cache)
        return self.send_command('GetZoneGroupState', *args, **kwargs)
class GroupManagement(Service):
    """Sonos group management service, for services relating to groups."""
    # No service-specific configuration beyond the base class.
    def __init__(self, soco):
        super(GroupManagement, self).__init__(soco)
class QPlay(Service):
    """Sonos Tencent QPlay service (a Chinese music service)"""
    # No service-specific configuration beyond the base class.
    def __init__(self, soco):
        super(QPlay, self).__init__(soco)
class ContentDirectory(Service):
    """UPnP standard Content Directory service, for functions relating to
    browsing, searching and listing available music."""
    def __init__(self, soco):
        super(ContentDirectory, self).__init__(soco)
        # This service lives under the MediaServer device, so override the
        # control and event URLs.
        self.control_url = "/MediaServer/ContentDirectory/Control"
        self.event_subscription_url = "/MediaServer/ContentDirectory/Event"
        # For error codes, see table 2.7.16 in
        # http://upnp.org/specs/av/UPnP-av-ContentDirectory-v1-Service.pdf
        self.UPNP_ERRORS.update({
            701: 'No such object',
            702: 'Invalid CurrentTagValue',
            703: 'Invalid NewTagValue',
            704: 'Required tag',
            705: 'Read only tag',
            706: 'Parameter Mismatch',
            708: 'Unsupported or invalid search criteria',
            709: 'Unsupported or invalid sort criteria',
            710: 'No such container',
            711: 'Restricted object',
            712: 'Bad metadata',
            713: 'Restricted parent object',
            714: 'No such source resource',
            715: 'Resource access denied',
            716: 'Transfer busy',
            717: 'No such file transfer',
            718: 'No such destination resource',
            719: 'Destination resource access denied',
            720: 'Cannot process the request',
            })
class MS_ConnectionManager(Service):  # pylint: disable=invalid-name
    """UPnP standard connection manager service for the media server."""
    def __init__(self, soco):
        super(MS_ConnectionManager, self).__init__(soco)
        # Both the media server and media renderer expose a service named
        # "ConnectionManager"; set it explicitly here (presumably the base
        # class would otherwise derive it from the class name — confirm).
        self.service_type = "ConnectionManager"
        self.control_url = "/MediaServer/ConnectionManager/Control"
        self.event_subscription_url = "/MediaServer/ConnectionManager/Event"
class RenderingControl(Service):
    """UPnP standard rendering control service, for functions relating to
    playback rendering, eg bass, treble, volume and EQ."""
    def __init__(self, soco):
        super(RenderingControl, self).__init__(soco)
        # Lives under the MediaRenderer device.
        self.control_url = "/MediaRenderer/RenderingControl/Control"
        self.event_subscription_url = "/MediaRenderer/RenderingControl/Event"
class MR_ConnectionManager(Service):  # pylint: disable=invalid-name
    """UPnP standard connection manager service for the media renderer."""
    def __init__(self, soco):
        super(MR_ConnectionManager, self).__init__(soco)
        # See MS_ConnectionManager: explicit type, renderer-side URLs.
        self.service_type = "ConnectionManager"
        self.control_url = "/MediaRenderer/ConnectionManager/Control"
        self.event_subscription_url = "/MediaRenderer/ConnectionManager/Event"
class AVTransport(Service):
    """UPnP standard AV Transport service, for functions relating to transport
    management, eg play, stop, seek, playlists etc."""
    def __init__(self, soco):
        super(AVTransport, self).__init__(soco)
        # Lives under the MediaRenderer device.
        self.control_url = "/MediaRenderer/AVTransport/Control"
        self.event_subscription_url = "/MediaRenderer/AVTransport/Event"
        # For error codes, see
        # http://upnp.org/specs/av/UPnP-av-AVTransport-v1-Service.pdf
        self.UPNP_ERRORS.update({
            701: 'Transition not available',
            702: 'No contents',
            703: 'Read error',
            704: 'Format not supported for playback',
            705: 'Transport is locked',
            706: 'Write error',
            707: 'Media is protected or not writeable',
            708: 'Format not supported for recording',
            709: 'Media is full',
            710: 'Seek mode not supported',
            711: 'Illegal seek target',
            712: 'Play mode not supported',
            713: 'Record quality not supported',
            714: 'Illegal MIME-Type',
            715: 'Content "BUSY"',
            716: 'Resource Not found',
            717: 'Play speed not supported',
            718: 'Invalid InstanceID',
            737: 'No DNS Server',
            738: 'Bad Domain Name',
            739: 'Server Error',
            })
class Queue(Service):
    """Sonos queue service, for functions relating to queue management, saving
    queues etc."""
    def __init__(self, soco):
        super(Queue, self).__init__(soco)
        # Lives under the MediaRenderer device.
        self.control_url = "/MediaRenderer/Queue/Control"
        self.event_subscription_url = "/MediaRenderer/Queue/Event"
class GroupRenderingControl(Service):
    """Sonos group rendering control service, for functions relating to group
    volume etc."""
    def __init__(self, soco):
        super(GroupRenderingControl, self).__init__(soco)
        # Lives under the MediaRenderer device.
        self.control_url = "/MediaRenderer/GroupRenderingControl/Control"
        self.event_subscription_url = \
            "/MediaRenderer/GroupRenderingControl/Event"
| {
"content_hash": "a33e9344e4f79f6b5aa852f38c7d62db",
"timestamp": "",
"source": "github",
"line_count": 795,
"max_line_length": 79,
"avg_line_length": 40.17987421383648,
"alnum_prop": 0.5941520833985536,
"repo_name": "lawrenceakka/SoCo",
"id": "cf9170cda7882c57acc4f20a351d4a7f43c2e7d6",
"size": "32006",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "soco/services.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "168"
},
{
"name": "Makefile",
"bytes": "368"
},
{
"name": "Python",
"bytes": "532281"
},
{
"name": "Shell",
"bytes": "342"
}
],
"symlink_target": ""
} |
__all__ = [
'RedisError',
'ProtocolError',
'ReplyError',
'MaxClientsError',
'AuthError',
'PipelineError',
'MultiExecError',
'WatchVariableError',
'ChannelClosedError',
'ConnectionClosedError',
'ConnectionForcedCloseError',
'PoolClosedError',
'MasterNotFoundError',
'SlaveNotFoundError',
'ReadOnlyError',
]
class RedisError(Exception):
    """Root of the aioredis exception hierarchy."""
class ProtocolError(RedisError):
    """Raised when a protocol error occurs."""
class ReplyError(RedisError):
    """Raised for redis error replies (-ERR).

    Construction dispatches to the first subclass whose ``MATCH_REPLY``
    prefix matches the reply message.
    """
    # Prefix (str or tuple of str) matched against the reply message.
    MATCH_REPLY = None
    def __new__(cls, msg, *args):
        if msg:
            for subclass in cls.__subclasses__():
                prefix = subclass.MATCH_REPLY
                if prefix and msg.startswith(prefix):
                    return subclass(msg, *args)
        return super().__new__(cls, msg, *args)
class MaxClientsError(ReplyError):
    """Raised for redis server when the maximum number of client has been
    reached."""
    # ReplyError.__new__ dispatches to this class when the reply message
    # starts with this prefix.
    MATCH_REPLY = "ERR max number of clients reached"
class AuthError(ReplyError):
    """Raised when authentication errors occurs."""
    # str.startswith accepts a tuple, so either prefix selects this class.
    MATCH_REPLY = ("NOAUTH ", "ERR invalid password")
class PipelineError(RedisError):
    """Raised if command within pipeline raised error."""
    def __init__(self, errors):
        # args become ('<ClassName> errors:', errors); the class name is
        # interpolated so subclasses such as MultiExecError self-describe.
        super().__init__('{} errors:'.format(self.__class__.__name__), errors)
class MultiExecError(PipelineError):
    """Raised if command within MULTI/EXEC block caused error."""
class WatchVariableError(MultiExecError):
    """Raised if watched variable changed (EXEC returns None)."""
class ChannelClosedError(RedisError):
    """Raised when Pub/Sub channel is unsubscribed and messages queue is empty.
    """
class ReadOnlyError(RedisError):
    """Raised from slave when read-only mode is enabled"""
class MasterNotFoundError(RedisError):
    """Raised for sentinel master not found error."""
class SlaveNotFoundError(RedisError):
    """Raised for sentinel slave not found error."""
class MasterReplyError(RedisError):
    """Raised by sentinel client for master error replies."""
class SlaveReplyError(RedisError):
    """Raised by sentinel client for slave error replies."""
class ConnectionClosedError(RedisError):
    """Raised if connection to server was closed."""
class ConnectionForcedCloseError(ConnectionClosedError):
    """Raised if connection was closed with .close() method."""
class PoolClosedError(RedisError):
    """Raised if pool is closed."""
class RedisClusterError(RedisError):
    """Cluster exception class for aioredis exceptions."""
| {
"content_hash": "e3f8231dc9662874a17b29a8780a08ed",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 24.78301886792453,
"alnum_prop": 0.6855728968405025,
"repo_name": "ymap/aioredis",
"id": "6aa800eef3c465f12e6667f6ee0915307785aaab",
"size": "2627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aioredis/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2781"
},
{
"name": "Python",
"bytes": "556618"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.blocktools import (
create_block,
create_coinbase,
)
from test_framework.messages import (
CAuxPow,
uint256_from_compact,
)
from test_framework.p2p import P2PDataStore
from test_framework.util import assert_equal
from test_framework.auxpow_testing import computeAuxpow
import codecs
from io import BytesIO
class AuxpowInvalidPoWTest (BitcoinTestFramework):
  """Checks that blocks with an auxpow that fails the PoW check are rejected
  (both over P2P and via submitblock), while the same block with a valid
  auxpow is accepted."""
  def set_test_params (self):
    self.num_nodes = 1
    self.setup_clean_chain = True
    self.extra_args = [["-whitelist=127.0.0.1"]]
  def run_test (self):
    node = self.nodes[0]
    peer = node.add_p2p_connection (P2PDataStore ())
    self.log.info ("Sending block with invalid auxpow over P2P...")
    tip = node.getbestblockhash ()
    blk, blkHash = self.createBlock ()
    blk = self.addAuxpow (blk, blkHash, False)
    # "high-hash" is the reject reason for insufficient proof of work.
    peer.send_blocks_and_test ([blk], node, force_send=True,
                               success=False, reject_reason="high-hash")
    # The invalid block must not have advanced the chain tip.
    assert_equal (node.getbestblockhash (), tip)
    self.log.info ("Sending the same block with valid auxpow...")
    blk = self.addAuxpow (blk, blkHash, True)
    peer.send_blocks_and_test ([blk], node, success=True)
    assert_equal (node.getbestblockhash (), blkHash)
    self.log.info ("Submitting block with invalid auxpow...")
    tip = node.getbestblockhash ()
    blk, blkHash = self.createBlock ()
    blk = self.addAuxpow (blk, blkHash, False)
    # submitblock returns the rejection reason string, or None on success.
    assert_equal (node.submitblock (blk.serialize ().hex ()), "high-hash")
    assert_equal (node.getbestblockhash (), tip)
    self.log.info ("Submitting block with valid auxpow...")
    blk = self.addAuxpow (blk, blkHash, True)
    assert_equal (node.submitblock (blk.serialize ().hex ()), None)
    assert_equal (node.getbestblockhash (), blkHash)
  def createBlock (self):
    """
    Creates a new block that is valid for the current tip. It is marked as
    auxpow, but the auxpow is not yet filled in.
    """
    bestHash = self.nodes[0].getbestblockhash ()
    bestBlock = self.nodes[0].getblock (bestHash)
    tip = int (bestHash, 16)
    height = bestBlock["height"] + 1
    time = bestBlock["time"] + 1
    block = create_block (tip, create_coinbase (height), time)
    block.mark_auxpow ()
    block.rehash ()
    newHash = "%064x" % block.sha256
    return block, newHash
  def addAuxpow (self, block, blkHash, ok):
    """
    Fills in the auxpow for the given block message. It is either
    chosen to be valid (ok = True) or invalid (ok = False).
    """
    # The target is the block's nBits decoded to a 256-bit value, rendered
    # as a 64-digit hex string for the auxpow helper.
    target = b"%064x" % uint256_from_compact (block.nBits)
    auxpowHex = computeAuxpow (blkHash, target, ok)
    block.auxpow = CAuxPow ()
    block.auxpow.deserialize (BytesIO (bytes.fromhex (auxpowHex)))
    return block
# Standard functional-test entry point: run the test when invoked directly.
if __name__ == '__main__':
  AuxpowInvalidPoWTest ().main ()
| {
"content_hash": "3f9b421c34248715b0dd7ae8d2784aa6",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 75,
"avg_line_length": 32.28409090909091,
"alnum_prop": 0.6719464977120733,
"repo_name": "pataquets/namecoin-core",
"id": "498430abf71788cbee43705f65154c1d20d01bd1",
"size": "3294",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/functional/auxpow_invalidpow.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1183056"
},
{
"name": "C++",
"bytes": "9920495"
},
{
"name": "CMake",
"bytes": "29132"
},
{
"name": "Cap'n Proto",
"bytes": "1256"
},
{
"name": "Dockerfile",
"bytes": "1721"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "541"
},
{
"name": "M4",
"bytes": "235507"
},
{
"name": "Makefile",
"bytes": "139393"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "2857373"
},
{
"name": "QMake",
"bytes": "438"
},
{
"name": "Sage",
"bytes": "56897"
},
{
"name": "Scheme",
"bytes": "24552"
},
{
"name": "Shell",
"bytes": "210484"
}
],
"symlink_target": ""
} |
"""
Hubs and authorities analysis of graph structure.
"""
#!/usr/bin/env python
# Copyright (C) 2008-2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
# NetworkX:http://networkx.lanl.gov/
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__all__ = ['hits','hits_numpy','hits_scipy','authority_matrix','hub_matrix']
import networkx as nx
from networkx.exception import NetworkXError
def hits(G,max_iter=100,tol=1.0e-8,nstart=None):
    """Return HITS hubs and authorities values for nodes.
    The HITS algorithm computes two numbers for a node.
    Authorities estimates the node value based on the incoming links.
    Hubs estimates the node value based on outgoing links.
    Parameters
    ----------
    G : graph
      A NetworkX graph
    max_iter : integer, optional
      Maximum number of iterations in power method.
    tol : float, optional
      Error tolerance used to check convergence in power method iteration.
    nstart : dictionary, optional
      Starting value of each node for power method iteration.
    Returns
    -------
    (hubs,authorities) : two-tuple of dictionaries
       Two dictionaries keyed by node containing the hub and authority
       values.
    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> h,a=nx.hits(G)
    Notes
    -----
    The eigenvector calculation is done by the power iteration method
    and has no guarantee of convergence.  The iteration will stop
    after max_iter iterations or an error tolerance of
    number_of_nodes(G)*tol has been reached.
    The HITS algorithm was designed for directed graphs but this
    algorithm does not check if the input graph is directed and will
    execute on undirected graphs.
    References
    ----------
    .. [1] A. Langville and C. Meyer,
       "A survey of eigenvector methods of web information retrieval."
       http://citeseer.ist.psu.edu/713792.html
    .. [2] Jon Kleinberg,
       Authoritative sources in a hyperlinked environment
       Journal of the ACM 46 (5): 604-32, 1999.
       doi:10.1145/324133.324140.
       http://www.cs.cornell.edu/home/kleinber/auth.pdf.
    """
    # FIX: use isinstance instead of a type() equality test so that
    # subclasses of the multigraph types (which equally lack support for
    # this algorithm) are rejected too.
    if isinstance(G, (nx.MultiGraph, nx.MultiDiGraph)):
        raise Exception("hits() not defined for graphs with multiedges.")
    # choose fixed starting vector if not given
    if nstart is None:
        h = dict.fromkeys(G, 1.0 / G.number_of_nodes())
    else:
        h = nstart
    # normalize starting vector (l1)
    s = 1.0 / sum(h.values())
    for k in h:
        h[k] *= s
    i = 0
    while True:  # power iteration: make up to max_iter iterations
        hlast = h
        h = dict.fromkeys(hlast.keys(), 0)
        a = dict.fromkeys(hlast.keys(), 0)
        # this "matrix multiply" looks odd because it is
        # doing a left multiply a^T = hlast^T * G
        for n in h:
            for nbr in G[n]:
                a[nbr] += hlast[n] * G[n][nbr].get('weight', 1)
        # now multiply h = G a
        for n in h:
            for nbr in G[n]:
                h[n] += a[nbr] * G[n][nbr].get('weight', 1)
        # normalize hub vector (l1)
        s = 1.0 / sum(h.values())
        for n in h:
            h[n] *= s
        # normalize authority vector (l1)
        s = 1.0 / sum(a.values())
        for n in a:
            a[n] *= s
        # check convergence, l1 norm
        err = sum([abs(h[n] - hlast[n]) for n in h])
        if err < tol:
            break
        if i > max_iter:
            raise NetworkXError(\
            "HITS: power iteration failed to converge in %d iterations."%(i+1))
        i += 1
    return h, a
def authority_matrix(G,nodelist=None):
    """Return the HITS authority matrix.

    The authority matrix is M.T * M for adjacency matrix M of G
    (row/column order given by *nodelist* when supplied).
    """
    M=nx.to_numpy_matrix(G,nodelist=nodelist)
    return M.T*M
def hub_matrix(G,nodelist=None):
    """Return the HITS hub matrix.

    The hub matrix is M * M.T for adjacency matrix M of G
    (row/column order given by *nodelist* when supplied).
    """
    M=nx.to_numpy_matrix(G,nodelist=nodelist)
    return M*M.T
def hits_numpy(G):
    """Return HITS hubs and authorities values for nodes.
    The HITS algorithm computes two numbers for a node.
    Authorities estimates the node value based on the incoming links.
    Hubs estimates the node value based on outgoing links.
    Parameters
    -----------
    G : graph
      A NetworkX graph
    Returns
    -------
    (hubs,authorities) : two-tuple of dictionaries
       Two dictionaries keyed by node containing the hub and authority
       values.
    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> h,a=nx.hits(G)
    Notes
    -----
    The eigenvector calculation uses NumPy's interface to LAPACK.
    The HITS algorithm was designed for directed graphs but this
    algorithm does not check if the input graph is directed and will
    execute on undirected graphs.
    References
    ----------
    .. [1] A. Langville and C. Meyer,
       "A survey of eigenvector methods of web information retrieval."
       http://citeseer.ist.psu.edu/713792.html
    .. [2] Jon Kleinberg,
       Authoritative sources in a hyperlinked environment
       Journal of the ACM 46 (5): 604-32, 1999.
       doi:10.1145/324133.324140.
       http://www.cs.cornell.edu/home/kleinber/auth.pdf.
    """
    try:
        import numpy as np
    except ImportError:
        raise ImportError(\
            "hits_numpy() requires NumPy: http://scipy.org/")
    # Hub scores: dominant eigenvector of the hub matrix M*M.T.
    H=nx.hub_matrix(G,G.nodes())
    e,ev=np.linalg.eig(H)
    m=e.argsort()[-1] # index of maximum eigenvalue
    h=np.array(ev[:,m]).flatten()
    # Authority scores: dominant eigenvector of the authority matrix M.T*M.
    A=nx.authority_matrix(G,G.nodes())
    e,ev=np.linalg.eig(A)
    m=e.argsort()[-1] # index of maximum eigenvalue
    a=np.array(ev[:,m]).flatten()
    # NOTE(review): dividing by the vector sum normalizes to sum 1 and also
    # cancels the eigenvector's arbitrary sign — confirm sums are nonzero.
    hubs=dict(zip(G.nodes(),h/h.sum()))
    authorities=dict(zip(G.nodes(),a/a.sum()))
    return hubs,authorities
def hits_scipy(G,max_iter=100,tol=1.0e-6):
    """Return HITS hubs and authorities values for nodes.
    The HITS algorithm computes two numbers for a node.
    Authorities estimates the node value based on the incoming links.
    Hubs estimates the node value based on outgoing links.
    Parameters
    -----------
    G : graph
      A NetworkX graph
    max_iter : integer, optional
      Maximum number of iterations in power method.
    tol : float, optional
      Error tolerance used to check convergence in power method iteration.
    Returns
    -------
    (hubs,authorities) : two-tuple of dictionaries
       Two dictionaries keyed by node containing the hub and authority
       values.
    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> h,a=nx.hits(G)
    Notes
    -----
    This implementation uses SciPy sparse matrices.
    The eigenvector calculation is done by the power iteration method
    and has no guarantee of convergence.  The iteration will stop
    after max_iter iterations or an error tolerance of
    number_of_nodes(G)*tol has been reached.
    The HITS algorithm was designed for directed graphs but this
    algorithm does not check if the input graph is directed and will
    execute on undirected graphs.
    References
    ----------
    .. [1] A. Langville and C. Meyer,
       "A survey of eigenvector methods of web information retrieval."
       http://citeseer.ist.psu.edu/713792.html
    .. [2] Jon Kleinberg,
       Authoritative sources in a hyperlinked environment
       Journal of the ACM 46 (5): 604-632, 1999.
       doi:10.1145/324133.324140.
       http://www.cs.cornell.edu/home/kleinber/auth.pdf.
    """
    try:
        import scipy.sparse
        import numpy as np
    except ImportError:
        raise ImportError(\
            "hits_scipy() requires SciPy: http://scipy.org/")
    M=nx.to_scipy_sparse_matrix(G,nodelist=G.nodes())
    (n,m)=M.shape # should be square
    A=M.T*M # authority matrix
    # FIX: use np.ones/np.absolute instead of scipy.ones/scipy.absolute —
    # SciPy's top-level re-exports of NumPy functions were deprecated and
    # removed in modern releases; np is imported above.
    x=np.ones((n,1))/n  # initial guess
    # power iteration on authority matrix
    i=0
    while True:
        xlast=x
        x=A*x
        x=x/x.sum()
        # check convergence, l1 norm
        err=np.absolute(x-xlast).sum()
        if err < tol:
            break
        if i>max_iter:
            raise NetworkXError(\
            "HITS: power iteration failed to converge in %d iterations."%(i+1))
        i+=1
    a=np.asarray(x).flatten()
    # hub vector follows from the authority vector: h = M a
    h=np.asarray(M*a).flatten()
    hubs=dict(zip(G.nodes(),h/h.sum()))
    authorities=dict(zip(G.nodes(),a/a.sum()))
    return hubs,authorities
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip this module's tests if NumPy or SciPy is missing.

    Args:
        module: the module object being set up (required by nose's fixture
            protocol; unused here).

    Raises:
        SkipTest: if either NumPy or SciPy cannot be imported.
    """
    from nose import SkipTest
    try:
        import numpy
    except ImportError:
        # FIX: a bare ``except:`` also swallowed unrelated errors such as
        # KeyboardInterrupt; only ImportError means "not installed".
        raise SkipTest("NumPy not available")
    try:
        import scipy
    except ImportError:
        raise SkipTest("SciPy not available")
| {
"content_hash": "381e59a560f67fbf86ba51ebd6d5fa7e",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 79,
"avg_line_length": 30.482517482517483,
"alnum_prop": 0.6203257627896307,
"repo_name": "uwescience/myria-web",
"id": "7f6925f7c6c532375b97c841c42af58b0a3957da",
"size": "8718",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "appengine/networkx/algorithms/link_analysis/hits_alg.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42071"
},
{
"name": "HTML",
"bytes": "34877"
},
{
"name": "JavaScript",
"bytes": "253697"
},
{
"name": "Python",
"bytes": "2346705"
},
{
"name": "Shell",
"bytes": "699"
}
],
"symlink_target": ""
} |
import os
from setuptools import find_packages
from setuptools import setup
# Directory containing this setup.py; used to locate the README.
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
# The README becomes the PyPI long description.
# NOTE(review): open() uses the platform default encoding here; if
# README.rst ever contains non-ASCII text this can fail on some systems —
# consider encoding='utf-8'.
with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:
    README = file_obj.read()
# NOTE: This is duplicated throughout and we should try to
# consolidate.
# Keyword arguments shared by every google-cloud-* package's setup() call.
SETUP_BASE = {
    'author': 'Google Cloud Platform',
    'author_email': 'jjg+google-cloud-python@google.com',
    'scripts': [],
    'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',
    'license': 'Apache 2.0',
    'platforms': 'Posix; MacOS X; Windows',
    'include_package_data': True,
    'zip_safe': False,
    'classifiers': [
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Internet',
    ],
}
# Runtime dependencies, pinned to compatible minor-version ranges.
REQUIREMENTS = [
    'google-cloud-core >= 0.23.1, < 0.24dev',
    'google-cloud-logging >= 0.23.0, < 0.24dev',
    'gapic-google-cloud-error-reporting-v1beta1 >= 0.15.0, < 0.16dev'
]
setup(
    name='google-cloud-error-reporting',
    version='0.23.1',
    description='Python Client for Stackdriver Error Reporting',
    long_description=README,
    namespace_packages=[
        'google',
        'google.cloud',
    ],
    packages=find_packages(),
    install_requires=REQUIREMENTS,
    **SETUP_BASE
)
| {
"content_hash": "16f930efa3a8cca6c6a161d0089830f7",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 72,
"avg_line_length": 29.464285714285715,
"alnum_prop": 0.6236363636363637,
"repo_name": "daspecster/google-cloud-python",
"id": "e8086d32427709ed3dde6587c2b7816820ab394b",
"size": "2226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "error_reporting/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62009"
},
{
"name": "Python",
"bytes": "4033334"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
} |
"""Custom component for testing the deployed model."""
import logging
import json
import argparse
from google.cloud import aiplatform
from kfp.v2.components import executor
from kfp.v2.dsl import Artifact, Input
# pylint: disable=logging-fstring-interpolation
def test_endpoint(project_id: str,
                  data_region: str,
                  data_pipeline_root: str,
                  test_instances: str,
                  endpoint: Input[Artifact]):
    """Test an endpoint.
    Args:
        project_id: The project ID.
        data_region: The region for the endpoint.
        data_pipeline_root: The staging location for any custom job.
        test_instances: The testing instances (JSON-encoded, since they
            cross the pipeline boundary as a string).
        endpoint: The output artifact of the endpoint.
    """
    aiplatform.init(
        project=project_id,
        location=data_region,
        staging_bucket=data_pipeline_root)
    # The KFP artifact URI carries an 'aiplatform://v1/' scheme prefix;
    # strip it to recover the Vertex endpoint resource name.
    endpoint_rn = endpoint.uri.replace('aiplatform://v1/', '')
    # Rebinds the parameter name from the KFP artifact to the SDK Endpoint.
    endpoint = aiplatform.Endpoint(endpoint_rn)
    instances = json.loads(test_instances)
    predictions = endpoint.predict(instances=instances)
    # Log-only check: the component succeeds as long as predict() does.
    logging.info(f'prediction result {predictions}')
def executor_main():
    """Entry point for KFP component execution.

    Parses the executor input and the name of the component function from
    the command line, then hands both to the KFP executor.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--executor_input', type=str)
    arg_parser.add_argument('--function_to_execute', type=str)
    known_args, _ = arg_parser.parse_known_args()
    parsed_input = json.loads(known_args.executor_input)
    # The function must be defined at module level in this file.
    target_function = globals()[known_args.function_to_execute]
    runner = executor.Executor(
        executor_input=parsed_input,
        function_to_execute=target_function)
    runner.execute()
# Script entry point: enable INFO logging, then run the executor.
if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    executor_main()
| {
"content_hash": "7029a193dfedb3ee7877ef33baf89a87",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 64,
"avg_line_length": 27.229508196721312,
"alnum_prop": 0.6911499096929561,
"repo_name": "GoogleCloudPlatform/professional-services",
"id": "d05d6f0b8e6191c824ce58fe8d66e02ed710086f",
"size": "2258",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/vertex_pipeline/components/component_base/src/test_endpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "117994"
},
{
"name": "C++",
"bytes": "174"
},
{
"name": "CSS",
"bytes": "13405"
},
{
"name": "Component Pascal",
"bytes": "798"
},
{
"name": "Dockerfile",
"bytes": "15093"
},
{
"name": "Go",
"bytes": "352968"
},
{
"name": "HCL",
"bytes": "204776"
},
{
"name": "HTML",
"bytes": "1229668"
},
{
"name": "Java",
"bytes": "338810"
},
{
"name": "JavaScript",
"bytes": "59905"
},
{
"name": "Jinja",
"bytes": "60083"
},
{
"name": "Makefile",
"bytes": "14129"
},
{
"name": "Python",
"bytes": "2250081"
},
{
"name": "Scala",
"bytes": "978327"
},
{
"name": "Shell",
"bytes": "109299"
},
{
"name": "Smarty",
"bytes": "19839"
},
{
"name": "TypeScript",
"bytes": "147194"
}
],
"symlink_target": ""
} |
from __future__ import division
import math
import numpy
from scipy.io import wavfile
def wavread(fn):
    """Read a wav file the way matlab's ``wavread`` does.

    Returns ``(y, Fs)`` where ``y`` holds the samples as double precision
    floats scaled into [-1, 1) and ``Fs`` is the frame (sampling) rate.

    voicesauce's translated algorithms assume matlab-style data, i.e.
    doubles between -1 and 1, while scipy's ``wavfile.read`` returns the
    raw 16 bit PCM integers (-32768..32767), so every sample is divided
    by 32768 after reading.
    """
    # Reference:
    # http://mirlab.org/jang/books/audiosignalprocessing/matlab4waveRead.asp?title=4-2%20Reading%20Wave%20Files
    # XXX: 8 bit files would need to be detected and special cased here.
    rate, samples = wavfile.read(fn)
    scaled = samples / numpy.float64(32768.0)
    return scaled, rate
class SoundFile:
    """A wav file plus the derived bookkeeping the measurement code needs.

    ``settings`` is the parsed settings dict (see ``get_settings``); only
    ``settings['frameshift']`` (ms per analysis frame) is used here.
    ``wavfile`` is the path to the wav file on disk.
    """

    def __init__(self, settings, wavfile):
        self.settings = settings
        self.wavfile = wavfile   # path to wavfile
        self.matfile = None      # path to the companion .mat file
        self.y = None            # raw sample vector (matlab naming)
        self.Fs = None           # sampling rate; corresponds to Fs in opensauce
        self.data_len = None     # number of analysis frames
        self.f0 = None
        self.measurements = {}
        self.build()

    def build(self):
        """Populate matfile path, sample data, and frame count."""
        # Bug fix: the return value of get_matfile() was previously
        # discarded, leaving self.matfile permanently None.
        self.matfile = self.get_matfile()
        self.read_in()
        self.calc_data_len()

    def get_matfile(self):
        """Return the path of the .mat file matching this wav file."""
        return self.wavfile[:-3] + "mat"

    def read_in(self):
        """Load the samples and sampling rate from the wav file."""
        y, Fs = wavread(self.wavfile)
        self.samplerate = Fs
        self.data = y
        # Also fill the matlab-named attributes declared in __init__ so
        # they are no longer dead declarations.
        self.Fs = Fs
        self.y = y

    def calc_data_len(self):
        """Number of frameshift-ms analysis frames covering the data."""
        self.data_len = math.floor(
            len(self.data) / self.samplerate * 1000
            / int(self.settings['frameshift']))
def get_parameters(parameter_file):
    '''
    Return the names of the parameters that are switched on.

    Each line of the file looks like ``name | flag`` where a flag of
    ``1`` means the parameter is enabled.
    :rtype : list
    '''
    enabled = []
    with open(parameter_file, "r") as param_fh:
        for raw_line in param_fh:
            fields = [field.strip() for field in raw_line.split("|")]
            if fields[1] == '1':
                enabled.append(fields[0])
    return enabled
def get_settings(settings_file):
    '''
    Reads in settings file.

    Each non-blank line has the form ``name,value``.  Keys and values are
    returned as whitespace-stripped strings; fields after the second comma
    are ignored, matching the original file format.

    TODO parse each setting and convert to int/floats where appropriate
    :rtype : dict
    '''
    settings = {}
    with open(settings_file, "r") as f:
        for line in f.readlines():
            # Skip blank lines so a trailing newline in the file does not
            # raise an IndexError.
            if not line.strip():
                continue
            fields = line.split(",")
            # Bug fix: strip the fields; previously the value kept its
            # trailing newline (e.g. {'frameshift': '1\n'}).
            settings[fields[0].strip()] = fields[1].strip()
    return settings
| {
"content_hash": "84303267e81a2ae5d25a4453f6a7ec62",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 111,
"avg_line_length": 30.307692307692307,
"alnum_prop": 0.6221899927483684,
"repo_name": "voicesauce/opensauce-python",
"id": "faab1ce927f9d1e3a532a797bf4eac5e541ed981",
"size": "2758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "legacy/helpers_legacy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "229"
},
{
"name": "MATLAB",
"bytes": "459"
},
{
"name": "Python",
"bytes": "338575"
},
{
"name": "Shell",
"bytes": "6762"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetLastMessageThatContains(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetLastMessageThatContains Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        choreo_uri = '/Library/Twilio/SMSMessages/GetLastMessageThatContains'
        Choreography.__init__(self, temboo_session, choreo_uri)

    def new_input_set(self):
        # Fresh, empty input set for this Choreo.
        return GetLastMessageThatContainsInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result for this Choreo.
        return GetLastMessageThatContainsResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle on an in-flight execution of this Choreo.
        return GetLastMessageThatContainsChoreographyExecution(session, exec_id, path)
class GetLastMessageThatContainsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetLastMessageThatContains
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def set_AccountSID(self, value):
        """((required, string) The AccountSID provided when you signed up for a Twilio account.)"""
        self._set_input('AccountSID', value)

    def set_AuthToken(self, value):
        """((required, string) The authorization token provided when you signed up for a Twilio account.)"""
        self._set_input('AuthToken', value)

    def set_Filter(self, value):
        """((required, string) A search string to apply to the message body field.)"""
        self._set_input('Filter', value)

    def set_PageSize(self, value):
        """((optional, integer) The number of results per page to search through. Defaults to 50.)"""
        self._set_input('PageSize', value)

    def set_Page(self, value):
        """((optional, integer) The page of results to retrieve. Defaults to 0.)"""
        self._set_input('Page', value)

    def set_ResponseFormat(self, value):
        """((optional, string) The format that the response should be in. Valid values are: json (the default) and xml. This parameter is only valid when setting ResponseMode to "verbose".)"""
        self._set_input('ResponseFormat', value)

    def set_ResponseMode(self, value):
        """((optional, string) Used to simplify the response. Valid values are: simple and verbose. When set to simple, only the message string is returned. Verbose mode returns the full object. Defaults to "simple".)"""
        self._set_input('ResponseMode', value)

    def set_ReturnLegacyFormat(self, value):
        """((optional, boolean) If set to true, XML responses will be formatted using the deprecated /SMS/Messages resource schema. This should only be used if you have existing code that relies on the older schema.)"""
        self._set_input('ReturnLegacyFormat', value)
class GetLastMessageThatContainsResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetLastMessageThatContains Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE: the parameter name shadows the builtin ``str``; it is kept
        # unchanged for backward compatibility with generated-code callers.
        return json.loads(str)

    def get_TotalPages(self):
        """((integer) The total number of result pages that are available to search. If your search returns no results, you can increment the Page input to search further into the list of messages.)"""
        return self._output.get('TotalPages', None)

    def get_Response(self):
        """(The response from Twilio.)"""
        return self._output.get('Response', None)
class GetLastMessageThatContainsChoreographyExecution(ChoreographyExecution):
    # Execution handle for this Choreo; builds the matching result set
    # from a raw response.
    def _make_result_set(self, response, path):
        return GetLastMessageThatContainsResultSet(response, path)
| {
"content_hash": "44679f9321b6a174122bec1aa6b34e79",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 276,
"avg_line_length": 49.708333333333336,
"alnum_prop": 0.6963537300922046,
"repo_name": "egetzel/wecrow",
"id": "d5f6eb23156805bae10ae4bdbb01bdfdb0b2845f",
"size": "5097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "truehand2014/temboo/Library/Twilio/SMSMessages/GetLastMessageThatContains.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "11736"
},
{
"name": "Python",
"bytes": "474202"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: point Django at this
    # project's settings module, then hand off to the CLI dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "assembly.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
| {
"content_hash": "2f4ddac7e0c847f1d8527a0d3d067b88",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 25.444444444444443,
"alnum_prop": 0.7117903930131004,
"repo_name": "Karolain/cms",
"id": "0e530673cdf56c702f2240af0690b9e18c6b14fb",
"size": "251",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "55245"
},
{
"name": "HTML",
"bytes": "67153"
},
{
"name": "JavaScript",
"bytes": "31136"
},
{
"name": "Nginx",
"bytes": "1532"
},
{
"name": "Python",
"bytes": "19575"
},
{
"name": "Shell",
"bytes": "3889"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the RoleType model and the Member.roles relation, and rewrites
    # Member's custom permission set.  Historical migration: the code and
    # strings below are intentionally left byte-identical.

    dependencies = [
        ('nwbb_members', '0014_auto_20160524_2123'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='member',
            # NOTE(review): the 'view_full_postcode' description reads
            # "partial postcodes" -- likely a copy/paste typo, but the
            # string is kept untouched because this migration may already
            # have been applied.
            options={'permissions': (('view_member', 'Can view members'), ('view_full_postcode', 'Can view partial postcodes'), ('view_partial_postcode', 'Can view partial postcodes'), ('view_comments', 'Can view comments about a member'), ('view_bike_reg', 'Can view members reg numbers'))},
        ),
        migrations.RemoveField(
            model_name='member',
            name='roles',
        ),
        migrations.DeleteModel(
            name='RoleType',
        ),
    ]
| {
"content_hash": "4e8b82286fc895447752d9de0d8f4fe6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 292,
"avg_line_length": 31.875,
"alnum_prop": 0.5973856209150327,
"repo_name": "nwbb-sc/database",
"id": "4782f54943be316fe3d786ccf0c36b5a6ae8f044",
"size": "837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nwbb_members/migrations/0015_auto_20160603_1134.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20271"
},
{
"name": "HTML",
"bytes": "95904"
},
{
"name": "JavaScript",
"bytes": "51744"
},
{
"name": "Python",
"bytes": "86982"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.comments.moderation import CommentModerator, moderator
from django.contrib.contenttypes import generic
from django.db import models
from django.utils.encoding import force_unicode
from django.contrib.comments.models import Comment
from project_utils import CKEditor
from models import BlogEntry, TumblelogEntry
from tagging.models import TaggedItem
class TaggingInline(generic.GenericTabularInline):
    # Inline editor for the generic TaggedItem relation on an admin form.
    model = TaggedItem
    extra = 3  # blank tag rows shown by default
    verbose_name_plural = "Tags for this item:"
    max_num = 10
class CommentsInline(generic.GenericTabularInline):
    # Inline editor for comments attached via a generic relation; the
    # Comment model stores the related object's id in 'object_pk'.
    ct_fk_field = "object_pk"
    model = Comment
    extra = 1
    verbose_name_plural = "Comments for this item:"
    max_num = 10
class BlogEntryAdmin(admin.ModelAdmin):
    """
    An admin site object for Blog entries.
    """
    date_hierarchy = 'pub_date'
    list_display = ('headline', 'pub_date', 'author', 'status')
    list_filter = ('author', 'status',)
    fieldsets = (
        (None, {
            'fields': ('slug', 'headline', 'intro', 'body')
        }),
        ('Metadata', {
            'fields': ('status', 'pub_date', 'author', 'allow_comments')
        }),
    )
    # Use the CKEditor rich-text widget for all TextFields on this form.
    formfield_overrides = {
        models.TextField: {
            'widget' : CKEditor(ck_attrs={'height': '500px'})
        }
    }
    inlines = [TaggingInline]

    class Media:
        # Extra stylesheet loaded on the admin change form.
        css = {"all": ("styles/admin.css",)}
class TumblelogEntryAdmin(admin.ModelAdmin):
    """
    An admin site object for Tumblelog entries. Handles posting to Twitter.
    """
    date_hierarchy = 'pub_date'
    list_display = ('title', 'pub_date', 'author', 'status')
    list_filter = ('author', 'status',)
    fieldsets = (
        (None, {
            'fields': ('slug', 'title', 'post')
        }),
        ('Metadata', {
            'fields': ('status', 'pub_date', 'author', 'to_twitter')
        }),
    )
    # Use the CKEditor rich-text widget for all TextFields on this form.
    formfield_overrides = {
        models.TextField: {
            'widget' : CKEditor(ck_attrs={'height': '350px'})
        }
    }
    inlines = [TaggingInline]

    class Media:
        # Extra stylesheet loaded on the admin change form.
        css = {"all": ("styles/admin.css",)}

    def save_model(self, request, obj, form, change):
        # Post the entry to Twitter (when requested) before saving, and
        # report the outcome to the admin user.
        # NOTE(review): 'p' is presumably the "post to twitter" choice --
        # confirm against TumblelogEntry.to_twitter's field choices.
        if obj.to_twitter == 'p':
            save_successful = obj.twitter_pre_save(request=request)
            message_args = {
                'name': force_unicode(obj._meta.verbose_name),
                'obj': force_unicode(obj)
            }
            if save_successful:
                self.message_user(request, (
                    "The %(name)s \"%(obj)s\" was successfully posted to "
                    "twitter."
                ) % message_args)
            else:
                self.message_user(request, (
                    "The %(name)s \"%(obj)s\" could not be posted to twitter. "
                    "There was an error communicating with the Twitter API."
                ) % message_args)
        super(TumblelogEntryAdmin, self).save_model(request, obj, form, change)
# Register both entry models with their customized admin classes.
admin.site.register(TumblelogEntry, TumblelogEntryAdmin)
admin.site.register(BlogEntry, BlogEntryAdmin)
class BlogEntryModerator(CommentModerator):
    # Comments on a BlogEntry are enabled/disabled per-entry via its
    # 'allow_comments' field.
    enable_field = 'allow_comments'

moderator.register(BlogEntry, BlogEntryModerator)
"content_hash": "16dc55f6031693a0d06ecbbae628a494",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 79,
"avg_line_length": 30.373831775700936,
"alnum_prop": 0.588,
"repo_name": "mazelife/django-belleville",
"id": "cce815858d0c838dc3773f00f261d861912bde53",
"size": "3250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "belleville/blogging/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "341825"
},
{
"name": "Python",
"bytes": "72077"
}
],
"symlink_target": ""
} |
import socket
import struct
import sys
import os
import time
# see com.intellij.idea.SocketLock for the server side of this interface

# These $...$ placeholders are substituted at build/install time with the
# IDE launcher path and its config/system directories -- do not edit.
RUN_PATH = u'$RUN_PATH$'
CONFIG_PATH = u'$CONFIG_PATH$'
SYSTEM_PATH = u'$SYSTEM_PATH$'
args = []

# Translate the raw command line into the argument list understood by the
# IDE: 'diff'/'merge' subcommands, -l/--line, and file[:line] specs
# (relative paths are made absolute so the IDE resolves them correctly).
skip_next = False
for i, arg in enumerate(sys.argv[1:]):
    if arg == '-h' or arg == '-?' or arg == '--help':
        print(('Usage:\n' +
               ' {0} -h |-? | --help\n' +
               ' {0} [-l|--line line] file[:line]\n' +
               ' {0} diff <left> <right>\n' +
               ' {0} merge <local> <remote> [base] <merged>').format(sys.argv[0]))
        exit(0)
    elif arg == 'diff' and i == 0:
        args.append(arg)
    elif arg == 'merge' and i == 0:
        args.append(arg)
    elif arg == '-l' or arg == '--line':
        args.append(arg)
        skip_next = True  # the next token is the line number itself
    elif skip_next:
        args.append(arg)
        skip_next = False
    else:
        if ':' in arg:
            # file:line form -- split from the right so the last colon
            # wins and a prefix containing ':' is kept in the path part.
            file_path, line_number = arg.rsplit(':', 1)
            if line_number.isdigit():
                args.append('-l')
                args.append(line_number)
                args.append(os.path.abspath(file_path))
            else:
                args.append(os.path.abspath(arg))
        else:
            args.append(os.path.abspath(arg))
def launch_with_port(port, token):
    """Try to activate a running IDE instance listening on *port*.

    The server sends a sequence of length-prefixed (big-endian int16)
    config paths; if one matches CONFIG_PATH this is our instance, and an
    "activate" command (authenticated by *token*) is sent along with the
    current working directory and the translated arguments.  Returns True
    when our instance was found, False otherwise.
    """
    found = False
    s = socket.socket()
    s.settimeout(0.3)  # don't hang on a stale port file
    try:
        s.connect(('127.0.0.1', port))
    except:
        return False
    while True:
        try:
            path_len = struct.unpack(">h", s.recv(2))[0]
            path = s.recv(path_len)
            if os.path.abspath(path) == os.path.abspath(CONFIG_PATH):
                found = True
                break
        except:
            # End of the advertised path list (or a read error): stop.
            break
    if found:
        if args:
            # Wire format: "activate <token>\0<cwd>\0<arg1>\0<arg2>..."
            # preceded by a big-endian int16 length prefix.
            cmd = "activate " + token + '\0' + os.getcwd() + "\0" + "\0".join(args)
            encoded = struct.pack(">h", len(cmd)) + cmd
            s.send(encoded)
            time.sleep(0.5)  # don't close socket immediately
        return True
    return False
# The running IDE advertises its port and an auth token in well-known
# files under its config/system directories.
port_path = os.path.join(CONFIG_PATH, 'port')
token_path = os.path.join(SYSTEM_PATH, 'token')

if os.path.exists(port_path) and os.path.exists(token_path):
    try:
        f = open(port_path)
        port = int(f.read())
        f.close()

        f = open(token_path)
        token = f.read()
        f.close()

        launch_with_port(port, token)
    except:
        type, value, traceback = sys.exc_info()
        print('Cannot activate a running instance: ' + str(value))
else:
    print('No IDE instance has been found. New one will be started.')

# NOTE(review): the result of launch_with_port() is ignored above, so a
# new IDE is exec'd below even when an existing instance was activated --
# confirm whether an early exit was intended on successful activation.
if sys.platform == "darwin":
    # OS X: RUN_PATH is *.app path
    if len(args):
        args.insert(0, "--args")
    os.execvp("open", ["-a", RUN_PATH] + args)
else:
    # Unix common
    bin_dir, bin_file = os.path.split(RUN_PATH)
    os.execv(RUN_PATH, [bin_file] + args)
| {
"content_hash": "3bfd3e29a89e9e2863e4da8ed9631be8",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 83,
"avg_line_length": 28.320388349514563,
"alnum_prop": 0.5138841275282825,
"repo_name": "hurricup/intellij-community",
"id": "270cbc04fe8e189df5f55fb5e7b98706ab92811d",
"size": "2964",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "platform/platform-resources/src/launcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "20665"
},
{
"name": "AspectJ",
"bytes": "182"
},
{
"name": "Batchfile",
"bytes": "59458"
},
{
"name": "C",
"bytes": "215610"
},
{
"name": "C#",
"bytes": "1538"
},
{
"name": "C++",
"bytes": "196925"
},
{
"name": "CSS",
"bytes": "197224"
},
{
"name": "CoffeeScript",
"bytes": "1759"
},
{
"name": "Cucumber",
"bytes": "14382"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "Groff",
"bytes": "35232"
},
{
"name": "Groovy",
"bytes": "2831828"
},
{
"name": "HLSL",
"bytes": "57"
},
{
"name": "HTML",
"bytes": "1809290"
},
{
"name": "J",
"bytes": "5050"
},
{
"name": "Java",
"bytes": "156277117"
},
{
"name": "JavaScript",
"bytes": "563135"
},
{
"name": "Jupyter Notebook",
"bytes": "92629"
},
{
"name": "Kotlin",
"bytes": "1888388"
},
{
"name": "Lex",
"bytes": "179397"
},
{
"name": "Makefile",
"bytes": "2352"
},
{
"name": "NSIS",
"bytes": "52097"
},
{
"name": "Objective-C",
"bytes": "28750"
},
{
"name": "Perl",
"bytes": "903"
},
{
"name": "Perl6",
"bytes": "26"
},
{
"name": "Protocol Buffer",
"bytes": "6607"
},
{
"name": "Python",
"bytes": "23832829"
},
{
"name": "Ruby",
"bytes": "1217"
},
{
"name": "Scala",
"bytes": "11698"
},
{
"name": "Shell",
"bytes": "61583"
},
{
"name": "Smalltalk",
"bytes": "64"
},
{
"name": "TeX",
"bytes": "25473"
},
{
"name": "Thrift",
"bytes": "1846"
},
{
"name": "TypeScript",
"bytes": "9469"
},
{
"name": "XSLT",
"bytes": "113040"
}
],
"symlink_target": ""
} |
"""
Annotate sequences with partition numbers.
% python scripts/annotate-partitions.py <pmap_file> <file1> [ <file2> ... ]
Partition-annotated sequences will be in <fileN>.part.
Use '-h' for parameter help.
"""
import os
import textwrap
import sys
from khmer import __version__, Nodegraph
from khmer.kfile import check_input_files, check_space
from khmer.khmer_args import (sanitize_help, KhmerArgumentParser)
DEFAULT_K = 32  # default k-mer size used when -k/--ksize is not given
def get_parser():
    """Build the command-line parser for annotate-partitions.py."""
    epilog = textwrap.dedent("""\
    Load in a partitionmap (generally produced by :program:`partition-graph.py`
    or :program:`merge-partitions.py`) and annotate the sequences in the given
    files with their partition IDs. Use :program:`extract-partitions.py` to
    extract sequences into separate group files.

    Example (results will be in ``random-20-a.fa.part``)::

        load-graph.py -k 20 example tests/test-data/random-20-a.fa
        partition-graph.py example
        merge-partitions.py -k 20 example
        annotate-partitions.py -k 20 example tests/test-data/random-20-a.fa
    """)
    argparser = KhmerArgumentParser(
        description="Annotate sequences with partition IDs.",
        epilog=epilog)
    argparser.add_argument('-k', '--ksize', type=int, default=DEFAULT_K,
                           help="k-mer size (default: %d)" % DEFAULT_K)
    argparser.add_argument('graphbase',
                           help='basename for input and output files')
    argparser.add_argument('input_filenames',
                           metavar='input_sequence_filename', nargs='+',
                           help='input FAST[AQ] sequences to annotate.')
    argparser.add_argument('-f', '--force', default=False,
                           action='store_true',
                           help='Overwrite output file if it exists')
    return argparser
def main():
    """Annotate every input file with partition IDs from the merged pmap."""
    cli_args = sanitize_help(get_parser()).parse_args()

    graph = Nodegraph(cli_args.ksize, 1, 1)
    seqfiles = cli_args.input_filenames
    pmap_file = cli_args.graphbase + '.pmap.merged'

    # Validate all inputs and available disk space before doing any work.
    check_input_files(pmap_file, cli_args.force)
    for seqfile in seqfiles:
        check_input_files(seqfile, cli_args.force)
    check_space(seqfiles, cli_args.force)

    print('loading partition map from:', pmap_file, file=sys.stderr)
    graph.load_partitionmap(pmap_file)

    for seqfile in seqfiles:
        print('outputting partitions for', seqfile, file=sys.stderr)
        partfile = os.path.basename(seqfile) + '.part'
        n_parts = graph.output_partitions(seqfile, partfile)
        print('output %d partitions for %s' % (
            n_parts, seqfile), file=sys.stderr)
        print('partitions are in', partfile, file=sys.stderr)
if __name__ == '__main__':
    # Script entry point.
    main()
| {
"content_hash": "1dd92b1158e589ab88bfa303357edebc",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 34.177215189873415,
"alnum_prop": 0.6544444444444445,
"repo_name": "souravsingh/khmer",
"id": "3d4e98faa4ee859fbea06a7e77c8500de29c0a07",
"size": "4544",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/annotate-partitions.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "500623"
},
{
"name": "GLSL",
"bytes": "493"
},
{
"name": "Makefile",
"bytes": "22719"
},
{
"name": "Python",
"bytes": "1282062"
},
{
"name": "Roff",
"bytes": "9581"
},
{
"name": "Shell",
"bytes": "5544"
}
],
"symlink_target": ""
} |
""" Unit tests for pyfive's high_level module. """
import os
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
from numpy.testing import assert_raises
import pyfive
DIRNAME = os.path.dirname(__file__)
# Test fixture written with an early HDF5 file format version.
EARLIEST_HDF5_FILE = os.path.join(DIRNAME, 'earliest.hdf5')
# Polyglot string type for representing unicode
try:
    string_type = unicode  # Python 2
except NameError:
    string_type = str  # Python 3: str is already unicode
def test_file_class():
    # Top-level File attributes reflect how the file was opened.
    with pyfive.File(EARLIEST_HDF5_FILE) as hdf:
        assert hdf.filename == EARLIEST_HDF5_FILE
        assert hdf.mode == 'r'
        assert hdf.userblock_size == 0
def test_group_class():
    """Exercise the mapping protocol and attributes of a Group."""
    with pyfive.File(EARLIEST_HDF5_FILE) as hfile:
        grp = hfile['group1']
        subgrp = grp['subgroup1']

        ################
        # class methods
        ################

        # __iter__()
        count = 0
        for i in grp:
            count += 1
        assert count == 2

        # __contains__()
        assert 'dataset2' in grp
        assert 'subgroup1' in grp
        assert 'foobar' not in grp

        # __getitem__()
        assert grp['subgroup1'].name == '/group1/subgroup1'
        assert_raises(KeyError, grp.__getitem__, 'foobar')

        # keys()
        assert 'dataset2' in grp.keys()
        assert 'subgroup1' in grp.keys()
        assert 'foobar' not in grp.keys()

        # values()
        assert len(grp.values()) == 2

        # items()
        assert len(grp.items()) == 2

        # get()
        assert grp.get('subgroup1').name == '/group1/subgroup1'
        assert grp.get('foobar') is None

        ####################
        # class attributes
        ####################
        attrs = grp.attrs
        assert isinstance(attrs, dict)
        assert_almost_equal(attrs['attr3'], 12.34, 2)
        assert attrs['attr3'].dtype == np.dtype('float32')

        assert grp.name == '/group1'
        assert grp.file is hfile
        assert grp.parent is hfile

        assert subgrp.name == '/group1/subgroup1'
        # Bug fix: this line previously re-asserted grp.file (a copy/paste
        # duplicate); the subgroup's .file should point at the File too.
        assert subgrp.file is hfile
        assert subgrp.parent is grp
def test_dataset_class():
    with pyfive.File(EARLIEST_HDF5_FILE) as hdf:
        ds1 = hdf['dataset1']
        ds2 = hdf['group1']['dataset2']

        # data access and size metadata (identical for both datasets)
        for ds in (ds1, ds2):
            assert_array_equal(ds[:], np.arange(4))
            assert_array_equal(ds.value, np.arange(4))
            assert ds.len() == 4
            assert ds.shape == (4, )
            assert ds.ndim == 1
            assert ds.size == 4

        # the storage dtypes differ between the two datasets
        assert ds1.dtype == np.dtype('<i4')
        assert ds2.dtype == np.dtype('>u8')

        # contiguous storage: no chunking, compression, or filters
        for ds in (ds1, ds2):
            assert ds.chunks is None
            assert ds.compression is None
            assert ds.compression_opts is None
            assert ds.scaleoffset is None
            assert ds.shuffle is False
            assert ds.fletcher32 is False

        # attributes
        assert isinstance(ds1.attrs, dict)
        assert ds1.attrs['attr2'] == 130
        assert isinstance(ds2.attrs, dict)
        assert ds2.attrs['attr4'] == b'Hi'

        # hierarchy bookkeeping
        assert ds1.name == '/dataset1'
        assert ds2.name == '/group1/dataset2'
        assert ds1.file is hdf
        assert ds2.file is hdf
        assert ds1.parent.name == '/'
        assert ds2.parent.name == '/group1'
def test_get_objects_by_path():
    # gh-15: absolute paths and trailing slashes resolve correctly, and
    # missing components raise KeyError.
    with pyfive.File(EARLIEST_HDF5_FILE) as hdf:
        group = hdf['/group1']
        assert hdf['/group1/subgroup1'].name == '/group1/subgroup1'
        # absolute paths also work when indexing from a group
        assert group['/group1/subgroup1'].name == '/group1/subgroup1'

        assert hdf['group1/dataset2/'].name == '/group1/dataset2'

        for bad_path in ('group1/fake', 'group1/subgroup1/fake',
                         'group1/dataset2/fake'):
            assert_raises(KeyError, hdf.__getitem__, bad_path)
def test_astype():
    with pyfive.File(EARLIEST_HDF5_FILE) as hdf:
        ds = hdf['dataset1']
        assert ds.dtype == np.dtype('<i4')
        # astype() is a context manager that changes the read dtype only
        # inside the with-block.
        for target in ('i2', 'f8'):
            with ds.astype(target):
                assert ds[:].dtype == np.dtype(target)
def test_read_direct():
    with pyfive.File(EARLIEST_HDF5_FILE) as hdf:
        ds = hdf['dataset1']

        # full copy into a pre-allocated destination array
        dest = np.zeros(4)
        ds.read_direct(dest)
        assert_array_equal(dest, [0, 1, 2, 3])

        # matching source/destination slices
        dest = np.zeros(4)
        ds.read_direct(dest, np.s_[:2], np.s_[:2])
        assert_array_equal(dest, [0, 1, 0, 0])

        dest = np.zeros(4)
        ds.read_direct(dest, np.s_[1:3], np.s_[2:])
        assert_array_equal(dest, [0, 0, 1, 2])
def test_raise_error_noseek():
    # File-like objects that expose read() but not seek() are rejected.
    class _NoSeek(object):
        def read(self):
            return b'fakedata'

    assert_raises(ValueError, pyfive.File, _NoSeek())
def test_raise_error_invalid_dereference():
    # References pointing at an unknown address raise ValueError.
    class _BadReference(object):
        address_of_reference = 999

    with pyfive.File(EARLIEST_HDF5_FILE) as hdf:
        assert_raises(ValueError, hdf._dereference, _BadReference())
| {
"content_hash": "eb9ec6546056d6ab1ebe8e3dab7b6a53",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 75,
"avg_line_length": 27.146341463414632,
"alnum_prop": 0.5827493261455525,
"repo_name": "jjhelmus/pyfive",
"id": "a23510d71537e3484d68a214630c2148de724d47",
"size": "5565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_high_level.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "135796"
}
],
"symlink_target": ""
} |
"""Contains the parent class for Scriptlets."""
# Scriptlet.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
class Scriptlet(object):
    """Parent class for machine-specific custom code ("Scriptlets").

    Subclasses hook into startup by overriding on_load().
    """

    def __init__(self, machine, name):
        # Keep references to the machine controller and our own name.
        self.machine = machine
        self.name = name

        logger_name = 'Scriptlet.' + name
        self.log = logging.getLogger(logger_name)
        self.log.debug("Loading Scriptlet: %s", name)

        # Give the subclass its startup hook.
        self.on_load()

    def __repr__(self):
        return '<Scriptlet.{}>'.format(self.name)

    def on_load(self):
        """Automatically called when this Scriptlet loads. It's the intention
        that the Scriptlet writer will overwrite this method in the Scriptlet.
        """
        pass
| {
"content_hash": "a297f5eae9c02d55b746c4d412b91e82",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 29.535714285714285,
"alnum_prop": 0.6553808948004837,
"repo_name": "qcapen/mpf",
"id": "a357b017c789056ce37a06593618e8a17ed8be5f",
"size": "827",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mpf/system/scriptlet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1839"
},
{
"name": "Makefile",
"bytes": "44"
},
{
"name": "Python",
"bytes": "1407986"
},
{
"name": "Shell",
"bytes": "729"
}
],
"symlink_target": ""
} |
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script shows how to monitor UCS Manager events and define your own
# call back to take specific action on the respective events.
# Usage: watchUcsManagerEvents.py [options]
#
# Options:
# -h, --help show this help message and exit
# -i IP, --ip=IP [Mandatory] UCSM IP Address
# -u USERNAME, --username=USERNAME
# [Mandatory] Account Username for UCSM Login
# -p PASSWORD, --password=PASSWORD
# [Mandatory] Account Password for UCSM Login
#
import getpass
import optparse
from UcsSdk import *
def getpassword(prompt):
    """Prompt for a password with terminal echo disabled.

    getpass.getpass already selects the correct platform implementation
    (POSIX, Windows, or a fallback) at import time, so the previous
    manual dispatch through the private ``unix_getpass``/``win_getpass``
    names was redundant and fragile.
    """
    return getpass.getpass(prompt=prompt)
def callback_all(mce):
    # Catch-all callback: invoked for every UCSM event regardless of
    # ClassId.  ``mce`` is the management change event the SDK delivers.
    print 'Received a New Event with ClassId: ' + str(mce.mo.classId)
    print "ChangeList: ", mce.changeList
    print "EventId: ", mce.eventId
def callback_lsServer(mce):
    # Callback for events filtered to classId "LsServer" (service
    # profiles); see the AddEventHandler call in __main__ below.
    print 'Received a New Service Profile Event: ' + str(mce.mo.classId)
    print "ChangeList: ", mce.changeList
    print "EventId: ", mce.eventId
if __name__ == "__main__":
    handle = UcsHandle()
    try:
        # --- command-line handling -------------------------------------
        parser = optparse.OptionParser()
        parser.add_option('-i', '--ip',dest="ip",
                        help="[Mandatory] UCSM IP Address")
        parser.add_option('-u', '--username',dest="userName",
                        help="[Mandatory] Account Username for UCSM Login")
        parser.add_option('-p', '--password',dest="password",
                        help="[Mandatory] Account Password for UCSM Login")
        (options, args) = parser.parse_args()

        if not options.ip:
            parser.print_help()
            parser.error("Provide UCSM IP Address")
        if not options.userName:
            parser.print_help()
            parser.error("Provide UCSM UserName")
        if not options.password:
            # Prompt interactively rather than requiring it on the CLI.
            options.password=getpassword("UCSM Password:")

        handle.Login(options.ip,options.userName,options.password)

        # Add an event handle "ev_all" to montitor the events generated by UCS
        # Manager for any of the ClassIds
        ev_all = handle.AddEventHandler()

        # Get the list of active event handles.
        handle.GetEventHandlers()

        # Remove an event handle "ev_all"
        handle.RemoveEventHandler(ev_all)

        # Use your own callback method to take specific action on respective
        # events.
        ev_all_callback = handle.AddEventHandler(callBack = callback_all)
        handle.RemoveEventHandler(ev_all_callback)

        # Add an event handle to filter events based on classId = lsServer
        ev_lsServer = handle.AddEventHandler(classId = "LsServer",
                                             callBack = callback_lsServer)
        handle.RemoveEventHandler(ev_lsServer)

        # loop that keeps the script running for us to get events/callbacks
        while True:
            time.sleep(5)

        handle.Logout()
    except Exception, err:
        # NOTE(review): Logout() is called twice in this handler (and is
        # unreachable after the infinite loop above) -- presumably
        # defensive; confirm the intended shutdown path.
        handle.Logout()
        print "Exception:", str(err)
        import traceback, sys
        print '-'*60
        traceback.print_exc(file=sys.stdout)
        print '-'*60
        handle.Logout()
| {
"content_hash": "bc2b0102226d5a4839ae9d4e2180431f",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 74,
"avg_line_length": 34.39252336448598,
"alnum_prop": 0.678804347826087,
"repo_name": "CiscoUcs/UcsPythonSDK",
"id": "ad80f51d75cfb839f4cd857298ca40993234b0be",
"size": "3699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/watchUcsManagerEvents.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "28238274"
}
],
"symlink_target": ""
} |
from . import web_reqrep
from . import web_exceptions
from . import web_urldispatcher
from . import web_ws
from .web_reqrep import * # noqa
from .web_exceptions import * # noqa
from .web_urldispatcher import * # noqa
from .web_ws import * # noqa
from .protocol import HttpVersion # noqa
import asyncio
from . import hdrs
from .abc import AbstractRouter, AbstractMatchInfo
from .log import web_logger
from .server import ServerHttpProtocol
# Public API: re-export everything from the web_* submodules plus the
# names defined in this module.
__all__ = (web_reqrep.__all__ +
           web_exceptions.__all__ +
           web_urldispatcher.__all__ +
           web_ws.__all__ +
           ('Application', 'RequestHandler',
            'RequestHandlerFactory', 'HttpVersion'))
class RequestHandler(ServerHttpProtocol):
    """Per-connection HTTP protocol that dispatches each request through
    the Application's router and middleware chain."""

    # Method/path of the request currently being handled; only used by
    # __repr__ for debugging.
    _meth = 'none'
    _path = 'none'

    def __init__(self, manager, app, router, *,
                 secure_proxy_ssl_header=None, **kwargs):
        super().__init__(**kwargs)

        self._manager = manager  # the RequestHandlerFactory that owns us
        self._app = app
        self._router = router
        self._middlewares = app.middlewares
        self._secure_proxy_ssl_header = secure_proxy_ssl_header

    def __repr__(self):
        return "<{} {}:{} {}>".format(
            self.__class__.__name__, self._meth, self._path,
            'connected' if self.transport is not None else 'disconnected')

    def connection_made(self, transport):
        # Register the live transport with the factory so it can track
        # open connections.
        super().connection_made(transport)

        self._manager.connection_made(self, transport)

    def connection_lost(self, exc):
        self._manager.connection_lost(self, exc)

        super().connection_lost(exc)

    @asyncio.coroutine
    def handle_request(self, message, payload):
        """Resolve *message* against the router, run the middleware-wrapped
        handler, and write the response to the transport."""
        if self.access_log:
            now = self._loop.time()  # start time for access-log latency

        app = self._app
        request = Request(
            app, message, payload,
            self.transport, self.reader, self.writer,
            secure_proxy_ssl_header=self._secure_proxy_ssl_header)
        self._meth = request.method
        self._path = request.path
        try:
            match_info = yield from self._router.resolve(request)

            assert isinstance(match_info, AbstractMatchInfo), match_info

            resp = None
            request._match_info = match_info
            # "Expect: 100-continue" -- let the route decide whether to
            # send the interim response before the body is read.
            expect = request.headers.get(hdrs.EXPECT)
            if expect and expect.lower() == "100-continue":
                resp = (
                    yield from match_info.route.handle_expect_header(request))

            if resp is None:
                handler = match_info.handler
                # Wrap the handler with the middlewares so the first
                # registered middleware ends up outermost.
                for factory in reversed(self._middlewares):
                    handler = yield from factory(app, handler)
                resp = yield from handler(request)

            assert isinstance(resp, StreamResponse), \
                ("Handler {!r} should return response instance, "
                 "got {!r} [middlewares {!r}]").format(
                     match_info.handler, type(resp), self._middlewares)
        except HTTPException as exc:
            # HTTP exceptions double as responses.
            resp = exc

        resp_msg = yield from resp.prepare(request)
        yield from resp.write_eof()

        # notify server about keep-alive
        self.keep_alive(resp_msg.keep_alive())

        # log access
        if self.access_log:
            self.log_access(message, None, resp_msg, self._loop.time() - now)

        # for repr
        self._meth = 'none'
        self._path = 'none'
class RequestHandlerFactory:
    """Callable factory producing handler protocol instances.

    Also tracks live connections (handler -> transport) so that they can be
    drained and force-closed via :meth:`finish_connections`.
    """
    def __init__(self, app, router, *,
                 handler=RequestHandler, loop=None,
                 secure_proxy_ssl_header=None, **kwargs):
        self._app = app
        self._router = router
        self._handler = handler
        self._loop = loop
        self._connections = {}
        self._secure_proxy_ssl_header = secure_proxy_ssl_header
        self._kwargs = kwargs
        # Handlers log through the application logger unless overridden.
        self._kwargs.setdefault('logger', app.logger)
    @property
    def secure_proxy_ssl_header(self):
        return self._secure_proxy_ssl_header
    @property
    def connections(self):
        # Snapshot of currently connected handlers.
        return list(self._connections.keys())
    def connection_made(self, handler, transport):
        self._connections[handler] = transport
    def connection_lost(self, handler, exc=None):
        if handler in self._connections:
            del self._connections[handler]
    @asyncio.coroutine
    def finish_connections(self, timeout=None):
        """Gracefully close tracked connections, force-closing leftovers.

        Without a truthy *timeout* the polling wait is skipped entirely and
        transports are closed right after handlers are asked to shut down.
        """
        # try to close connections in 90% of graceful timeout
        timeout90 = None
        if timeout:
            timeout90 = timeout / 100 * 90
        for handler in self._connections.keys():
            handler.closing(timeout=timeout90)
        @asyncio.coroutine
        def cleanup():
            # Poll until all handlers have deregistered, doubling the delay
            # while it is below 5 seconds.
            sleep = 0.05
            while self._connections:
                yield from asyncio.sleep(sleep, loop=self._loop)
                if sleep < 5:
                    sleep = sleep * 2
        if timeout:
            try:
                yield from asyncio.wait_for(
                    cleanup(), timeout, loop=self._loop)
            except asyncio.TimeoutError:
                self._app.logger.warning(
                    "Not all connections are closed (pending: %d)",
                    len(self._connections))
        # Force-close whatever is still open.
        for transport in self._connections.values():
            transport.close()
        self._connections.clear()
    def __call__(self):
        # Used as a protocol factory: each call produces a fresh handler.
        return self._handler(
            self, self._app, self._router, loop=self._loop,
            secure_proxy_ssl_header=self._secure_proxy_ssl_header,
            **self._kwargs)
class Application(dict):
    """dict-like application container wiring together router, loop,
    middlewares and finish callbacks."""

    def __init__(self, *, logger=web_logger, loop=None,
                 router=None, handler_factory=RequestHandlerFactory,
                 middlewares=()):
        # Fall back to the default event loop / URL dispatcher when the
        # caller did not provide explicit ones.
        self._loop = loop if loop is not None else asyncio.get_event_loop()
        if router is None:
            router = UrlDispatcher()
        assert isinstance(router, AbstractRouter), router
        self._router = router
        self._handler_factory = handler_factory
        self._finish_callbacks = []
        self.logger = logger
        # Every middleware factory must be a coroutine function.
        for mw in middlewares:
            assert asyncio.iscoroutinefunction(mw), mw
        self._middlewares = list(middlewares)

    @property
    def router(self):
        """Router used to resolve incoming requests."""
        return self._router

    @property
    def loop(self):
        """Event loop this application is bound to."""
        return self._loop

    @property
    def middlewares(self):
        """Middleware factories applied to every handler."""
        return self._middlewares

    def make_handler(self, **kwargs):
        """Create a protocol factory suitable for serving this app."""
        return self._handler_factory(
            self, self.router, loop=self.loop, **kwargs)

    @asyncio.coroutine
    def finish(self):
        """Run registered finish callbacks (newest first), reporting any
        errors through the loop's exception handler."""
        pending, self._finish_callbacks = self._finish_callbacks, []
        for cb, args, kwargs in pending:
            try:
                res = cb(self, *args, **kwargs)
                if (asyncio.iscoroutine(res) or
                        isinstance(res, asyncio.Future)):
                    yield from res
            except Exception as exc:
                # One failing callback must not stop the remaining ones.
                self._loop.call_exception_handler({
                    'message': "Error in finish callback",
                    'exception': exc,
                    'application': self,
                })

    def register_on_finish(self, func, *args, **kwargs):
        """Register *func* to run at :meth:`finish`; last registered runs
        first."""
        self._finish_callbacks.insert(0, (func, args, kwargs))

    def __call__(self):
        """gunicorn compatibility"""
        return self

    def __repr__(self):
        return "<Application>"
| {
"content_hash": "7d89b9ad43bb57de3d351749a1a4e8b1",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 78,
"avg_line_length": 30.70124481327801,
"alnum_prop": 0.5683200432490877,
"repo_name": "flying-sheep/aiohttp",
"id": "706fbb04ed6093828ca6cc4723c5d4c9aca883ab",
"size": "7399",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "aiohttp/web.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1359"
},
{
"name": "PowerShell",
"bytes": "3361"
},
{
"name": "Python",
"bytes": "915427"
}
],
"symlink_target": ""
} |
"""Training binary."""
import functools
from typing import Any, Dict, List, Optional, Tuple
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
from hypertransformer.tf import common_flags # pylint:disable=unused-import
from hypertransformer.tf import eval_model_flags # pylint:disable=unused-import
from hypertransformer.tf.core import common
from hypertransformer.tf.core import common_ht
from hypertransformer.tf.core import layerwise
from hypertransformer.tf.core import layerwise_defs # pylint:disable=unused-import
from hypertransformer.tf.core import train_lib
from hypertransformer.tf.core import util
FLAGS = flags.FLAGS
def make_train_config():
  """Builds a `common.TrainConfig` from command-line flags."""
  config = common.TrainConfig(
      train_steps=FLAGS.train_steps,
      steps_between_saves=FLAGS.steps_between_saves)
  return config
def make_optimizer_config():
  """Builds a `common.OptimizerConfig` from learning-rate flags."""
  config = common.OptimizerConfig(
      learning_rate=FLAGS.learning_rate,
      lr_decay_steps=FLAGS.learning_rate_decay_steps,
      lr_decay_rate=FLAGS.learning_rate_decay_rate)
  return config
def common_model_config():
  """Returns ModelConfig keyword arguments shared by all model variants."""
  return dict(
      num_transformer_samples=FLAGS.samples_transformer,
      num_cnn_samples=FLAGS.samples_cnn,
      num_labels=FLAGS.num_labels,
      image_size=FLAGS.image_size,
      cnn_model_name=FLAGS.cnn_model_name,
      embedding_dim=FLAGS.embedding_dim,
      cnn_dropout_rate=FLAGS.cnn_dropout_rate,
      use_decoder=FLAGS.use_decoder,
      add_trainable_weights=FLAGS.add_trainable_weights,
      var_reg_weight=FLAGS.weight_variation_regularization,
      transformer_activation=FLAGS.transformer_activation,
      transformer_nonlinearity=FLAGS.transformer_nonlinearity,
      cnn_activation=FLAGS.cnn_activation,
      default_num_channels=FLAGS.default_num_channels,
      shared_fe_dropout=FLAGS.shared_fe_dropout,
      fe_dropout=FLAGS.fe_dropout,
  )
def make_layerwise_model_config():
  """Makes 'layerwise' model config from command-line flags."""
  # A falsy flag value means "unspecified" (None).
  num_features = (int(FLAGS.num_layerwise_features)
                  if FLAGS.num_layerwise_features else None)
  if FLAGS.lw_weight_allocation == 'spatial':
    weight_allocation = common_ht.WeightAllocation.SPATIAL
  elif FLAGS.lw_weight_allocation == 'output':
    weight_allocation = common_ht.WeightAllocation.OUTPUT_CHANNEL
  else:
    raise ValueError(f'Unknown `lw_weight_allocation` flag value '
                     f'"{FLAGS.lw_weight_allocation}"')
  return common_ht.LayerwiseModelConfig(
      feature_layers=2,
      query_key_dim_frac=FLAGS.lw_key_query_dim,
      value_dim_frac=FLAGS.lw_value_dim,
      internal_dim_frac=FLAGS.lw_inner_dim,
      num_layers=FLAGS.num_layers,
      heads=FLAGS.heads,
      kernel_size=common_flags.KERNEL_SIZE.value,
      stride=common_flags.STRIDE.value,
      dropout_rate=FLAGS.dropout_rate,
      num_features=num_features,
      nonlinear_feature=FLAGS.lw_use_nonlinear_feature,
      weight_allocation=weight_allocation,
      generate_bn=FLAGS.lw_generate_bn,
      generate_bias=FLAGS.lw_generate_bias,
      shared_feature_extractor=FLAGS.shared_feature_extractor,
      shared_features_dim=FLAGS.shared_features_dim,
      separate_bn_vars=FLAGS.separate_evaluation_bn_vars,
      shared_feature_extractor_padding=FLAGS.shared_feature_extractor_padding,
      generator=FLAGS.layerwise_generator,
      # Per-layer heads are only needed when a warmup schedule is active.
      train_heads=FLAGS.warmup_steps > 0,
      max_prob_remove_unlabeled=FLAGS.max_prob_remove_unlabeled,
      max_prob_remove_labeled=FLAGS.max_prob_remove_labeled,
      number_of_trained_cnn_layers=(
          common_flags.NUMBER_OF_TRAINED_CNN_LAYERS.value),
      skip_last_nonlinearity=FLAGS.transformer_skip_last_nonlinearity,
      l2_reg_weight=FLAGS.l2_reg_weight,
      logits_feature_extractor=FLAGS.logits_feature_extractor,
      shared_head_weight=common_flags.SHARED_HEAD_WEIGHT.value,
      **common_model_config())
def make_optimizer(optim_config,
                   global_step):
  """Returns an exponentially-decayed learning rate and a SGD optimizer."""
  lr = tf.train.exponential_decay(
      optim_config.learning_rate, global_step,
      optim_config.lr_decay_steps, optim_config.lr_decay_rate)
  return lr, tf.train.GradientDescentOptimizer(learning_rate=lr)
def make_train_op(optimizer,
                  loss,
                  train_vars=None):
  """Builds an op minimizing the mean loss; increments the global step."""
  step = tf.train.get_or_create_global_step()
  mean_loss = tf.reduce_mean(loss)
  return optimizer.minimize(mean_loss, global_step=step, var_list=train_vars)
def make_dataset_config(dataset_spec=''):
  """Builds the training `DatasetConfig` from flags.

  Args:
    dataset_spec: optional dataset spec string; falls back to
      `FLAGS.train_dataset` when empty.

  Returns:
    A populated `common_ht.DatasetConfig` for the 'train' split.
  """
  spec = dataset_spec or FLAGS.train_dataset
  dataset, label_set = util.parse_dataset_spec(spec)
  if label_set is None:
    # Default to the first `use_labels` labels when none are specified.
    label_set = list(range(FLAGS.use_labels))
  return common_ht.DatasetConfig(
      dataset_name=dataset,
      use_label_subset=label_set,
      tfds_split='train',
      data_dir=FLAGS.data_dir,
      rotation_probability=FLAGS.rotation_probability,
      smooth_probability=FLAGS.smooth_probability,
      contrast_probability=FLAGS.contrast_probability,
      resize_probability=FLAGS.resize_probability,
      negate_probability=FLAGS.negate_probability,
      roll_probability=FLAGS.roll_probability,
      angle_range=FLAGS.angle_range,
      rotate_by_90=FLAGS.random_rotate_by_90,
      per_label_augmentation=FLAGS.per_label_augmentation,
      cache_path=FLAGS.data_numpy_dir,
      balanced_batches=FLAGS.balanced_batches,
      shuffle_labels_seed=FLAGS.shuffle_labels_seed,
      apply_image_augmentations=FLAGS.apply_image_augmentations,
      augment_individually=FLAGS.augment_images_individually,
      num_unlabeled_per_class=FLAGS.unlabeled_samples_per_class,
  )
def _default(new, default):
return new if new >= 0 else default
def make_test_dataset_config(dataset_spec = ''):
  """Builds the test `DatasetConfig` from flags.

  Test-specific augmentation flags with a negative value fall back to the
  corresponding training flags (see `_default`).

  Args:
    dataset_spec: optional dataset spec; defaults to `FLAGS.test_dataset`.

  Raises:
    ValueError: if the spec does not name an explicit label subset.
  """
  if not dataset_spec:
    dataset_spec = FLAGS.test_dataset
  dataset, label_set = util.parse_dataset_spec(dataset_spec)
  if label_set is None:
    raise ValueError('Test dataset should specify a set of labels.')
  return common_ht.DatasetConfig(
      dataset_name=dataset,
      use_label_subset=label_set,
      tfds_split=FLAGS.test_split,
      data_dir=FLAGS.data_dir,
      rotation_probability=_default(FLAGS.test_rotation_probability,
                                    FLAGS.rotation_probability),
      smooth_probability=_default(FLAGS.test_smooth_probability,
                                  FLAGS.smooth_probability),
      contrast_probability=_default(FLAGS.test_contrast_probability,
                                    FLAGS.contrast_probability),
      resize_probability=_default(FLAGS.test_resize_probability,
                                  FLAGS.resize_probability),
      negate_probability=_default(FLAGS.test_negate_probability,
                                  FLAGS.negate_probability),
      roll_probability=_default(FLAGS.test_roll_probability,
                                FLAGS.roll_probability),
      angle_range=_default(FLAGS.test_angle_range, FLAGS.angle_range),
      rotate_by_90=FLAGS.test_random_rotate_by_90,
      per_label_augmentation=FLAGS.test_per_label_augmentation,
      balanced_batches=FLAGS.balanced_batches,
      shuffle_labels_seed=FLAGS.shuffle_labels_seed,
      cache_path=FLAGS.data_numpy_dir,
      # Image-level augmentations are always disabled at test time.
      apply_image_augmentations=False,
      num_unlabeled_per_class=FLAGS.unlabeled_samples_per_class,
  )
def _make_warmup_loss(loss_heads,
                      loss_prediction,
                      global_step):
  """Uses head losses to build aggregate loss cycling through them.

  Args:
    loss_heads: per-layer head losses, ordered first-to-last layer.
    loss_prediction: loss of the actual (final) model prediction.
    global_step: current step (float tensor, as cast by the caller).

  Returns:
    Tuple `(loss, weights)`; `weights` has one entry per head plus a final
    entry for `loss_prediction`.
  """
  # The warmup period is broken into a set of "head activation periods".
  # Each period, one head weight is linearly growing, while the previous
  # head weight goes down.
  # Basically, each moment of time only two heads are active and the active
  # heads slide towards the final layer.
  num_heads = len(loss_heads)
  steps_per_stage = FLAGS.warmup_steps / num_heads
  loss = 0
  weights = []
  # The following code ends up returning just the true model head loss
  # after `global step` reaches `warmup_steps`.
  for stage, head_loss in enumerate(loss_heads):
    target_steps = stage * steps_per_stage
    norm_step_dist = tf.abs(global_step - target_steps) / steps_per_stage
    # This weight starts at 0 and peaks reaching 1 at `target_steps`. It then
    # decays linearly to 0 and stays 0.
    weight = tf.maximum(0.0, 1.0 - norm_step_dist)
    weights.append(weight)
    loss += weight * head_loss
  target_steps = num_heads * steps_per_stage
  norm_step_dist = 1.0 + (global_step - target_steps) / steps_per_stage
  norm_step_dist = tf.nn.relu(norm_step_dist)
  # Weight for the actual objective linearly grows after the final layer head
  # peaks and then stays equal to 1.
  weight = tf.minimum(1.0, norm_step_dist)
  weights.append(weight)
  loss += weight * loss_prediction
  return loss, weights
def make_loss(labels,
              predictions,
              heads):
  """Makes a full loss including head 'warmup' losses.

  Returns:
    Tuple `(loss, losses, warmup_weights)`: the aggregate loss, per-head
    cross-entropy losses (final prediction last) and the warmup weights.
  """
  head_losses = [
      tf.losses.softmax_cross_entropy(
          labels, logits, label_smoothing=FLAGS.label_smoothing)
      for logits in heads + [predictions]
  ]
  if len(head_losses) == 1:
    # No warmup heads: the prediction loss is the whole objective.
    return head_losses[0], head_losses, [tf.constant(1.0, dtype=tf.float32)]
  step = tf.cast(tf.train.get_or_create_global_step(), tf.float32)
  loss, warmup_weights = _make_warmup_loss(head_losses[:-1], head_losses[-1],
                                           step)
  return loss, head_losses, warmup_weights
def create_shared_head(shared_features,
                       real_classes,
                       real_class_min,
                       real_class_max):
  """Creates a real class prediction head from the shared feature.

  Args:
    shared_features: batch of shared feature vectors (or None).
    real_classes: per-sample original class ids (or None) — presumably the
      dataset's own labels, not the episode labels.
    real_class_min: smallest class id in training (or None).
    real_class_max: largest class id in training (or None).

  Returns:
    `(loss, accuracy)` tensors for the auxiliary head, or `(None, None)`
    when inputs or class boundaries are missing.
  """
  if real_classes is None or shared_features is None:
    return None, None
  if real_class_min is None or real_class_max is None:
    # Typo fixed in the warning text ('Skippin' -> 'Skipping').
    tf.logging.warning('Training classes boundaries are not provided. '
                       'Skipping shared head creation!')
    return None, None
  total_classes = real_class_max - real_class_min + 1
  with tf.variable_scope('shared_head', reuse=tf.AUTO_REUSE):
    fc = tf.layers.Dense(units=total_classes, name='fc')
    predictions = fc(shared_features)
  # Shift ids so they start at zero before one-hot encoding.
  classes = real_classes - real_class_min
  one_hot_gt = tf.one_hot(classes, depth=total_classes)
  loss = tf.losses.softmax_cross_entropy(one_hot_gt, predictions,
                                         label_smoothing=FLAGS.label_smoothing)
  pred_classes = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
  accuracy = tf.cast(tf.math.equal(classes, pred_classes), tf.float32)
  num_samples = tf.cast(tf.shape(shared_features)[0], tf.float32)
  accuracy = tf.reduce_sum(accuracy) / num_samples
  return loss, accuracy
def create_layerwise_model(
    model_config,
    dataset,
    test_dataset,
    state,
    optim_config):
  """Creates a hierarchical Transformer-CNN model.

  Builds the weight-generating model on `dataset`, evaluates the generated
  CNN on both train and test batches, assembles the loss (optionally with
  per-layer warmup heads and the shared head) and the train op.

  Args:
    model_config: `common_ht.LayerwiseModelConfig`.
    dataset: training data namespace (transformer_* / cnn_* tensors).
    test_dataset: evaluation data namespace with the same fields.
    state: `train_lib.ModelState`; its `loss` field is populated here.
    optim_config: `common.OptimizerConfig`.

  Returns:
    A `common.TrainState` with the train op and summaries.
  """
  tf.logging.info('Building the model')
  global_step = tf.train.get_or_create_global_step()
  model = layerwise.build_model(model_config.cnn_model_name,
                                model_config=model_config)
  with tf.variable_scope('model'):
    weight_blocks = model.train(dataset.transformer_images,
                                dataset.transformer_labels,
                                mask=dataset.transformer_masks,
                                mask_random_samples=True,
                                enable_fe_dropout=True)
    predictions = model.evaluate(dataset.cnn_images,
                                 weight_blocks=weight_blocks,
                                 training=False)
    heads = []
    if model_config.train_heads:
      # Each layer output's second element holds per-layer head predictions
      # (None when the layer has no head).
      outputs = model.layer_outputs.values()
      heads = [output[1] for output in outputs if output[1] is not None]
    test_weight_blocks = model.train(test_dataset.transformer_images,
                                     test_dataset.transformer_labels,
                                     mask=test_dataset.transformer_masks)
    test_predictions = model.evaluate(test_dataset.cnn_images,
                                      weight_blocks=test_weight_blocks,
                                      training=False)
  with tf.variable_scope('loss'):
    labels = tf.one_hot(dataset.cnn_labels, depth=model_config.num_labels)
    pred_labels = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
    num_cnn_samples = tf.cast(tf.shape(dataset.cnn_labels)[0], tf.float32)
    # Fraction of CNN samples whose predicted label matches the ground truth.
    def _acc(pred):
      accuracy = tf.cast(tf.math.equal(dataset.cnn_labels, pred), tf.float32)
      return tf.reduce_sum(accuracy) / num_cnn_samples
    accuracy = _acc(pred_labels)
    head_preds = [tf.cast(tf.argmax(head, axis=-1), tf.int32) for head in heads]
    head_accs = [_acc(pred) for pred in head_preds]
    test_pred_labels = tf.cast(tf.argmax(test_predictions, axis=-1), tf.int32)
    test_accuracy = tf.cast(
        tf.math.equal(test_dataset.cnn_labels, test_pred_labels), tf.float32)
    # NOTE(review): normalized by the *train* CNN batch size; only correct
    # if train and test use the same number of CNN samples — confirm.
    test_accuracy = tf.reduce_sum(test_accuracy) / num_cnn_samples
    summaries = []
    reg_losses = tf.losses.get_losses(
        loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES)
    if reg_losses:
      summaries.append(tf.summary.scalar('loss/regularization',
                                         tf.reduce_sum(reg_losses)))
    shared_head_loss, shared_head_acc = create_shared_head(
        weight_blocks.shared_features, dataset.transformer_real_classes,
        dataset.real_class_min, dataset.real_class_max)
    state.loss, _, warmup_weights = make_loss(labels, predictions, heads)
    summaries.append(tf.summary.scalar('loss/ce', state.loss))
    if reg_losses:
      state.loss += tf.reduce_sum(reg_losses)
    _, optimizer = make_optimizer(optim_config, global_step)
    if shared_head_loss is not None:
      # The shared head only contributes when its weight is positive.
      if model_config.shared_head_weight > 0.0:
        weighted_head_loss = shared_head_loss * model_config.shared_head_weight
        state.loss += weighted_head_loss
        summaries.append(tf.summary.scalar('loss/shared_head_loss',
                                           shared_head_loss))
        summaries.append(tf.summary.scalar('loss/weighted_shared_head_loss',
                                           weighted_head_loss))
    for head_id, acc in enumerate(head_accs):
      summaries.append(tf.summary.scalar(f'accuracy/head-{head_id+1}', acc))
    for head_id, warmup_weight in enumerate(warmup_weights[:-1]):
      summaries.append(tf.summary.scalar(f'warmup_weights/head-{head_id+1}',
                                         warmup_weight))
    if heads:
      summaries.append(tf.summary.scalar('warmup_weights/main',
                                         warmup_weights[-1]))
    train_op = make_train_op(optimizer, state.loss)
    if shared_head_acc is not None and model_config.shared_head_weight > 0.0:
      summaries.append(tf.summary.scalar('accuracy/shared_head_accuracy',
                                         shared_head_acc))
  return common.TrainState(
      train_op=train_op,
      step_initializer=tf.group(dataset.randomize_op,
                                test_dataset.randomize_op),
      large_summaries=[],
      small_summaries=summaries + [
          tf.summary.scalar('accuracy/accuracy', accuracy),
          tf.summary.scalar('accuracy/test_accuracy', test_accuracy),
          tf.summary.scalar('loss/loss', state.loss)
      ],
      )
def create_shared_feature_model(
    model_config,
    dataset,
    test_dataset,
    state,
    optim_config):
  """Creates an image feature extractor model for pre-training.

  Only the shared feature extractor and its auxiliary classification head
  are built; the head loss becomes the whole training objective.

  Args:
    model_config: `common_ht.LayerwiseModelConfig`.
    dataset: training data namespace (transformer_* tensors).
    test_dataset: unused; kept for signature parity with
      `create_layerwise_model`.
    state: `train_lib.ModelState`; its `loss` field is populated here.
    optim_config: `common.OptimizerConfig`.

  Returns:
    A `common.TrainState` with the train op and summaries.
  """
  del test_dataset
  tf.logging.info('Building the model')
  global_step = tf.train.get_or_create_global_step()
  model = layerwise.build_model(model_config.cnn_model_name,
                                model_config=model_config)
  with tf.variable_scope('model'):
    weight_blocks = model.train(dataset.transformer_images,
                                dataset.transformer_labels,
                                mask=dataset.transformer_masks,
                                mask_random_samples=True,
                                enable_fe_dropout=True,
                                only_shared_feature=True)
  with tf.variable_scope('loss'):
    shared_head_loss, shared_head_acc = create_shared_head(
        weight_blocks.shared_features, dataset.transformer_real_classes,
        dataset.real_class_min, dataset.real_class_max)
    # Pre-training requires real-class labels and boundaries to be present.
    assert shared_head_loss is not None
    _, optimizer = make_optimizer(optim_config, global_step)
    state.loss = shared_head_loss
    train_op = make_train_op(optimizer, state.loss)
  return common.TrainState(
      train_op=train_op,
      step_initializer=tf.group(dataset.randomize_op),
      large_summaries=[],
      small_summaries=[
          tf.summary.scalar('loss/shared_head_loss', shared_head_loss),
          tf.summary.scalar('accuracy/shared_head_accuracy', shared_head_acc),
      ],
      )
def _cut_index(name):
return name.rsplit(':', 1)[0]
def restore_shared_features():
  """Restores shared feature extractor variables from a checkpoint.

  Returns:
    A grouped assign op restoring the matching variables, or None when the
    restore checkpoint flag is unset.
  """
  checkpoint = common_flags.RESTORE_SHARED_FEATURES_FROM.value
  if not checkpoint:
    return None
  all_vars = tf.trainable_variables()
  # Select the shared feature extractor ('model/shared_features...') and
  # the auxiliary shared head ('loss/shared_head...') variables.
  shared_vars = [v for v in all_vars
                 if v.name.find('model/shared_features') >= 0]
  shared_vars += [v for v in all_vars
                  if v.name.find('loss/shared_head') >= 0]
  # Checkpoint keys carry no ':<index>' suffix, hence `_cut_index`.
  var_values = util.load_variables(checkpoint,
                                   [_cut_index(v.name) for v in shared_vars])
  assign_ops = []
  for var in shared_vars:
    assign_ops.append(tf.assign(var, var_values[_cut_index(var.name)]))
  return tf.group(assign_ops)
def train(train_config,
          optimizer_config,
          dataset_config,
          test_dataset_config,
          layerwise_model_config):
  """Main function training the model.

  Builds the train/test datasets, constructs either the full layerwise
  model or the shared-feature pre-training model (depending on the
  PRETRAIN_SHARED_FEATURE flag), then runs the training loop.
  """
  state = train_lib.ModelState()
  tf.logging.info('Creating the dataset')
  dataset, dataset_state = train_lib.make_dataset(
      model_config=layerwise_model_config, data_config=dataset_config)
  # The test dataset reuses the training dataset's state.
  test_dataset, _ = train_lib.make_dataset(
      model_config=layerwise_model_config, data_config=test_dataset_config,
      dataset_state=dataset_state)
  args = {'dataset': dataset, 'state': state, 'optim_config': optimizer_config,
          'test_dataset': test_dataset}
  if common_flags.PRETRAIN_SHARED_FEATURE.value:
    create_model = functools.partial(create_shared_feature_model,
                                     model_config=layerwise_model_config)
  else:
    create_model = functools.partial(create_layerwise_model,
                                     model_config=layerwise_model_config)
  tf.logging.info('Training')
  train_state = create_model(**args)
  with tf.Session():
    init_op = restore_shared_features()
    restored = common.init_training(train_state)
    # Warm-start the shared features only when no checkpoint was restored.
    if not restored and init_op is not None:
      sess = tf.get_default_session()
      sess.run(init_op)
    common.train(train_config, train_state)
def main(argv):
  """Binary entry point: builds all configs from flags and starts training."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  tf.disable_eager_execution()
  # Enable memory growth on every visible GPU.
  for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)
  train(train_config=make_train_config(),
        optimizer_config=make_optimizer_config(),
        dataset_config=make_dataset_config(),
        test_dataset_config=make_test_dataset_config(),
        layerwise_model_config=make_layerwise_model_config())
if __name__ == '__main__':
app.run(main)
| {
"content_hash": "dc858c4f38ad2e44683ecacd9da05075",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 83,
"avg_line_length": 40.18069815195072,
"alnum_prop": 0.6565310711365495,
"repo_name": "google-research/google-research",
"id": "80cc818b9ccf4ab93fb66b045db5ce7826b9a261",
"size": "20176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hypertransformer/tf/train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
def memo(f):
    """Decorator caching f's results keyed by its arguments.

    Repeated calls with the same (hashable) arguments invoke ``f`` only
    once.  Keyword arguments are now supported as well; positional-only
    calls keep the original bare-tuple cache key, so existing behavior is
    unchanged.  Function metadata is preserved via ``functools.wraps``.
    """
    import functools
    memos = {}
    @functools.wraps(f)
    def memoized(*args, **kwargs):
        # Keyword calls are keyed on (args, frozenset(kwargs items)).
        key = (args, frozenset(kwargs.items())) if kwargs else args
        try:
            return memos[key]
        except KeyError:
            result = memos[key] = f(*args, **kwargs)
            return result
    return memoized
| {
"content_hash": "88aa8c2838bf0c7bf1a4750d737ed20b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 43,
"avg_line_length": 25,
"alnum_prop": 0.53,
"repo_name": "darius/languagetoys",
"id": "8eeaf6b18164cfe09f2eaa2eb6f18eb1f33b5323",
"size": "200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "memo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60882"
},
{
"name": "Shell",
"bytes": "171"
}
],
"symlink_target": ""
} |
from django.db import migrations
class Migration(migrations.Migration):
    """Sets Job model options: default ordering by start_time and the
    human-readable 'Job'/'Jobs' names."""

    dependencies = [
        ('daiquiri_jobs', '0015_job_index'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='job',
            options={'ordering': ('start_time',), 'verbose_name': 'Job', 'verbose_name_plural': 'Jobs'},
        ),
    ]
| {
"content_hash": "a5f4d9fc494271f28c4b98502ef00729",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 104,
"avg_line_length": 23.4,
"alnum_prop": 0.5698005698005698,
"repo_name": "aipescience/django-daiquiri",
"id": "d070d40c35975726594efcbfea2e0e05020a43a2",
"size": "400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daiquiri/jobs/migrations/0016_django2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "28598"
},
{
"name": "HTML",
"bytes": "236579"
},
{
"name": "JavaScript",
"bytes": "97087"
},
{
"name": "Python",
"bytes": "602159"
}
],
"symlink_target": ""
} |
"""
vcs.nodes
~~~~~~~~~
Module holding everything related to vcs nodes.
:created_on: Apr 8, 2010
:copyright: (c) 2010-2011 by Marcin Kuzminski, Lukasz Balcerzak.
"""
import os
import stat
import posixpath
import mimetypes
from pygments import lexers
from vcs.backends.base import EmptyChangeset
from vcs.exceptions import NodeError, RemovedFileNodeError
from vcs.utils.lazy import LazyProperty
from vcs.utils import safe_unicode
class NodeKind:
    """Integer constants for the possible node kinds."""
    SUBMODULE = -1
    DIR = 1
    FILE = 2
class NodeState:
    """String constants naming the possible node states."""
    ADDED = u'added'
    CHANGED = u'changed'
    NOT_CHANGED = u'not changed'
    REMOVED = u'removed'
class NodeGeneratorBase(object):
    """
    Base class for removed, added and changed filenodes. It is a lazy
    generator: filenodes are only created on iteration or call.

    ``__len__`` does not need to create any filenodes at all.
    """
    def __init__(self, current_paths, cs):
        # cs: changeset used to materialize nodes on demand
        # current_paths: plain paths from which nodes are built lazily
        self.cs = cs
        self.current_paths = current_paths
    def __call__(self):
        # Eagerly materialize every node as a list.
        return [n for n in self]
    def __getslice__(self, i, j):
        # Python 2 slicing hook: yields nodes lazily for obj[i:j].
        for p in self.current_paths[i:j]:
            yield self.cs.get_node(p)
    def __len__(self):
        # Cheap: counts paths without constructing node objects.
        return len(self.current_paths)
    def __iter__(self):
        for p in self.current_paths:
            yield self.cs.get_node(p)
class AddedFileNodesGenerator(NodeGeneratorBase):
    """
    Lazy collection of file nodes added in the current changeset.
    """
    pass
class ChangedFileNodesGenerator(NodeGeneratorBase):
    """
    Lazy collection of file nodes changed in the current changeset.
    """
    pass
class RemovedFileNodesGenerator(NodeGeneratorBase):
    """
    Lazy collection of file nodes removed in the current changeset.
    """
    def __iter__(self):
        # Removed paths no longer exist in the changeset, so yield
        # ``RemovedFileNode`` placeholders instead of real file nodes.
        for p in self.current_paths:
            yield RemovedFileNode(path=p)
    def __getslice__(self, i, j):
        # Python 2 slicing hook; mirrors __iter__ for obj[i:j].
        for p in self.current_paths[i:j]:
            yield RemovedFileNode(path=p)
class Node(object):
    """
    Simplest class representing file or directory on repository. SCM backends
    should use ``FileNode`` and ``DirNode`` subclasses rather than ``Node``
    directly.

    Node's ``path`` cannot start with slash as we operate on *relative* paths
    only. Moreover, every single node is identified by the ``path`` attribute,
    so it cannot end with slash, too. Otherwise, path could lead to mistakes.
    """
    def __init__(self, path, kind):
        """
        :param path: relative path of the node (no leading slash)
        :param kind: one of the ``NodeKind`` constants
        :raises NodeError: for an absolute path, an empty path on a non-dir
            node, or a root node that is not a directory
        """
        if path.startswith('/'):
            raise NodeError("Cannot initialize Node objects with slash at "
                "the beginning as only relative paths are supported")
        self.path = path.rstrip('/')
        if path == '' and kind != NodeKind.DIR:
            raise NodeError("Only DirNode and its subclasses may be "
                            "initialized with empty path")
        self.kind = kind
        if self.is_root() and not self.is_dir():
            raise NodeError("Root node cannot be FILE kind")
    @LazyProperty
    def parent(self):
        """
        Parent node: fetched through the changeset when one is bound
        (subclasses set ``self.changeset``), a bare ``DirNode`` otherwise;
        ``None`` for the root node.
        """
        parent_path = self.get_parent_path()
        if parent_path:
            if self.changeset:
                return self.changeset.get_node(parent_path)
            return DirNode(parent_path)
        return None
    @LazyProperty
    def unicode_path(self):
        return safe_unicode(self.path)
    @LazyProperty
    def name(self):
        """
        Returns name of the node: the last component of its path.
        """
        return safe_unicode(self.path.rstrip('/').split('/')[-1])
    def _get_kind(self):
        return self._kind
    def _set_kind(self, kind):
        # ``kind`` is write-once; changing it later would invalidate every
        # decision already made from is_file()/is_dir().
        if hasattr(self, '_kind'):
            raise NodeError("Cannot change node's kind")
        else:
            self._kind = kind
            # Post setter check (path's trailing slash)
            if self.path.endswith('/'):
                raise NodeError("Node's path cannot end with slash")
    kind = property(_get_kind, _set_kind)
    def __cmp__(self, other):
        """
        Comparator using name of the node, needed for quick list sorting.
        """
        kind_cmp = cmp(self.kind, other.kind)
        if kind_cmp:
            return kind_cmp
        return cmp(self.name, other.name)
    def __eq__(self, other):
        for attr in ['name', 'path', 'kind']:
            if getattr(self, attr) != getattr(other, attr):
                return False
        if self.is_file():
            if self.content != other.content:
                return False
        else:
            # For DirNode's check without entering each dir
            self_nodes_paths = list(sorted(n.path for n in self.nodes))
            # Bug fix: this previously iterated ``self.nodes`` a second
            # time, so any two directories with equal name/path/kind always
            # compared equal regardless of their contents.
            other_nodes_paths = list(sorted(n.path for n in other.nodes))
            if self_nodes_paths != other_nodes_paths:
                return False
        return True
    def __ne__(self, other):
        # Bug fix: this method was named ``__nq__`` (a typo), so ``!=``
        # never consulted it; the misspelled name is kept as an alias for
        # any code that called it directly.
        return not self.__eq__(other)
    __nq__ = __ne__
    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self.path)
    def __str__(self):
        return self.__repr__()
    def __unicode__(self):
        return self.name
    def get_parent_path(self):
        """
        Returns node's parent path or empty string if node is root.
        """
        if self.is_root():
            return ''
        return posixpath.dirname(self.path.rstrip('/')) + '/'
    def is_file(self):
        """
        Returns ``True`` if node's kind is ``NodeKind.FILE``, ``False``
        otherwise.
        """
        return self.kind == NodeKind.FILE
    def is_dir(self):
        """
        Returns ``True`` if node's kind is ``NodeKind.DIR``, ``False``
        otherwise.
        """
        return self.kind == NodeKind.DIR
    def is_root(self):
        """
        Returns ``True`` if node is a root node and ``False`` otherwise.
        """
        return self.kind == NodeKind.DIR and self.path == ''
    def is_submodule(self):
        """
        Returns ``True`` if node's kind is ``NodeKind.SUBMODULE``, ``False``
        otherwise.
        """
        return self.kind == NodeKind.SUBMODULE
    # The ``state`` attribute consulted below is provided by subclasses /
    # callers; identity comparison is safe because NodeState members are
    # module-level singletons.
    @LazyProperty
    def added(self):
        return self.state is NodeState.ADDED
    @LazyProperty
    def changed(self):
        return self.state is NodeState.CHANGED
    @LazyProperty
    def not_changed(self):
        return self.state is NodeState.NOT_CHANGED
    @LazyProperty
    def removed(self):
        return self.state is NodeState.REMOVED
class FileNode(Node):
"""
Class representing file nodes.
    :attribute: path: path to the node, relative to repository's root
:attribute: content: if given arbitrary sets content of the file
:attribute: changeset: if given, first time content is accessed, callback
:attribute: mode: octal stat mode for a node. Default is 0100644.
"""
    def __init__(self, path, content=None, changeset=None, mode=None):
        """
        Only one of ``content`` and ``changeset`` may be given. Passing both
        would raise ``NodeError`` exception.

        :param path: relative path to the node
        :param content: content may be passed to constructor
        :param changeset: if given, will use it to lazily fetch content
        :param mode: octal representation of ST_MODE (i.e. 0100644)
        """
        if content and changeset:
            raise NodeError("Cannot use both content and changeset")
        super(FileNode, self).__init__(path, kind=NodeKind.FILE)
        self.changeset = changeset
        self._content = content
        # Python 2 octal literal: 0100644 is a regular file, rw-r--r--.
        self._mode = mode or 0100644
    @LazyProperty
    def mode(self):
        """
        Returns lazily mode of the FileNode. If ``changeset`` is not set, would
        use value given at initialization or 0100644 (default).
        """
        if self.changeset:
            # Mode recorded in the changeset wins over the constructor value.
            mode = self.changeset.get_file_mode(self.path)
        else:
            mode = self._mode
        return mode
def _get_content(self):
if self.changeset:
content = self.changeset.get_file_content(self.path)
else:
content = self._content
return content
    @property
    def content(self):
        """
        Returns lazily content of the FileNode. If possible, would try to
        decode content from UTF-8.
        """
        content = self._get_content()
        # A NUL byte marks the content as binary: return the raw bytes
        # untouched instead of attempting a unicode conversion.
        if bool(content and '\0' in content):
            return content
        return safe_unicode(content)
@LazyProperty
def size(self):
if self.changeset:
return self.changeset.get_file_size(self.path)
raise NodeError("Cannot retrieve size of the file without related "
"changeset attribute")
    @LazyProperty
    def message(self):
        # Commit message taken from ``last_changeset``; requires a bound
        # changeset.
        if self.changeset:
            return self.last_changeset.message
        raise NodeError("Cannot retrieve message of the file without related "
                        "changeset attribute")
    @LazyProperty
    def last_changeset(self):
        # Changeset obtained via the backend's ``get_file_changeset`` —
        # presumably the latest changeset touching this file; requires a
        # bound changeset.
        if self.changeset:
            return self.changeset.get_file_changeset(self.path)
        raise NodeError("Cannot retrieve last changeset of the file without "
                        "related changeset attribute")
def get_mimetype(self):
"""
Mimetype is calculated based on the file's content. If ``_mimetype``
attribute is available, it will be returned (backends which store
mimetypes or can easily recognize them, should set this private
attribute to indicate that type should *NOT* be calculated).
"""
if hasattr(self, '_mimetype'):
if (isinstance(self._mimetype, (tuple, list,)) and
len(self._mimetype) == 2):
return self._mimetype
else:
raise NodeError('given _mimetype attribute must be an 2 '
'element list or tuple')
mtype, encoding = mimetypes.guess_type(self.name)
if mtype is None:
if self.is_binary:
mtype = 'application/octet-stream'
encoding = None
else:
mtype = 'text/plain'
encoding = None
return mtype, encoding
@LazyProperty
def mimetype(self):
"""
Wrapper around full mimetype info. It returns only type of fetched
mimetype without the encoding part. use get_mimetype function to fetch
full set of (type,encoding)
"""
return self.get_mimetype()[0]
@LazyProperty
def mimetype_main(self):
return self.mimetype.split('/')[0]
@LazyProperty
def lexer(self):
"""
Returns pygment's lexer class. Would try to guess lexer taking file's
content, name and mimetype.
"""
try:
lexer = lexers.guess_lexer_for_filename(self.name, self.content, stripnl=False)
except lexers.ClassNotFound:
lexer = lexers.TextLexer(stripnl=False)
# returns first alias
return lexer
@LazyProperty
def lexer_alias(self):
"""
Returns first alias of the lexer guessed for this file.
"""
return self.lexer.aliases[0]
@LazyProperty
def history(self):
"""
Returns a list of changeset for this file in which the file was changed
"""
if self.changeset is None:
raise NodeError('Unable to get changeset for this FileNode')
return self.changeset.get_file_history(self.path)
@LazyProperty
def annotate(self):
"""
Returns a list of three element tuples with lineno,changeset and line
"""
if self.changeset is None:
raise NodeError('Unable to get changeset for this FileNode')
return self.changeset.get_file_annotate(self.path)
@LazyProperty
def state(self):
if not self.changeset:
raise NodeError("Cannot check state of the node if it's not "
"linked with changeset")
elif self.path in (node.path for node in self.changeset.added):
return NodeState.ADDED
elif self.path in (node.path for node in self.changeset.changed):
return NodeState.CHANGED
else:
return NodeState.NOT_CHANGED
@property
def is_binary(self):
"""
Returns True if file has binary content.
"""
_bin = '\0' in self._get_content()
return _bin
@LazyProperty
def extension(self):
"""Returns filenode extension"""
return self.name.split('.')[-1]
def is_executable(self):
"""
Returns ``True`` if file has executable flag turned on.
"""
return bool(self.mode & stat.S_IXUSR)
def __repr__(self):
return '<%s %r @ %s>' % (self.__class__.__name__, self.path,
getattr(self.changeset, 'short_id', ''))
class RemovedFileNode(FileNode):
    """
    Placeholder for a file that was removed.

    Accessing any public attribute other than path, name, kind or state
    (or methods/attributes checking those) raises
    ``RemovedFileNodeError``.
    """
    # the only public attributes that remain accessible on a removed file
    ALLOWED_ATTRIBUTES = [
        'name', 'path', 'state', 'is_root', 'is_file', 'is_dir', 'kind',
        'added', 'changed', 'not_changed', 'removed'
    ]

    def __init__(self, path):
        """
        :param path: relative path to the node
        """
        super(RemovedFileNode, self).__init__(path=path)

    def __getattribute__(self, attr):
        # private attributes and the explicit whitelist pass through;
        # anything else is an error by design
        if attr.startswith('_') or attr in RemovedFileNode.ALLOWED_ATTRIBUTES:
            return super(RemovedFileNode, self).__getattribute__(attr)
        raise RemovedFileNodeError("Cannot access attribute %s on "
                                   "RemovedFileNode" % attr)

    @LazyProperty
    def state(self):
        """A removed file is always in the REMOVED state."""
        return NodeState.REMOVED
class DirNode(Node):
    """
    DirNode stores list of files and directories within this node.
    Nodes may be used standalone but within repository context they
    lazily fetch data within same repositorty's changeset.
    """

    def __init__(self, path, nodes=(), changeset=None):
        """
        Only one of ``nodes`` and ``changeset`` may be given. Passing both
        would raise ``NodeError`` exception.

        :param path: relative path to the node
        :param nodes: content may be passed to constructor
        :param changeset: if given, will use it to lazily fetch content
        :param size: always 0 for ``DirNode``
        """
        if nodes and changeset:
            raise NodeError("Cannot use both nodes and changeset")
        super(DirNode, self).__init__(path, NodeKind.DIR)
        self.changeset = changeset
        self._nodes = nodes

    @LazyProperty
    def content(self):
        # directories have no content; accessing it is always an error
        raise NodeError("%s represents a dir and has no ``content`` attribute"
                        % self)

    @LazyProperty
    def nodes(self):
        """
        Sorted list of child nodes.

        NOTE: first access also populates ``self._nodes_dict`` (path ->
        node), which ``get_node`` relies on.
        """
        if self.changeset:
            nodes = self.changeset.get_nodes(self.path)
        else:
            nodes = self._nodes
        self._nodes_dict = dict((node.path, node) for node in nodes)
        return sorted(nodes)

    @LazyProperty
    def files(self):
        # file children only, sorted
        return sorted((node for node in self.nodes if node.is_file()))

    @LazyProperty
    def dirs(self):
        # directory children only, sorted
        return sorted((node for node in self.nodes if node.is_dir()))

    def __iter__(self):
        for node in self.nodes:
            yield node

    def get_node(self, path):
        """
        Returns node from within this particular ``DirNode``, so it is now
        allowed to fetch, i.e. node located at 'docs/api/index.rst' from node
        'docs'. In order to access deeper nodes one must fetch nodes between
        them first - this would work::

            docs = root.get_node('docs')
            docs.get_node('api').get_node('index.rst')

        :param: path - relative to the current node

        .. note::
            To access lazily (as in example above) node have to be initialized
            with related changeset object - without it node is out of
            context and may know nothing about anything else than nearest
            (located at same level) nodes.
        """
        try:
            path = path.rstrip('/')
            if path == '':
                raise NodeError("Cannot retrieve node without path")
            self.nodes  # access nodes first in order to set _nodes_dict
            paths = path.split('/')
            if len(paths) == 1:
                # direct child: qualify the name with our own path unless we
                # are the root node (whose path is '')
                if not self.is_root():
                    path = '/'.join((self.path, paths[0]))
                else:
                    path = paths[0]
                return self._nodes_dict[path]
            elif len(paths) > 1:
                if self.changeset is None:
                    raise NodeError("Cannot access deeper "
                                    "nodes without changeset")
                else:
                    # recurse: resolve the first segment here, delegate the
                    # remainder to the child directory node
                    path1, path2 = paths[0], '/'.join(paths[1:])
                    return self.get_node(path1).get_node(path2)
            else:
                raise KeyError
        except KeyError:
            raise NodeError("Node does not exist at %s" % path)

    @LazyProperty
    def state(self):
        # directories have no meaningful added/changed state
        raise NodeError("Cannot access state of DirNode")

    @LazyProperty
    def size(self):
        # NOTE(review): assumes ``self.changeset`` is set; when it is None
        # this raises AttributeError rather than NodeError like the other
        # changeset-requiring properties - confirm whether that is intended
        size = 0
        for root, dirs, files in self.changeset.walk(self.path):
            for f in files:
                size += f.size
        return size

    def __repr__(self):
        return '<%s %r @ %s>' % (self.__class__.__name__, self.path,
                                 getattr(self.changeset, 'short_id', ''))
class RootNode(DirNode):
    """
    The root ``DirNode`` of a repository; its path is always empty.
    """

    def __init__(self, nodes=(), changeset=None):
        super(RootNode, self).__init__(
            path='', nodes=nodes, changeset=changeset)

    def __repr__(self):
        return '<%s>' % (self.__class__.__name__,)
class SubModuleNode(Node):
    """
    represents a SubModule of Git or SubRepo of Mercurial
    """
    # submodule content is never inspected here, so it is reported as
    # non-binary and zero-sized
    is_binary = False
    size = 0

    def __init__(self, name, url=None, changeset=None, alias=None):
        """
        :param name: relative path of the submodule within the parent repo
        :param url: location of the submodule; when not given it is taken
            from :meth:`_extract_submodule_url`
        :param changeset: revision identifier the submodule points at
        :param alias: backend alias of the parent repository
            (e.g. 'git' or 'hg')
        """
        self.path = name
        self.kind = NodeKind.SUBMODULE
        self.alias = alias
        # we have to use emptyChangeset here since this can point to svn/git/hg
        # submodules we cannot get from repository
        self.changeset = EmptyChangeset(str(changeset), alias=alias)
        self.url = url or self._extract_submodule_url()

    def __repr__(self):
        return '<%s %r @ %s>' % (self.__class__.__name__, self.path,
                                 getattr(self.changeset, 'short_id', ''))

    def _extract_submodule_url(self):
        # NOTE(review): implicitly returns None for aliases other than
        # 'git'/'hg' - confirm callers handle a missing url
        if self.alias == 'git':
            #TODO: find a way to parse gits submodule file and extract the
            # linking URL
            return self.path
        if self.alias == 'hg':
            return self.path

    @LazyProperty
    def name(self):
        """
        Display name of the node: the last path component of the submodule
        followed by the short id of the changeset it points at.
        """
        org = safe_unicode(self.path.rstrip('/').split('/')[-1])
        return u'%s @ %s' % (org, self.changeset.short_id)
| {
"content_hash": "08f6452617b655bed0cf81b075ff45e8",
"timestamp": "",
"source": "github",
"line_count": 616,
"max_line_length": 91,
"avg_line_length": 31.008116883116884,
"alnum_prop": 0.5803884613371028,
"repo_name": "velodee/vcs",
"id": "5ad93a4860077507577e784e7741d14dd32c3b15",
"size": "19125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vcs/nodes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14053"
},
{
"name": "JavaScript",
"bytes": "498"
},
{
"name": "Python",
"bytes": "439998"
},
{
"name": "Shell",
"bytes": "3558"
}
],
"symlink_target": ""
} |
"""
differential_evolution: The differential evolution global optimization algorithm
Added by Andrew Nelson 2014
"""
import warnings
import numpy as np
from scipy.optimize import OptimizeResult, minimize
from scipy.optimize.optimize import _status_message
from scipy._lib._util import check_random_state, MapWrapper
from scipy.optimize._constraints import (Bounds, new_bounds_to_old,
NonlinearConstraint, LinearConstraint)
from scipy.sparse import issparse
__all__ = ['differential_evolution']
_MACHEPS = np.finfo(np.float64).eps
def differential_evolution(func, bounds, args=(), strategy='best1bin',
                           maxiter=1000, popsize=15, tol=0.01,
                           mutation=(0.5, 1), recombination=0.7, seed=None,
                           callback=None, disp=False, polish=True,
                           init='latinhypercube', atol=0, updating='immediate',
                           workers=1, constraints=(), x0=None):
    """Finds the global minimum of a multivariate function.

    Differential Evolution is stochastic in nature (does not use gradient
    methods) to find the minimum, and can search large areas of candidate
    space, but often requires larger numbers of function evaluations than
    conventional gradient-based techniques.

    The algorithm is due to Storn and Price [1]_.

    Parameters
    ----------
    func : callable
        The objective function to be minimized. Must be in the form
        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
        and ``args`` is a tuple of any additional fixed parameters needed to
        completely specify the function.
    bounds : sequence or `Bounds`
        Bounds for variables. There are two ways to specify the bounds:
        1. Instance of `Bounds` class.
        2. ``(min, max)`` pairs for each element in ``x``, defining the finite
        lower and upper bounds for the optimizing argument of `func`. It is
        required to have ``len(bounds) == len(x)``. ``len(bounds)`` is used
        to determine the number of parameters in ``x``.
    args : tuple, optional
        Any additional fixed parameters needed to
        completely specify the objective function.
    strategy : str, optional
        The differential evolution strategy to use. Should be one of:

            - 'best1bin'
            - 'best1exp'
            - 'rand1exp'
            - 'randtobest1exp'
            - 'currenttobest1exp'
            - 'best2exp'
            - 'rand2exp'
            - 'randtobest1bin'
            - 'currenttobest1bin'
            - 'best2bin'
            - 'rand2bin'
            - 'rand1bin'

        The default is 'best1bin'.
    maxiter : int, optional
        The maximum number of generations over which the entire population is
        evolved. The maximum number of function evaluations (with no polishing)
        is: ``(maxiter + 1) * popsize * len(x)``
    popsize : int, optional
        A multiplier for setting the total population size. The population has
        ``popsize * len(x)`` individuals. This keyword is overridden if an
        initial population is supplied via the `init` keyword. When using
        ``init='sobol'`` the population size is calculated as the next power
        of 2 after ``popsize * len(x)``.
    tol : float, optional
        Relative tolerance for convergence, the solving stops when
        ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
        where and `atol` and `tol` are the absolute and relative tolerance
        respectively.
    mutation : float or tuple(float, float), optional
        The mutation constant. In the literature this is also known as
        differential weight, being denoted by F.
        If specified as a float it should be in the range [0, 2].
        If specified as a tuple ``(min, max)`` dithering is employed. Dithering
        randomly changes the mutation constant on a generation by generation
        basis. The mutation constant for that generation is taken from
        ``U[min, max)``. Dithering can help speed convergence significantly.
        Increasing the mutation constant increases the search radius, but will
        slow down convergence.
    recombination : float, optional
        The recombination constant, should be in the range [0, 1]. In the
        literature this is also known as the crossover probability, being
        denoted by CR. Increasing this value allows a larger number of mutants
        to progress into the next generation, but at the risk of population
        stability.
    seed : {None, int, `numpy.random.Generator`,
            `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
        Specify `seed` for repeatable minimizations.
    disp : bool, optional
        Prints the evaluated `func` at every iteration.
    callback : callable, `callback(xk, convergence=val)`, optional
        A function to follow the progress of the minimization. ``xk`` is
        the current value of ``x0``. ``val`` represents the fractional
        value of the population convergence. When ``val`` is greater than one
        the function halts. If callback returns `True`, then the minimization
        is halted (any polishing is still carried out).
    polish : bool, optional
        If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
        method is used to polish the best population member at the end, which
        can improve the minimization slightly. If a constrained problem is
        being studied then the `trust-constr` method is used instead.
    init : str or array-like, optional
        Specify which type of population initialization is performed. Should be
        one of:

            - 'latinhypercube'
            - 'sobol'
            - 'halton'
            - 'random'
            - array specifying the initial population. The array should have
              shape ``(M, len(x))``, where M is the total population size and
              len(x) is the number of parameters.
              `init` is clipped to `bounds` before use.

        The default is 'latinhypercube'. Latin Hypercube sampling tries to
        maximize coverage of the available parameter space.
        'sobol' and 'halton' are superior alternatives and maximize even more
        the parameter space. 'sobol' will enforce an initial population
        size which is calculated as the next power of 2 after
        ``popsize * len(x)``. 'halton' has no requirements but is a bit less
        efficient. See `scipy.stats.qmc` for more details.
        'random' initializes the population randomly - this has the drawback
        that clustering can occur, preventing the whole of parameter space
        being covered. Use of an array to specify a population could be used,
        for example, to create a tight bunch of initial guesses in an location
        where the solution is known to exist, thereby reducing time for
        convergence.
    atol : float, optional
        Absolute tolerance for convergence, the solving stops when
        ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
        where and `atol` and `tol` are the absolute and relative tolerance
        respectively.
    updating : {'immediate', 'deferred'}, optional
        If ``'immediate'``, the best solution vector is continuously updated
        within a single generation [4]_. This can lead to faster convergence as
        trial vectors can take advantage of continuous improvements in the best
        solution.
        With ``'deferred'``, the best solution vector is updated once per
        generation. Only ``'deferred'`` is compatible with parallelization, and
        the `workers` keyword can over-ride this option.

        .. versionadded:: 1.2.0
    workers : int or map-like callable, optional
        If `workers` is an int the population is subdivided into `workers`
        sections and evaluated in parallel
        (uses `multiprocessing.Pool <multiprocessing>`).
        Supply -1 to use all available CPU cores.
        Alternatively supply a map-like callable, such as
        `multiprocessing.Pool.map` for evaluating the population in parallel.
        This evaluation is carried out as ``workers(func, iterable)``.
        This option will override the `updating` keyword to
        ``updating='deferred'`` if ``workers != 1``.
        Requires that `func` be pickleable.

        .. versionadded:: 1.2.0
    constraints : {NonLinearConstraint, LinearConstraint, Bounds}
        Constraints on the solver, over and above those applied by the `bounds`
        kwd. Uses the approach by Lampinen [5]_.

        .. versionadded:: 1.4.0
    x0 : None or array-like, optional
        Provides an initial guess to the minimization. Once the population has
        been initialized this vector replaces the first (best) member. This
        replacement is done even if `init` is given an initial population.

        .. versionadded:: 1.7.0

    Returns
    -------
    res : OptimizeResult
        The optimization result represented as a `OptimizeResult` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the optimizer exited successfully and
        ``message`` which describes the cause of the termination. See
        `OptimizeResult` for a description of other attributes. If `polish`
        was employed, and a lower minimum was obtained by the polishing, then
        OptimizeResult also contains the ``jac`` attribute.
        If the eventual solution does not satisfy the applied constraints
        ``success`` will be `False`.

    Notes
    -----
    Differential evolution is a stochastic population based method that is
    useful for global optimization problems. At each pass through the population
    the algorithm mutates each candidate solution by mixing with other candidate
    solutions to create a trial candidate. There are several strategies [2]_ for
    creating trial candidates, which suit some problems more than others. The
    'best1bin' strategy is a good starting point for many systems. In this
    strategy two members of the population are randomly chosen. Their difference
    is used to mutate the best member (the 'best' in 'best1bin'), :math:`b_0`,
    so far:

    .. math::

        b' = b_0 + mutation * (population[rand0] - population[rand1])

    A trial vector is then constructed. Starting with a randomly chosen ith
    parameter the trial is sequentially filled (in modulo) with parameters from
    ``b'`` or the original candidate. The choice of whether to use ``b'`` or the
    original candidate is made with a binomial distribution (the 'bin' in
    'best1bin') - a random number in [0, 1) is generated. If this number is
    less than the `recombination` constant then the parameter is loaded from
    ``b'``, otherwise it is loaded from the original candidate. The final
    parameter is always loaded from ``b'``. Once the trial candidate is built
    its fitness is assessed. If the trial is better than the original candidate
    then it takes its place. If it is also better than the best overall
    candidate it also replaces that.
    To improve your chances of finding a global minimum use higher `popsize`
    values, with higher `mutation` and (dithering), but lower `recombination`
    values. This has the effect of widening the search radius, but slowing
    convergence.
    By default the best solution vector is updated continuously within a single
    iteration (``updating='immediate'``). This is a modification [4]_ of the
    original differential evolution algorithm which can lead to faster
    convergence as trial vectors can immediately benefit from improved
    solutions. To use the original Storn and Price behaviour, updating the best
    solution once per iteration, set ``updating='deferred'``.

    .. versionadded:: 0.15.0

    Examples
    --------
    Let us consider the problem of minimizing the Rosenbrock function. This
    function is implemented in `rosen` in `scipy.optimize`.

    >>> from scipy.optimize import rosen, differential_evolution
    >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
    >>> result = differential_evolution(rosen, bounds)
    >>> result.x, result.fun
    (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)

    Now repeat, but with parallelization.

    >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
    >>> result = differential_evolution(rosen, bounds, updating='deferred',
    ...                                 workers=2)
    >>> result.x, result.fun
    (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)

    Let's try and do a constrained minimization

    >>> from scipy.optimize import NonlinearConstraint, Bounds
    >>> def constr_f(x):
    ...     return np.array(x[0] + x[1])
    >>>
    >>> # the sum of x[0] and x[1] must be less than 1.9
    >>> nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
    >>> # specify limits using a `Bounds` object.
    >>> bounds = Bounds([0., 0.], [2., 2.])
    >>> result = differential_evolution(rosen, bounds, constraints=(nlc),
    ...                                 seed=1)
    >>> result.x, result.fun
    (array([0.96633867, 0.93363577]), 0.0011361355854792312)

    Next find the minimum of the Ackley function
    (https://en.wikipedia.org/wiki/Test_functions_for_optimization).

    >>> from scipy.optimize import differential_evolution
    >>> import numpy as np
    >>> def ackley(x):
    ...     arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))
    ...     arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
    ...     return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e
    >>> bounds = [(-5, 5), (-5, 5)]
    >>> result = differential_evolution(ackley, bounds)
    >>> result.x, result.fun
    (array([ 0.,  0.]), 4.4408920985006262e-16)

    References
    ----------
    .. [1] Storn, R and Price, K, Differential Evolution - a Simple and
           Efficient Heuristic for Global Optimization over Continuous Spaces,
           Journal of Global Optimization, 1997, 11, 341 - 359.
    .. [2] http://www1.icsi.berkeley.edu/~storn/code.html
    .. [3] http://en.wikipedia.org/wiki/Differential_evolution
    .. [4] Wormington, M., Panaccione, C., Matney, K. M., Bowen, D. K., -
           Characterization of structures from X-ray scattering data using
           genetic algorithms, Phil. Trans. R. Soc. Lond. A, 1999, 357,
           2827-2848
    .. [5] Lampinen, J., A constraint handling approach for the differential
           evolution algorithm. Proceedings of the 2002 Congress on
           Evolutionary Computation. CEC'02 (Cat. No. 02TH8600). Vol. 2. IEEE,
           2002.
    """

    # This function only validates nothing itself - all work is delegated to
    # DifferentialEvolutionSolver. Using a context manager means that any
    # created Pool objects (MapWrapper may own a multiprocessing.Pool when
    # workers != 1) are cleared up.
    with DifferentialEvolutionSolver(func, bounds, args=args,
                                     strategy=strategy,
                                     maxiter=maxiter,
                                     popsize=popsize, tol=tol,
                                     mutation=mutation,
                                     recombination=recombination,
                                     seed=seed, polish=polish,
                                     callback=callback,
                                     disp=disp, init=init, atol=atol,
                                     updating=updating,
                                     workers=workers,
                                     constraints=constraints,
                                     x0=x0) as solver:
        # all of the generation loop and optional polishing happens here
        ret = solver.solve()

    return ret
class DifferentialEvolutionSolver:
"""This class implements the differential evolution solver
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
and ``args`` is a tuple of any additional fixed parameters needed to
completely specify the function.
bounds : sequence or `Bounds`
Bounds for variables. There are two ways to specify the bounds:
1. Instance of `Bounds` class.
2. ``(min, max)`` pairs for each element in ``x``, defining the finite
lower and upper bounds for the optimizing argument of `func`. It is
required to have ``len(bounds) == len(x)``. ``len(bounds)`` is used
to determine the number of parameters in ``x``.
args : tuple, optional
Any additional fixed parameters needed to
completely specify the objective function.
strategy : str, optional
The differential evolution strategy to use. Should be one of:
- 'best1bin'
- 'best1exp'
- 'rand1exp'
- 'randtobest1exp'
- 'currenttobest1exp'
- 'best2exp'
- 'rand2exp'
- 'randtobest1bin'
- 'currenttobest1bin'
- 'best2bin'
- 'rand2bin'
- 'rand1bin'
The default is 'best1bin'
maxiter : int, optional
The maximum number of generations over which the entire population is
evolved. The maximum number of function evaluations (with no polishing)
is: ``(maxiter + 1) * popsize * len(x)``
popsize : int, optional
A multiplier for setting the total population size. The population has
``popsize * len(x)`` individuals. This keyword is overridden if an
initial population is supplied via the `init` keyword. When using
``init='sobol'`` the population size is calculated as the next power
of 2 after ``popsize * len(x)``.
tol : float, optional
Relative tolerance for convergence, the solving stops when
``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
where and `atol` and `tol` are the absolute and relative tolerance
respectively.
mutation : float or tuple(float, float), optional
The mutation constant. In the literature this is also known as
differential weight, being denoted by F.
If specified as a float it should be in the range [0, 2].
If specified as a tuple ``(min, max)`` dithering is employed. Dithering
randomly changes the mutation constant on a generation by generation
basis. The mutation constant for that generation is taken from
U[min, max). Dithering can help speed convergence significantly.
Increasing the mutation constant increases the search radius, but will
slow down convergence.
recombination : float, optional
The recombination constant, should be in the range [0, 1]. In the
literature this is also known as the crossover probability, being
denoted by CR. Increasing this value allows a larger number of mutants
to progress into the next generation, but at the risk of population
stability.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Specify `seed` for repeatable minimizations.
disp : bool, optional
Prints the evaluated `func` at every iteration.
callback : callable, `callback(xk, convergence=val)`, optional
A function to follow the progress of the minimization. ``xk`` is
the current value of ``x0``. ``val`` represents the fractional
value of the population convergence. When ``val`` is greater than one
the function halts. If callback returns `True`, then the minimization
is halted (any polishing is still carried out).
polish : bool, optional
If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
method is used to polish the best population member at the end, which
can improve the minimization slightly. If a constrained problem is
being studied then the `trust-constr` method is used instead.
maxfun : int, optional
Set the maximum number of function evaluations. However, it probably
makes more sense to set `maxiter` instead.
init : str or array-like, optional
Specify which type of population initialization is performed. Should be
one of:
- 'latinhypercube'
- 'sobol'
- 'halton'
- 'random'
- array specifying the initial population. The array should have
shape ``(M, len(x))``, where M is the total population size and
len(x) is the number of parameters.
`init` is clipped to `bounds` before use.
The default is 'latinhypercube'. Latin Hypercube sampling tries to
maximize coverage of the available parameter space.
'sobol' and 'halton' are superior alternatives and maximize even more
the parameter space. 'sobol' will enforce an initial population
size which is calculated as the next power of 2 after
``popsize * len(x)``. 'halton' has no requirements but is a bit less
efficient. See `scipy.stats.qmc` for more details.
'random' initializes the population randomly - this has the drawback
that clustering can occur, preventing the whole of parameter space
being covered. Use of an array to specify a population could be used,
for example, to create a tight bunch of initial guesses in an location
where the solution is known to exist, thereby reducing time for
convergence.
atol : float, optional
Absolute tolerance for convergence, the solving stops when
``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
where and `atol` and `tol` are the absolute and relative tolerance
respectively.
updating : {'immediate', 'deferred'}, optional
If `immediate` the best solution vector is continuously updated within
a single generation. This can lead to faster convergence as trial
vectors can take advantage of continuous improvements in the best
solution.
With `deferred` the best solution vector is updated once per
generation. Only `deferred` is compatible with parallelization, and the
`workers` keyword can over-ride this option.
workers : int or map-like callable, optional
If `workers` is an int the population is subdivided into `workers`
sections and evaluated in parallel
(uses `multiprocessing.Pool <multiprocessing>`).
Supply `-1` to use all cores available to the Process.
Alternatively supply a map-like callable, such as
`multiprocessing.Pool.map` for evaluating the population in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
This option will override the `updating` keyword to
`updating='deferred'` if `workers != 1`.
Requires that `func` be pickleable.
constraints : {NonLinearConstraint, LinearConstraint, Bounds}
Constraints on the solver, over and above those applied by the `bounds`
kwd. Uses the approach by Lampinen.
x0 : None or array-like, optional
Provides an initial guess to the minimization. Once the population has
been initialized this vector replaces the first (best) member. This
replacement is done even if `init` is given an initial population.
"""
# Dispatch of mutation strategy method (binomial or exponential).
_binomial = {'best1bin': '_best1',
'randtobest1bin': '_randtobest1',
'currenttobest1bin': '_currenttobest1',
'best2bin': '_best2',
'rand2bin': '_rand2',
'rand1bin': '_rand1'}
_exponential = {'best1exp': '_best1',
'rand1exp': '_rand1',
'randtobest1exp': '_randtobest1',
'currenttobest1exp': '_currenttobest1',
'best2exp': '_best2',
'rand2exp': '_rand2'}
__init_error_msg = ("The population initialization method must be one of "
"'latinhypercube' or 'random', or an array of shape "
"(M, N) where N is the number of parameters and M>5")
    def __init__(self, func, bounds, args=(),
                 strategy='best1bin', maxiter=1000, popsize=15,
                 tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
                 maxfun=np.inf, callback=None, disp=False, polish=True,
                 init='latinhypercube', atol=0, updating='immediate',
                 workers=1, constraints=(), x0=None):
        """Set up the solver; see the class docstring for the meaning of
        every parameter."""
        # resolve the strategy name into the bound mutation method
        if strategy in self._binomial:
            self.mutation_func = getattr(self, self._binomial[strategy])
        elif strategy in self._exponential:
            self.mutation_func = getattr(self, self._exponential[strategy])
        else:
            raise ValueError("Please select a valid mutation strategy")
        self.strategy = strategy

        self.callback = callback
        self.polish = polish

        # set the updating / parallelisation options
        if updating in ['immediate', 'deferred']:
            self._updating = updating
        # NOTE(review): an unrecognized ``updating`` value leaves
        # ``self._updating`` unset here and only fails later with
        # AttributeError - confirm whether this should raise immediately.

        # want to use parallelisation, but updating is immediate
        if workers != 1 and updating == 'immediate':
            warnings.warn("differential_evolution: the 'workers' keyword has"
                          " overridden updating='immediate' to"
                          " updating='deferred'", UserWarning)
            self._updating = 'deferred'

        # an object with a map method.
        self._mapwrapper = MapWrapper(workers)

        # relative and absolute tolerances for convergence
        self.tol, self.atol = tol, atol

        # Mutation constant should be in [0, 2). If specified as a sequence
        # then dithering is performed.
        self.scale = mutation
        if (not np.all(np.isfinite(mutation)) or
                np.any(np.array(mutation) >= 2) or
                np.any(np.array(mutation) < 0)):
            raise ValueError('The mutation constant must be a float in '
                             'U[0, 2), or specified as a tuple(min, max)'
                             ' where min < max and min, max are in U[0, 2).')

        self.dither = None
        if hasattr(mutation, '__iter__') and len(mutation) > 1:
            # dithering: a fresh scale factor is drawn from [min, max) each
            # generation
            self.dither = [mutation[0], mutation[1]]
            self.dither.sort()

        self.cross_over_probability = recombination

        # we create a wrapped function to allow the use of map (and Pool.map
        # in the future)
        self.func = _FunctionWrapper(func, args)
        self.args = args

        # convert tuple of lower and upper bounds to limits
        # [(low_0, high_0), ..., (low_n, high_n]
        #     -> [[low_0, ..., low_n], [high_0, ..., high_n]]
        if isinstance(bounds, Bounds):
            self.limits = np.array(new_bounds_to_old(bounds.lb,
                                                     bounds.ub,
                                                     len(bounds.lb)),
                                   dtype=float).T
        else:
            self.limits = np.array(bounds, dtype='float').T

        if (np.size(self.limits, 0) != 2 or not
                np.all(np.isfinite(self.limits))):
            raise ValueError('bounds should be a sequence containing '
                             'real valued (min, max) pairs for each value'
                             ' in x')

        if maxiter is None:  # the default used to be None
            maxiter = 1000
        self.maxiter = maxiter
        if maxfun is None:  # the default used to be None
            maxfun = np.inf
        self.maxfun = maxfun

        # population is scaled to between [0, 1].
        # We have to scale between parameter <-> population
        # save these arguments for _scale_parameter and
        # _unscale_parameter. This is an optimization
        self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
        self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])

        self.parameter_count = np.size(self.limits, 1)

        self.random_number_generator = check_random_state(seed)

        # default population initialization is a latin hypercube design, but
        # there are other population initializations possible.
        # the minimum is 5 because 'best2bin' requires a population that's at
        # least 5 long
        self.num_population_members = max(5, popsize * self.parameter_count)
        self.population_shape = (self.num_population_members,
                                 self.parameter_count)

        self._nfev = 0
        # check first str otherwise will fail to compare str with array
        if isinstance(init, str):
            if init == 'latinhypercube':
                self.init_population_lhs()
            elif init == 'sobol':
                # must be Ns = 2**m for Sobol'
                n_s = int(2 ** np.ceil(np.log2(self.num_population_members)))
                self.num_population_members = n_s
                self.population_shape = (self.num_population_members,
                                         self.parameter_count)
                self.init_population_qmc(qmc_engine='sobol')
            elif init == 'halton':
                self.init_population_qmc(qmc_engine='halton')
            elif init == 'random':
                self.init_population_random()
            else:
                raise ValueError(self.__init_error_msg)
        else:
            # array-like initial population
            self.init_population_array(init)

        if x0 is not None:
            # scale to within unit interval and
            # ensure parameters are within bounds.
            x0_scaled = self._unscale_parameters(np.asarray(x0))
            if ((x0_scaled > 1.0) | (x0_scaled < 0.0)).any():
                raise ValueError(
                    "Some entries in x0 lay outside the specified bounds"
                )
            # x0 replaces the first population member even when init was an
            # array (documented behaviour)
            self.population[0] = x0_scaled

        # infrastructure for constraints
        self.constraints = constraints
        self._wrapped_constraints = []

        if hasattr(constraints, '__len__'):
            # sequence of constraints, this will also deal with default
            # keyword parameter
            for c in constraints:
                self._wrapped_constraints.append(
                    _ConstraintWrapper(c, self.x)
                )
        else:
            self._wrapped_constraints = [
                _ConstraintWrapper(constraints, self.x)
            ]

        self.constraint_violation = np.zeros((self.num_population_members, 1))
        self.feasible = np.ones(self.num_population_members, bool)

        self.disp = disp
def init_population_lhs(self):
"""
Initializes the population with Latin Hypercube Sampling.
Latin Hypercube Sampling ensures that each parameter is uniformly
sampled over its range.
"""
rng = self.random_number_generator
# Each parameter range needs to be sampled uniformly. The scaled
# parameter range ([0, 1)) needs to be split into
# `self.num_population_members` segments, each of which has the following
# size:
segsize = 1.0 / self.num_population_members
# Within each segment we sample from a uniform random distribution.
# We need to do this sampling for each parameter.
samples = (segsize * rng.uniform(size=self.population_shape)
# Offset each segment to cover the entire parameter range [0, 1)
+ np.linspace(0., 1., self.num_population_members,
endpoint=False)[:, np.newaxis])
# Create an array for population of candidate solutions.
self.population = np.zeros_like(samples)
# Initialize population of candidate solutions by permutation of the
# random samples.
for j in range(self.parameter_count):
order = rng.permutation(range(self.num_population_members))
self.population[:, j] = samples[order, j]
# reset population energies
self.population_energies = np.full(self.num_population_members,
np.inf)
# reset number of function evaluations counter
self._nfev = 0
def init_population_qmc(self, qmc_engine):
"""Initializes the population with a QMC method.
QMC methods ensures that each parameter is uniformly
sampled over its range.
Parameters
----------
qmc_engine : str
The QMC method to use for initialization. Can be one of
``latinhypercube``, ``sobol`` or ``halton``.
"""
from scipy.stats import qmc
rng = self.random_number_generator
# Create an array for population of candidate solutions.
if qmc_engine == 'latinhypercube':
sampler = qmc.LatinHypercube(d=self.parameter_count, seed=rng)
elif qmc_engine == 'sobol':
sampler = qmc.Sobol(d=self.parameter_count, seed=rng)
elif qmc_engine == 'halton':
sampler = qmc.Halton(d=self.parameter_count, seed=rng)
else:
raise ValueError(self.__init_error_msg)
self.population = sampler.random(n=self.num_population_members)
# reset population energies
self.population_energies = np.full(self.num_population_members,
np.inf)
# reset number of function evaluations counter
self._nfev = 0
def init_population_random(self):
"""
Initializes the population at random. This type of initialization
can possess clustering, Latin Hypercube sampling is generally better.
"""
rng = self.random_number_generator
self.population = rng.uniform(size=self.population_shape)
# reset population energies
self.population_energies = np.full(self.num_population_members,
np.inf)
# reset number of function evaluations counter
self._nfev = 0
def init_population_array(self, init):
"""
Initializes the population with a user specified population.
Parameters
----------
init : np.ndarray
Array specifying subset of the initial population. The array should
have shape (M, len(x)), where len(x) is the number of parameters.
The population is clipped to the lower and upper bounds.
"""
# make sure you're using a float array
popn = np.asfarray(init)
if (np.size(popn, 0) < 5 or
popn.shape[1] != self.parameter_count or
len(popn.shape) != 2):
raise ValueError("The population supplied needs to have shape"
" (M, len(x)), where M > 4.")
# scale values and clip to bounds, assigning to population
self.population = np.clip(self._unscale_parameters(popn), 0, 1)
self.num_population_members = np.size(self.population, 0)
self.population_shape = (self.num_population_members,
self.parameter_count)
# reset population energies
self.population_energies = np.full(self.num_population_members,
np.inf)
# reset number of function evaluations counter
self._nfev = 0
@property
def x(self):
"""
The best solution from the solver
"""
return self._scale_parameters(self.population[0])
@property
def convergence(self):
"""
The standard deviation of the population energies divided by their
mean.
"""
if np.any(np.isinf(self.population_energies)):
return np.inf
return (np.std(self.population_energies) /
np.abs(np.mean(self.population_energies) + _MACHEPS))
def converged(self):
"""
Return True if the solver has converged.
"""
if np.any(np.isinf(self.population_energies)):
return False
return (np.std(self.population_energies) <=
self.atol +
self.tol * np.abs(np.mean(self.population_energies)))
def solve(self):
"""
Runs the DifferentialEvolutionSolver.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes. If `polish`
was employed, and a lower minimum was obtained by the polishing,
then OptimizeResult also contains the ``jac`` attribute.
"""
nit, warning_flag = 0, False
status_message = _status_message['success']
# The population may have just been initialized (all entries are
# np.inf). If it has you have to calculate the initial energies.
# Although this is also done in the evolve generator it's possible
# that someone can set maxiter=0, at which point we still want the
# initial energies to be calculated (the following loop isn't run).
if np.all(np.isinf(self.population_energies)):
self.feasible, self.constraint_violation = (
self._calculate_population_feasibilities(self.population))
# only work out population energies for feasible solutions
self.population_energies[self.feasible] = (
self._calculate_population_energies(
self.population[self.feasible]))
self._promote_lowest_energy()
# do the optimization.
for nit in range(1, self.maxiter + 1):
# evolve the population by a generation
try:
next(self)
except StopIteration:
warning_flag = True
if self._nfev > self.maxfun:
status_message = _status_message['maxfev']
elif self._nfev == self.maxfun:
status_message = ('Maximum number of function evaluations'
' has been reached.')
break
if self.disp:
print("differential_evolution step %d: f(x)= %g"
% (nit,
self.population_energies[0]))
if self.callback:
c = self.tol / (self.convergence + _MACHEPS)
warning_flag = bool(self.callback(self.x, convergence=c))
if warning_flag:
status_message = ('callback function requested stop early'
' by returning True')
# should the solver terminate?
if warning_flag or self.converged():
break
else:
status_message = _status_message['maxiter']
warning_flag = True
DE_result = OptimizeResult(
x=self.x,
fun=self.population_energies[0],
nfev=self._nfev,
nit=nit,
message=status_message,
success=(warning_flag is not True))
if self.polish:
polish_method = 'L-BFGS-B'
if self._wrapped_constraints:
polish_method = 'trust-constr'
constr_violation = self._constraint_violation_fn(DE_result.x)
if np.any(constr_violation > 0.):
warnings.warn("differential evolution didn't find a"
" solution satisfying the constraints,"
" attempting to polish from the least"
" infeasible solution", UserWarning)
result = minimize(self.func,
np.copy(DE_result.x),
method=polish_method,
bounds=self.limits.T,
constraints=self.constraints)
self._nfev += result.nfev
DE_result.nfev = self._nfev
# Polishing solution is only accepted if there is an improvement in
# cost function, the polishing was successful and the solution lies
# within the bounds.
if (result.fun < DE_result.fun and
result.success and
np.all(result.x <= self.limits[1]) and
np.all(self.limits[0] <= result.x)):
DE_result.fun = result.fun
DE_result.x = result.x
DE_result.jac = result.jac
# to keep internal state consistent
self.population_energies[0] = result.fun
self.population[0] = self._unscale_parameters(result.x)
if self._wrapped_constraints:
DE_result.constr = [c.violation(DE_result.x) for
c in self._wrapped_constraints]
DE_result.constr_violation = np.max(
np.concatenate(DE_result.constr))
DE_result.maxcv = DE_result.constr_violation
if DE_result.maxcv > 0:
# if the result is infeasible then success must be False
DE_result.success = False
DE_result.message = ("The solution does not satisfy the"
" constraints, MAXCV = " % DE_result.maxcv)
return DE_result
def _calculate_population_energies(self, population):
"""
Calculate the energies of a population.
Parameters
----------
population : ndarray
An array of parameter vectors normalised to [0, 1] using lower
and upper limits. Has shape ``(np.size(population, 0), len(x))``.
Returns
-------
energies : ndarray
An array of energies corresponding to each population member. If
maxfun will be exceeded during this call, then the number of
function evaluations will be reduced and energies will be
right-padded with np.inf. Has shape ``(np.size(population, 0),)``
"""
num_members = np.size(population, 0)
nfevs = min(num_members,
self.maxfun - num_members)
energies = np.full(num_members, np.inf)
parameters_pop = self._scale_parameters(population)
try:
calc_energies = list(self._mapwrapper(self.func,
parameters_pop[0:nfevs]))
energies[0:nfevs] = np.squeeze(calc_energies)
except (TypeError, ValueError) as e:
# wrong number of arguments for _mapwrapper
# or wrong length returned from the mapper
raise RuntimeError(
"The map-like callable must be of the form f(func, iterable), "
"returning a sequence of numbers the same length as 'iterable'"
) from e
self._nfev += nfevs
return energies
def _promote_lowest_energy(self):
# swaps 'best solution' into first population entry
idx = np.arange(self.num_population_members)
feasible_solutions = idx[self.feasible]
if feasible_solutions.size:
# find the best feasible solution
idx_t = np.argmin(self.population_energies[feasible_solutions])
l = feasible_solutions[idx_t]
else:
# no solution was feasible, use 'best' infeasible solution, which
# will violate constraints the least
l = np.argmin(np.sum(self.constraint_violation, axis=1))
self.population_energies[[0, l]] = self.population_energies[[l, 0]]
self.population[[0, l], :] = self.population[[l, 0], :]
self.feasible[[0, l]] = self.feasible[[l, 0]]
self.constraint_violation[[0, l], :] = (
self.constraint_violation[[l, 0], :])
def _constraint_violation_fn(self, x):
"""
Calculates total constraint violation for all the constraints, for a given
solution.
Parameters
----------
x : ndarray
Solution vector
Returns
-------
cv : ndarray
Total violation of constraints. Has shape ``(M,)``, where M is the
number of constraints (if each constraint function only returns one
value)
"""
return np.concatenate([c.violation(x) for c in self._wrapped_constraints])
def _calculate_population_feasibilities(self, population):
"""
Calculate the feasibilities of a population.
Parameters
----------
population : ndarray
An array of parameter vectors normalised to [0, 1] using lower
and upper limits. Has shape ``(np.size(population, 0), len(x))``.
Returns
-------
feasible, constraint_violation : ndarray, ndarray
Boolean array of feasibility for each population member, and an
array of the constraint violation for each population member.
constraint_violation has shape ``(np.size(population, 0), M)``,
where M is the number of constraints.
"""
num_members = np.size(population, 0)
if not self._wrapped_constraints:
# shortcut for no constraints
return np.ones(num_members, bool), np.zeros((num_members, 1))
parameters_pop = self._scale_parameters(population)
constraint_violation = np.array([self._constraint_violation_fn(x)
for x in parameters_pop])
feasible = ~(np.sum(constraint_violation, axis=1) > 0)
return feasible, constraint_violation
    def __iter__(self):
        # The solver is its own iterator; each ``next()`` advances the
        # population by one generation (see ``__next__``).
        return self
    def __enter__(self):
        # Context-manager support; cleanup happens in ``__exit__``.
        return self
    def __exit__(self, *args):
        # Delegate cleanup to the map wrapper (e.g. shutting down a worker
        # pool used for parallel objective evaluation).
        return self._mapwrapper.__exit__(*args)
def _accept_trial(self, energy_trial, feasible_trial, cv_trial,
energy_orig, feasible_orig, cv_orig):
"""
Trial is accepted if:
* it satisfies all constraints and provides a lower or equal objective
function value, while both the compared solutions are feasible
- or -
* it is feasible while the original solution is infeasible,
- or -
* it is infeasible, but provides a lower or equal constraint violation
for all constraint functions.
This test corresponds to section III of Lampinen [1]_.
Parameters
----------
energy_trial : float
Energy of the trial solution
feasible_trial : float
Feasibility of trial solution
cv_trial : array-like
Excess constraint violation for the trial solution
energy_orig : float
Energy of the original solution
feasible_orig : float
Feasibility of original solution
cv_orig : array-like
Excess constraint violation for the original solution
Returns
-------
accepted : bool
"""
if feasible_orig and feasible_trial:
return energy_trial <= energy_orig
elif feasible_trial and not feasible_orig:
return True
elif not feasible_trial and (cv_trial <= cv_orig).all():
# cv_trial < cv_orig would imply that both trial and orig are not
# feasible
return True
return False
    def __next__(self):
        """
        Evolve the population by a single generation
        Returns
        -------
        x : ndarray
            The best solution from the solver.
        fun : float
            Value of objective function obtained from the best solution.
        """
        # the population may have just been initialized (all entries are
        # np.inf). If it has you have to calculate the initial energies
        if np.all(np.isinf(self.population_energies)):
            self.feasible, self.constraint_violation = (
                self._calculate_population_feasibilities(self.population))
            # only need to work out population energies for those that are
            # feasible
            self.population_energies[self.feasible] = (
                self._calculate_population_energies(
                    self.population[self.feasible]))
            self._promote_lowest_energy()
        # With dithering the mutation constant is redrawn uniformly from
        # [dither[0], dither[1]) once per generation.
        if self.dither is not None:
            self.scale = self.random_number_generator.uniform(self.dither[0],
                                                              self.dither[1])
        if self._updating == 'immediate':
            # update best solution immediately
            for candidate in range(self.num_population_members):
                if self._nfev > self.maxfun:
                    raise StopIteration
                # create a trial solution
                trial = self._mutate(candidate)
                # ensuring that it's in the range [0, 1)
                self._ensure_constraint(trial)
                # scale from [0, 1) to the actual parameter value
                parameters = self._scale_parameters(trial)
                # determine the energy of the objective function
                if self._wrapped_constraints:
                    cv = self._constraint_violation_fn(parameters)
                    feasible = False
                    energy = np.inf
                    # the objective is only evaluated (and counted) for
                    # feasible trials
                    if not np.sum(cv) > 0:
                        # solution is feasible
                        feasible = True
                        energy = self.func(parameters)
                        self._nfev += 1
                else:
                    feasible = True
                    cv = np.atleast_2d([0.])
                    energy = self.func(parameters)
                    self._nfev += 1
                # compare trial and population member
                if self._accept_trial(energy, feasible, cv,
                                      self.population_energies[candidate],
                                      self.feasible[candidate],
                                      self.constraint_violation[candidate]):
                    self.population[candidate] = trial
                    self.population_energies[candidate] = energy
                    self.feasible[candidate] = feasible
                    self.constraint_violation[candidate] = cv
                    # if the trial candidate is also better than the best
                    # solution then promote it.
                    if self._accept_trial(energy, feasible, cv,
                                          self.population_energies[0],
                                          self.feasible[0],
                                          self.constraint_violation[0]):
                        self._promote_lowest_energy()
        elif self._updating == 'deferred':
            # update best solution once per generation
            if self._nfev >= self.maxfun:
                raise StopIteration
            # 'deferred' approach, vectorised form.
            # create trial solutions
            trial_pop = np.array(
                [self._mutate(i) for i in range(self.num_population_members)])
            # enforce bounds
            self._ensure_constraint(trial_pop)
            # determine the energies of the objective function, but only for
            # feasible trials
            feasible, cv = self._calculate_population_feasibilities(trial_pop)
            trial_energies = np.full(self.num_population_members, np.inf)
            # only calculate for feasible entries
            trial_energies[feasible] = self._calculate_population_energies(
                trial_pop[feasible])
            # which solutions are 'improved'?
            loc = [self._accept_trial(*val) for val in
                   zip(trial_energies, feasible, cv, self.population_energies,
                       self.feasible, self.constraint_violation)]
            loc = np.array(loc)
            # accepted trials replace their originals across all per-member
            # state arrays in one vectorised pass
            self.population = np.where(loc[:, np.newaxis],
                                       trial_pop,
                                       self.population)
            self.population_energies = np.where(loc,
                                                trial_energies,
                                                self.population_energies)
            self.feasible = np.where(loc,
                                     feasible,
                                     self.feasible)
            self.constraint_violation = np.where(loc[:, np.newaxis],
                                                 cv,
                                                 self.constraint_violation)
            # make sure the best solution is updated if updating='deferred'.
            # put the lowest energy into the best solution position.
            self._promote_lowest_energy()
        return self.x, self.population_energies[0]
def _scale_parameters(self, trial):
"""Scale from a number between 0 and 1 to parameters."""
return self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2
def _unscale_parameters(self, parameters):
"""Scale from parameters to a number between 0 and 1."""
return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5
def _ensure_constraint(self, trial):
"""Make sure the parameters lie between the limits."""
mask = np.where((trial > 1) | (trial < 0))
trial[mask] = self.random_number_generator.uniform(size=mask[0].shape)
    def _mutate(self, candidate):
        """Create a trial vector based on a mutation strategy."""
        trial = np.copy(self.population[candidate])
        rng = self.random_number_generator
        # parameter index guaranteed to cross over (binomial) or used as the
        # starting point of the crossover run (exponential)
        fill_point = rng.choice(self.parameter_count)
        if self.strategy in ['currenttobest1exp', 'currenttobest1bin']:
            # current-to-best strategies also need the candidate index itself
            bprime = self.mutation_func(candidate,
                                        self._select_samples(candidate, 5))
        else:
            bprime = self.mutation_func(self._select_samples(candidate, 5))
        if self.strategy in self._binomial:
            crossovers = rng.uniform(size=self.parameter_count)
            crossovers = crossovers < self.cross_over_probability
            # the last one is always from the bprime vector for binomial
            # If you fill in modulo with a loop you have to set the last one to
            # true. If you don't use a loop then you can have any random entry
            # be True.
            crossovers[fill_point] = True
            trial = np.where(crossovers, bprime, trial)
            return trial
        elif self.strategy in self._exponential:
            i = 0
            crossovers = rng.uniform(size=self.parameter_count)
            crossovers = crossovers < self.cross_over_probability
            # copy a contiguous (modulo) run of parameters from bprime while
            # successive crossover draws keep succeeding
            while (i < self.parameter_count and crossovers[i]):
                trial[fill_point] = bprime[fill_point]
                fill_point = (fill_point + 1) % self.parameter_count
                i += 1
            return trial
        # NOTE(review): a strategy in neither self._binomial nor
        # self._exponential falls through and implicitly returns None --
        # presumably the strategy name is validated in __init__; confirm.
def _best1(self, samples):
"""best1bin, best1exp"""
r0, r1 = samples[:2]
return (self.population[0] + self.scale *
(self.population[r0] - self.population[r1]))
def _rand1(self, samples):
"""rand1bin, rand1exp"""
r0, r1, r2 = samples[:3]
return (self.population[r0] + self.scale *
(self.population[r1] - self.population[r2]))
def _randtobest1(self, samples):
"""randtobest1bin, randtobest1exp"""
r0, r1, r2 = samples[:3]
bprime = np.copy(self.population[r0])
bprime += self.scale * (self.population[0] - bprime)
bprime += self.scale * (self.population[r1] -
self.population[r2])
return bprime
def _currenttobest1(self, candidate, samples):
"""currenttobest1bin, currenttobest1exp"""
r0, r1 = samples[:2]
bprime = (self.population[candidate] + self.scale *
(self.population[0] - self.population[candidate] +
self.population[r0] - self.population[r1]))
return bprime
def _best2(self, samples):
"""best2bin, best2exp"""
r0, r1, r2, r3 = samples[:4]
bprime = (self.population[0] + self.scale *
(self.population[r0] + self.population[r1] -
self.population[r2] - self.population[r3]))
return bprime
def _rand2(self, samples):
"""rand2bin, rand2exp"""
r0, r1, r2, r3, r4 = samples
bprime = (self.population[r0] + self.scale *
(self.population[r1] + self.population[r2] -
self.population[r3] - self.population[r4]))
return bprime
def _select_samples(self, candidate, number_samples):
"""
obtain random integers from range(self.num_population_members),
without replacement. You can't have the original candidate either.
"""
idxs = list(range(self.num_population_members))
idxs.remove(candidate)
self.random_number_generator.shuffle(idxs)
idxs = idxs[:number_samples]
return idxs
class _FunctionWrapper:
"""
Object to wrap user cost function, allowing picklability
"""
def __init__(self, f, args):
self.f = f
self.args = [] if args is None else args
def __call__(self, x):
return self.f(x, *self.args)
class _ConstraintWrapper:
"""Object to wrap/evaluate user defined constraints.
Very similar in practice to `PreparedConstraint`, except that no evaluation
of jac/hess is performed (explicit or implicit).
If created successfully, it will contain the attributes listed below.
Parameters
----------
constraint : {`NonlinearConstraint`, `LinearConstraint`, `Bounds`}
Constraint to check and prepare.
x0 : array_like
Initial vector of independent variables.
Attributes
----------
fun : callable
Function defining the constraint wrapped by one of the convenience
classes.
bounds : 2-tuple
Contains lower and upper bounds for the constraints --- lb and ub.
These are converted to ndarray and have a size equal to the number of
the constraints.
"""
def __init__(self, constraint, x0):
self.constraint = constraint
if isinstance(constraint, NonlinearConstraint):
def fun(x):
return np.atleast_1d(constraint.fun(x))
elif isinstance(constraint, LinearConstraint):
def fun(x):
if issparse(constraint.A):
A = constraint.A
else:
A = np.atleast_2d(constraint.A)
return A.dot(x)
elif isinstance(constraint, Bounds):
def fun(x):
return x
else:
raise ValueError("`constraint` of an unknown type is passed.")
self.fun = fun
lb = np.asarray(constraint.lb, dtype=float)
ub = np.asarray(constraint.ub, dtype=float)
f0 = fun(x0)
m = f0.size
if lb.ndim == 0:
lb = np.resize(lb, m)
if ub.ndim == 0:
ub = np.resize(ub, m)
self.bounds = (lb, ub)
def __call__(self, x):
return np.atleast_1d(self.fun(x))
def violation(self, x):
"""How much the constraint is exceeded by.
Parameters
----------
x : array-like
Vector of independent variables
Returns
-------
excess : array-like
How much the constraint is exceeded by, for each of the
constraints specified by `_ConstraintWrapper.fun`.
"""
ev = self.fun(np.asarray(x))
excess_lb = np.maximum(self.bounds[0] - ev, 0)
excess_ub = np.maximum(ev - self.bounds[1], 0)
return excess_lb + excess_ub
| {
"content_hash": "49dd8841ad7a5c319dbc75e97469dcdc",
"timestamp": "",
"source": "github",
"line_count": 1430,
"max_line_length": 82,
"avg_line_length": 42.56153846153846,
"alnum_prop": 0.5882884511115127,
"repo_name": "e-q/scipy",
"id": "7934f64e6dddbe095c36cf88128bc064137a454f",
"size": "60863",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scipy/optimize/_differentialevolution.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4399737"
},
{
"name": "C++",
"bytes": "649915"
},
{
"name": "Dockerfile",
"bytes": "1291"
},
{
"name": "Fortran",
"bytes": "5368728"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12786221"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils import six
from djblets.testing.decorators import add_fixtures
from djblets.webapi.errors import INVALID_FORM_DATA
from kgb import SpyAgency
from reviewboard import scmtools
from reviewboard.diffviewer.models import DiffSet
from reviewboard.webapi.errors import (DIFF_PARSE_ERROR, INVALID_REPOSITORY,
REPO_FILE_NOT_FOUND)
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import validate_diff_mimetype
from reviewboard.webapi.tests.mixins import BasicTestsMetaclass
from reviewboard.webapi.tests.urls import get_validate_diff_url
@six.add_metaclass(BasicTestsMetaclass)
class ResourceTests(SpyAgency, BaseWebAPITestCase):
    """Testing the ValidateDiffResource APIs."""
    fixtures = ['test_users', 'test_scmtools']
    sample_api_url = 'validation/diffs/'
    test_http_methods = ('DELETE', 'PUT',)
    resource = resources.validate_diff
    # A minimal, syntactically valid git diff. BUGFIX: each byte literal must
    # end in b'\n' -- without the terminators the adjacent literals
    # concatenate into a single unparseable line, so the constant was not a
    # valid git diff despite its name.
    VALID_GIT_DIFF = (
        b'diff --git a/readme b/readme\n'
        b'index d6613f5..5b50866 100644\n'
        b'--- a/readme\n'
        b'+++ b/readme\n'
        b'@@ -1 +1,3 @@\n'
        b' Hello there\n'
        b'+\n'
        b'+Oh hi!\n'
    )
    def setup_http_not_allowed_item_test(self, user):
        return get_validate_diff_url()
    def _get_testdata_path(self, filename):
        """Return the path of a file in the scmtools test data directory."""
        return os.path.join(os.path.dirname(scmtools.__file__),
                            'testdata', filename)
    #
    # HTTP GET tests
    #
    def test_get(self):
        """Testing the GET validation/diffs/ API"""
        self.api_get(get_validate_diff_url(),
                     expected_mimetype=validate_diff_mimetype)
    @add_fixtures(['test_site'])
    def test_get_with_site(self):
        """Testing the GET validation/diffs/ API with access to local site"""
        self._login_user(local_site=True)
        self.api_get(get_validate_diff_url(self.local_site_name),
                     expected_mimetype=validate_diff_mimetype)
    @add_fixtures(['test_site'])
    def test_get_with_site_no_access(self):
        """Testing the GET validation/diffs/ API
        without access to local site
        """
        self.api_get(get_validate_diff_url(self.local_site_name),
                     expected_status=403)
    #
    # HTTP POST tests
    #
    def test_post(self):
        """Testing the POST validation/diffs/ API"""
        repository = self.create_repository(tool_name='Test')
        # The context manager guarantees the file handle is closed even if
        # the request assertion fails.
        with open(self._get_testdata_path('git_readme.diff'), 'r') as fp:
            self.api_post(
                get_validate_diff_url(),
                {
                    'repository': repository.pk,
                    'path': fp,
                    'basedir': '/trunk',
                },
                expected_status=200,
                expected_mimetype=validate_diff_mimetype)
    @add_fixtures(['test_site'])
    def test_post_with_site(self):
        """Testing the POST validation/diffs/ API
        with access to a local site
        """
        repository = self.create_repository(with_local_site=True,
                                            tool_name='Test')
        self._login_user(local_site=True)
        with open(self._get_testdata_path('git_readme.diff'), 'r') as fp:
            self.api_post(
                get_validate_diff_url(self.local_site_name),
                {
                    'repository': repository.pk,
                    'path': fp,
                    'basedir': '/trunk',
                },
                expected_status=200,
                expected_mimetype=validate_diff_mimetype)
    @add_fixtures(['test_site'])
    def test_post_with_site_no_access(self):
        """Testing the POST validation/diffs/ API
        without access to a local site
        """
        repository = self.create_repository(with_local_site=True,
                                            tool_name='Test')
        with open(self._get_testdata_path('git_readme.diff'), 'r') as fp:
            self.api_post(
                get_validate_diff_url(self.local_site_name),
                {
                    'repository': repository.pk,
                    'path': fp,
                    'basedir': '/trunk',
                },
                expected_status=403)
    def test_post_with_base_commit_id(self):
        """Testing the POST validation/diffs/ API with base_commit_id"""
        self.spy_on(DiffSet.objects.create_from_upload, call_original=True)
        repository = self.create_repository(tool_name='Test')
        with open(self._get_testdata_path('git_readme.diff'), 'r') as fp:
            self.api_post(
                get_validate_diff_url(),
                {
                    'repository': repository.pk,
                    'path': fp,
                    'basedir': '/trunk',
                    'base_commit_id': '1234',
                },
                expected_status=200,
                expected_mimetype=validate_diff_mimetype)
        # The validator must forward base_commit_id through to the DiffSet
        # creation call.
        last_call = DiffSet.objects.create_from_upload.last_call
        self.assertEqual(last_call.kwargs.get('base_commit_id'), '1234')
    def test_post_with_missing_basedir(self):
        """Testing the POST validations/diffs/ API with a missing basedir"""
        repository = self.create_repository(tool_name='Test')
        with open(self._get_testdata_path('git_readme.diff'), 'r') as fp:
            rsp = self.api_post(
                get_validate_diff_url(),
                {
                    'repository': repository.pk,
                    'path': fp,
                },
                expected_status=400)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(rsp['err']['code'], INVALID_FORM_DATA.code)
        self.assertIn('basedir', rsp['fields'])
    def test_post_with_files_not_found(self):
        """Testing the POST validation/diffs/ API
        with source files not found
        """
        repository = self.create_repository(tool_name='Test')
        with open(self._get_testdata_path('git_file_not_found.diff'),
                  'r') as fp:
            rsp = self.api_post(
                get_validate_diff_url(),
                {
                    'repository': repository.pk,
                    'path': fp,
                    'basedir': '',
                },
                expected_status=400)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(rsp['err']['code'], REPO_FILE_NOT_FOUND.code)
        self.assertEqual(rsp['file'], 'missing-file')
        self.assertEqual(rsp['revision'], 'd6613f0')
    def test_post_with_parse_error(self):
        """Testing the POST validation/diffs/ API with a malformed diff file"""
        repository = self.create_repository(tool_name='Test')
        # stunnel.pem is deliberately not a diff at all.
        with open(self._get_testdata_path('stunnel.pem'), 'r') as fp:
            rsp = self.api_post(
                get_validate_diff_url(),
                {
                    'repository': repository.pk,
                    'path': fp,
                    'basedir': '/trunk',
                },
                expected_status=400)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(rsp['err']['code'], DIFF_PARSE_ERROR.code)
        self.assertEqual(rsp['reason'],
                         'This does not appear to be a git diff')
        self.assertEqual(rsp['linenum'], 0)
    def test_post_with_conflicting_repos(self):
        """Testing the POST validations/diffs/ API with conflicting
        repositories
        """
        repository = self.create_repository(tool_name='Test')
        # A second repository whose mirror_path collides with the first's
        # path, so a lookup by path is ambiguous.
        self.create_repository(tool_name='Test',
                               name='Test 2',
                               path='blah',
                               mirror_path=repository.path)
        rsp = self.api_post(
            get_validate_diff_url(),
            {
                'repository': repository.path,
                'path': SimpleUploadedFile('readme.diff', self.VALID_GIT_DIFF,
                                           content_type='text/x-patch'),
                'basedir': '/trunk',
            },
            expected_status=400)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(rsp['err']['code'], INVALID_REPOSITORY.code)
        self.assertEqual(rsp['err']['msg'],
                         'Too many repositories matched "%s". Try '
                         'specifying the repository by name instead.'
                         % repository.path)
        self.assertEqual(rsp['repository'], repository.path)
| {
"content_hash": "207c63d240f13a145ee9f70a3f8637de",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 79,
"avg_line_length": 35.447876447876446,
"alnum_prop": 0.542642413680427,
"repo_name": "beol/reviewboard",
"id": "c81a011714c50f3fcb874b93fd74a3bafecdf436",
"size": "9181",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "reviewboard/webapi/tests/test_validate_diff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "212721"
},
{
"name": "HTML",
"bytes": "179427"
},
{
"name": "JavaScript",
"bytes": "1463002"
},
{
"name": "Python",
"bytes": "3686127"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
} |
"""Tests for remote eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import numpy as np
from tensorflow.contrib.eager.python import parameter_server
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import remote
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
JOB_NAME = "remote_device"
ALT_JOB_NAME = "alt_remote_device"
def run_sync_and_async(f):
  """Execute all test methods in the given class in sync and async modes."""

  @functools.wraps(f)
  def decorator(self, *args, **kwargs):
    # Run the wrapped test once per execution mode, async first, matching
    # the original call order.
    for mode in (context.ASYNC, context.SYNC):
      with context.execution_mode(mode):
        f(self, *args, **kwargs)

  return decorator
def get_server_def(job_name, local_server_port, remote_server_addresses,
                   task_index):
  """Returns a server def with a single job + multiple tasks."""
  cluster_def = cluster_pb2.ClusterDef()
  job_def = cluster_def.job.add()
  job_def.name = job_name

  # Task 0 is always the local server; the remote servers fill the
  # subsequent task slots in order.
  job_def.tasks[0] = "localhost:%d" % local_server_port
  for task_id, address in enumerate(remote_server_addresses, start=1):
    job_def.tasks[task_id] = address

  return tensorflow_server_pb2.ServerDef(
      cluster=cluster_def,
      job_name=job_name,
      task_index=task_index,
      protocol="grpc")
class RemoteExecutionTest(test.TestCase):
  """Eager-mode remote execution tests against two cached local gRPC servers."""
  def __init__(self, methodName="runTest"):  # pylint: disable=invalid-name
    super(RemoteExecutionTest, self).__init__(methodName)
    # Two in-process gRPC servers play the role of remote tasks 1 and 2.
    self._cached_server1 = server_lib.Server.create_local_server()
    self._cached_server2 = server_lib.Server.create_local_server()
    os.environ["TF_EAGER_REMOTE_USE_SEND_TENSOR_RPC"] = "1"
    # Strip the "grpc://" scheme; server defs expect bare host:port targets.
    self._cached_server1_target = self._cached_server1.target[len("grpc://"):]
    self._cached_server2_target = self._cached_server2.target[len("grpc://"):]
  def setUp(self):
    """Install a fresh server def: local task 0 plus the two cached servers."""
    # Start the local server.
    local_port = pywrap_tensorflow.TF_PickUnusedPortOrDie()
    context.set_server_def(
        server_def=get_server_def(
            JOB_NAME,
            local_server_port=local_port,
            remote_server_addresses=[
                self._cached_server1_target, self._cached_server2_target
            ],
            task_index=0))
  @test_util.run_gpu_only
  @run_sync_and_async
  def testGpuToRemoteCopy(self):
    """Remote matmul consuming an input tensor produced on the local GPU."""
    with ops.device("gpu:0"):
      x = array_ops.ones([2, 2])
    with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
      y = math_ops.matmul(x, x)
    np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
  @run_sync_and_async
  def testDefunMatmul(self):
    """Basic remote eager execution with defun."""
    mm_defun = function.defun(math_ops.matmul)
    # Inputs come from two different remote tasks; the defun is invoked
    # under task 2's device scope.
    with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
      x1 = array_ops.ones([2, 2])
    with ops.device("job:%s/replica:0/task:2/device:CPU:0" % JOB_NAME):
      x2 = array_ops.ones([2, 2])
      y = mm_defun(x1, x2)
    np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
  @run_sync_and_async
  def testSimpleMatmul(self):
    """Basic remote eager execution."""
    # Operands are created on task 1 and task 2; the matmul runs under
    # task 2's device scope.
    with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
      x1 = array_ops.ones([2, 2])
    with ops.device("job:%s/replica:0/task:2/device:CPU:0" % JOB_NAME):
      x2 = array_ops.ones([2, 2])
      y = math_ops.matmul(x1, x2)
    np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
  def testParameterServer(self):
    """Variables under parameter_server_scope land on PS tasks 0 and 1."""
    with parameter_server.parameter_server_scope(
        is_chief=True, ps_job_name=JOB_NAME, num_ps_tasks=3):
      v0 = variables.Variable([1.0], name="v0")
      v1 = variables.Variable([2.0], name="v1")
      v0.assign(v0 * v1)
      self.assertAllEqual(v0.read_value(), [2.0])
      self.assertAllEqual(v0.device,
                          "/job:%s/replica:0/task:0/device:CPU:0" % JOB_NAME)
      self.assertAllEqual(v1.device,
                          "/job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME)
      v1.assign_add(v1)
    # Simulate aliasing another variable of the same name as v1
    with ops.device("/job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
      v1_replica = parameter_server.SharedVariable(
          [1.0], name="v1", initialize=False)
      self.assertAllEqual(v1_replica.read_value(), [4.0])
  @run_sync_and_async
  def testSimpleWeightRead(self):
    """Basic remote eager weight read."""
    with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
      w = resource_variable_ops.ResourceVariable([[2.0]])
      loss = w * w
    np.testing.assert_array_equal([[4.0]], loss.numpy())
  @run_sync_and_async
  def testTapeWeightRead(self):
    """Remote eager weight read in a tape."""
    with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
      w = resource_variable_ops.ResourceVariable([[3.0]])
      with backprop.GradientTape() as tape:
        loss = w * w
      grad = tape.gradient(loss, w)
    np.testing.assert_array_equal([[9.0]], loss.numpy())
    np.testing.assert_array_equal([[6.0]], grad.numpy())
  @run_sync_and_async
  def testServerDefChanged(self):
    """Update server def, and run ops on new cluster."""
    context.set_server_def(
        server_def=get_server_def(
            ALT_JOB_NAME,
            local_server_port=0,
            remote_server_addresses=[
                self._cached_server1_target, self._cached_server2_target
            ],
            task_index=0))
    with ops.device("job:%s/replica:0/task:1/device:CPU:0" % ALT_JOB_NAME):
      x1 = array_ops.ones([2, 2])
      y = math_ops.matmul(x1, x1)
    np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
    # Set the server def back to JOB_NAME
    context.set_server_def(
        server_def=get_server_def(
            JOB_NAME,
            local_server_port=0,
            remote_server_addresses=[
                self._cached_server1_target, self._cached_server2_target
            ],
            task_index=0))
    with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
      x1 = array_ops.ones([2, 2])
      y = math_ops.matmul(x1, x1)
    np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
  @run_sync_and_async
  def testConnectToRemoteServer(self):
    """Basic server connection."""
    remote.connect_to_remote_host(self._cached_server1_target)
    with ops.device("job:worker/replica:0/task:0/device:CPU:0"):
      x1 = array_ops.ones([2, 2])
      x2 = array_ops.ones([2, 2])
      y = math_ops.matmul(x1, x2)
    np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
  @run_sync_and_async
  def testContextDeviceUpdated(self):
    """Tests that the context device is correctly updated."""
    with ops.device("cpu:0"):
      x1 = array_ops.ones([2, 2])
      x2 = array_ops.ones([2, 2])
      y = math_ops.matmul(x1, x2)
    np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
    # `y` is placed on the local CPU as expected.
    self.assertEqual(y.device,
                     "/job:%s/replica:0/task:0/device:CPU:0" % JOB_NAME)
  @test_util.run_gpu_only
  @run_sync_and_async
  def testGPUToRemoteCopy(self):
    """Tests that the remote copy happens satisfactorily."""
    x1 = array_ops.ones([2, 2]).gpu()
    with ops.device("/job:remote_device/replica:0/task:1/device:CPU:0"):
      x2 = x1._copy()  # pylint: disable=protected-access
    np.testing.assert_array_equal(x1.numpy(), x2.numpy())
if __name__ == "__main__":
  # Remote eager tests require eager mode; enable it before running.
  ops.enable_eager_execution()
  test.main()
| {
"content_hash": "fa7e9c1605fd81ac5e6bb1a56e29e31f",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 78,
"avg_line_length": 34.56170212765957,
"alnum_prop": 0.6470081260773208,
"repo_name": "chemelnucfin/tensorflow",
"id": "e3329ca1c79aaff7d0e1ec873354c0be6674ab6e",
"size": "8811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/eager/python/remote_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4913"
},
{
"name": "Batchfile",
"bytes": "16146"
},
{
"name": "C",
"bytes": "825231"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "75313939"
},
{
"name": "CMake",
"bytes": "207856"
},
{
"name": "Dockerfile",
"bytes": "80130"
},
{
"name": "Go",
"bytes": "1670422"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "881711"
},
{
"name": "Jupyter Notebook",
"bytes": "1113647"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "853297"
},
{
"name": "Makefile",
"bytes": "109340"
},
{
"name": "Objective-C",
"bytes": "105235"
},
{
"name": "Objective-C++",
"bytes": "258793"
},
{
"name": "PHP",
"bytes": "38007"
},
{
"name": "Pascal",
"bytes": "3741"
},
{
"name": "Pawn",
"bytes": "14380"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "50825074"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4706"
},
{
"name": "Shell",
"bytes": "532610"
},
{
"name": "Smarty",
"bytes": "31460"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
} |
import inspect
from unittest import TestCase
from mock import patch
import random
import gc
from allura.lib.decorators import task, memoize
from alluratest.controller import setup_basic_test, setup_global_objects
class TestTask(TestCase):
    """Tests for the ``@task`` decorator."""

    def setup_method(self, method):
        setup_basic_test()

    def test_no_params(self):
        # Bare usage: @task without arguments must still attach .post().
        @task
        def func():
            pass
        self.assertTrue(hasattr(func, 'post'))

    def test_with_params(self):
        # Parameterized usage: @task(...) must also attach .post().
        @task(disable_notifications=True)
        def func():
            pass
        self.assertTrue(hasattr(func, 'post'))

    @patch('allura.lib.decorators.c')
    @patch('allura.model.MonQTask')
    def test_post(self, MonQTask, c):
        # NOTE: stacked mock.patch decorators inject mocks bottom-up, so the
        # first mock argument after `self` corresponds to the *bottom*
        # decorator (MonQTask).  The previous parameter order (c, MonQTask)
        # had the two mocks swapped, so the side_effect below was installed
        # on the wrong mock and mock_post's assertions never actually ran.
        @task(disable_notifications=True)
        def func(s, foo=None, **kw):
            pass

        def mock_post(f, args, kw, delay=None):
            # The task machinery should disable notifications while posting
            # and must pop `delay` out of the kwargs it forwards.
            self.assertTrue(c.project.notifications_disabled)
            self.assertFalse('delay' in kw)
            self.assertEqual(delay, 1)
            self.assertEqual(kw, dict(foo=2))
            self.assertEqual(args, ('test',))
            self.assertEqual(f, func)

        c.project.notifications_disabled = False
        MonQTask.post.side_effect = mock_post
        func.post('test', foo=2, delay=1)
class TestMemoize:
    """Tests for the ``@memoize`` decorator."""
    def test_function(self):
        """memoize on a plain function caches per distinct (args, kwargs)."""
        @memoize
        def remember_randomy(do_random, foo=None):
            if do_random:
                return random.random()
            else:
                return "constant"
        rand1 = remember_randomy(True)
        rand2 = remember_randomy(True)
        const1 = remember_randomy(False)
        rand_kwargs1 = remember_randomy(True, foo='asdf')
        rand_kwargs2 = remember_randomy(True, foo='xyzzy')
        # Identical calls hit the cache, so the "random" value repeats.
        assert rand1 == rand2
        assert const1 == "constant"
        # Different kwargs are distinct cache keys.
        assert rand1 != rand_kwargs1
        assert rand_kwargs1 != rand_kwargs2
    def test_methods(self):
        """Caches must be kept separate per method and per instance."""
        class Randomy:
            @memoize
            def randomy(self, do_random):
                if do_random:
                    return random.random()
                else:
                    return "constant"
            @memoize
            def other(self, do_random):
                if do_random:
                    return random.random()
                else:
                    return "constant"
        r = Randomy()
        rand1 = r.randomy(True)
        rand2 = r.randomy(True)
        const1 = r.randomy(False)
        other1 = r.other(True)
        other2 = r.other(True)
        # Same instance: repeats come from the cache, but the two methods
        # must not share a cache.
        assert rand1 == rand2
        assert const1 == "constant"
        assert rand1 != other1
        assert other1 == other2
        # A second instance must get its own, independent caches.
        r2 = Randomy()
        r2rand1 = r2.randomy(True)
        r2rand2 = r2.randomy(True)
        r2const1 = r2.randomy(False)
        r2other1 = r2.other(True)
        r2other2 = r2.other(True)
        assert r2rand1 != rand1
        assert r2rand1 == r2rand2
        assert r2other1 != other1
        assert r2other1 == r2other2
    def test_methods_garbage_collection(self):
        """memoize must not create references that keep instances alive."""
        class Randomy:
            @memoize
            def randomy(self, do_random):
                if do_random:
                    return random.random()
                else:
                    return "constant"
        r = Randomy()
        rand1 = r.randomy(True)
        # After a memoized call, only stack frames may refer to `r`; any
        # other referrer indicates @memoize created a reference cycle.
        for gc_ref in gc.get_referrers(r):
            if inspect.isframe(gc_ref):
                continue
            else:
                raise AssertionError('Unexpected reference to `r` instance: {!r}\n'
                                     '@memoize probably made a reference to it and has created a circular reference loop'.format(gc_ref))
| {
"content_hash": "7379864eb1294fffb743d97c8a07c018",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 137,
"avg_line_length": 28.74015748031496,
"alnum_prop": 0.549041095890411,
"repo_name": "apache/allura",
"id": "80c3bd46ecedd1a7e37c5e2c4c6c69f6f5d5dc25",
"size": "4519",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Allura/allura/tests/test_decorators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6142"
},
{
"name": "CSS",
"bytes": "181457"
},
{
"name": "Dockerfile",
"bytes": "4748"
},
{
"name": "HTML",
"bytes": "867332"
},
{
"name": "JavaScript",
"bytes": "1191836"
},
{
"name": "Makefile",
"bytes": "6248"
},
{
"name": "Python",
"bytes": "4499987"
},
{
"name": "RAML",
"bytes": "27600"
},
{
"name": "Roff",
"bytes": "41"
},
{
"name": "Ruby",
"bytes": "1280"
},
{
"name": "SCSS",
"bytes": "27742"
},
{
"name": "Shell",
"bytes": "131207"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
} |
from src.Perro import Perro
import src.Gato
if __name__ == "__main__":
    # Demo: exercise the Perro class imported directly into the namespace.
    perro = Perro("Lassie", "Bulldog")
    perro.comer()
    perro.dormir()
    print(perro.nombre + " es una "+ perro.raza)
    # Demo: exercise Gato through its fully-qualified module path.
    gato = src.Gato.Gato("Misifus")
    gato.dormir()
    print("Mi gato se llama " + gato.getNombre())
print("Mi gato se llama " + misifus.getNombre()) | {
"content_hash": "518a9e7ce3de4a0293c30b6bbdc0c0a9",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 52,
"avg_line_length": 26.25,
"alnum_prop": 0.6253968253968254,
"repo_name": "ampotty/uip-pc3",
"id": "bdc5abffcd311645ce659cc965220a9d9daa0389",
"size": "315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Ejemplos/ejemplo19/src/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3272"
},
{
"name": "CSS",
"bytes": "25758"
},
{
"name": "HTML",
"bytes": "20380"
},
{
"name": "JavaScript",
"bytes": "192486"
},
{
"name": "Makefile",
"bytes": "2464"
},
{
"name": "Python",
"bytes": "74342"
},
{
"name": "Shell",
"bytes": "3700"
}
],
"symlink_target": ""
} |
"""
This file exists for backward compatibility reasons.
"""
from logging import warning
from .nonp import NoNumpyException, DEFAULT_ENCODERS, DEFAULT_HOOKS, dumps, dump, loads, load # keep 'unused' imports
from .utils import hashodict, NoPandasException
from .comment import strip_comment_line_with_symbol, strip_comments # keep 'unused' imports
from .encoders import TricksEncoder, json_date_time_encode, class_instance_encode, ClassInstanceEncoder, \
numpy_encode, NumpyEncoder # keep 'unused' imports
from .decoders import DuplicateJsonKeyException, TricksPairHook, json_date_time_hook, ClassInstanceHook, \
json_complex_hook, json_set_hook, json_numpy_obj_hook # keep 'unused' imports
# Fail fast if numpy is missing: this (deprecated) module exists purely for
# the numpy-aware entry points.
try:
    import numpy
except ImportError:
    raise NoNumpyException(
        'Could not load numpy, maybe it is not installed? If you do not want to use numpy encoding '
        'or decoding, you can import the functions from json_tricks.nonp instead, which do not need numpy.')
# todo: warning('`json_tricks.np` is deprecated, you can import directly from `json_tricks`')
# Deprecated aliases kept so old `json_tricks.np` imports keep working.
DEFAULT_NP_ENCODERS = [numpy_encode] + DEFAULT_ENCODERS  # DEPRECATED
DEFAULT_NP_HOOKS = [json_numpy_obj_hook] + DEFAULT_HOOKS  # DEPRECATED
| {
"content_hash": "637f33c106062a936b292e23de98d3b9",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 118,
"avg_line_length": 44.51851851851852,
"alnum_prop": 0.7703826955074875,
"repo_name": "pannal/Subliminal.bundle",
"id": "676041f9f1f6736e05316cca3227a9cf7467e011",
"size": "1203",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Contents/Libraries/Shared/json_tricks/np.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3012769"
},
{
"name": "Python",
"bytes": "3311785"
},
{
"name": "Shell",
"bytes": "273"
}
],
"symlink_target": ""
} |
import json, os, sys, threading, time

if sys.version_info >= (3,0):
    isPython3 = True
    import queue
    from queue import Queue
    from queue import Full
else:
    isPython3 = False
    import Queue
    from Queue import Queue
    from Queue import Full

from time import sleep
from threading import Lock
#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=
#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=# C L A S S E S =#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=
#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=
#--- General logging functionality
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-
class Logger(threading.Thread):
    """Background thread that drains a shared queue of log entries into a
    JSON-lines log file.

    Producers on any thread call the static ``log``/``log_error``/
    ``log_warning``/``log_exception`` methods; the logger thread wakes up
    every ~50 ms and appends the pending entries to the file.
    """

    subsys = 'LOGGER'
    log_queue = Queue(500)  # shared bounded queue of pending log dicts
    log_lock = Lock()  # NOTE(review): declared but unused; kept for compatibility
    debug = False  # when True, log() also echoes messages to stdout

    def __init__(self, log_name, log_dir=None, debug_on=False):
        """Create (but do not start) the logger thread.

        :param log_name: file name of the log, created/appended in log_dir
        :param log_dir: directory for the log file; defaults to the current
            working directory at call time.  (The original default bound
            ``os.getcwd()`` once at import time, silently pinning the
            import-time directory.)
        :param debug_on: also echo log messages to stdout
        """
        threading.Thread.__init__(self)
        if log_dir is None:
            log_dir = os.getcwd()
        self.log_start_time = time.time()
        self._log_sleep_time = 0.050  # seconds between queue flushes
        self.stop = threading.Event()
        self.stop.clear()
        self.log_full_filename = os.path.join(log_dir, log_name)
        Logger.debug = debug_on

    def run(self):
        """Main loop: periodically flush the queue until terminate() is called."""
        # Only used to word the startup message below.
        log_file_exists = os.path.isfile(self.log_full_filename)
        Logger.log(Logger.subsys,
            'Logger initialized, {:s}: "{:s}"'.format(
                "appending to" if log_file_exists else "created",
                self.log_full_filename))
        while not self.is_terminated():
            self.flush_queue()
            sleep(self._log_sleep_time)
        # Log a final message, then drain whatever is left before exiting.
        Logger.log(self.subsys, 'Logger terminating ...')
        self.flush_queue()

    @staticmethod
    def log(log_subsys, log_message, log_type='info', log_data=None):
        """Queue a log entry without blocking (drops it if the queue is full)."""
        current_time = time.time()
        # Form the base log entry dictionary.
        log_entry = {
            'time': current_time,
            'subsys': log_subsys,
            'type': log_type,
            'message': log_message,
        }
        # Merge optional structured data into the entry.
        if log_data is not None:
            log_dict = dict(log_entry, **log_data)
        else:
            log_dict = log_entry
        if Logger.debug:
            print("LOG {:s} | {:s}".format(time.strftime("%H:%M:%S", time.localtime(current_time)), log_message))
        try:
            # put_nowait so a full queue raises Full instead of blocking the
            # caller forever; with the original blocking put() the handler
            # below was dead code.
            Logger.log_queue.put_nowait(log_dict)
        except Full:
            # BUGFIX: the original caught `Queue.Full`, but both import
            # branches rebind the name `Queue` to the Queue *class*, which
            # has no `Full` attribute -- the except clause itself raised
            # AttributeError.  `Full` is now imported at module level.
            sys.stderr.write('Warning: log queue full, discarding message: "{:s}"\n'.format(log_message))

    @staticmethod
    def log_error(log_subsys, log_message, log_data=None):
        """Queue an entry with type 'error'."""
        Logger.log(log_subsys, log_message, 'error', log_data)

    @staticmethod
    def log_warning(log_subsys, log_message, log_data=None):
        """Queue an entry with type 'warning'."""
        Logger.log(log_subsys, log_message, 'warning', log_data)

    @staticmethod
    def log_exception(log_subsys, log_message, log_exception):
        """Queue an entry with type 'exception', capturing the exception class and text."""
        log_type = 'exception'
        log_data = {
            'exception_type': log_exception.__class__.__name__,
            'exception_msg': str(log_exception)
        }
        Logger.log(log_subsys, log_message, log_type, log_data)

    def flush_queue(self):
        """Append all currently queued entries to the log file as JSON lines."""
        num_log_entries = self.log_queue.qsize()
        if num_log_entries > 0:
            with open(self.log_full_filename, 'ab') as log_file:
                for _ in range(num_log_entries):
                    log_entry = self.log_queue.get()
                    # Attach a human-readable local timestamp.
                    current_time = log_entry['time']
                    log_entry['localtime'] = time.asctime(time.localtime(current_time))
                    # encode() works on both Python 2 (str -> str) and
                    # Python 3 (str -> bytes), so no version branch is needed.
                    log_file.write((json.dumps(log_entry) + "\n").encode('UTF-8'))

    @staticmethod
    def clear_queue():
        """Discard all queued entries without writing them."""
        while not Logger.log_queue.empty():
            Logger.log_queue.get()

    def is_terminated(self):
        """Return True once terminate() has been requested."""
        return self.stop.is_set()

    def terminate(self):
        """Ask the logger thread to exit (it performs a final flush first)."""
        self.stop.set()
| {
"content_hash": "ef36cd2b15f1741889426a3de7b7ab5c",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 113,
"avg_line_length": 32.013513513513516,
"alnum_prop": 0.48016040523427606,
"repo_name": "Vivero/KerbalPie",
"id": "a61f49c7f72909a09a6ee0d7b48c309bf305569f",
"size": "5000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "117298"
}
],
"symlink_target": ""
} |
"""Tests for layers.feature_column_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class TransformerTest(test.TestCase):
  def testRealValuedColumnIsIdentityTransformation(self):
    """A dense real-valued column passes through the transformer unchanged."""
    real_valued = feature_column.real_valued_column("price")
    features = {"price": constant_op.constant([[20.], [110], [-3]])}
    output = feature_column_ops._Transformer(features).transform(real_valued)
    with self.test_session():
      self.assertAllEqual(output.eval(), [[20.], [110], [-3]])
  def testSparseRealValuedColumnIdentityTransformation(self):
    """A var-len sparse real-valued column is returned unchanged."""
    sparse_real_valued = feature_column._real_valued_var_len_column(
        "rating", is_sparse=True)
    rating_tensor = sparse_tensor.SparseTensor(
        values=[2.0, 5.0], indices=[[0, 0], [2, 0]], dense_shape=[3, 1])
    features = {"rating": rating_tensor}
    output = feature_column_ops._Transformer(features).transform(
        sparse_real_valued)
    with self.test_session():
      self.assertAllEqual(output.values.eval(), rating_tensor.values.eval())
      self.assertAllEqual(output.indices.eval(), rating_tensor.indices.eval())
      self.assertAllEqual(output.dense_shape.eval(),
                          rating_tensor.dense_shape.eval())
  def testSparseRealValuedColumnWithTransformation(self):
    """A var-len sparse column applies its normalizer to the sparse values."""
    def square_fn(x):
      return x**2
    sparse_real_valued = feature_column._real_valued_var_len_column(
        "rating", normalizer=square_fn, is_sparse=True)
    rating_tensor = sparse_tensor.SparseTensor(
        values=[2.0, 5.0], indices=[[0, 0], [2, 0]], dense_shape=[3, 1])
    features = {"rating": rating_tensor}
    output_dict = feature_column_ops.transform_features(features,
                                                        [sparse_real_valued])
    self.assertTrue(sparse_real_valued in output_dict)
    output = output_dict[sparse_real_valued]
    with self.test_session():
      # Values are squared; the sparsity pattern is untouched.
      self.assertArrayNear(output.values.eval(), [4.0, 25.0], 1e-5)
      self.assertAllEqual(output.indices.eval(), rating_tensor.indices.eval())
      self.assertAllEqual(output.dense_shape.eval(),
                          rating_tensor.dense_shape.eval())
  def testBucketizedColumn(self):
    """Bucketized column maps real values to integer bucket ids."""
    bucket = feature_column.bucketized_column(
        feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
    # buckets 2, 3, 0
    features = {"price": constant_op.constant([[20.], [110], [-3]])}
    # Test transform features.
    output = feature_column_ops.transform_features(
        features=features, feature_columns=[bucket])
    self.assertEqual(len(output), 1)
    self.assertIn(bucket, output)
    with self.test_session():
      self.assertAllEqual(output[bucket].eval(), [[2], [3], [0]])
  def testBucketizedColumnWithMultiDimensions(self):
    """Bucketization applies element-wise to multi-dimensional inputs."""
    bucket = feature_column.bucketized_column(
        feature_column.real_valued_column("price", 2),
        boundaries=[0., 10., 100.])
    # buckets 2, 3, 0
    features = {
        "price": constant_op.constant([[20., 110], [110., 20], [-3, -3]])
    }
    output = feature_column_ops._Transformer(features).transform(bucket)
    with self.test_session():
      self.assertAllEqual(output.eval(), [[2, 3], [3, 2], [0, 0]])
  def testCachedTransformation(self):
    """Transforming the same column twice must reuse the cached graph ops."""
    bucket = feature_column.bucketized_column(
        feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
    # buckets 2, 3, 0
    features = {"price": constant_op.constant([[20.], [110], [-3]])}
    transformer = feature_column_ops._Transformer(features)
    with self.test_session() as sess:
      transformer.transform(bucket)
      num_of_ops = len(sess.graph.get_operations())
      # Verify that the second call to transform the same feature
      # doesn't increase the number of ops.
      transformer.transform(bucket)
      self.assertEqual(num_of_ops, len(sess.graph.get_operations()))
  def testSparseColumnWithHashBucket(self):
    """String sparse column is hashed into int64 ids in [0, bucket_size)."""
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"wire": wire_tensor}
    # Test transform features.
    output = feature_column_ops.transform_features(
        features=features, feature_columns=[hashed_sparse])
    self.assertEqual(len(output), 1)
    self.assertIn(hashed_sparse, output)
    with self.test_session():
      # Hashed ids stay within the bucket range; sparsity pattern is kept.
      self.assertEqual(output[hashed_sparse].values.dtype, dtypes.int64)
      self.assertTrue(
          all(x < 10 and x >= 0 for x in output[hashed_sparse].values.eval()))
      self.assertAllEqual(output[hashed_sparse].indices.eval(),
                          wire_tensor.indices.eval())
      self.assertAllEqual(output[hashed_sparse].dense_shape.eval(),
                          wire_tensor.dense_shape.eval())
  def testSparseIntColumnWithHashBucket(self):
    """Tests a sparse column with int values."""
    hashed_sparse = feature_column.sparse_column_with_hash_bucket(
        "wire", 10, dtype=dtypes.int64)
    wire_tensor = sparse_tensor.SparseTensor(
        values=[101, 201, 301],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"wire": wire_tensor}
    # Test transform features.
    output = feature_column_ops.transform_features(
        features=features, feature_columns=[hashed_sparse])
    self.assertEqual(len(output), 1)
    self.assertIn(hashed_sparse, output)
    with self.test_session():
      # Hashed ids stay within the bucket range; sparsity pattern is kept.
      self.assertEqual(output[hashed_sparse].values.dtype, dtypes.int64)
      self.assertTrue(
          all(x < 10 and x >= 0 for x in output[hashed_sparse].values.eval()))
      self.assertAllEqual(output[hashed_sparse].indices.eval(),
                          wire_tensor.indices.eval())
      self.assertAllEqual(output[hashed_sparse].dense_shape.eval(),
                          wire_tensor.dense_shape.eval())
  def testSparseColumnWithHashBucketWithDenseInputTensor(self):
    """A dense string input to a hash-bucket column yields a SparseTensor."""
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = constant_op.constant(
        [["omar", "stringer"], ["marlo", "rick"]])
    features = {"wire": wire_tensor}
    output = feature_column_ops._Transformer(features).transform(hashed_sparse)
    with self.test_session():
      # While the input is a dense Tensor, the output should be a SparseTensor.
      self.assertIsInstance(output, sparse_tensor.SparseTensor)
      self.assertEqual(output.values.dtype, dtypes.int64)
      self.assertTrue(all(x < 10 and x >= 0 for x in output.values.eval()))
      self.assertAllEqual(output.indices.eval(),
                          [[0, 0], [0, 1], [1, 0], [1, 1]])
      self.assertAllEqual(output.dense_shape.eval(), [2, 2])
  def testEmbeddingColumn(self):
    """Transforming an embedding column reuses its underlying sparse ids."""
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"wire": wire_tensor}
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    wire_embedding = feature_column.embedding_column(hashed_sparse, 10)
    # Test transform features.
    output = feature_column_ops.transform_features(
        features=features, feature_columns=[hashed_sparse, wire_embedding])
    # Check that features dict haven't changed
    self.assertEqual({"wire": wire_tensor}, features)
    self.assertEqual(len(output), 2)
    self.assertIn(hashed_sparse, output)
    self.assertIn(wire_embedding, output)
    with self.test_session():
      # The embedding column's transform carries the same ids as the
      # underlying hashed sparse column.
      self.assertAllEqual(output[wire_embedding].indices.eval(),
                          wire_tensor.indices.eval())
      self.assertAllEqual(output[wire_embedding].dense_shape.eval(), [2, 2])
      self.assertAllEqual(output[wire_embedding].values.eval(),
                          output[hashed_sparse].values.eval())
  def testSparseColumnWithKeys(self):
    """Keys column maps known strings to their vocabulary indices."""
    keys_sparse = feature_column.sparse_column_with_keys(
        "wire", ["marlo", "omar", "stringer"])
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"wire": wire_tensor}
    # Test transform features.
    output = feature_column_ops.transform_features(
        features=features, feature_columns=[keys_sparse])
    self.assertEqual(len(output), 1)
    self.assertIn(keys_sparse, output)
    with self.test_session():
      # The lookup table must be initialized before evaluating.
      data_flow_ops.tables_initializer().run()
      self.assertEqual(output[keys_sparse].values.dtype, dtypes.int64)
      self.assertAllEqual(output[keys_sparse].values.eval(), [1, 2, 0])
      self.assertAllEqual(output[keys_sparse].indices.eval(),
                          wire_tensor.indices.eval())
      self.assertAllEqual(output[keys_sparse].dense_shape.eval(),
                          wire_tensor.dense_shape.eval())
  def testSparseColumnWithKeysWithDenseInputTensor(self):
    """A dense string input to a keys column yields a SparseTensor of ids."""
    keys_sparse = feature_column.sparse_column_with_keys(
        "wire", ["marlo", "omar", "stringer", "rick"])
    wire_tensor = constant_op.constant(
        [["omar", "stringer"], ["marlo", "rick"]])
    features = {"wire": wire_tensor}
    output = feature_column_ops._Transformer(features).transform(keys_sparse)
    with self.test_session():
      data_flow_ops.tables_initializer().run()
      # While the input is a dense Tensor, the output should be a SparseTensor.
      self.assertIsInstance(output, sparse_tensor.SparseTensor)
      self.assertEqual(output.dtype, dtypes.int64)
      self.assertAllEqual(output.values.eval(), [1, 2, 0, 3])
      self.assertAllEqual(output.indices.eval(),
                          [[0, 0], [0, 1], [1, 0], [1, 1]])
      self.assertAllEqual(output.dense_shape.eval(), [2, 2])
  def testSparseColumnWithHashBucket_IsIntegerized(self):
    """Integerized sparse column maps int features into the bucket range."""
    hashed_sparse = feature_column.sparse_column_with_integerized_feature(
        "wire", 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=[100, 1, 25],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"wire": wire_tensor}
    # Test transform features.
    output = feature_column_ops.transform_features(
        features=features, feature_columns=[hashed_sparse])
    self.assertEqual(len(output), 1)
    self.assertIn(hashed_sparse, output)
    with self.test_session():
      # Note: the integerized output is int32, unlike the hashed-string case.
      self.assertEqual(output[hashed_sparse].values.dtype, dtypes.int32)
      self.assertTrue(
          all(x < 10 and x >= 0 for x in output[hashed_sparse].values.eval()))
      self.assertAllEqual(output[hashed_sparse].indices.eval(),
                          wire_tensor.indices.eval())
      self.assertAllEqual(output[hashed_sparse].dense_shape.eval(),
                          wire_tensor.dense_shape.eval())
  def testSparseColumnWithHashBucketWithDenseInputTensor_IsIntegerized(self):
    """A dense int input to an integerized column yields a SparseTensor."""
    hashed_sparse = feature_column.sparse_column_with_integerized_feature(
        "wire", 10)
    # wire_tensor = tf.SparseTensor(values=[100, 1, 25],
    #                               indices=[[0, 0], [1, 0], [1, 1]],
    #                               dense_shape=[2, 2])
    wire_tensor = constant_op.constant([[100, 0], [1, 25]])
    features = {"wire": wire_tensor}
    output = feature_column_ops._Transformer(features).transform(hashed_sparse)
    with self.test_session():
      # While the input is a dense Tensor, the output should be a SparseTensor.
      self.assertIsInstance(output, sparse_tensor.SparseTensor)
      self.assertEqual(output.values.dtype, dtypes.int32)
      self.assertTrue(all(x < 10 and x >= 0 for x in output.values.eval()))
      self.assertAllEqual(output.indices.eval(),
                          [[0, 0], [0, 1], [1, 0], [1, 1]])
      self.assertAllEqual(output.dense_shape.eval(), [2, 2])
  def testWeightedSparseColumn(self):
    """Weighted sparse column transforms to an (ids, weights) tensor pair."""
    ids = feature_column.sparse_column_with_keys("ids",
                                                 ["marlo", "omar", "stringer"])
    ids_tensor = sparse_tensor.SparseTensor(
        values=["stringer", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    weighted_ids = feature_column.weighted_sparse_column(ids, "weights")
    weights_tensor = sparse_tensor.SparseTensor(
        values=[10.0, 20.0, 30.0],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"ids": ids_tensor, "weights": weights_tensor}
    # Test transform features.
    output = feature_column_ops.transform_features(
        features=features, feature_columns=[weighted_ids])
    self.assertEqual(len(output), 1)
    self.assertIn(weighted_ids, output)
    with self.test_session():
      data_flow_ops.tables_initializer().run()
      # output[weighted_ids] is a pair: [0] holds the id tensor, [1] the
      # weight tensor; both preserve the input sparsity pattern.
      self.assertAllEqual(output[weighted_ids][0].dense_shape.eval(),
                          ids_tensor.dense_shape.eval())
      self.assertAllEqual(output[weighted_ids][0].indices.eval(),
                          ids_tensor.indices.eval())
      self.assertAllEqual(output[weighted_ids][0].values.eval(), [2, 2, 0])
      self.assertAllEqual(output[weighted_ids][1].dense_shape.eval(),
                          weights_tensor.dense_shape.eval())
      self.assertAllEqual(output[weighted_ids][1].indices.eval(),
                          weights_tensor.indices.eval())
      self.assertEqual(output[weighted_ids][1].values.dtype, dtypes.float32)
      self.assertAllEqual(output[weighted_ids][1].values.eval(),
                          weights_tensor.values.eval())
  def testSparseColumnWithVocabulary(self):
    """Vocabulary-file column maps strings to their line indices in the file."""
    vocabulary_file = os.path.join(self.get_temp_dir(), "movies.txt")
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(["marlo", "omar", "stringer"]) + "\n")
    vocab_sparse = feature_column.sparse_column_with_vocabulary_file(
        "wire", vocabulary_file, vocab_size=3)
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"wire": wire_tensor}
    output = feature_column_ops.transform_features(
        features=features, feature_columns=[vocab_sparse])
    self.assertEqual(len(output), 1)
    self.assertIn(vocab_sparse, output)
    with self.test_session():
      data_flow_ops.tables_initializer().run()
      self.assertEqual(output[vocab_sparse].values.dtype, dtypes.int64)
      self.assertAllEqual(output[vocab_sparse].values.eval(), [1, 2, 0])
      self.assertAllEqual(output[vocab_sparse].indices.eval(),
                          wire_tensor.indices.eval())
      self.assertAllEqual(output[vocab_sparse].dense_shape.eval(),
                          wire_tensor.dense_shape.eval())
  def testSparseColumnWithVocabularyWithDenseInputTensor(self):
    """A dense string input to a vocabulary column yields sparse vocab ids."""
    vocabulary_file = os.path.join(self.get_temp_dir(), "movies.txt")
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(["marlo", "omar", "stringer"]) + "\n")
    vocab_sparse = feature_column.sparse_column_with_vocabulary_file(
        "wire", vocabulary_file, vocab_size=3)
    wire_tensor = constant_op.constant(
        [["omar", "stringer"], ["marlo", "omar"]])
    features = {"wire": wire_tensor}
    output = feature_column_ops.transform_features(
        features=features, feature_columns=[vocab_sparse])
    self.assertEqual(len(output), 1)
    self.assertIn(vocab_sparse, output)
    with self.test_session():
      data_flow_ops.tables_initializer().run()
      self.assertEqual(output[vocab_sparse].values.dtype, dtypes.int64)
      self.assertAllEqual(output[vocab_sparse].values.eval(), [1, 2, 0, 1])
      self.assertAllEqual(output[vocab_sparse].indices.eval(),
                          [[0, 0], [0, 1], [1, 0], [1, 1]])
      self.assertAllEqual(output[vocab_sparse].dense_shape.eval(), [2, 2])
  def testSparseIntColumnWithVocabulary(self):
    """Tests a sparse int64 column mapped through an integer vocabulary file."""
    vocabulary_file = os.path.join(self.get_temp_dir(), "courses.txt")
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(["101", "201", "301"]) + "\n")
    vocab_sparse = feature_column.sparse_column_with_vocabulary_file(
        "wire", vocabulary_file, vocab_size=3, dtype=dtypes.int64)
    wire_tensor = sparse_tensor.SparseTensor(
        values=[201, 301, 101],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"wire": wire_tensor}
    output = feature_column_ops.transform_features(
        features=features, feature_columns=[vocab_sparse])
    self.assertEqual(len(output), 1)
    self.assertIn(vocab_sparse, output)
    with self.test_session():
      data_flow_ops.tables_initializer().run()
      self.assertEqual(output[vocab_sparse].values.dtype, dtypes.int64)
      # Vocabulary order: 101=0, 201=1, 301=2.
      self.assertAllEqual(output[vocab_sparse].values.eval(), [1, 2, 0])
      self.assertAllEqual(output[vocab_sparse].indices.eval(),
                          wire_tensor.indices.eval())
      self.assertAllEqual(output[vocab_sparse].dense_shape.eval(),
                          wire_tensor.dense_shape.eval())
  def testSparseIntColumnWithVocabularyWithDenseInputTensor(self):
    """Tests a sparse integer column with vocabulary, fed a dense Tensor."""
    vocabulary_file = os.path.join(self.get_temp_dir(), "courses.txt")
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(["101", "201", "301"]) + "\n")
    vocab_sparse = feature_column.sparse_column_with_vocabulary_file(
        "wire", vocabulary_file, vocab_size=3, dtype=dtypes.int64)
    wire_tensor = constant_op.constant([[201, 301], [101, 201]])
    features = {"wire": wire_tensor}
    output = feature_column_ops.transform_features(
        features=features, feature_columns=[vocab_sparse])
    self.assertEqual(len(output), 1)
    self.assertIn(vocab_sparse, output)
    with self.test_session():
      data_flow_ops.tables_initializer().run()
      self.assertEqual(output[vocab_sparse].values.dtype, dtypes.int64)
      # Dense input becomes a fully-populated sparse result with ids
      # in vocabulary order: 101=0, 201=1, 301=2.
      self.assertAllEqual(output[vocab_sparse].values.eval(), [1, 2, 0, 1])
      self.assertAllEqual(output[vocab_sparse].indices.eval(),
                          [[0, 0], [0, 1], [1, 0], [1, 1]])
      self.assertAllEqual(output[vocab_sparse].dense_shape.eval(), [2, 2])
  def testCrossColumn(self):
    """Crossing two sparse columns yields ids within the cross bucket range."""
    language = feature_column.sparse_column_with_hash_bucket(
        "language", hash_bucket_size=3)
    country = feature_column.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=5)
    country_language = feature_column.crossed_column(
        [language, country], hash_bucket_size=15)
    features = {
        "language":
            sparse_tensor.SparseTensor(
                values=["english", "spanish"],
                indices=[[0, 0], [1, 0]],
                dense_shape=[2, 1]),
        "country":
            sparse_tensor.SparseTensor(
                values=["US", "SV"],
                indices=[[0, 0], [1, 0]],
                dense_shape=[2, 1])
    }
    # Test transform features.
    output = feature_column_ops.transform_features(
        features=features, feature_columns=[country_language])
    self.assertEqual(len(output), 1)
    self.assertIn(country_language, output)
    with self.test_session():
      self.assertEqual(output[country_language].values.dtype, dtypes.int64)
      # Hashed cross ids must land in [0, hash_bucket_size).
      self.assertTrue(
          all(x < 15 and x >= 0 for x in output[country_language].values.eval(
          )))
  def testCrossWithBucketizedColumn(self):
    """Crossing a sparse column with a bucketized column transforms cleanly."""
    price_bucket = feature_column.bucketized_column(
        feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
    country = feature_column.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=5)
    country_price = feature_column.crossed_column(
        [country, price_bucket], hash_bucket_size=15)
    features = {
        "price":
            constant_op.constant([[20.]]),
        "country":
            sparse_tensor.SparseTensor(
                values=["US", "SV"],
                indices=[[0, 0], [0, 1]],
                dense_shape=[1, 2])
    }
    # Test transform features.
    output = feature_column_ops.transform_features(
        features=features, feature_columns=[country_price])
    self.assertEqual(len(output), 1)
    self.assertIn(country_price, output)
    with self.test_session():
      self.assertEqual(output[country_price].values.dtype, dtypes.int64)
      # Hashed cross ids must land in [0, hash_bucket_size).
      self.assertTrue(
          all(x < 15 and x >= 0 for x in output[country_price].values.eval()))
  def testCrossWithMultiDimensionBucketizedColumn(self):
    """Crossing with a 2-d bucketized column: gradients flow to all crosses."""
    country = feature_column.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=5)
    price_bucket = feature_column.bucketized_column(
        feature_column.real_valued_column("price", 2),
        boundaries=[0., 10., 100.])
    country_price = feature_column.crossed_column(
        [country, price_bucket], hash_bucket_size=1000)
    with ops.Graph().as_default():
      features = {
          "price":
              constant_op.constant([[20., 210.], [110., 50.], [-3., -30.]]),
          "country":
              sparse_tensor.SparseTensor(
                  values=["US", "SV", "US"],
                  indices=[[0, 0], [1, 0], [2, 0]],
                  dense_shape=[3, 2])
      }
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [country_price], num_outputs=1))
      weights = column_to_variable[country_price][0]
      grad = array_ops.squeeze(
          gradients_impl.gradients(output, weights)[0].values)
      with self.test_session():
        variables_lib.global_variables_initializer().run()
        # 3 examples x 2 price dimensions -> 6 active cross weights
        # receive a gradient.
        self.assertEqual(len(grad.eval()), 6)
      # Test transform features.
      output = feature_column_ops.transform_features(
          features=features, feature_columns=[country_price])
      self.assertEqual(len(output), 1)
      self.assertIn(country_price, output)
  def testCrossWithCrossedColumn(self):
    """A crossed column may itself be an input to another crossed column."""
    price_bucket = feature_column.bucketized_column(
        feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
    country = feature_column.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=5)
    country_price = feature_column.crossed_column(
        [country, price_bucket], hash_bucket_size=15)
    wire = feature_column.sparse_column_with_hash_bucket("wire", 10)
    wire_country_price = feature_column.crossed_column(
        [wire, country_price], hash_bucket_size=15)
    features = {
        "price":
            constant_op.constant([[20.]]),
        "country":
            sparse_tensor.SparseTensor(
                values=["US", "SV"],
                indices=[[0, 0], [0, 1]],
                dense_shape=[1, 2]),
        "wire":
            sparse_tensor.SparseTensor(
                values=["omar", "stringer", "marlo"],
                indices=[[0, 0], [0, 1], [0, 2]],
                dense_shape=[1, 3])
    }
    # Test transform features.
    output = feature_column_ops.transform_features(
        features=features, feature_columns=[wire_country_price])
    self.assertEqual(len(output), 1)
    self.assertIn(wire_country_price, output)
    with self.test_session():
      self.assertEqual(output[wire_country_price].values.dtype, dtypes.int64)
      # Hashed cross ids must land in [0, hash_bucket_size).
      self.assertTrue(
          all(x < 15 and x >= 0 for x in output[wire_country_price].values.eval(
          )))
def testIfFeatureTableContainsTransformationReturnIt(self):
any_column = feature_column.sparse_column_with_hash_bucket("sparse", 10)
features = {any_column: "any-thing-even-not-a-tensor"}
output = feature_column_ops._Transformer(features).transform(any_column)
self.assertEqual(output, "any-thing-even-not-a-tensor")
class CreateInputLayersForDNNsTest(test.TestCase):
def testFeatureColumnDictFails(self):
real_valued = feature_column.real_valued_column("price")
features = {"price": constant_op.constant([[20.], [110], [-3]])}
with self.assertRaisesRegexp(
ValueError,
"Expected feature_columns to be iterable, found dict"):
feature_column_ops.input_from_feature_columns(
features, {"feature": real_valued})
  def testSparseTensorRealValuedColumn(self):
    """A var-len sparse real-valued column is rejected as DNN input."""
    var_len_sparse_real_valued_column = (
        feature_column._real_valued_var_len_column("rating", is_sparse=True))
    features = {
        "ids":
            sparse_tensor.SparseTensor(
                values=["c", "b", "a"],
                indices=[[0, 0], [1, 0], [2, 0]],
                dense_shape=[3, 1]),
        "income":
            constant_op.constant([[20.3, 10], [110.3, 0.4], [-3.0, 30.4]]),
        "rating":
            sparse_tensor.SparseTensor(
                values=[3.5, 5.0], indices=[[0, 0], [2, 0]], dense_shape=[3, 1])
    }
    # NOTE(review): the regex "dd" looks like a placeholder rather than a
    # meaningful match on the error message — confirm the intended message
    # and tighten this pattern.
    with self.assertRaisesRegexp(
        ValueError,
        "dd"):
      feature_column_ops.input_from_feature_columns(
          features, [var_len_sparse_real_valued_column])
  def testAllDNNColumns(self):
    """One-hot, embedding and real-valued columns concatenate into one layer."""
    sparse_column = feature_column.sparse_column_with_keys(
        "ids", ["a", "b", "c", "unseen"])
    real_valued_column = feature_column.real_valued_column("income", 2)
    one_hot_column = feature_column.one_hot_column(sparse_column)
    embedding_column = feature_column.embedding_column(sparse_column, 10)
    features = {
        "ids":
            sparse_tensor.SparseTensor(
                values=["c", "b", "a"],
                indices=[[0, 0], [1, 0], [2, 0]],
                dense_shape=[3, 1]),
        "income":
            constant_op.constant([[20.3, 10], [110.3, 0.4], [-3.0, 30.4]]),
    }
    output = feature_column_ops.input_from_feature_columns(features, [
        one_hot_column, embedding_column, real_valued_column])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      # Width = 2 (real valued) + 4 (one-hot over 4 keys) + 10 (embedding).
      self.assertAllEqual(output.eval().shape, [3, 2 + 4 + 10])
def testRealValuedColumn(self):
real_valued = feature_column.real_valued_column("price")
features = {"price": constant_op.constant([[20.], [110], [-3]])}
output = feature_column_ops.input_from_feature_columns(features,
[real_valued])
with self.test_session():
self.assertAllClose(output.eval(), features["price"].eval())
  def testRealValuedColumnWithMultiDimensions(self):
    """A 2-d real-valued column passes its input through unchanged."""
    real_valued = feature_column.real_valued_column("price", 2)
    features = {
        "price": constant_op.constant([[20., 10.], [110, 0.], [-3, 30]])
    }
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [real_valued])
    with self.test_session():
      self.assertAllClose(output.eval(), features["price"].eval())
  def testRealValuedColumnSparse(self):
    """A var-len real-valued column cannot be used as a DNN input layer."""
    sparse_real_valued = feature_column._real_valued_var_len_column(
        "rating", default_value=-1)
    rating = [[2.0], [-1.0], [5.0]]
    features = {"rating": constant_op.constant(rating)}
    with self.assertRaisesRegexp(
        ValueError,
        "Error creating input layer for column: rating.*"):
      feature_column_ops.input_from_feature_columns(features,
                                                    [sparse_real_valued])
  def testRealValuedColumnWithNormalizer(self):
    """The normalizer function is applied to the raw input values."""
    real_valued = feature_column.real_valued_column(
        "price", normalizer=lambda x: x - 2)
    features = {"price": constant_op.constant([[20.], [110], [-3]])}
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [real_valued])
    with self.test_session():
      self.assertAllClose(output.eval(), features["price"].eval() - 2)
  def testRealValuedColumnWithMultiDimensionsAndNormalizer(self):
    """The normalizer is applied element-wise to a 2-d real-valued input."""
    real_valued = feature_column.real_valued_column(
        "price", 2, normalizer=lambda x: x - 2)
    features = {
        "price": constant_op.constant([[20., 10.], [110, 0.], [-3, 30]])
    }
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [real_valued])
    with self.test_session():
      self.assertAllClose(output.eval(), features["price"].eval() - 2)
  def testBucketizedColumnWithNormalizerSucceedsForDNN(self):
    """Normalizer runs before bucketization; output is one-hot per bucket."""
    bucket = feature_column.bucketized_column(
        feature_column.real_valued_column(
            "price", normalizer=lambda x: x - 15),
        boundaries=[0., 10., 100.])
    # buckets 2, 3, 0
    features = {"price": constant_op.constant([[20.], [110], [-3]])}
    output = feature_column_ops.input_from_feature_columns(features, [bucket])
    # One-hot over 4 buckets (3 boundaries -> 4 intervals).
    expected = [[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]]
    with self.test_session():
      self.assertAllClose(output.eval(), expected)
  def testBucketizedColumnWithMultiDimensionsSucceedsForDNN(self):
    """Each dimension of a 2-d bucketized column gets its own one-hot span."""
    bucket = feature_column.bucketized_column(
        feature_column.real_valued_column("price", 2),
        boundaries=[0., 10., 100.])
    # buckets [2, 3], [3, 2], [0, 0]. dimension = 2
    features = {
        "price": constant_op.constant([[20., 200], [110, 50], [-3, -3]])
    }
    output = feature_column_ops.input_from_feature_columns(features, [bucket])
    # 2 dimensions x 4 buckets = 8 output columns per example.
    expected = [[0, 0, 1, 0, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 1, 0],
                [1, 0, 0, 0, 1, 0, 0, 0]]
    with self.test_session():
      self.assertAllClose(output.eval(), expected)
  def testOneHotColumnFromWeightedSparseColumnSucceedsForDNN(self):
    """One-hot of a weighted sparse column scatters the weights, not 1s."""
    ids_column = feature_column.sparse_column_with_keys(
        "ids", ["a", "b", "c", "unseen"])
    ids_tensor = sparse_tensor.SparseTensor(
        values=["c", "b", "a", "c"],
        indices=[[0, 0], [1, 0], [2, 0], [2, 1]],
        dense_shape=[3, 2])
    weighted_ids_column = feature_column.weighted_sparse_column(ids_column,
                                                                "weights")
    weights_tensor = sparse_tensor.SparseTensor(
        values=[10.0, 20.0, 30.0, 40.0],
        indices=[[0, 0], [1, 0], [2, 0], [2, 1]],
        dense_shape=[3, 2])
    features = {"ids": ids_tensor, "weights": weights_tensor}
    one_hot_column = feature_column.one_hot_column(weighted_ids_column)
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [one_hot_column])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      # Each id's slot carries its weight; row 2 has two weighted ids.
      self.assertAllEqual([[0, 0, 10., 0], [0, 20., 0, 0], [30., 0, 40., 0]],
                          output.eval())
  def testOneHotColumnFromSparseColumnWithKeysSucceedsForDNN(self):
    """One-hot of a keyed sparse column produces a single 1 per example."""
    ids_column = feature_column.sparse_column_with_keys(
        "ids", ["a", "b", "c", "unseen"])
    ids_tensor = sparse_tensor.SparseTensor(
        values=["c", "b", "a"],
        indices=[[0, 0], [1, 0], [2, 0]],
        dense_shape=[3, 1])
    one_hot_sparse = feature_column.one_hot_column(ids_column)
    features = {"ids": ids_tensor}
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [one_hot_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      self.assertAllEqual([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]],
                          output.eval())
  def testOneHotColumnFromMultivalentSparseColumnWithKeysSucceedsForDNN(self):
    """Multivalent input sets a 1 for every id seen in the example's row."""
    ids_column = feature_column.sparse_column_with_keys(
        "ids", ["a", "b", "c", "unseen"])
    ids_tensor = sparse_tensor.SparseTensor(
        values=["c", "b", "a", "c"],
        indices=[[0, 0], [1, 0], [2, 0], [2, 1]],
        dense_shape=[3, 2])
    one_hot_sparse = feature_column.one_hot_column(ids_column)
    features = {"ids": ids_tensor}
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [one_hot_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      # Row 2 contains both "a" and "c", so two slots are hot.
      self.assertAllEqual([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 1, 0]],
                          output.eval())
  def testOneHotColumnFromSparseColumnWithIntegerizedFeaturePassesForDNN(self):
    """Integerized ids one-hot directly, without a lookup table."""
    ids_column = feature_column.sparse_column_with_integerized_feature(
        "ids", bucket_size=4)
    one_hot_sparse = feature_column.one_hot_column(ids_column)
    features = {
        "ids":
            sparse_tensor.SparseTensor(
                values=[2, 1, 0, 2],
                indices=[[0, 0], [1, 0], [2, 0], [2, 1]],
                dense_shape=[3, 2])
    }
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [one_hot_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      self.assertAllEqual([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 1, 0]],
                          output.eval())
  def testOneHotColumnFromSparseColumnWithHashBucketSucceedsForDNN(self):
    """One-hot of a hashed column yields a [batch, bucket_size] layer."""
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("feat", 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=["a", "b", "c1", "c2"],
        indices=[[0, 0], [1, 0], [2, 0], [2, 1]],
        dense_shape=[3, 2])
    features = {"feat": wire_tensor}
    one_hot_sparse = feature_column.one_hot_column(hashed_sparse)
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [one_hot_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      # Only the shape is checked — hashed slot positions are not asserted.
      self.assertAllEqual([3, 10], output.eval().shape)
  def testEmbeddingColumnSucceedsForDNN(self):
    """An embedding column produces a [batch, embedding_dim] dense layer."""
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo", "xx", "yy"],
        indices=[[0, 0], [1, 0], [1, 1], [2, 0], [3, 0]],
        dense_shape=[4, 2])
    features = {"wire": wire_tensor}
    embeded_sparse = feature_column.embedding_column(hashed_sparse, 10)
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [embeded_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      self.assertAllEqual(output.eval().shape, [4, 10])
  def testScatteredEmbeddingColumnSucceedsForDNN(self):
    """Scattered (hashed) embeddings receive gradients for every lookup."""
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo", "omar"],
        indices=[[0, 0], [1, 0], [1, 1], [2, 0]],
        dense_shape=[3, 2])
    features = {"wire": wire_tensor}
    # Big enough hash space so that hopefully there is no collision
    embedded_sparse = feature_column.scattered_embedding_column(
        "wire", 1000, 3, layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
    output = feature_column_ops.input_from_feature_columns(
        features, [embedded_sparse], weight_collections=["my_collection"])
    weights = ops.get_collection("my_collection")
    grad = gradients_impl.gradients(output, weights)
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      gradient_values = []
      # Collect the gradient from the different partitions (one in this test)
      for p in range(len(grad)):
        gradient_values.extend(grad[p].values.eval())
      gradient_values.sort()
      # Rows with two values average (0.5 each); single-value rows get 2.
      self.assertAllEqual(gradient_values, [0.5] * 6 + [2] * 3)
  def testEmbeddingColumnWithInitializerSucceedsForDNN(self):
    """A constant initializer makes every embedding output equal init_value."""
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"wire": wire_tensor}
    init_value = 133.7
    embeded_sparse = feature_column.embedding_column(
        hashed_sparse,
        10,
        initializer=init_ops.constant_initializer(init_value))
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [embeded_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      output_eval = output.eval()
      self.assertAllEqual(output_eval.shape, [2, 10])
      self.assertAllClose(output_eval, np.tile(init_value, [2, 10]))
  def testEmbeddingColumnWithMultipleInitializersFails(self):
    """Two embedding columns for one key with different initializers fail."""
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"wire": wire_tensor}
    embedded_sparse = feature_column.embedding_column(
        hashed_sparse,
        10,
        initializer=init_ops.truncated_normal_initializer(
            mean=42, stddev=1337))
    embedded_sparse_alternate = feature_column.embedding_column(
        hashed_sparse,
        10,
        initializer=init_ops.truncated_normal_initializer(
            mean=1337, stddev=42))
    # Makes sure that trying to use different initializers with the same
    # embedding column explicitly fails.
    with self.test_session():
      with self.assertRaisesRegexp(
          ValueError,
          "Duplicate feature column key found for column: wire_embedding"):
        feature_column_ops.input_from_feature_columns(
            features, [embedded_sparse, embedded_sparse_alternate])
  def testEmbeddingColumnWithWeightedSparseColumnSucceedsForDNN(self):
    """Tests DNN input with an embedded weighted sparse column (float weights)."""
    ids = feature_column.sparse_column_with_keys("ids",
                                                 ["marlo", "omar", "stringer"])
    ids_tensor = sparse_tensor.SparseTensor(
        values=["stringer", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    weighted_ids = feature_column.weighted_sparse_column(ids, "weights")
    weights_tensor = sparse_tensor.SparseTensor(
        values=[10.0, 20.0, 30.0],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"ids": ids_tensor, "weights": weights_tensor}
    embeded_sparse = feature_column.embedding_column(weighted_ids, 10)
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [embeded_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      self.assertAllEqual(output.eval().shape, [2, 10])
  def testEmbeddingColumnWithIntegerWeightedSparseColumnSucceedsForDNN(self):
    """Same as the previous test, but with integer weights."""
    ids = feature_column.sparse_column_with_keys("ids",
                                                 ["marlo", "omar", "stringer"])
    ids_tensor = sparse_tensor.SparseTensor(
        values=["stringer", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    # dtype=int32 on the column must match the int32 weights tensor below.
    weighted_ids = feature_column.weighted_sparse_column(
        ids, "weights", dtype=dtypes.int32)
    weights_tensor = sparse_tensor.SparseTensor(
        values=constant_op.constant([10, 20, 30], dtype=dtypes.int32),
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"ids": ids_tensor, "weights": weights_tensor}
    embeded_sparse = feature_column.embedding_column(weighted_ids, 10)
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [embeded_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      self.assertAllEqual(output.eval().shape, [2, 10])
  def testEmbeddingColumnWithCrossedColumnSucceedsForDNN(self):
    """An embedding over a crossed column is a valid DNN input."""
    a = feature_column.sparse_column_with_hash_bucket(
        "aaa", hash_bucket_size=100)
    b = feature_column.sparse_column_with_hash_bucket(
        "bbb", hash_bucket_size=100)
    crossed = feature_column.crossed_column(set([a, b]), hash_bucket_size=10000)
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"aaa": wire_tensor, "bbb": wire_tensor}
    embeded_sparse = feature_column.embedding_column(crossed, 10)
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [embeded_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      self.assertAllEqual(output.eval().shape, [2, 10])
  def testSparseColumnFailsForDNN(self):
    """A raw (un-embedded) sparse column is rejected as a DNN input."""
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"wire": wire_tensor}
    with self.test_session():
      with self.assertRaisesRegexp(
          ValueError, "Error creating input layer for column: wire"):
        variables_lib.global_variables_initializer().run()
        feature_column_ops.input_from_feature_columns(features, [hashed_sparse])
  def testWeightedSparseColumnFailsForDNN(self):
    """A raw weighted sparse column is rejected as a DNN input."""
    ids = feature_column.sparse_column_with_keys("ids",
                                                 ["marlo", "omar", "stringer"])
    ids_tensor = sparse_tensor.SparseTensor(
        values=["stringer", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    weighted_ids = feature_column.weighted_sparse_column(ids, "weights")
    weights_tensor = sparse_tensor.SparseTensor(
        values=[10.0, 20.0, 30.0],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"ids": ids_tensor, "weights": weights_tensor}
    with self.test_session():
      with self.assertRaisesRegexp(
          ValueError,
          "Error creating input layer for column: ids_weighted_by_weights"):
        data_flow_ops.tables_initializer().run()
        feature_column_ops.input_from_feature_columns(features, [weighted_ids])
  def testCrossedColumnFailsForDNN(self):
    """A raw crossed column is rejected as a DNN input."""
    a = feature_column.sparse_column_with_hash_bucket(
        "aaa", hash_bucket_size=100)
    b = feature_column.sparse_column_with_hash_bucket(
        "bbb", hash_bucket_size=100)
    crossed = feature_column.crossed_column(set([a, b]), hash_bucket_size=10000)
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"aaa": wire_tensor, "bbb": wire_tensor}
    with self.test_session():
      with self.assertRaisesRegexp(
          ValueError, "Error creating input layer for column: aaa_X_bbb"):
        variables_lib.global_variables_initializer().run()
        feature_column_ops.input_from_feature_columns(features, [crossed])
  def testDeepColumnsSucceedForDNN(self):
    """Real-valued, bucketized and embedded columns combine into one layer."""
    real_valued = feature_column.real_valued_column("income", 3)
    bucket = feature_column.bucketized_column(
        feature_column.real_valued_column("price", 2),
        boundaries=[0., 10., 100.])
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    features = {
        "income":
            constant_op.constant([[20., 10, -5], [110, 0, -7], [-3, 30, 50]]),
        "price":
            constant_op.constant([[20., 200], [110, 2], [-20, -30]]),
        "wire":
            sparse_tensor.SparseTensor(
                values=["omar", "stringer", "marlo"],
                indices=[[0, 0], [1, 0], [2, 0]],
                dense_shape=[3, 1])
    }
    embeded_sparse = feature_column.embedding_column(
        hashed_sparse, 10, initializer=init_ops.constant_initializer(133.7))
    output = feature_column_ops.input_from_feature_columns(
        features, [real_valued, bucket, embeded_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      # size of output = 3 (real_valued) + 2 * 4 (bucket) + 10 (embedding) = 21
      self.assertAllEqual(output.eval().shape, [3, 21])
  def testEmbeddingColumnForDNN(self):
    """1-d all-ones embedding with sum combiner counts the values per row."""
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[3, 2])
    features = {"wire": wire_tensor}
    embeded_sparse = feature_column.embedding_column(
        hashed_sparse,
        1,
        combiner="sum",
        initializer=init_ops.ones_initializer())
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [embeded_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      # score: (number of values)
      self.assertAllEqual(output.eval(), [[1.], [2.], [0.]])
  def testEmbeddingColumnWithMaxNormForDNN(self):
    """max_norm clips each 1-d embedding from 1.0 down to 0.5 before combining."""
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[3, 2])
    features = {"wire": wire_tensor}
    embedded_sparse = feature_column.embedding_column(
        hashed_sparse,
        1,
        combiner="sum",
        initializer=init_ops.ones_initializer(),
        max_norm=0.5)
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [embedded_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      # score: (number of values * 0.5)
      self.assertAllClose(output.eval(), [[0.5], [1.], [0.]])
  def testEmbeddingColumnWithWeightedSparseColumnForDNN(self):
    """1-d all-ones embedding with sum combiner sums the per-id weights."""
    ids = feature_column.sparse_column_with_keys("ids",
                                                 ["marlo", "omar", "stringer"])
    ids_tensor = sparse_tensor.SparseTensor(
        values=["stringer", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[3, 2])
    weighted_ids = feature_column.weighted_sparse_column(ids, "weights")
    weights_tensor = sparse_tensor.SparseTensor(
        values=[10.0, 20.0, 30.0],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[3, 2])
    features = {"ids": ids_tensor, "weights": weights_tensor}
    embeded_sparse = feature_column.embedding_column(
        weighted_ids,
        1,
        combiner="sum",
        initializer=init_ops.ones_initializer())
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [embeded_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      # score: (sum of weights)
      self.assertAllEqual(output.eval(), [[10.], [50.], [0.]])
  def testInputLayerWithCollectionsForDNN(self):
    """Only the embedding contributes a variable to weight_collections."""
    real_valued = feature_column.real_valued_column("price")
    bucket = feature_column.bucketized_column(
        real_valued, boundaries=[0., 10., 100.])
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    features = {
        "price":
            constant_op.constant([[20.], [110], [-3]]),
        "wire":
            sparse_tensor.SparseTensor(
                values=["omar", "stringer", "marlo"],
                indices=[[0, 0], [1, 0], [2, 0]],
                dense_shape=[3, 1])
    }
    embeded_sparse = feature_column.embedding_column(hashed_sparse, 10)
    feature_column_ops.input_from_feature_columns(
        features, [real_valued, bucket, embeded_sparse],
        weight_collections=["my_collection"])
    weights = ops.get_collection("my_collection")
    # one variable for embeded sparse
    self.assertEqual(1, len(weights))
  def testInputLayerWithTrainableArgForDNN(self):
    """trainable=False keeps the embedding variable out of trainables."""
    real_valued = feature_column.real_valued_column("price")
    bucket = feature_column.bucketized_column(
        real_valued, boundaries=[0., 10., 100.])
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    features = {
        "price":
            constant_op.constant([[20.], [110], [-3]]),
        "wire":
            sparse_tensor.SparseTensor(
                values=["omar", "stringer", "marlo"],
                indices=[[0, 0], [1, 0], [2, 0]],
                dense_shape=[3, 1])
    }
    embeded_sparse = feature_column.embedding_column(hashed_sparse, 10)
    feature_column_ops.input_from_feature_columns(
        features, [real_valued, bucket, embeded_sparse],
        weight_collections=["my_collection"],
        trainable=False)
    # There should not be any trainable variables
    self.assertEqual(0, len(variables_lib.trainable_variables()))
    feature_column_ops.input_from_feature_columns(
        features, [real_valued, bucket, embeded_sparse],
        weight_collections=["my_collection"],
        trainable=True)
    # There should one trainable variable for embeded sparse
    self.assertEqual(1, len(variables_lib.trainable_variables()))
  def testInputLayerWithNonTrainableEmbeddingForDNN(self):
    """Per-column trainable flags are honored; outputs keep their init values."""
    sparse_1 = feature_column.sparse_column_with_hash_bucket("wire_1", 10)
    sparse_2 = feature_column.sparse_column_with_hash_bucket("wire_2", 10)
    features = {
        "wire_1":
            sparse_tensor.SparseTensor(
                values=["omar", "stringer", "marlo"],
                indices=[[0, 0], [1, 0], [2, 0]],
                dense_shape=[3, 1]),
        "wire_2":
            sparse_tensor.SparseTensor(
                values=["jack", "jill"],
                indices=[[0, 0], [1, 0]],
                dense_shape=[4, 1])
    }
    dims_1 = 10
    init_1 = 3.14
    embeded_1 = feature_column.embedding_column(
        sparse_1, dims_1, initializer=init_ops.constant_initializer(init_1),
        trainable=False)
    output_1 = feature_column_ops.input_from_feature_columns(
        features, [embeded_1])
    # There should be no trainable variables for sparse_1
    self.assertEqual(0, len(variables_lib.trainable_variables()))
    dims_2 = 7
    init_2 = 6.14
    embeded_2 = feature_column.embedding_column(
        sparse_2, dims_2, initializer=init_ops.constant_initializer(init_2),
        trainable=True)
    output_2 = feature_column_ops.input_from_feature_columns(
        features, [embeded_2])
    # There should be one trainable variables for sparse_2
    self.assertEqual(1, len(variables_lib.trainable_variables()))
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      output_1_eval = output_1.eval()
      output_2_eval = output_2.eval()
      self.assertAllEqual(output_1_eval.shape, [3, dims_1])
      self.assertAllClose(output_1_eval, np.tile(init_1, [3, dims_1]))
      self.assertAllEqual(output_2_eval.shape, [4, dims_2])
      # wire_2 has entries only in rows 0-1; rows 2-3 embed to zeros.
      self.assertAllClose(output_2_eval, np.concatenate(
          (np.tile(init_2, [2, dims_2]), np.tile(0, [2, dims_2]))))
class SequenceInputFromFeatureColumnTest(test.TestCase):
  def testSupportedColumns(self):
    """Bucketized, crossed, and cross-derived columns are rejected for RNNs."""
    measurement = feature_column.real_valued_column("measurements")
    country = feature_column.sparse_column_with_hash_bucket("country", 100)
    pets = feature_column.sparse_column_with_hash_bucket("pets", 100)
    ids = feature_column.sparse_column_with_integerized_feature("id", 100)
    country_x_pets = feature_column.crossed_column([country, pets], 100)
    country_x_pets_onehot = feature_column.one_hot_column(country_x_pets)
    bucketized_measurement = feature_column.bucketized_column(measurement,
                                                              [.25, .5, .75])
    embedded_id = feature_column.embedding_column(ids, 100)
    # `_BucketizedColumn` is not supported.
    self.assertRaisesRegexp(
        ValueError,
        "FeatureColumn type _BucketizedColumn is not currently supported",
        feature_column_ops.sequence_input_from_feature_columns, {},
        [measurement, bucketized_measurement])
    # `_CrossedColumn` is not supported.
    self.assertRaisesRegexp(
        ValueError,
        "FeatureColumn type _CrossedColumn is not currently supported",
        feature_column_ops.sequence_input_from_feature_columns, {},
        [embedded_id, country_x_pets])
    # `country_x_pets_onehot` depends on a `_CrossedColumn` which is forbidden.
    self.assertRaisesRegexp(
        ValueError, "Column country_X_pets .* _CrossedColumn",
        feature_column_ops.sequence_input_from_feature_columns, {},
        [embedded_id, country_x_pets_onehot])
def testRealValuedColumn(self):
batch_size = 4
sequence_length = 8
dimension = 3
np.random.seed(1111)
measurement_input = np.random.rand(batch_size, sequence_length, dimension)
measurement_column = feature_column.real_valued_column("measurements")
columns_to_tensors = {
"measurements": constant_op.constant(measurement_input)
}
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [measurement_column])
with self.test_session() as sess:
model_inputs = sess.run(model_input_tensor)
self.assertAllClose(measurement_input, model_inputs)
  def testRealValuedColumnWithExtraDimensions(self):
    """Trailing dimensions are flattened into one per-timestep vector."""
    batch_size = 4
    sequence_length = 8
    dimensions = [3, 4, 5]
    np.random.seed(2222)
    measurement_input = np.random.rand(batch_size, sequence_length, *dimensions)
    measurement_column = feature_column.real_valued_column("measurements")
    columns_to_tensors = {
        "measurements": constant_op.constant(measurement_input)
    }
    model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
        columns_to_tensors, [measurement_column])
    expected_shape = [batch_size, sequence_length, np.prod(dimensions)]
    reshaped_measurements = np.reshape(measurement_input, expected_shape)
    with self.test_session() as sess:
      model_inputs = sess.run(model_input_tensor)
      self.assertAllClose(reshaped_measurements, model_inputs)
def testRealValuedColumnWithNormalizer(self):
batch_size = 4
sequence_length = 8
dimension = 3
normalizer = lambda x: x - 2
np.random.seed(3333)
measurement_input = np.random.rand(batch_size, sequence_length, dimension)
measurement_column = feature_column.real_valued_column(
"measurements", normalizer=normalizer)
columns_to_tensors = {
"measurements": constant_op.constant(measurement_input)
}
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [measurement_column])
with self.test_session() as sess:
model_inputs = sess.run(model_input_tensor)
self.assertAllClose(normalizer(measurement_input), model_inputs)
  def testRealValuedColumnWithMultiDimensionsAndNormalizer(self):
    """Normalizer is applied and extra dimensions are flattened together."""
    batch_size = 4
    sequence_length = 8
    dimensions = [3, 4, 5]
    normalizer = lambda x: x / 2.0
    np.random.seed(1234)
    measurement_input = np.random.rand(batch_size, sequence_length, *dimensions)
    measurement_column = feature_column.real_valued_column(
        "measurements", normalizer=normalizer)
    columns_to_tensors = {
        "measurements": constant_op.constant(measurement_input)
    }
    model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
        columns_to_tensors, [measurement_column])
    expected_shape = [batch_size, sequence_length, np.prod(dimensions)]
    reshaped_measurements = np.reshape(measurement_input, expected_shape)
    with self.test_session() as sess:
      model_inputs = sess.run(model_input_tensor)
      # Normalizing the reshaped input must match the op's output exactly.
      self.assertAllClose(normalizer(reshaped_measurements), model_inputs)
  def testOneHotColumnFromSparseColumnWithKeys(self):
    """One-hot of a keyed sparse column over a [batch, seq, 2] id tensor."""
    ids_tensor = sparse_tensor.SparseTensor(
        values=["c", "b",
                "a", "c", "b",
                "b"],
        indices=[[0, 0, 0], [0, 1, 0],
                 [1, 0, 0], [1, 0, 1], [1, 1, 0],
                 [3, 2, 0]],
        dense_shape=[4, 3, 2])
    ids_column = feature_column.sparse_column_with_keys(
        "ids", ["a", "b", "c", "unseen"])
    one_hot_column = feature_column.one_hot_column(ids_column)
    columns_to_tensors = {"ids": ids_tensor}
    model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
        columns_to_tensors, [one_hot_column])
    with self.test_session() as sess:
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      model_input = sess.run(model_input_tensor)
    expected_input_shape = np.array([4, 3, 4])
    # Multiple ids in one sequence step (e.g. "a" and "c" at [1, 0, :]) show
    # up together in a single multi-hot row; absent steps are all zeros, and
    # the "unseen" key is never activated.
    expected_model_input = np.array(
        [[[0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
         [[1, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
         [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
         [[0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 0]]],
        dtype=np.float32)
    self.assertAllEqual(expected_input_shape, model_input.shape)
    self.assertAllClose(expected_model_input, model_input)
  def testOneHotColumnFromSparseColumnWithHashBucket(self):
    """One-hot of a hashed sparse column has depth equal to the bucket count."""
    hash_buckets = 10
    ids_tensor = sparse_tensor.SparseTensor(
        values=["c", "b",
                "a", "c", "b",
                "b"],
        indices=[[0, 0, 0], [0, 1, 0],
                 [1, 0, 0], [1, 0, 1], [1, 1, 0],
                 [3, 2, 0]],
        dense_shape=[4, 3, 2])
    hashed_ids_column = feature_column.sparse_column_with_hash_bucket(
        "ids", hash_buckets)
    one_hot_column = feature_column.one_hot_column(hashed_ids_column)
    columns_to_tensors = {"ids": ids_tensor}
    model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
        columns_to_tensors, [one_hot_column])
    with self.test_session() as sess:
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      model_input = sess.run(model_input_tensor)
    # Only the shape is checked: hashed bucket assignment is opaque here.
    expected_input_shape = np.array([4, 3, hash_buckets])
    self.assertAllEqual(expected_input_shape, model_input.shape)
def testEmbeddingColumn(self):
hash_buckets = 10
embedding_dimension = 5
ids_tensor = sparse_tensor.SparseTensor(
values=["c", "b",
"a", "c", "b",
"b"],
indices=[[0, 0, 0], [0, 1, 0],
[1, 0, 0], [1, 0, 1], [1, 1, 0],
[3, 2, 0]],
dense_shape=[4, 3, 2])
expected_input_shape = np.array([4, 3, embedding_dimension])
hashed_ids_column = feature_column.sparse_column_with_hash_bucket(
"ids", hash_buckets)
embedded_column = feature_column.embedding_column(hashed_ids_column,
embedding_dimension)
columns_to_tensors = {"ids": ids_tensor}
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [embedded_column])
with self.test_session() as sess:
variables_lib.global_variables_initializer().run()
data_flow_ops.tables_initializer().run()
model_input = sess.run(model_input_tensor)
self.assertAllEqual(expected_input_shape, model_input.shape)
  def testEmbeddingColumnWithAutoReshape(self):
    """A rank-2 sparse id tensor is auto-reshaped to the sequence layout."""
    hash_buckets = 10
    embedding_dimension = 5
    # Note: indices are rank 2 here (no inner "multivalent" axis), unlike
    # testEmbeddingColumn above; the op is expected to reshape internally.
    ids_tensor = sparse_tensor.SparseTensor(
        values=["c", "b",
                "a", "c", "b",
                "b"],
        indices=[[0, 0], [0, 1],
                 [1, 0], [1, 1], [1, 2],
                 [3, 2]],
        dense_shape=[4, 3])
    expected_input_shape = np.array([4, 3, embedding_dimension])
    hashed_ids_column = feature_column.sparse_column_with_hash_bucket(
        "ids", hash_buckets)
    embedded_column = feature_column.embedding_column(hashed_ids_column,
                                                      embedding_dimension)
    columns_to_tensors = {"ids": ids_tensor}
    model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
        columns_to_tensors, [embedded_column])
    with self.test_session() as sess:
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      model_input = sess.run(model_input_tensor)
    self.assertAllEqual(expected_input_shape, model_input.shape)
  def testEmbeddingColumnGradient(self):
    """Gradient w.r.t. embedding weights counts each id's occurrences."""
    hash_buckets = 1000
    embedding_dimension = 3
    ids_tensor = sparse_tensor.SparseTensor(
        values=["c", "b",
                "a", "c", "b",
                "b"],
        indices=[[0, 0, 0], [0, 1, 0],
                 [1, 0, 0], [1, 0, 1], [1, 1, 0],
                 [3, 2, 0]],
        dense_shape=[4, 3, 2])
    hashed_ids_column = feature_column.sparse_column_with_hash_bucket(
        "ids", hash_buckets)
    # combiner="sum" makes the gradient of each weight equal to the number of
    # times its id appears in the batch.
    embedded_column = feature_column.embedding_column(
        hashed_ids_column, embedding_dimension, combiner="sum")
    columns_to_tensors = {"ids": ids_tensor}
    model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
        columns_to_tensors, [embedded_column],
        weight_collections=["my_collection"])
    embedding_weights = ops.get_collection("my_collection")
    gradient_tensor = gradients_impl.gradients(model_input_tensor,
                                               embedding_weights)
    with self.test_session() as sess:
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      model_input, gradients = sess.run([model_input_tensor, gradient_tensor])
    expected_input_shape = [4, 3, embedding_dimension]
    self.assertAllEqual(expected_input_shape, model_input.shape)
    # `ids_tensor` consists of 7 instances of <empty>, 3 occurrences of "b",
    # 2 occurrences of "c" and 1 instance of "a".
    expected_gradient_values = sorted([0., 3., 2., 1.] * embedding_dimension)
    actual_gradient_values = np.sort(gradients[0].values, axis=None)
    self.assertAllClose(expected_gradient_values, actual_gradient_values)
  def testMultipleColumns(self):
    """Real-valued, one-hot and embedding columns concatenate along depth."""
    batch_size = 4
    sequence_length = 3
    measurement_dimension = 5
    country_hash_size = 10
    max_id = 200
    id_embedding_dimension = 11
    normalizer = lambda x: x / 10.0
    measurement_tensor = random_ops.random_uniform(
        [batch_size, sequence_length, measurement_dimension])
    country_tensor = sparse_tensor.SparseTensor(
        values=["us", "ca",
                "ru", "fr", "ca",
                "mx"],
        indices=[[0, 0, 0], [0, 1, 0],
                 [1, 0, 0], [1, 0, 1], [1, 1, 0],
                 [3, 2, 0]],
        dense_shape=[4, 3, 2])
    id_tensor = sparse_tensor.SparseTensor(
        values=[2, 5,
                26, 123, 1,
                0],
        indices=[[0, 0, 0], [0, 0, 1],
                 [0, 1, 1], [1, 0, 0], [1, 1, 0],
                 [3, 2, 0]],
        dense_shape=[4, 3, 2])
    columns_to_tensors = {
        "measurements": measurement_tensor,
        "country": country_tensor,
        "id": id_tensor
    }
    measurement_column = feature_column.real_valued_column(
        "measurements", normalizer=normalizer)
    country_column = feature_column.sparse_column_with_hash_bucket(
        "country", country_hash_size)
    id_column = feature_column.sparse_column_with_integerized_feature("id",
                                                                      max_id)
    onehot_country_column = feature_column.one_hot_column(country_column)
    embedded_id_column = feature_column.embedding_column(id_column,
                                                         id_embedding_dimension)
    model_input_columns = [
        measurement_column, onehot_country_column, embedded_id_column
    ]
    model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
        columns_to_tensors, model_input_columns)
    self.assertEqual(dtypes.float32, model_input_tensor.dtype)
    with self.test_session() as sess:
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      model_input = sess.run(model_input_tensor)
    # Depth is the sum of each column's output dimension.
    expected_input_shape = [
        batch_size, sequence_length,
        measurement_dimension + country_hash_size + id_embedding_dimension
    ]
    self.assertAllEqual(expected_input_shape, model_input.shape)
class WeightedSumTest(test.TestCase):
def testFeatureColumnDictFails(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
with self.assertRaisesRegexp(
ValueError,
"Expected feature_columns to be iterable, found dict"):
feature_column_ops.weighted_sum_from_feature_columns(
features, {"feature": hashed_sparse}, num_outputs=5)
def testSparseColumn(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [hashed_sparse], num_outputs=5)
with self.test_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
def testSparseIntColumn(self):
"""Tests a sparse column with int values."""
hashed_sparse = feature_column.sparse_column_with_hash_bucket(
"wire", 10, dtype=dtypes.int64)
wire_tensor = sparse_tensor.SparseTensor(
values=[101, 201, 301],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [hashed_sparse], num_outputs=5)
with self.test_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
def testSparseColumnWithDenseInputTensor(self):
hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = constant_op.constant(
[["omar", "stringer"], ["marlo", "rick"]])
features = {"wire": wire_tensor}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [hashed_sparse], num_outputs=5)
with self.test_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
  def testWeightedSparseColumn(self):
    """A weighted sparse column produces logits of shape [batch, num_outputs]."""
    ids = feature_column.sparse_column_with_keys("ids",
                                                 ["marlo", "omar", "stringer"])
    ids_tensor = sparse_tensor.SparseTensor(
        values=["stringer", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    weighted_ids = feature_column.weighted_sparse_column(ids, "weights")
    # Weights line up index-for-index with the id tensor above.
    weights_tensor = sparse_tensor.SparseTensor(
        values=[10.0, 20.0, 30.0],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"ids": ids_tensor, "weights": weights_tensor}
    logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
        features, [weighted_ids], num_outputs=5)
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      self.assertAllEqual(logits.eval().shape, [2, 5])
  def testWeightedSparseColumnWithDenseInputTensor(self):
    """Weighted sparse columns also accept dense id and weight tensors."""
    ids = feature_column.sparse_column_with_keys(
        "ids", ["marlo", "omar", "stringer", "rick"])
    ids_tensor = constant_op.constant([["omar", "stringer"], ["marlo", "rick"]])
    weighted_ids = feature_column.weighted_sparse_column(ids, "weights")
    weights_tensor = constant_op.constant([[10.0, 20.0], [30.0, 40.0]])
    features = {"ids": ids_tensor, "weights": weights_tensor}
    logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
        features, [weighted_ids], num_outputs=5)
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      self.assertAllEqual(logits.eval().shape, [2, 5])
def testCrossedColumn(self):
a = feature_column.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100)
b = feature_column.sparse_column_with_hash_bucket(
"bbb", hash_bucket_size=100)
crossed = feature_column.crossed_column(set([a, b]), hash_bucket_size=10000)
wire_tensor = sparse_tensor.SparseTensor(
values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"aaa": wire_tensor, "bbb": wire_tensor}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [crossed], num_outputs=5)
with self.test_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
  def testEmbeddingColumn(self):
    """Dense embedding columns must be rejected by the linear weighted sum."""
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"wire": wire_tensor}
    embeded_sparse = feature_column.embedding_column(hashed_sparse, 10)
    with self.test_session():
      with self.assertRaisesRegexp(
          ValueError, "Error creating weighted sum for column: wire_embedding"):
        variables_lib.global_variables_initializer().run()
        feature_column_ops.weighted_sum_from_feature_columns(
            features, [embeded_sparse], num_outputs=5)
def testSparseFeatureColumnWithVocabularyFile(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "movies.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["head-on", "matrix", "winter sleep"]) + "\n")
movies = feature_column.sparse_column_with_vocabulary_file(
column_name="movies", vocabulary_file=vocabulary_file, vocab_size=3)
with ops.Graph().as_default():
features = {
"movies":
sparse_tensor.SparseTensor(
values=["matrix", "head-on", "winter sleep"],
indices=[[0, 0], [0, 1], [1, 0]],
dense_shape=[2, 2])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [movies], num_outputs=1))
with self.test_session() as sess:
variables_lib.initialize_all_variables().run()
data_flow_ops.tables_initializer().run()
weights = column_to_variable[movies][0]
self.assertEqual(weights.get_shape(), (3, 1))
sess.run(weights.assign([[0.1], [0.3], [0.5]]))
# score for first example = 0.3 (matrix) + 0.1 (head-on) = 0.4
# score for second example = 0.5 (winter sleep)
self.assertAllClose(output.eval(), [[0.4], [0.5]])
def testRealValuedColumnWithMultiDimensions(self):
real_valued = feature_column.real_valued_column("price", 2)
features = {
"price": constant_op.constant([[20., 10.], [110, 0.], [-3, 30]])
}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [real_valued], num_outputs=5)
with self.test_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [3, 5])
def testBucketizedColumnWithMultiDimensions(self):
bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
features = {
"price": constant_op.constant([[20., 10.], [110, 0.], [-3, 30]])
}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [bucket], num_outputs=5)
with self.test_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [3, 5])
  def testAllWideColumns(self):
    """All wide column kinds combine into a single [batch, num_outputs] sum."""
    real_valued = feature_column.real_valued_column("income", 2)
    bucket = feature_column.bucketized_column(
        feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    crossed = feature_column.crossed_column([bucket, hashed_sparse], 100)
    features = {
        "income":
            constant_op.constant([[20., 10], [110, 0], [-3, 30]]),
        "price":
            constant_op.constant([[20.], [110], [-3]]),
        "wire":
            sparse_tensor.SparseTensor(
                values=["omar", "stringer", "marlo"],
                indices=[[0, 0], [1, 0], [2, 0]],
                dense_shape=[3, 1])
    }
    output, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
        features, [real_valued, bucket, hashed_sparse, crossed], num_outputs=5)
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      self.assertAllEqual(output.eval().shape, [3, 5])
  def testPredictions(self):
    """Scores accumulate as bias, then real-valued, then sparse weights."""
    language = feature_column.sparse_column_with_keys(
        column_name="language", keys=["english", "finnish", "hindi"])
    age = feature_column.real_valued_column("age")
    with ops.Graph().as_default():
      features = {
          "age":
              constant_op.constant([[1], [2]]),
          "language":
              sparse_tensor.SparseTensor(
                  values=["hindi", "english"],
                  indices=[[0, 0], [1, 0]],
                  dense_shape=[2, 1]),
      }
      output, column_to_variable, bias = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [age, language], num_outputs=1))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        # All weights start at zero, so the initial score is zero.
        self.assertAllClose(output.eval(), [[0.], [0.]])
        sess.run(bias.assign([0.1]))
        self.assertAllClose(output.eval(), [[0.1], [0.1]])
        # score: 0.1 + age*0.1
        sess.run(column_to_variable[age][0].assign([[0.2]]))
        self.assertAllClose(output.eval(), [[0.3], [0.5]])
        # score: 0.1 + age*0.1 + language_weight[language_index]
        sess.run(column_to_variable[language][0].assign([[0.1], [0.3], [0.2]]))
        self.assertAllClose(output.eval(), [[0.5], [0.6]])
def testJointPredictions(self):
country = feature_column.sparse_column_with_keys(
column_name="country", keys=["us", "finland"])
language = feature_column.sparse_column_with_keys(
column_name="language", keys=["english", "finnish", "hindi"])
with ops.Graph().as_default():
features = {
"country":
sparse_tensor.SparseTensor(
values=["finland", "us"],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
"language":
sparse_tensor.SparseTensor(
values=["hindi", "english"],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}
output, variables, bias = (
feature_column_ops.joint_weighted_sum_from_feature_columns(
features, [country, language], num_outputs=1))
# Assert that only a single weight is created.
self.assertEqual(len(variables), 1)
with self.test_session() as sess:
variables_lib.global_variables_initializer().run()
data_flow_ops.tables_initializer().run()
self.assertAllClose(output.eval(), [[0.], [0.]])
sess.run(bias.assign([0.1]))
self.assertAllClose(output.eval(), [[0.1], [0.1]])
# shape is [5,1] because 1 class and 2 + 3 features.
self.assertEquals(variables[0].get_shape().as_list(), [5, 1])
# score: bias + country_weight + language_weight
sess.run(variables[0].assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
self.assertAllClose(output.eval(), [[0.8], [0.5]])
  def testJointPredictionsWeightedFails(self):
    """Weighted sparse columns are not supported by the joint weighted sum."""
    language = feature_column.weighted_sparse_column(
        feature_column.sparse_column_with_keys(
            column_name="language", keys=["english", "finnish", "hindi"]),
        "weight")
    with ops.Graph().as_default():
      features = {
          "weight":
              constant_op.constant([[1], [2]]),
          "language":
              sparse_tensor.SparseTensor(
                  values=["hindi", "english"],
                  indices=[[0, 0], [1, 0]],
                  dense_shape=[2, 1]),
      }
      with self.assertRaises(AssertionError):
        feature_column_ops.joint_weighted_sum_from_feature_columns(
            features, [language], num_outputs=1)
def testJointPredictionsRealFails(self):
age = feature_column.real_valued_column("age")
with ops.Graph().as_default():
features = {"age": constant_op.constant([[1], [2]]),}
with self.assertRaises(NotImplementedError):
feature_column_ops.joint_weighted_sum_from_feature_columns(
features, [age], num_outputs=1)
  def testPredictionsWithWeightedSparseColumn(self):
    """Scores scale the per-id weight variable by the feature's weight value."""
    language = feature_column.sparse_column_with_keys(
        column_name="language", keys=["english", "finnish", "hindi"])
    weighted_language = feature_column.weighted_sparse_column(
        sparse_id_column=language, weight_column_name="age")
    with ops.Graph().as_default():
      features = {
          "language":
              sparse_tensor.SparseTensor(
                  values=["hindi", "english"],
                  indices=[[0, 0], [1, 0]],
                  dense_shape=[2, 1]),
          "age":
              sparse_tensor.SparseTensor(
                  values=[10.0, 20.0],
                  indices=[[0, 0], [1, 0]],
                  dense_shape=[2, 1])
      }
      output, column_to_variable, bias = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [weighted_language], num_outputs=1))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        self.assertAllClose(output.eval(), [[0.], [0.]])
        sess.run(bias.assign([0.1]))
        self.assertAllClose(output.eval(), [[0.1], [0.1]])
        # score: bias + age*language_weight[index]
        # e.g. first example: 0.1 + 10.0 * 0.3 (hindi... wait, see weights
        # below; hindi is index 2 -> 0.3) = 3.1.
        sess.run(column_to_variable[weighted_language][0].assign([[0.1], [0.2],
                                                                  [0.3]]))
        self.assertAllClose(output.eval(), [[3.1], [2.1]])
  def testPredictionsWithMultivalentColumnButNoCross(self):
    """Multiple ids in one example sum their individual weights."""
    language = feature_column.sparse_column_with_keys(
        column_name="language", keys=["english", "turkish", "hindi"])
    with ops.Graph().as_default():
      features = {
          "language":
              sparse_tensor.SparseTensor(
                  values=["hindi", "english"],
                  indices=[[0, 0], [0, 1]],
                  dense_shape=[1, 2])
      }
      output, column_to_variable, bias = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [language], num_outputs=1))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        # score: 0.1 + language_weight['hindi'] + language_weight['english']
        sess.run(bias.assign([0.1]))
        sess.run(column_to_variable[language][0].assign([[0.1], [0.3], [0.2]]))
        self.assertAllClose(output.eval(), [[0.4]])
  def testSparseFeatureColumnWithHashedBucketSize(self):
    """Each present hashed feature contributes its (uniform) bucket weight."""
    movies = feature_column.sparse_column_with_hash_bucket(
        column_name="movies", hash_bucket_size=15)
    with ops.Graph().as_default():
      features = {
          "movies":
              sparse_tensor.SparseTensor(
                  values=["matrix", "head-on", "winter sleep"],
                  indices=[[0, 0], [0, 1], [1, 0]],
                  dense_shape=[2, 2])
      }
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [movies], num_outputs=1))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        weights = column_to_variable[movies][0]
        self.assertEqual(weights.get_shape(), (15, 1))
        # Setting every bucket weight to 0.4 makes scores independent of
        # which bucket each movie hashes into.
        sess.run(weights.assign(weights + 0.4))
        # score for first example = 0.4 (matrix) + 0.4 (head-on) = 0.8
        # score for second example = 0.4 (winter sleep)
        self.assertAllClose(output.eval(), [[0.8], [0.4]])
  def testCrossUsageInPredictions(self):
    """One cross per example contributes one (uniform) crossed weight."""
    language = feature_column.sparse_column_with_hash_bucket(
        "language", hash_bucket_size=3)
    country = feature_column.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=5)
    country_language = feature_column.crossed_column(
        [language, country], hash_bucket_size=10)
    with ops.Graph().as_default():
      features = {
          "language":
              sparse_tensor.SparseTensor(
                  values=["english", "spanish"],
                  indices=[[0, 0], [1, 0]],
                  dense_shape=[2, 1]),
          "country":
              sparse_tensor.SparseTensor(
                  values=["US", "SV"],
                  indices=[[0, 0], [1, 0]],
                  dense_shape=[2, 1])
      }
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [country_language], num_outputs=1))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        weights = column_to_variable[country_language][0]
        # Uniform weights make the score independent of the hash values.
        sess.run(weights.assign(weights + 0.4))
        self.assertAllClose(output.eval(), [[0.4], [0.4]])
  def testCrossColumnByItself(self):
    """Crossing a 2-valued column with itself yields 2*2 crossed features."""
    language = feature_column.sparse_column_with_hash_bucket(
        "language", hash_bucket_size=3)
    language_language = feature_column.crossed_column(
        [language, language], hash_bucket_size=10)
    with ops.Graph().as_default():
      features = {
          "language":
              sparse_tensor.SparseTensor(
                  values=["english", "spanish"],
                  indices=[[0, 0], [0, 1]],
                  dense_shape=[1, 2]),
      }
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [language_language], num_outputs=1))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        weights = column_to_variable[language_language][0]
        sess.run(weights.assign(weights + 0.4))
        # There are two features inside language. If we cross it by itself we'll
        # have four crossed features.
        self.assertAllClose(output.eval(), [[1.6]])
  def testMultivalentCrossUsageInPredictions(self):
    """Two ids per side produce 2*2 crosses, each adding one weight."""
    language = feature_column.sparse_column_with_hash_bucket(
        "language", hash_bucket_size=3)
    country = feature_column.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=5)
    country_language = feature_column.crossed_column(
        [language, country], hash_bucket_size=10)
    with ops.Graph().as_default():
      features = {
          "language":
              sparse_tensor.SparseTensor(
                  values=["english", "spanish"],
                  indices=[[0, 0], [0, 1]],
                  dense_shape=[1, 2]),
          "country":
              sparse_tensor.SparseTensor(
                  values=["US", "SV"],
                  indices=[[0, 0], [0, 1]],
                  dense_shape=[1, 2])
      }
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [country_language], num_outputs=1))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        weights = column_to_variable[country_language][0]
        sess.run(weights.assign(weights + 0.4))
        # There are four crosses each with 0.4 weight.
        # score = 0.4 + 0.4 + 0.4 + 0.4
        self.assertAllClose(output.eval(), [[1.6]])
  def testMultivalentCrossUsageInPredictionsWithPartition(self):
    """Partitioned weight variables still produce the same crossed score."""
    # bucket size has to be big enough to allow sharding.
    language = feature_column.sparse_column_with_hash_bucket(
        "language", hash_bucket_size=64 << 19)
    country = feature_column.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=64 << 18)
    country_language = feature_column.crossed_column(
        [language, country], hash_bucket_size=64 << 18)
    with ops.Graph().as_default():
      features = {
          "language":
              sparse_tensor.SparseTensor(
                  values=["english", "spanish"],
                  indices=[[0, 0], [0, 1]],
                  dense_shape=[1, 2]),
          "country":
              sparse_tensor.SparseTensor(
                  values=["US", "SV"],
                  indices=[[0, 0], [0, 1]],
                  dense_shape=[1, 2])
      }
      # min_slice_size just below 64MB forces each large variable to shard.
      with variable_scope.variable_scope(
          "weighted_sum_from_feature_columns",
          features.values(),
          partitioner=partitioned_variables.min_max_variable_partitioner(
              max_partitions=10, min_slice_size=((64 << 20) - 1))) as scope:
        output, column_to_variable, _ = (
            feature_column_ops.weighted_sum_from_feature_columns(
                features, [country, language, country_language],
                num_outputs=1,
                scope=scope))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        # Partition counts scale with each column's bucket size.
        self.assertEqual(2, len(column_to_variable[country]))
        self.assertEqual(3, len(column_to_variable[language]))
        self.assertEqual(2, len(column_to_variable[country_language]))
        weights = column_to_variable[country_language]
        for partition_variable in weights:
          sess.run(partition_variable.assign(partition_variable + 0.4))
        # There are four crosses each with 0.4 weight.
        # score = 0.4 + 0.4 + 0.4 + 0.4
        self.assertAllClose(output.eval(), [[1.6]])
  def testRealValuedColumnHavingMultiDimensions(self):
    """A 3-dim real-valued column dot-products its values with its weights."""
    country = feature_column.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=5)
    age = feature_column.real_valued_column("age")
    # The following RealValuedColumn has 3 dimensions.
    incomes = feature_column.real_valued_column("incomes", 3)
    with ops.Graph().as_default():
      features = {
          "age":
              constant_op.constant([[1], [1]]),
          "incomes":
              constant_op.constant([[100., 200., 300.], [10., 20., 30.]]),
          "country":
              sparse_tensor.SparseTensor(
                  values=["US", "SV"],
                  indices=[[0, 0], [1, 0]],
                  dense_shape=[2, 2])
      }
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [country, age, incomes], num_outputs=1))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        incomes_weights = column_to_variable[incomes][0]
        # first example: 100*0.1 + 200*0.2 + 300*0.3 = 140
        sess.run(incomes_weights.assign([[0.1], [0.2], [0.3]]))
        self.assertAllClose(output.eval(), [[140.], [14.]])
  def testMulticlassWithRealValuedColumnHavingMultiDimensionsAndSparse(self):
    """Multi-class scores sum dense (fixed and var-len) column contributions."""
    country = feature_column.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=5)
    age = feature_column.real_valued_column("age")
    # The following RealValuedColumn has no predefined dimension so it
    # can be missing.
    height = feature_column._real_valued_var_len_column("height",
                                                        default_value=0,
                                                        is_sparse=False)
    # The following RealValuedColumn has 3 dimensions.
    incomes = feature_column.real_valued_column("incomes", 3)
    with ops.Graph().as_default():
      features = {
          "age":
              constant_op.constant([[1], [1]]),
          "incomes":
              constant_op.constant([[100., 200., 300.], [10., 20., 30.]]),
          "height":
              constant_op.constant([[5., 4.], [0., 6.]]),
          "country":
              sparse_tensor.SparseTensor(
                  values=["US", "SV"],
                  indices=[[0, 0], [1, 0]],
                  dense_shape=[2, 2])
      }
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [country, age, height, incomes], num_outputs=5))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()

        # height contributes (5+4)=9 times each class weight for example one,
        # (0+6)=6 times for example two.
        height_weights = column_to_variable[height][0]
        sess.run(
            height_weights.assign(
                [[1., 2., 3., 5., 10.], [1., 2., 3., 5., 10.]]))
        self.assertAllClose(output.eval(), [[9., 18., 27., 45., 90.],
                                            [6., 12., 18., 30., 60.]])

        incomes_weights = column_to_variable[incomes][0]
        sess.run(
            incomes_weights.assign([[0.01, 0.1, 1., 10., 100.],
                                    [0.02, 0.2, 2., 20., 200.],
                                    [0.03, 0.3, 3., 30., 300.]]))
        self.assertAllClose(
            output.eval(),
            [[14. + 9., 140. + 18., 1400. + 27., 14000. + 45., 140000. + 90.],
             [1.4 + 6., 14. + 12., 140. + 18., 1400. + 30., 14000. + 60.]])
  def testBucketizedColumn(self):
    """Each example's score is the weight of the bucket its value falls into."""
    bucket = feature_column.bucketized_column(
        feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
    with ops.Graph().as_default():
      # buckets 2, 3, 0
      features = {"price": constant_op.constant([[20.], [110], [-3]])}
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [bucket], num_outputs=1))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        # One weight per bucket (3 boundaries -> 4 buckets).
        sess.run(column_to_variable[bucket][0].assign([[0.1], [0.2], [0.3],
                                                       [0.4]]))
        self.assertAllClose(output.eval(), [[0.3], [0.4], [0.1]])
  def testBucketizedColumnHavingMultiDimensions(self):
    """A 2-dim bucketized column sums the bucket weight of each dimension."""
    country = feature_column.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=5)
    bucket = feature_column.bucketized_column(
        feature_column.real_valued_column("price", 2),
        boundaries=[0., 10., 100.])
    with ops.Graph().as_default():
      # buckets 2, 3, 0
      features = {
          "price":
              constant_op.constant([[20., 210], [110, 50], [-3, -30]]),
          "country":
              sparse_tensor.SparseTensor(
                  values=["US", "SV"],
                  indices=[[0, 0], [1, 0]],
                  dense_shape=[3, 2])
      }
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [bucket, country], num_outputs=1))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        # dimension = 2, bucket_size = 4, num_classes = 1
        # (first 4 rows are dimension 0's buckets, last 4 are dimension 1's).
        sess.run(column_to_variable[bucket][0].assign(
            [[0.1], [0.2], [0.3], [0.4], [1], [2], [3], [4]]))
        self.assertAllClose(output.eval(), [[0.3 + 4], [0.4 + 3], [0.1 + 1]])
  def testMulticlassWithBucketizedColumnHavingMultiDimensions(self):
    """Multi-class scores sum per-dimension bucket weights for each class."""
    country = feature_column.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=5)
    bucket = feature_column.bucketized_column(
        feature_column.real_valued_column("price", 2),
        boundaries=[0., 10., 100.])
    with ops.Graph().as_default():
      # buckets 2, 3, 0
      features = {
          "price":
              constant_op.constant([[20., 210], [110, 50], [-3, -30]]),
          "country":
              sparse_tensor.SparseTensor(
                  values=["US", "SV"],
                  indices=[[0, 0], [1, 0]],
                  dense_shape=[3, 2])
      }
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [bucket, country], num_outputs=5))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        # dimension = 2, bucket_size = 4, num_classes = 5
        sess.run(column_to_variable[bucket][0].assign(
            [[0.1, 1, 10, 100, 1000], [0.2, 2, 20, 200, 2000],
             [0.3, 3, 30, 300, 3000], [0.4, 4, 40, 400, 4000],
             [5, 50, 500, 5000, 50000], [6, 60, 600, 6000, 60000],
             [7, 70, 700, 7000, 70000], [8, 80, 800, 8000, 80000]]))
        self.assertAllClose(
            output.eval(),
            [[0.3 + 8, 3 + 80, 30 + 800, 300 + 8000, 3000 + 80000],
             [0.4 + 7, 4 + 70, 40 + 700, 400 + 7000, 4000 + 70000],
             [0.1 + 5, 1 + 50, 10 + 500, 100 + 5000, 1000 + 50000]])
def testCrossWithBucketizedColumn(self):
price_bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_price = feature_column.crossed_column(
[country, price_bucket], hash_bucket_size=10)
with ops.Graph().as_default():
features = {
"price":
constant_op.constant([[20.]]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV"],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [country_price], num_outputs=1))
with self.test_session() as sess:
variables_lib.global_variables_initializer().run()
data_flow_ops.tables_initializer().run()
weights = column_to_variable[country_price][0]
sess.run(weights.assign(weights + 0.4))
# There are two crosses each with 0.4 weight.
# score = 0.4 + 0.4
self.assertAllClose(output.eval(), [[0.8]])
  def testCrossWithCrossedColumn(self):
    """Weighted sum over a cross whose input is itself a crossed column."""
    price_bucket = feature_column.bucketized_column(
        feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
    language = feature_column.sparse_column_with_hash_bucket(
        "language", hash_bucket_size=3)
    country = feature_column.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=5)
    country_language = feature_column.crossed_column(
        [language, country], hash_bucket_size=10)
    # A crossed column can be crossed again with another column.
    country_language_price = feature_column.crossed_column(
        set([country_language, price_bucket]), hash_bucket_size=15)
    with ops.Graph().as_default():
      features = {
          "price":
              constant_op.constant([[20.]]),
          "country":
              sparse_tensor.SparseTensor(
                  values=["US", "SV"],
                  indices=[[0, 0], [0, 1]],
                  dense_shape=[1, 2]),
          "language":
              sparse_tensor.SparseTensor(
                  values=["english", "spanish"],
                  indices=[[0, 0], [0, 1]],
                  dense_shape=[1, 2])
      }
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [country_language_price], num_outputs=1))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        weights = column_to_variable[country_language_price][0]
        # Shift every weight by 0.4 so each active cross id contributes 0.4.
        sess.run(weights.assign(weights + 0.4))
        # 2 languages x 2 countries yield 4 active crosses with the single
        # price bucket (presumably with no hash collisions), each with
        # weight 0.4: score = 0.4 + 0.4 + 0.4 + 0.4.
        self.assertAllClose(output.eval(), [[1.6]])
def testIntegerizedColumn(self):
product = feature_column.sparse_column_with_integerized_feature(
"product", bucket_size=5)
with ops.Graph().as_default():
features = {
"product":
sparse_tensor.SparseTensor(
values=[0, 4, 2],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [product], num_outputs=1))
with self.test_session() as sess:
variables_lib.global_variables_initializer().run()
data_flow_ops.tables_initializer().run()
product_weights = column_to_variable[product][0]
sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
self.assertAllClose(output.eval(), [[0.1], [0.5], [0.3]])
def testIntegerizedColumnWithDenseInputTensor(self):
product = feature_column.sparse_column_with_integerized_feature(
"product", bucket_size=5)
with ops.Graph().as_default():
features = {"product": constant_op.constant([[0], [4], [2]])}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [product], num_outputs=1))
with self.test_session() as sess:
variables_lib.global_variables_initializer().run()
data_flow_ops.tables_initializer().run()
product_weights = column_to_variable[product][0]
sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
self.assertAllClose(output.eval(), [[0.1], [0.5], [0.3]])
def testIntegerizedColumnWithDenseInputTensor2(self):
product = feature_column.sparse_column_with_integerized_feature(
"product", bucket_size=5)
with ops.Graph().as_default():
features = {"product": constant_op.constant([[0, 4], [2, 3]])}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [product], num_outputs=1))
with self.test_session() as sess:
variables_lib.global_variables_initializer().run()
data_flow_ops.tables_initializer().run()
product_weights = column_to_variable[product][0]
sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
self.assertAllClose(output.eval(), [[0.6], [0.7]])
  def testIntegerizedColumnWithInvalidId(self):
    """Ids outside [0, bucket_size) still produce a valid weighted sum."""
    product = feature_column.sparse_column_with_integerized_feature(
        "product", bucket_size=5)
    with ops.Graph().as_default():
      # Ids 5 and 7 exceed bucket_size=5.
      features = {
          "product":
              sparse_tensor.SparseTensor(
                  values=[5, 4, 7],
                  indices=[[0, 0], [1, 0], [2, 0]],
                  dense_shape=[3, 1])
      }
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [product], num_outputs=1))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        product_weights = column_to_variable[product][0]
        sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
        # Expected rows 0, 4, 2 — consistent with out-of-range ids being
        # reduced modulo bucket_size (5 % 5 = 0, 7 % 5 = 2); presumably the
        # column wraps invalid ids rather than raising — TODO confirm.
        self.assertAllClose(output.eval(), [[0.1], [0.5], [0.3]])
def testMulticlassWithOnlyBias(self):
with ops.Graph().as_default():
features = {"age": constant_op.constant([[10.], [20.], [30.], [40.]])}
output, _, bias = feature_column_ops.weighted_sum_from_feature_columns(
features, [feature_column.real_valued_column("age")], num_outputs=3)
with self.test_session() as sess:
variables_lib.global_variables_initializer().run()
data_flow_ops.tables_initializer().run()
sess.run(bias.assign([0.1, 0.2, 0.3]))
self.assertAllClose(output.eval(), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3],
[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
def testMulticlassWithRealValuedColumn(self):
with ops.Graph().as_default():
column = feature_column.real_valued_column("age")
features = {"age": constant_op.constant([[10.], [20.], [30.], [40.]])}
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [column], num_outputs=3))
with self.test_session() as sess:
variables_lib.global_variables_initializer().run()
data_flow_ops.tables_initializer().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (1, 3))
sess.run(weights.assign([[0.01, 0.03, 0.05]]))
self.assertAllClose(output.eval(), [[0.1, 0.3, 0.5], [0.2, 0.6, 1.0],
[0.3, 0.9, 1.5], [0.4, 1.2, 2.0]])
  def testMulticlassWithSparseColumn(self):
    """Multiclass scores for a keyed sparse column select whole weight rows.

    Key order fixes the ids: english=0, arabic=1, hindi=2, russian=3,
    swahili=4, so each example's score is the weight row of its key's id.
    """
    with ops.Graph().as_default():
      column = feature_column.sparse_column_with_keys(
          column_name="language",
          keys=["english", "arabic", "hindi", "russian", "swahili"])
      features = {
          "language":
              sparse_tensor.SparseTensor(
                  values=["hindi", "english", "arabic", "russian"],
                  indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
                  dense_shape=[4, 1])
      }
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [column], num_outputs=3))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        weights = column_to_variable[column][0]
        # 5 vocabulary entries x 3 classes.
        self.assertEqual(weights.get_shape(), (5, 3))
        sess.run(
            weights.assign([[0.1, 0.4, 0.7],
                            [0.2, 0.5, 0.8],
                            [0.3, 0.6, 0.9],
                            [0.4, 0.7, 1.0],
                            [0.5, 0.8, 1.1]]))
        # Inputs hindi, english, arabic, russian -> rows 2, 0, 1, 3.
        self.assertAllClose(output.eval(), [[0.3, 0.6, 0.9],
                                            [0.1, 0.4, 0.7],
                                            [0.2, 0.5, 0.8],
                                            [0.4, 0.7, 1.0]])
  def testMulticlassWithBucketizedColumn(self):
    """Multiclass scores for a bucketized column select whole weight rows.

    Four boundaries produce five buckets, hence the (5, 3) weight shape.
    """
    column = feature_column.bucketized_column(
        feature_column.real_valued_column("price"),
        boundaries=[0., 100., 500., 1000.])
    with ops.Graph().as_default():
      # buckets 0, 2, 1, 2
      features = {"price": constant_op.constant([[-3], [110], [20.], [210]])}
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [column], num_outputs=3))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        weights = column_to_variable[column][0]
        self.assertEqual(weights.get_shape(), (5, 3))
        sess.run(
            weights.assign([[0.1, 0.4, 0.7],
                            [0.2, 0.5, 0.8],
                            [0.3, 0.6, 0.9],
                            [0.4, 0.7, 1.0],
                            [0.5, 0.8, 1.1]]))
        # Buckets 0, 2, 1, 2 -> weight rows 0, 2, 1, 2.
        self.assertAllClose(output.eval(), [[0.1, 0.4, 0.7],
                                            [0.3, 0.6, 0.9],
                                            [0.2, 0.5, 0.8],
                                            [0.3, 0.6, 0.9]])
  def testMulticlassWithCrossedColumn(self):
    """Multiclass weighted sum over a hashed (language x country) cross.

    Hash bucket assignments are implementation-defined, so only the output
    shape is asserted, not specific values.
    """
    language = feature_column.sparse_column_with_hash_bucket(
        "language", hash_bucket_size=3)
    country = feature_column.sparse_column_with_hash_bucket(
        "country", hash_bucket_size=2)
    column = feature_column.crossed_column(
        {language, country}, hash_bucket_size=5)
    with ops.Graph().as_default():
      features = {
          "language":
              sparse_tensor.SparseTensor(
                  values=["english", "spanish", "russian", "swahili"],
                  indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
                  dense_shape=[4, 1]),
          "country":
              sparse_tensor.SparseTensor(
                  values=["US", "SV", "RU", "KE"],
                  indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
                  dense_shape=[4, 1])
      }
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [column], num_outputs=3))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        weights = column_to_variable[column][0]
        # 5 hash buckets x 3 classes.
        self.assertEqual(weights.get_shape(), (5, 3))
        sess.run(
            weights.assign([[0.1, 0.4, 0.7],
                            [0.2, 0.5, 0.8],
                            [0.3, 0.6, 0.9],
                            [0.4, 0.7, 1.0],
                            [0.5, 0.8, 1.1]]))
        # Only the shape is deterministic: 4 examples x 3 classes.
        self.assertAllClose(array_ops.shape(output).eval(), [4, 3])
  def testMulticlassWithMultivalentColumn(self):
    """Rows with multiple sparse values sum the weight rows of all ids.

    Key order fixes the ids: english=0, turkish=1, hindi=2, russian=3,
    swahili=4.
    """
    column = feature_column.sparse_column_with_keys(
        column_name="language",
        keys=["english", "turkish", "hindi", "russian", "swahili"])
    with ops.Graph().as_default():
      # Example 0 has two values (hindi, english); examples 1-3 have one.
      features = {
          "language":
              sparse_tensor.SparseTensor(
                  values=["hindi", "english", "turkish", "turkish", "english"],
                  indices=[[0, 0], [0, 1], [1, 0], [2, 0], [3, 0]],
                  dense_shape=[4, 2])
      }
      output, column_to_variable, _ = (
          feature_column_ops.weighted_sum_from_feature_columns(
              features, [column], num_outputs=3))
      with self.test_session() as sess:
        variables_lib.global_variables_initializer().run()
        data_flow_ops.tables_initializer().run()
        weights = column_to_variable[column][0]
        self.assertEqual(weights.get_shape(), (5, 3))
        sess.run(
            weights.assign([[0.1, 0.4, 0.7],
                            [0.2, 0.5, 0.8],
                            [0.3, 0.6, 0.9],
                            [0.4, 0.7, 1.0],
                            [0.5, 0.8, 1.1]]))
        # Row 0: hindi + english = rows 2 + 0 = [0.4, 1.0, 1.6];
        # rows 1-2: turkish = row 1; row 3: english = row 0.
        self.assertAllClose(output.eval(), [[0.4, 1.0, 1.6],
                                            [0.2, 0.5, 0.8],
                                            [0.2, 0.5, 0.8],
                                            [0.1, 0.4, 0.7]])
def testVariablesAddedToCollection(self):
price_bucket = feature_column.bucketized_column(
feature_column.real_valued_column("price"), boundaries=[0., 10., 100.])
country = feature_column.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_price = feature_column.crossed_column(
[country, price_bucket], hash_bucket_size=10)
with ops.Graph().as_default():
features = {
"price":
constant_op.constant([[20.]]),
"country":
sparse_tensor.SparseTensor(
values=["US", "SV"],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}
feature_column_ops.weighted_sum_from_feature_columns(
features, [country_price, price_bucket],
num_outputs=1,
weight_collections=["my_collection"])
weights = ops.get_collection("my_collection")
# 3 = bias + price_bucket + country_price
self.assertEqual(3, len(weights))
class ParseExampleTest(test.TestCase):
  """Tests for parsing tf.Example / tf.SequenceExample via feature columns."""

  def testParseExample(self):
    """parse_feature_columns_from_examples decodes per-column outputs.

    The bucketized price decodes to bucket ids; the keyed sparse column
    decodes string values to their key indices.
    """
    bucket = feature_column.bucketized_column(
        feature_column.real_valued_column(
            "price", dimension=3),
        boundaries=[0., 10., 100.])
    wire_cast = feature_column.sparse_column_with_keys(
        "wire_cast", ["marlo", "omar", "stringer"])
    # buckets 2, 3, 0
    data = example_pb2.Example(features=feature_pb2.Features(feature={
        "price":
            feature_pb2.Feature(float_list=feature_pb2.FloatList(
                value=[20., 110, -3])),
        "wire_cast":
            feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                value=[b"stringer", b"marlo"])),
    }))
    output = feature_column_ops.parse_feature_columns_from_examples(
        serialized=[data.SerializeToString()],
        feature_columns=[bucket, wire_cast])
    self.assertIn(bucket, output)
    self.assertIn(wire_cast, output)
    with self.test_session():
      data_flow_ops.tables_initializer().run()
      # Prices [20, 110, -3] fall in buckets 2, 3, 0.
      self.assertAllEqual(output[bucket].eval(), [[2, 3, 0]])
      self.assertAllEqual(output[wire_cast].indices.eval(), [[0, 0], [0, 1]])
      # "stringer" -> key id 2, "marlo" -> key id 0.
      self.assertAllEqual(output[wire_cast].values.eval(), [2, 0])

  def testParseSequenceExample(self):
    """Context and sequence columns of a SequenceExample parse separately."""
    location_keys = ["east_side", "west_side", "nyc"]
    embedding_dimension = 10
    location = feature_column.sparse_column_with_keys(
        "location", keys=location_keys)
    location_onehot = feature_column.one_hot_column(location)
    wire_cast = feature_column.sparse_column_with_keys(
        "wire_cast", ["marlo", "omar", "stringer"])
    wire_cast_embedded = feature_column.embedding_column(
        wire_cast, dimension=embedding_dimension)
    measurements = feature_column.real_valued_column(
        "measurements", dimension=2)
    context_feature_columns = [location_onehot]
    sequence_feature_columns = [wire_cast_embedded, measurements]
    # One context feature plus a 3-step sequence of 2 feature lists.
    sequence_example = example_pb2.SequenceExample(
        context=feature_pb2.Features(feature={
            "location":
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b"west_side"])),
        }),
        feature_lists=feature_pb2.FeatureLists(feature_list={
            "wire_cast":
                feature_pb2.FeatureList(feature=[
                    feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                        value=[b"marlo", b"stringer"])),
                    feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                        value=[b"omar", b"stringer", b"marlo"])),
                    feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                        value=[b"marlo"])),
                ]),
            "measurements":
                feature_pb2.FeatureList(feature=[
                    feature_pb2.Feature(float_list=feature_pb2.FloatList(
                        value=[0.2, 0.3])),
                    feature_pb2.Feature(float_list=feature_pb2.FloatList(
                        value=[0.1, 0.8])),
                    feature_pb2.Feature(float_list=feature_pb2.FloatList(
                        value=[0.5, 0.0])),
                ])
        }))
    ctx, seq = feature_column_ops.parse_feature_columns_from_sequence_examples(
        serialized=sequence_example.SerializeToString(),
        context_feature_columns=context_feature_columns,
        sequence_feature_columns=sequence_feature_columns)
    # Outputs are keyed by feature name; sparse columns yield SparseTensors,
    # real-valued columns yield dense Tensors.
    self.assertIn("location", ctx)
    self.assertIsInstance(ctx["location"], sparse_tensor.SparseTensor)
    self.assertIn("wire_cast", seq)
    self.assertIsInstance(seq["wire_cast"], sparse_tensor.SparseTensor)
    self.assertIn("measurements", seq)
    self.assertIsInstance(seq["measurements"], ops.Tensor)
    with self.test_session() as sess:
      location_val, wire_cast_val, measurement_val = sess.run(
          [ctx["location"], seq["wire_cast"], seq["measurements"]])
      self.assertAllEqual(location_val.indices, np.array([[0]]))
      self.assertAllEqual(location_val.values, np.array([b"west_side"]))
      self.assertAllEqual(location_val.dense_shape, np.array([1]))
      # 2 + 3 + 1 wire_cast values across the three sequence steps; the
      # dense shape is padded to the longest step (3).
      self.assertAllEqual(wire_cast_val.indices,
                          np.array(
                              [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [2, 0]]))
      self.assertAllEqual(
          wire_cast_val.values,
          np.array(
              [b"marlo", b"stringer", b"omar", b"stringer", b"marlo", b"marlo"]))
      self.assertAllEqual(wire_cast_val.dense_shape, np.array([3, 3]))
      self.assertAllClose(measurement_val,
                          np.array([[0.2, 0.3], [0.1, 0.8], [0.5, 0.0]]))
class InferRealValuedColumnTest(test.TestCase):
  """Tests for `feature_column_ops.infer_real_valued_columns`."""

  def testTensorInt32(self):
    inferred = feature_column_ops.infer_real_valued_columns(
        array_ops.zeros(shape=[33, 4], dtype=dtypes.int32))
    self.assertEqual(
        inferred,
        [feature_column.real_valued_column(
            "", dimension=4, dtype=dtypes.int32)])

  def testTensorInt64(self):
    inferred = feature_column_ops.infer_real_valued_columns(
        array_ops.zeros(shape=[33, 4], dtype=dtypes.int64))
    self.assertEqual(
        inferred,
        [feature_column.real_valued_column(
            "", dimension=4, dtype=dtypes.int64)])

  def testTensorFloat32(self):
    inferred = feature_column_ops.infer_real_valued_columns(
        array_ops.zeros(shape=[33, 4], dtype=dtypes.float32))
    self.assertEqual(
        inferred,
        [feature_column.real_valued_column(
            "", dimension=4, dtype=dtypes.float32)])

  def testTensorFloat64(self):
    inferred = feature_column_ops.infer_real_valued_columns(
        array_ops.zeros(shape=[33, 4], dtype=dtypes.float64))
    self.assertEqual(
        inferred,
        [feature_column.real_valued_column(
            "", dimension=4, dtype=dtypes.float64)])

  def testDictionary(self):
    # A dict of tensors yields one column per key, named after the key.
    inferred = feature_column_ops.infer_real_valued_columns({
        "a": array_ops.zeros(shape=[33, 4], dtype=dtypes.int32),
        "b": array_ops.zeros(shape=[3, 2], dtype=dtypes.float32),
    })
    self.assertItemsEqual(
        inferred,
        [feature_column.real_valued_column(
            "a", dimension=4, dtype=dtypes.int32),
         feature_column.real_valued_column(
             "b", dimension=2, dtype=dtypes.float32)])

  def testNotGoodDtype(self):
    # String tensors cannot be interpreted as real-valued features.
    with self.assertRaises(ValueError):
      feature_column_ops.infer_real_valued_columns(
          constant_op.constant([["a"]], dtype=dtypes.string))

  def testSparseTensor(self):
    # Sparse inputs are rejected outright.
    with self.assertRaises(ValueError):
      feature_column_ops.infer_real_valued_columns(
          sparse_tensor.SparseTensor(
              indices=[[0, 0]], values=["a"], dense_shape=[1, 1]))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| {
"content_hash": "6228aaf9d3564d0f65cef06ffd1dad6c",
"timestamp": "",
"source": "github",
"line_count": 2653,
"max_line_length": 80,
"avg_line_length": 43.78137957029777,
"alnum_prop": 0.5924650458020525,
"repo_name": "thesuperzapper/tensorflow",
"id": "632836fee440b53e78b2728abce6e971a9273925",
"size": "116841",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/layers/python/layers/feature_column_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "182478"
},
{
"name": "C++",
"bytes": "23390838"
},
{
"name": "CMake",
"bytes": "158350"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "804926"
},
{
"name": "HTML",
"bytes": "818043"
},
{
"name": "Java",
"bytes": "286562"
},
{
"name": "JavaScript",
"bytes": "14005"
},
{
"name": "Jupyter Notebook",
"bytes": "1833654"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37344"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64166"
},
{
"name": "Protocol Buffer",
"bytes": "213841"
},
{
"name": "Python",
"bytes": "20454745"
},
{
"name": "Shell",
"bytes": "337255"
},
{
"name": "TypeScript",
"bytes": "1244173"
}
],
"symlink_target": ""
} |
def extractInacloudspacesBlogspotCom(item):
	'''
	Parser for 'inacloudspaces.blogspot.com'

	Extracts volume/chapter information from the item title, then maps the
	item's tags to a known series and translation type. Presumably `item` is
	a feed-entry dict with at least 'title' and 'tags' keys — confirm against
	the feed-dispatch caller.
	'''
	vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
	# Skip items with no chapter/volume info and preview posts.
	if not (chp or vol) or "preview" in item['title'].lower():
		return None
	# (tag to match, release name to emit, translation type) triples.
	tagmap = [
		('Drunken Exquisiteness', 'Drunken Exquisiteness', 'translated'),
		('PRC', 'PRC', 'translated'),
		('Loiterous', 'Loiterous', 'oel'),
	]
	# First matching tag wins.
	for tagname, name, tl_type in tagmap:
		if tagname in item['tags']:
			return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | {
"content_hash": "5cc354bcf909a5b38359534a302bb357",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 34.76190476190476,
"alnum_prop": 0.563013698630137,
"repo_name": "fake-name/ReadableWebProxy",
"id": "7e2781338237dc2f88e96ea7f509a277a65895f2",
"size": "730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractInacloudspacesBlogspotCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
"""Tests for lingvo-JAX checkpoint_managers."""
import datetime
import os
from unittest import mock
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from lingvo.jax import checkpoint_managers
from lingvo.jax import checkpoint_pb2
import tensorflow.compat.v2 as tf
# Alias for the checkpoint-type enum defined in the checkpoint proto.
CheckpointType = checkpoint_pb2.CheckpointType
FLAGS = flags.FLAGS
# Common filename prefix shared by all checkpoint files/directories.
CHECKPOINT_PREFIX = checkpoint_managers.CHECKPOINT_PREFIX
def _create_dummy_checkpoint(root_dir, step, checkpoint_type):
  """Creates dummy checkpoint files for a given global_step_id."""

  def _touch(path):
    # Write an empty placeholder file at `path`.
    with tf.io.gfile.GFile(path, 'wb') as writer:
      writer.write('')

  if checkpoint_type == CheckpointType.CHECKPOINT_FLAX:
    # Single-host Flax: one flat file per step.
    _touch(os.path.join(root_dir, f'{CHECKPOINT_PREFIX}{step}'))
  elif checkpoint_type == CheckpointType.CHECKPOINT_MULTI_HOST_FLAX:
    # Multi-host Flax: one file per step under each per-process directory.
    for i in range(2):
      process_dir = os.path.join(root_dir, f'{i:03d}')
      tf.io.gfile.makedirs(process_dir)
      _touch(os.path.join(process_dir, f'{CHECKPOINT_PREFIX}{step}'))
  elif checkpoint_type in {
      CheckpointType.CHECKPOINT_PERSISTENCE,
      CheckpointType.CHECKPOINT_GDA,
  }:
    # Persistence/GDA: a zero-padded per-step directory containing files.
    checkpoint_dir = os.path.join(root_dir, f'{CHECKPOINT_PREFIX}{step:08d}')
    tf.io.gfile.makedirs(checkpoint_dir)
    for f in {'a', 'b'}:
      _touch(os.path.join(checkpoint_dir, f))
  else:
    raise ValueError(f'Unsupported checkpoint_type `{checkpoint_type}`.')
def _base_checkpoint_filenames(steps, checkpoint_type):
  """Returns checkpoint basenames corresponding to all the `steps`.

  Args:
    steps: Iterable of integer global step ids.
    checkpoint_type: A `CheckpointType` enum value determining the layout.

  Returns:
    A list of relative checkpoint names. For multi-host Flax the names are
    prefixed by the two per-process directories ('000', '001'); for
    persistence/GDA the step is zero-padded to 8 digits.

  Raises:
    ValueError: If `checkpoint_type` is not one of the supported layouts.
  """
  # Comprehensions replace the original manual append loops (same output
  # order: per-process directory outer, step inner for the multi-host case).
  if checkpoint_type == CheckpointType.CHECKPOINT_FLAX:
    return [f'{CHECKPOINT_PREFIX}{step}' for step in steps]
  elif checkpoint_type == CheckpointType.CHECKPOINT_MULTI_HOST_FLAX:
    return [
        os.path.join(f'{i:03d}', f'{CHECKPOINT_PREFIX}{step}')
        for i in range(2)
        for step in steps
    ]
  elif checkpoint_type in {
      CheckpointType.CHECKPOINT_PERSISTENCE,
      CheckpointType.CHECKPOINT_GDA,
  }:
    return [f'{CHECKPOINT_PREFIX}{step:08d}' for step in steps]
  else:
    raise ValueError(f'Unsupported checkpoint_type `{checkpoint_type}`.')
def _create_reference_checkpoint_history(config_name, root_dir, checkpoint_type,
                                         steps, checkpoint_datetimes):
  """Builds the expected CheckpointHistory proto for steps/datetimes pairs."""
  history = checkpoint_pb2.CheckpointHistory(
      config_name=config_name,
      root_directory=root_dir,
      checkpoint_type=checkpoint_type)
  # One entry per (step, datetime), with the datetime encoded as a timestamp.
  for global_step_id, dt in zip(steps, checkpoint_datetimes):
    history.checkpoints.add(
        global_step_id=global_step_id,
        timestamp_sec=checkpoint_managers.to_timestamp(dt))
  return history
class CheckpointManagerTest(parameterized.TestCase):
def assertCheckpointsFileProto(self, checkpoints_filename, expected_proto):
self.assertTrue(tf.io.gfile.exists(checkpoints_filename))
checkpoint_history = checkpoint_managers.read_checkpoint_file(
checkpoints_filename)
self.assertEqual(checkpoint_history, expected_proto)
def test_extract_latest_checkpoint_id(self):
config_name = 'test.test_module.ConfigName'
root_dir = os.path.join(FLAGS.test_tmpdir, 'test1')
steps = [100, 300, 700]
cdt = datetime.datetime.now()
datetimes = [
cdt, cdt + datetime.timedelta(hours=1),
cdt + datetime.timedelta(hours=2)
]
checkpoint_history = _create_reference_checkpoint_history(
config_name, root_dir, CheckpointType.CHECKPOINT_FLAX, steps, datetimes)
latest_checkpoint_id = checkpoint_managers.extract_latest_checkpoint_id(
checkpoint_history)
self.assertEqual(latest_checkpoint_id, steps[-1])
  @parameterized.named_parameters(
      {
          'testcase_name': 'flax',
          'checkpoint_type': CheckpointType.CHECKPOINT_FLAX
      }, {
          'testcase_name': 'persistence',
          'checkpoint_type': CheckpointType.CHECKPOINT_PERSISTENCE
      }, {
          'testcase_name': 'gda',
          'checkpoint_type': CheckpointType.CHECKPOINT_GDA
      })
  def test_save_no_max_to_keep(self, checkpoint_type):
    """With max_to_keep=None, every saved checkpoint is retained."""
    config_name = 'test.test_module.ConfigName'
    root_dir = os.path.join(FLAGS.test_tmpdir, 'test2', str(checkpoint_type),
                            'checkpoints')
    tf.io.gfile.makedirs(root_dir)
    current_datetime = datetime.datetime.now()
    zero_datetime = datetime.datetime.fromtimestamp(0)
    # Patch datetime.datetime so the manager sees a controlled clock
    # (utcnow/fromtimestamp are the calls the manager makes).
    with mock.patch('datetime.datetime', autospec=True) as dt:
      dt.utcnow.return_value = current_datetime
      dt.fromtimestamp.return_value = zero_datetime
      checkpoint_manager = checkpoint_managers.CheckpointManager(
          config_name=config_name,
          root_dir=root_dir,
          checkpoint_type=checkpoint_type,
          save_interval_steps=1000,
          max_to_keep=None)
    steps = list(range(0, 10000, 1000))
    checkpoint_datetimes = []
    # Advance the mocked clock one hour per step; every step is on the
    # 1000-step save interval, so every step should be saved.
    for step in steps:
      with mock.patch('datetime.datetime', autospec=True) as dt:
        dt.utcnow.return_value = current_datetime
        dt.fromtimestamp.return_value = zero_datetime
        if checkpoint_manager.should_save(step):
          _create_dummy_checkpoint(root_dir, step, checkpoint_type)
          checkpoint_manager.save_metadata(step)
        checkpoint_datetimes.append(current_datetime)
        current_datetime += datetime.timedelta(hours=1)
    # No garbage collection: all steps survive.
    saved_steps = steps
    saved_checkpoint_datetimes = checkpoint_datetimes
    filenames = [
        os.path.basename(v) for v in tf.io.gfile.glob(
            os.path.join(root_dir, f'{CHECKPOINT_PREFIX}*'))
    ]
    self.assertSameElements(
        filenames, _base_checkpoint_filenames(saved_steps, checkpoint_type))
    checkpoints_filename = os.path.join(root_dir,
                                        checkpoint_managers.CHECKPOINT_BASENAME)
    expected_proto = _create_reference_checkpoint_history(
        config_name, root_dir, checkpoint_type, saved_steps,
        saved_checkpoint_datetimes)
    self.assertCheckpointsFileProto(checkpoints_filename, expected_proto)
  @parameterized.named_parameters(
      {
          'testcase_name': 'flax',
          'checkpoint_type': CheckpointType.CHECKPOINT_FLAX
      }, {
          'testcase_name': 'persistence',
          'checkpoint_type': CheckpointType.CHECKPOINT_PERSISTENCE
      }, {
          'testcase_name': 'gda',
          'checkpoint_type': CheckpointType.CHECKPOINT_GDA
      })
  def test_save_max_to_keep(self, checkpoint_type):
    """With max_to_keep=2, only the two most recent checkpoints survive."""
    config_name = 'test.test_module.ConfigName'
    root_dir = os.path.join(FLAGS.test_tmpdir, 'test3', str(checkpoint_type),
                            'checkpoints')
    tf.io.gfile.makedirs(root_dir)
    current_datetime = datetime.datetime.now()
    zero_datetime = datetime.datetime.fromtimestamp(0)
    # Controlled clock, as in test_save_no_max_to_keep.
    with mock.patch('datetime.datetime', autospec=True) as dt:
      dt.utcnow.return_value = current_datetime
      dt.fromtimestamp.return_value = zero_datetime
      checkpoint_manager = checkpoint_managers.CheckpointManager(
          config_name=config_name,
          root_dir=root_dir,
          checkpoint_type=checkpoint_type,
          save_interval_steps=1000,
          max_to_keep=2)
    steps = list(range(0, 10000, 1000))
    checkpoint_datetimes = []
    for step in steps:
      with mock.patch('datetime.datetime', autospec=True) as dt:
        dt.utcnow.return_value = current_datetime
        dt.fromtimestamp.return_value = zero_datetime
        if checkpoint_manager.should_save(step):
          _create_dummy_checkpoint(root_dir, step, checkpoint_type)
          checkpoint_manager.save_metadata(step)
        checkpoint_datetimes.append(current_datetime)
        current_datetime += datetime.timedelta(hours=1)
    # Only the last two of the ten saves are kept.
    saved_steps = [8000, 9000]
    saved_checkpoint_datetimes = checkpoint_datetimes[8:]
    filenames = [
        os.path.basename(v) for v in tf.io.gfile.glob(
            os.path.join(root_dir, f'{CHECKPOINT_PREFIX}*'))
    ]
    self.assertSameElements(
        filenames, _base_checkpoint_filenames(saved_steps, checkpoint_type))
    checkpoints_filename = os.path.join(root_dir,
                                        checkpoint_managers.CHECKPOINT_BASENAME)
    expected_proto = _create_reference_checkpoint_history(
        config_name, root_dir, checkpoint_type, saved_steps,
        saved_checkpoint_datetimes)
    self.assertCheckpointsFileProto(checkpoints_filename, expected_proto)
  @parameterized.named_parameters(
      {
          'testcase_name': 'flax',
          'checkpoint_type': CheckpointType.CHECKPOINT_FLAX
      }, {
          'testcase_name': 'persistence',
          'checkpoint_type': CheckpointType.CHECKPOINT_PERSISTENCE
      }, {
          'testcase_name': 'gda',
          'checkpoint_type': CheckpointType.CHECKPOINT_GDA
      })
  def test_save_checkpoint_keep_interval_timedelta(self, checkpoint_type):
    """keep_interval_timedelta pins checkpoints 2h apart despite max_to_keep."""
    config_name = 'test.test_module.ConfigName'
    root_dir = os.path.join(FLAGS.test_tmpdir, 'test4', str(checkpoint_type),
                            'checkpoints')
    tf.io.gfile.makedirs(root_dir)
    current_datetime = datetime.datetime.now()
    zero_datetime = datetime.datetime.fromtimestamp(0)
    # Controlled clock; the manager keeps a checkpoint every 2 mocked hours
    # in addition to the max_to_keep=2 most recent ones.
    with mock.patch('datetime.datetime', autospec=True) as dt:
      dt.utcnow.return_value = current_datetime
      dt.fromtimestamp.return_value = zero_datetime
      checkpoint_manager = checkpoint_managers.CheckpointManager(
          config_name=config_name,
          root_dir=root_dir,
          checkpoint_type=checkpoint_type,
          save_interval_steps=1000,
          max_to_keep=2,
          keep_interval_timedelta=datetime.timedelta(hours=2))
    steps = list(range(0, 10000, 1000))
    checkpoint_datetimes = []
    for step in steps:
      with mock.patch('datetime.datetime', autospec=True) as dt:
        dt.utcnow.return_value = current_datetime
        dt.fromtimestamp.return_value = zero_datetime
        if checkpoint_manager.should_save(step):
          _create_dummy_checkpoint(root_dir, step, checkpoint_type)
          checkpoint_manager.save_metadata(step)
        checkpoint_datetimes.append(current_datetime)
        current_datetime += datetime.timedelta(hours=1)
    # Every other step (2h apart) is pinned; 8000/9000 are the 2 most recent.
    saved_steps = [0, 2000, 4000, 6000, 8000, 9000]
    saved_checkpoint_datetimes = checkpoint_datetimes[::2] + [
        checkpoint_datetimes[-1]
    ]
    filenames = [
        os.path.basename(v) for v in tf.io.gfile.glob(
            os.path.join(root_dir, f'{CHECKPOINT_PREFIX}*'))
    ]
    self.assertSameElements(
        filenames, _base_checkpoint_filenames(saved_steps, checkpoint_type))
    checkpoints_filename = os.path.join(root_dir,
                                        checkpoint_managers.CHECKPOINT_BASENAME)
    expected_proto = _create_reference_checkpoint_history(
        config_name, root_dir, checkpoint_type, saved_steps,
        saved_checkpoint_datetimes)
    self.assertCheckpointsFileProto(checkpoints_filename, expected_proto)
  @parameterized.named_parameters(
      {
          'testcase_name': 'flax',
          'checkpoint_type': CheckpointType.CHECKPOINT_FLAX
      }, {
          'testcase_name': 'persistence',
          'checkpoint_type': CheckpointType.CHECKPOINT_PERSISTENCE
      }, {
          'testcase_name': 'gda',
          'checkpoint_type': CheckpointType.CHECKPOINT_GDA
      })
  def test_save_restore_manager_case_1_default(self, checkpoint_type):
    """A new manager over an existing root_dir keeps prior checkpoints.

    Phase 1 saves with save_interval_steps=2000 / max_to_keep=4; a second
    manager is then created over the same directory with different settings
    and must preserve the existing history while applying its own policy to
    new saves.
    """
    config_name = 'test.test_module.ConfigName'
    root_dir = os.path.join(FLAGS.test_tmpdir, 'test5', str(checkpoint_type),
                            'checkpoints')
    tf.io.gfile.makedirs(root_dir)
    current_datetime = datetime.datetime.now()
    zero_datetime = datetime.datetime.fromtimestamp(0)
    # Controlled clock for the first manager.
    with mock.patch('datetime.datetime', autospec=True) as dt:
      dt.utcnow.return_value = current_datetime
      dt.fromtimestamp.return_value = zero_datetime
      checkpoint_manager = checkpoint_managers.CheckpointManager(
          config_name=config_name,
          root_dir=root_dir,
          checkpoint_type=checkpoint_type,
          save_interval_steps=2000,
          max_to_keep=4)
    steps = list(range(0, 10000, 1000))
    checkpoint_datetimes = []
    for step in steps:
      with mock.patch('datetime.datetime', autospec=True) as dt:
        dt.utcnow.return_value = current_datetime
        dt.fromtimestamp.return_value = zero_datetime
        if checkpoint_manager.should_save(step):
          _create_dummy_checkpoint(root_dir, step, checkpoint_type)
          checkpoint_manager.save_metadata(step)
        checkpoint_datetimes.append(current_datetime)
        current_datetime += datetime.timedelta(hours=1)
    # Saves happen every 2000 steps; max_to_keep=4 drops step 0.
    saved_steps = [2000, 4000, 6000, 8000]
    saved_checkpoint_datetimes = checkpoint_datetimes[2:10:2]
    filenames = [
        os.path.basename(v) for v in tf.io.gfile.glob(
            os.path.join(root_dir, f'{CHECKPOINT_PREFIX}*'))
    ]
    self.assertSameElements(
        filenames, _base_checkpoint_filenames(saved_steps, checkpoint_type))
    checkpoints_filename = os.path.join(root_dir,
                                        checkpoint_managers.CHECKPOINT_BASENAME)
    expected_proto = _create_reference_checkpoint_history(
        config_name, root_dir, checkpoint_type, saved_steps,
        saved_checkpoint_datetimes)
    self.assertCheckpointsFileProto(checkpoints_filename, expected_proto)
    del checkpoint_manager
    # Phase 2: a fresh manager over the same directory with a different
    # policy (save every 3000 steps, keep 6, pin every 3 mocked hours).
    with mock.patch('datetime.datetime', autospec=True) as dt:
      dt.utcnow.return_value = current_datetime
      dt.fromtimestamp.return_value = zero_datetime
      checkpoint_manager = checkpoint_managers.CheckpointManager(
          config_name=config_name,
          root_dir=root_dir,
          checkpoint_type=checkpoint_type,
          save_interval_steps=3000,
          max_to_keep=6,
          keep_interval_timedelta=datetime.timedelta(hours=3))
    # Creating the new manager must not delete the existing checkpoints.
    saved_steps_2_init = [2000, 4000, 6000, 8000]
    saved_checkpoint_datetimes_2_init = saved_checkpoint_datetimes
    filenames = [
        os.path.basename(v) for v in tf.io.gfile.glob(
            os.path.join(root_dir, f'{CHECKPOINT_PREFIX}*'))
    ]
    self.assertSameElements(
        filenames,
        _base_checkpoint_filenames(saved_steps_2_init, checkpoint_type))
    checkpoints_filename = os.path.join(root_dir,
                                        checkpoint_managers.CHECKPOINT_BASENAME)
    expected_proto = _create_reference_checkpoint_history(
        config_name, root_dir, checkpoint_type, saved_steps_2_init,
        saved_checkpoint_datetimes_2_init)
    self.assertCheckpointsFileProto(checkpoints_filename, expected_proto)
    steps_2 = list(range(10000, 20000, 1000))
    checkpoint_datetimes_2 = []
    for step in steps_2:
      with mock.patch('datetime.datetime', autospec=True) as dt:
        dt.utcnow.return_value = current_datetime
        dt.fromtimestamp.return_value = zero_datetime
        if checkpoint_manager.should_save(step):
          _create_dummy_checkpoint(root_dir, step, checkpoint_type)
          checkpoint_manager.save_metadata(step)
        checkpoint_datetimes_2.append(current_datetime)
        current_datetime += datetime.timedelta(hours=1)
    # Old checkpoints persist; new saves land on the 3000-step interval.
    saved_steps_2 = saved_steps_2_init + [11000, 14000, 17000]
    saved_checkpoint_datetimes_2 = (
        saved_checkpoint_datetimes_2_init + checkpoint_datetimes_2[1:10:3])
    filenames = [
        os.path.basename(v) for v in tf.io.gfile.glob(
            os.path.join(root_dir, f'{CHECKPOINT_PREFIX}*'))
    ]
    self.assertSameElements(
        filenames, _base_checkpoint_filenames(saved_steps_2, checkpoint_type))
    checkpoints_filename = os.path.join(root_dir,
                                        checkpoint_managers.CHECKPOINT_BASENAME)
    expected_proto = _create_reference_checkpoint_history(
        config_name, root_dir, checkpoint_type, saved_steps_2,
        saved_checkpoint_datetimes_2)
    self.assertCheckpointsFileProto(checkpoints_filename, expected_proto)
@parameterized.named_parameters(
    {
        'testcase_name': 'flax',
        'checkpoint_type': CheckpointType.CHECKPOINT_FLAX
    }, {
        'testcase_name': 'persistence',
        'checkpoint_type': CheckpointType.CHECKPOINT_PERSISTENCE
    }, {
        'testcase_name': 'gda',
        'checkpoint_type': CheckpointType.CHECKPOINT_GDA
    })
def test_save_restore_manager_case_2_mutant(self, checkpoint_type):
  """Saves with max_to_keep=None, then resumes with max_to_keep=5.

  Phase 1 creates a CheckpointManager that keeps every checkpoint
  (max_to_keep=None) and saves steps 0..9000.  Phase 2 re-opens the same
  root_dir with max_to_keep=5 and saves one more step (10000); only the
  last 5 checkpoints are expected to survive, both on disk and in the
  checkpoint-history proto.
  """
  config_name = 'test.test_module.ConfigName'
  root_dir = os.path.join(FLAGS.test_tmpdir, 'test6', str(checkpoint_type),
                          'checkpoints')
  tf.io.gfile.makedirs(root_dir)
  current_datetime = datetime.datetime.now()
  zero_datetime = datetime.datetime.fromtimestamp(0)
  # Patch datetime.datetime so the manager records deterministic timestamps.
  with mock.patch('datetime.datetime', autospec=True) as dt:
    dt.utcnow.return_value = current_datetime
    dt.fromtimestamp.return_value = zero_datetime
    checkpoint_manager = checkpoint_managers.CheckpointManager(
        config_name=config_name,
        root_dir=root_dir,
        checkpoint_type=checkpoint_type,
        save_interval_steps=100,
        max_to_keep=None)
  # Phase 1: every step is saved (interval 100 divides 1000) and none is
  # garbage-collected because max_to_keep is None.
  steps = list(range(0, 10000, 1000))
  checkpoint_datetimes = []
  for step in steps:
    with mock.patch('datetime.datetime', autospec=True) as dt:
      dt.utcnow.return_value = current_datetime
      dt.fromtimestamp.return_value = zero_datetime
      if checkpoint_manager.should_save(step):
        _create_dummy_checkpoint(root_dir, step, checkpoint_type)
        checkpoint_manager.save_metadata(step)
      # Advance the mocked clock by one hour per step.
      checkpoint_datetimes.append(current_datetime)
      current_datetime += datetime.timedelta(hours=1)
  saved_steps = steps
  saved_checkpoint_datetimes = checkpoint_datetimes
  filenames = [
      os.path.basename(v) for v in tf.io.gfile.glob(
          os.path.join(root_dir, f'{CHECKPOINT_PREFIX}*'))
  ]
  self.assertSameElements(
      filenames, _base_checkpoint_filenames(saved_steps, checkpoint_type))
  checkpoints_filename = os.path.join(root_dir,
                                      checkpoint_managers.CHECKPOINT_BASENAME)
  expected_proto = _create_reference_checkpoint_history(
      config_name, root_dir, checkpoint_type, saved_steps,
      saved_checkpoint_datetimes)
  self.assertCheckpointsFileProto(checkpoints_filename, expected_proto)
  # Phase 2: drop the old manager and re-open the same directory with a
  # retention limit; the new manager must prune down to max_to_keep.
  del checkpoint_manager
  max_to_keep = 5
  with mock.patch('datetime.datetime', autospec=True) as dt:
    dt.utcnow.return_value = current_datetime
    dt.fromtimestamp.return_value = zero_datetime
    checkpoint_manager = checkpoint_managers.CheckpointManager(
        config_name=config_name,
        root_dir=root_dir,
        checkpoint_type=checkpoint_type,
        save_interval_steps=1000,
        max_to_keep=max_to_keep)
  step = 10000
  steps.append(step)
  with mock.patch('datetime.datetime', autospec=True) as dt:
    dt.utcnow.return_value = current_datetime
    dt.fromtimestamp.return_value = zero_datetime
    if checkpoint_manager.should_save(step):
      _create_dummy_checkpoint(root_dir, step, checkpoint_type)
      checkpoint_manager.save_metadata(step)
    saved_checkpoint_datetimes.append(current_datetime)
    current_datetime += datetime.timedelta(hours=1)
  # Only the newest max_to_keep checkpoints should remain.
  saved_steps_2 = steps[-max_to_keep:]
  saved_checkpoint_datetimes_2 = saved_checkpoint_datetimes[-max_to_keep:]
  filenames = [
      os.path.basename(v) for v in tf.io.gfile.glob(
          os.path.join(root_dir, f'{CHECKPOINT_PREFIX}*'))
  ]
  self.assertSameElements(
      filenames, _base_checkpoint_filenames(saved_steps_2, checkpoint_type))
  checkpoints_filename = os.path.join(root_dir,
                                      checkpoint_managers.CHECKPOINT_BASENAME)
  expected_proto = _create_reference_checkpoint_history(
      config_name, root_dir, checkpoint_type, saved_steps_2,
      saved_checkpoint_datetimes_2)
  self.assertCheckpointsFileProto(checkpoints_filename, expected_proto)
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "9f4507d42a9b2c9cd04a87918f39cae1",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 80,
"avg_line_length": 42.21413721413722,
"alnum_prop": 0.6663383403102684,
"repo_name": "tensorflow/lingvo",
"id": "663e786826ef4317331473d699e25531c394f098",
"size": "20913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lingvo/jax/checkpoint_managers_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5163"
},
{
"name": "C++",
"bytes": "556344"
},
{
"name": "Dockerfile",
"bytes": "8484"
},
{
"name": "Jupyter Notebook",
"bytes": "36721"
},
{
"name": "Python",
"bytes": "9574124"
},
{
"name": "Shell",
"bytes": "50408"
},
{
"name": "Starlark",
"bytes": "182688"
},
{
"name": "TeX",
"bytes": "37275"
}
],
"symlink_target": ""
} |
"""
Classes for tagging sentences sequentially, left to right. The
abstract base class SequentialBackoffTagger serves as the base
class for all the taggers in this module. Tagging of individual words
is performed by the method ``choose_tag()``, which is defined by
subclasses of SequentialBackoffTagger. If a tagger is unable to
determine a tag for the specified token, then its backoff tagger is
consulted instead. Any SequentialBackoffTagger may serve as a
backoff tagger for any other SequentialBackoffTagger.
"""
from __future__ import print_function, unicode_literals
import re
from nltk.probability import ConditionalFreqDist
from nltk.classify import NaiveBayesClassifier
from nltk.compat import python_2_unicode_compatible
from nltk.tag.api import TaggerI, FeaturesetTaggerI
from nltk import jsontags
######################################################################
#{ Abstract Base Classes
######################################################################
class SequentialBackoffTagger(TaggerI):
    """
    An abstract base class for taggers that tag words sequentially,
    left to right.  Each word's tag is chosen by ``choose_tag()``,
    which subclasses must override.  Whenever this tagger cannot
    decide on a tag, the request is delegated to its backoff tagger
    (if one was supplied).

    :ivar _taggers: This tagger followed by its chain of backoff
        taggers, in the order in which they are consulted.
    """

    def __init__(self, backoff=None):
        # The consultation chain starts with this tagger itself; a backoff
        # tagger contributes its own already-linearized chain.
        self._taggers = [self] + (backoff._taggers if backoff is not None else [])

    @property
    def backoff(self):
        """The backoff tagger for this tagger."""
        if len(self._taggers) > 1:
            return self._taggers[1]
        return None

    def tag(self, tokens):
        # docs inherited from TaggerI
        tagged = []
        for position in range(len(tokens)):
            # The tags assigned so far double as the history for the next word.
            tagged.append(self.tag_one(tokens, position, tagged))
        return list(zip(tokens, tagged))

    def tag_one(self, tokens, index, history):
        """
        Determine an appropriate tag for the token at *index* by asking
        each tagger in the backoff chain in turn; the first non-None
        answer wins.

        :rtype: str
        :type tokens: list
        :param tokens: The list of words that are being tagged.
        :type index: int
        :param index: The index of the word whose tag should be
            returned.
        :type history: list(str)
        :param history: A list of the tags for all words before *index*.
        """
        for tagger in self._taggers:
            tag = tagger.choose_tag(tokens, index, history)
            if tag is not None:
                return tag
        return None

    def choose_tag(self, tokens, index, history):
        """
        Decide which tag should be used for the specified token and
        return it; return None if this tagger cannot decide (the
        backoff tagger is *not* consulted by this method).  Subclasses
        of SequentialBackoffTagger must override this method.

        :rtype: str
        :type tokens: list
        :param tokens: The list of words that are being tagged.
        :type index: int
        :param index: The index of the word whose tag should be
            returned.
        :type history: list(str)
        :param history: A list of the tags for all words before *index*.
        """
        raise NotImplementedError()
@python_2_unicode_compatible
class ContextTagger(SequentialBackoffTagger):
    """
    An abstract base class for sequential backoff taggers that choose
    a tag for a token based on the value of its "context".  Different
    subclasses are used to define different contexts.

    A ContextTagger chooses the tag for a token by calculating the
    token's context, and looking up the corresponding tag in a table.
    This table can be constructed manually; or it can be automatically
    constructed based on a training corpus, using the ``_train()``
    factory method.

    :ivar _context_to_tag: Dictionary mapping contexts to tags.
    """

    def __init__(self, context_to_tag, backoff=None):
        """
        :param context_to_tag: A dictionary mapping contexts to tags.
        :param backoff: The backoff tagger that should be used for this tagger.
        """
        SequentialBackoffTagger.__init__(self, backoff)
        # Fall back to an empty table when None (or any falsy value) is given.
        self._context_to_tag = (context_to_tag if context_to_tag else {})

    def context(self, tokens, index, history):
        """
        :return: the context that should be used to look up the tag
            for the specified token; or None if the specified token
            should not be handled by this tagger.
        :rtype: (hashable)
        """
        raise NotImplementedError()

    def choose_tag(self, tokens, index, history):
        # A context of None, or one missing from the table, yields None --
        # which makes SequentialBackoffTagger consult the backoff tagger.
        context = self.context(tokens, index, history)
        return self._context_to_tag.get(context)

    def size(self):
        """
        :return: The number of entries in the table used by this
            tagger to map from contexts to tags.
        """
        return len(self._context_to_tag)

    def __repr__(self):
        return '<%s: size=%d>' % (self.__class__.__name__, self.size())

    def _train(self, tagged_corpus, cutoff=0, verbose=False):
        """
        Initialize this ContextTagger's ``_context_to_tag`` table
        based on the given training data.  In particular, for each
        context ``c`` in the training data, set
        ``_context_to_tag[c]`` to the most frequent tag for that
        context.  However, exclude any contexts that are already
        tagged perfectly by the backoff tagger(s).

        The old value of ``self._context_to_tag`` (if any) is discarded.

        :param tagged_corpus: A tagged corpus.  Each item should be
            a list of (word, tag) tuples.
        :param cutoff: If the most likely tag for a context occurs
            fewer than cutoff times, then exclude it from the
            context-to-tag table for the new tagger.
        """
        token_count = hit_count = 0

        # A context is considered 'useful' if it's not already tagged
        # perfectly by the backoff tagger.
        useful_contexts = set()

        # Count how many times each tag occurs in each context.
        fd = ConditionalFreqDist()
        for sentence in tagged_corpus:
            tokens, tags = zip(*sentence)
            for index, (token, tag) in enumerate(sentence):
                # Record the event.
                token_count += 1
                # The history passed to context() is the gold tags so far,
                # not this tagger's own predictions.
                context = self.context(tokens, index, tags[:index])
                if context is None: continue
                fd[context][tag] += 1
                # If the backoff got it wrong, this context is useful:
                if (self.backoff is None or
                    tag != self.backoff.tag_one(tokens, index, tags[:index])):
                    useful_contexts.add(context)

        # Build the context_to_tag table -- for each context, figure
        # out what the most likely tag is.  Only include contexts that
        # we've seen at least `cutoff` times.
        for context in useful_contexts:
            best_tag = fd[context].max()
            hits = fd[context][best_tag]
            if hits > cutoff:
                self._context_to_tag[context] = best_tag
                hit_count += hits

        # Display some stats, if requested.
        if verbose:
            size = len(self._context_to_tag)
            backoff = 100 - (hit_count * 100.0)/ token_count
            pruning = 100 - (size * 100.0) / len(fd.conditions())
            print("[Trained Unigram tagger:", end=' ')
            print("size=%d, backoff=%.2f%%, pruning=%.2f%%]" % (
                size, backoff, pruning))
######################################################################
#{ Tagger Classes
######################################################################
@python_2_unicode_compatible
@jsontags.register_tag
class DefaultTagger(SequentialBackoffTagger):
    """
    A tagger that assigns the same tag to every token.

        >>> from nltk.tag import DefaultTagger
        >>> default_tagger = DefaultTagger('NN')
        >>> list(default_tagger.tag('This is a test'.split()))
        [('This', 'NN'), ('is', 'NN'), ('a', 'NN'), ('test', 'NN')]

    This tagger is recommended as a backoff tagger, in cases where
    a more powerful tagger is unable to assign a tag to the word
    (e.g. because the word was not seen during training).

    :param tag: The tag to assign to each token
    :type tag: str
    """

    json_tag = 'nltk.tag.sequential.DefaultTagger'

    def __init__(self, tag):
        self._tag = tag
        # No backoff: this tagger always produces an answer.
        SequentialBackoffTagger.__init__(self, None)

    def encode_json_obj(self):
        # The tag string is the tagger's entire state.
        return self._tag

    @classmethod
    def decode_json_obj(cls, obj):
        return cls(obj)

    def choose_tag(self, tokens, index, history):
        # The token and history are deliberately ignored.
        return self._tag

    def __repr__(self):
        return '<DefaultTagger: tag=%s>' % self._tag
@jsontags.register_tag
class NgramTagger(ContextTagger):
    """
    A tagger that chooses a token's tag based on its word string and
    on the tags of the n-1 preceding words.  The tuple
    (tags[i-n+1:i], words[i]) is looked up in a table, and the
    corresponding tag is returned.  N-gram taggers are typically
    trained on a tagged corpus.

    Train a new NgramTagger using the given training data or the
    supplied model.  The resulting table maps each context
    (preceding tags, word) to the most frequent tag for that context,
    excluding contexts that are already tagged perfectly by the
    backoff tagger.

    :param train: A tagged corpus consisting of a list of tagged
        sentences, where each sentence is a list of (word, tag) tuples.
    :param backoff: A backoff tagger, to be used by the new
        tagger if it encounters an unknown context.
    :param cutoff: If the most likely tag for a context occurs
        fewer than *cutoff* times, then exclude it from the
        context-to-tag table for the new tagger.
    """

    json_tag = 'nltk.tag.sequential.NgramTagger'

    def __init__(self, n, train=None, model=None,
                 backoff=None, cutoff=0, verbose=False):
        self._n = n
        self._check_params(train, model)
        ContextTagger.__init__(self, model, backoff)
        if train:
            self._train(train, cutoff, verbose)

    def encode_json_obj(self):
        return self._n, self._context_to_tag, self.backoff

    @classmethod
    def decode_json_obj(cls, obj):
        n, context_to_tag, backoff = obj
        return cls(n, model=context_to_tag, backoff=backoff)

    def context(self, tokens, index, history):
        # Up to n-1 preceding tags (fewer near the sentence start),
        # paired with the current word.
        start = max(0, index - self._n + 1)
        return tuple(history[start:index]), tokens[index]
@jsontags.register_tag
class UnigramTagger(NgramTagger):
    """
    Unigram Tagger

    The UnigramTagger finds the most likely tag for each word in a training
    corpus, and then uses that information to assign tags to new tokens.

        >>> from nltk.corpus import brown
        >>> from nltk.tag import UnigramTagger
        >>> test_sent = brown.sents(categories='news')[0]
        >>> unigram_tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500])
        >>> for tok, tag in unigram_tagger.tag(test_sent):
        ...     print("(%s, %s), " % (tok, tag))
        (The, AT), (Fulton, NP-TL), (County, NN-TL), (Grand, JJ-TL),
        (Jury, NN-TL), (said, VBD), (Friday, NR), (an, AT),
        (investigation, NN), (of, IN), (Atlanta's, NP$), (recent, JJ),
        (primary, NN), (election, NN), (produced, VBD), (``, ``),
        (no, AT), (evidence, NN), ('', ''), (that, CS), (any, DTI),
        (irregularities, NNS), (took, VBD), (place, NN), (., .),

    :param train: The corpus of training data, a list of tagged sentences
    :type train: list(list(tuple(str, str)))
    :param model: The tagger model
    :type model: dict
    :param backoff: Another tagger which this tagger will consult when it is
        unable to tag a word
    :type backoff: TaggerI
    :param cutoff: The number of instances of training data the tagger must see
        in order not to use the backoff tagger
    :type cutoff: int
    """

    json_tag = 'nltk.tag.sequential.UnigramTagger'

    def __init__(self, train=None, model=None,
                 backoff=None, cutoff=0, verbose=False):
        # A unigram tagger is simply an NgramTagger with n=1.
        NgramTagger.__init__(self, 1, train, model, backoff, cutoff, verbose)

    def encode_json_obj(self):
        return self._context_to_tag, self.backoff

    @classmethod
    def decode_json_obj(cls, obj):
        context_to_tag, backoff = obj
        return cls(model=context_to_tag, backoff=backoff)

    def context(self, tokens, index, history):
        # The unigram context is just the word itself; preceding tags
        # are irrelevant.
        return tokens[index]
@jsontags.register_tag
class BigramTagger(NgramTagger):
    """
    A tagger that chooses a token's tag based on its word string and
    on the tag of the single preceding word.  The (previous tag, word)
    pair is looked up in a table, and the corresponding tag is
    returned.

    :param train: The corpus of training data, a list of tagged sentences
    :type train: list(list(tuple(str, str)))
    :param model: The tagger model
    :type model: dict
    :param backoff: Another tagger which this tagger will consult when it is
        unable to tag a word
    :type backoff: TaggerI
    :param cutoff: The number of instances of training data the tagger must see
        in order not to use the backoff tagger
    :type cutoff: int
    """

    json_tag = 'nltk.tag.sequential.BigramTagger'

    def __init__(self, train=None, model=None,
                 backoff=None, cutoff=0, verbose=False):
        # A bigram tagger is simply an NgramTagger with n=2.
        NgramTagger.__init__(self, 2, train, model, backoff, cutoff, verbose)

    def encode_json_obj(self):
        return self._context_to_tag, self.backoff

    @classmethod
    def decode_json_obj(cls, obj):
        context_to_tag, backoff = obj
        return cls(model=context_to_tag, backoff=backoff)
@jsontags.register_tag
class TrigramTagger(NgramTagger):
    """
    A tagger that chooses a token's tag based on its word string and
    on the tags of the two preceding words.  The (two previous tags,
    word) triple is looked up in a table, and the corresponding tag is
    returned.

    :param train: The corpus of training data, a list of tagged sentences
    :type train: list(list(tuple(str, str)))
    :param model: The tagger model
    :type model: dict
    :param backoff: Another tagger which this tagger will consult when it is
        unable to tag a word
    :type backoff: TaggerI
    :param cutoff: The number of instances of training data the tagger must see
        in order not to use the backoff tagger
    :type cutoff: int
    """

    json_tag = 'nltk.tag.sequential.TrigramTagger'

    def __init__(self, train=None, model=None,
                 backoff=None, cutoff=0, verbose=False):
        # A trigram tagger is simply an NgramTagger with n=3.
        NgramTagger.__init__(self, 3, train, model, backoff, cutoff, verbose)

    def encode_json_obj(self):
        return self._context_to_tag, self.backoff

    @classmethod
    def decode_json_obj(cls, obj):
        context_to_tag, backoff = obj
        return cls(model=context_to_tag, backoff=backoff)
@jsontags.register_tag
class AffixTagger(ContextTagger):
    """
    A tagger that chooses a token's tag based on a leading or trailing
    substring of its word string.  (These substrings are not
    necessarily "true" morphological affixes.)  A fixed-length
    substring of the word is looked up in a table, and the
    corresponding tag is returned.  Affix taggers are typically
    constructed by training them on a tagged corpus.

    Construct a new affix tagger.

    :param affix_length: The length of the affixes that should be
        considered during training and tagging.  Use negative
        numbers for suffixes.
    :param min_stem_length: Any words whose length is less than
        min_stem_length+abs(affix_length) will be assigned a
        tag of None by this tagger.
    """

    json_tag = 'nltk.tag.sequential.AffixTagger'

    def __init__(self, train=None, model=None, affix_length=-3,
                 min_stem_length=2, backoff=None, cutoff=0, verbose=False):
        self._check_params(train, model)
        ContextTagger.__init__(self, model, backoff)
        self._affix_length = affix_length
        # Words shorter than this produce no context at all.
        self._min_word_length = min_stem_length + abs(affix_length)
        if train:
            self._train(train, cutoff, verbose)

    def encode_json_obj(self):
        return (self._affix_length, self._min_word_length,
                self._context_to_tag, self.backoff)

    @classmethod
    def decode_json_obj(cls, obj):
        affix_length, min_word_length, context_to_tag, backoff = obj
        # min_stem_length is recovered from the stored minimum word length.
        return cls(
            affix_length=affix_length,
            min_stem_length=min_word_length - abs(affix_length),
            model=context_to_tag,
            backoff=backoff
        )

    def context(self, tokens, index, history):
        word = tokens[index]
        if len(word) < self._min_word_length:
            # Too short for the configured affix plus minimum stem.
            return None
        if self._affix_length > 0:
            # Positive length: a prefix of the word.
            return word[:self._affix_length]
        # Negative length: a suffix of the word.
        return word[self._affix_length:]
@python_2_unicode_compatible
@jsontags.register_tag
class RegexpTagger(SequentialBackoffTagger):
    """
    Regular Expression Tagger

    The RegexpTagger assigns tags to tokens by comparing their
    word strings to a series of regular expressions.  The following tagger
    uses word suffixes to make guesses about the correct Brown Corpus part
    of speech tag:

        >>> from nltk.corpus import brown
        >>> from nltk.tag import RegexpTagger
        >>> test_sent = brown.sents(categories='news')[0]
        >>> regexp_tagger = RegexpTagger(
        ...     [(r'^-?[0-9]+(.[0-9]+)?$', 'CD'),   # cardinal numbers
        ...      (r'(The|the|A|a|An|an)$', 'AT'),   # articles
        ...      (r'.*able$', 'JJ'),                # adjectives
        ...      (r'.*ness$', 'NN'),                # nouns formed from adjectives
        ...      (r'.*ly$', 'RB'),                  # adverbs
        ...      (r'.*s$', 'NNS'),                  # plural nouns
        ...      (r'.*ing$', 'VBG'),                # gerunds
        ...      (r'.*ed$', 'VBD'),                 # past tense verbs
        ...      (r'.*', 'NN')                      # nouns (default)
        ... ])
        >>> regexp_tagger
        <Regexp Tagger: size=9>
        >>> regexp_tagger.tag(test_sent)
        [('The', 'AT'), ('Fulton', 'NN'), ('County', 'NN'), ('Grand', 'NN'), ('Jury', 'NN'),
        ('said', 'NN'), ('Friday', 'NN'), ('an', 'AT'), ('investigation', 'NN'), ('of', 'NN'),
        ("Atlanta's", 'NNS'), ('recent', 'NN'), ('primary', 'NN'), ('election', 'NN'),
        ('produced', 'VBD'), ('``', 'NN'), ('no', 'NN'), ('evidence', 'NN'), ("''", 'NN'),
        ('that', 'NN'), ('any', 'NN'), ('irregularities', 'NNS'), ('took', 'NN'),
        ('place', 'NN'), ('.', 'NN')]

    :type regexps: list(tuple(str, str))
    :param regexps: A list of ``(regexp, tag)`` pairs, each of
        which indicates that a word matching ``regexp`` should
        be tagged with ``tag``.  The pairs will be evaluated in
        order.  If none of the regexps match a word, then the
        optional backoff tagger is invoked, else it is
        assigned the tag None.
    """

    json_tag = 'nltk.tag.sequential.RegexpTagger'

    def __init__(self, regexps, backoff=None):
        """
        :param regexps: A list of ``(regexp, tag)`` pairs; see the class docs.
        :param backoff: An optional backoff tagger, consulted when no
            regexp matches.
        """
        SequentialBackoffTagger.__init__(self, backoff)
        # Pre-compile each pattern once so tagging avoids recompilation.
        self._regexs = [(re.compile(regexp), tag,) for regexp, tag in regexps]

    def encode_json_obj(self):
        # BUG FIX: a compiled pattern exposes its source string as
        # ``.pattern``; the original read the misspelled ``.patten``,
        # which raised AttributeError on every JSON encode.
        return [(regexp.pattern, tag,) for regexp, tag in self._regexs], self.backoff

    @classmethod
    def decode_json_obj(cls, obj):
        regexps, backoff = obj
        # Build an empty instance, then install the decoded rules and
        # re-link the backoff chain.
        self = cls(())
        self._regexs = [(re.compile(regexp), tag,) for regexp, tag in regexps]
        SequentialBackoffTagger.__init__(self, backoff)
        return self

    def choose_tag(self, tokens, index, history):
        # Return the tag of the first rule whose regexp matches a prefix
        # of the word; None defers to the backoff tagger.
        for regexp, tag in self._regexs:
            if regexp.match(tokens[index]):
                return tag
        return None

    def __repr__(self):
        return '<Regexp Tagger: size=%d>' % len(self._regexs)
@python_2_unicode_compatible
class ClassifierBasedTagger(SequentialBackoffTagger, FeaturesetTaggerI):
    """
    A sequential tagger that uses a classifier to choose the tag for
    each token in a sentence.  The featureset input for the classifier
    is generated by a feature detector function::

        feature_detector(tokens, index, history) -> featureset

    Where tokens is the list of unlabeled tokens in the sentence;
    index is the index of the token for which feature detection
    should be performed; and history is list of the tags for all
    tokens before index.

    Construct a new classifier-based sequential tagger.

    :param feature_detector: A function used to generate the
        featureset input for the classifier::

            feature_detector(tokens, index, history) -> featureset

    :param train: A tagged corpus consisting of a list of tagged
        sentences, where each sentence is a list of (word, tag) tuples.
    :param backoff: A backoff tagger, to be used by the new tagger
        if it encounters an unknown context.
    :param classifier_builder: A function used to train a new
        classifier based on the data in *train*.  It should take
        one argument, a list of labeled featuresets (i.e.,
        (featureset, label) tuples).
    :param classifier: The classifier that should be used by the
        tagger.  This is only useful if you want to manually
        construct the classifier; normally, you would use *train*
        instead.
    :param backoff: A backoff tagger, used if this tagger is
        unable to determine a tag for a given token.
    :param cutoff_prob: If specified, then this tagger will fall
        back on its backoff tagger if the probability of the most
        likely tag is less than *cutoff_prob*.
    """

    def __init__(self, feature_detector=None, train=None,
                 classifier_builder=NaiveBayesClassifier.train,
                 classifier=None, backoff=None,
                 cutoff_prob=None, verbose=False):
        self._check_params(train, classifier)

        SequentialBackoffTagger.__init__(self, backoff)

        # Exactly one of *train* and *classifier* must be given.
        if (train and classifier) or (not train and not classifier):
            raise ValueError('Must specify either training data or '
                             'trained classifier.')

        if feature_detector is not None:
            self._feature_detector = feature_detector
            # The feature detector function, used to generate a featureset
            # for each token: feature_detector(tokens, index, history) -> featureset

        self._cutoff_prob = cutoff_prob
        """Cutoff probability for tagging -- if the probability of the
           most likely tag is less than this, then use backoff."""

        self._classifier = classifier
        """The classifier used to choose a tag for each token."""

        if train:
            self._train(train, classifier_builder, verbose)

    def choose_tag(self, tokens, index, history):
        # Use our feature detector to get the featureset.
        featureset = self.feature_detector(tokens, index, history)

        # Use the classifier to pick a tag.  If a cutoff probability
        # was specified, then check that the tag's probability is
        # higher than that cutoff first; otherwise, return None.
        if self._cutoff_prob is None:
            return self._classifier.classify(featureset)

        pdist = self._classifier.prob_classify(featureset)
        tag = pdist.max()
        return tag if pdist.prob(tag) >= self._cutoff_prob else None

    def _train(self, tagged_corpus, classifier_builder, verbose):
        """
        Build a new classifier, based on the given training data
        *tagged_corpus*.
        """
        classifier_corpus = []
        if verbose:
            print('Constructing training corpus for classifier.')
        # For each token, extract features against the gold-tag history
        # and pair them with the gold tag as a training instance.
        for sentence in tagged_corpus:
            history = []
            untagged_sentence, tags = zip(*sentence)
            for index in range(len(sentence)):
                featureset = self.feature_detector(untagged_sentence,
                                                   index, history)
                classifier_corpus.append( (featureset, tags[index]) )
                history.append(tags[index])

        if verbose:
            print('Training classifier (%d instances)' % len(classifier_corpus))
        self._classifier = classifier_builder(classifier_corpus)

    def __repr__(self):
        return '<ClassifierBasedTagger: %r>' % self._classifier

    def feature_detector(self, tokens, index, history):
        """
        Return the feature detector that this tagger uses to generate
        featuresets for its classifier.  The feature detector is a
        function with the signature::

            feature_detector(tokens, index, history) -> featureset

        See ``classifier()``
        """
        return self._feature_detector(tokens, index, history)

    def classifier(self):
        """
        Return the classifier that this tagger uses to choose a tag
        for each word in a sentence.  The input for this classifier is
        generated using this tagger's feature detector.

        See ``feature_detector()``
        """
        return self._classifier
class ClassifierBasedPOSTagger(ClassifierBasedTagger):
    """
    A classifier based part of speech tagger.
    """

    def feature_detector(self, tokens, index, history):
        """
        Extract a featureset for the token at *index*: the word itself,
        its lowercase form and 1-3 character suffixes, the two previous
        (lowercased) words and tags, tag+word combinations, and a coarse
        word 'shape' (number/punct/upcase/downcase/mixedcase/other).

        :param tokens: The list of words being tagged.
        :param index: The index of the word to extract features for.
        :param history: The tags assigned to all words before *index*.
        :return: A dict of feature name -> feature value.
        """
        word = tokens[index]
        # Previous word/tag context; None where the sentence boundary cuts
        # the window short.
        if index == 0:
            prevword = prevprevword = None
            prevtag = prevprevtag = None
        elif index == 1:
            prevword = tokens[index - 1].lower()
            prevprevword = None
            prevtag = history[index - 1]
            prevprevtag = None
        else:
            prevword = tokens[index - 1].lower()
            prevprevword = tokens[index - 2].lower()
            prevtag = history[index - 1]
            prevprevtag = history[index - 2]

        # BUG FIX: in the original number pattern
        # '[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$' the trailing '$' anchored
        # only the second alternative ('|' binds loosest), so any word
        # merely *starting* with digits (e.g. '3rd') was classified as a
        # number.  Anchor both alternatives.
        if re.match(r'[0-9]+(\.[0-9]*)?$|[0-9]*\.[0-9]+$', word):
            shape = 'number'
        elif re.match(r'\W+$', word):
            shape = 'punct'
        elif re.match(r'[A-Z][a-z]+$', word):
            shape = 'upcase'
        elif re.match(r'[a-z]+$', word):
            shape = 'downcase'
        elif re.match(r'\w+$', word):
            shape = 'mixedcase'
        else:
            shape = 'other'

        features = {
            'prevtag': prevtag,
            'prevprevtag': prevprevtag,
            'word': word,
            'word.lower': word.lower(),
            'suffix3': word.lower()[-3:],
            'suffix2': word.lower()[-2:],
            'suffix1': word.lower()[-1:],
            'prevprevword': prevprevword,
            'prevword': prevword,
            'prevtag+word': '%s+%s' % (prevtag, word.lower()),
            'prevprevtag+word': '%s+%s' % (prevprevtag, word.lower()),
            'prevword+word': '%s+%s' % (prevword, word.lower()),
            'shape': shape,
        }
        return features
| {
"content_hash": "77641a4f8b23152df73c00f34b63edcd",
"timestamp": "",
"source": "github",
"line_count": 727,
"max_line_length": 94,
"avg_line_length": 38.123796423658874,
"alnum_prop": 0.6057872708904604,
"repo_name": "NorfolkDataSci/presentations",
"id": "b98ae52fd046a2acf5b7e33921b15644925cd193",
"size": "28062",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "2018-01_chatbot/serverless-chatbots-workshop-master/LambdaFunctions/nlp/nltk/tag/sequential.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "207"
},
{
"name": "CSS",
"bytes": "596341"
},
{
"name": "HTML",
"bytes": "8655333"
},
{
"name": "HiveQL",
"bytes": "1309"
},
{
"name": "JavaScript",
"bytes": "1867647"
},
{
"name": "Jupyter Notebook",
"bytes": "612639"
},
{
"name": "PLpgSQL",
"bytes": "1050"
},
{
"name": "Python",
"bytes": "8417061"
},
{
"name": "R",
"bytes": "9306"
}
],
"symlink_target": ""
} |
"""
Overview
========
pymenuconfig is a small and simple frontend to Kconfiglib that's written
entirely in Python using Tkinter as its GUI toolkit.
Motivation
==========
Kconfig is a nice and powerful framework for build-time configuration and lots
of projects already benefit from using it. Kconfiglib allows to utilize power of
Kconfig by using scripts written in pure Python, without requiring one to build
Linux kernel tools written in C (this can be quite tedious on anything that's
not *nix). The aim of this project is to implement simple and small Kconfiglib
GUI frontend that runs on as much systems as possible.
Tkinter GUI toolkit is a natural choice if portability is considered, as it's
a part of Python standard library and is available virtually in every CPython
installation.
User interface
==============
I've tried to replicate look and fill of Linux kernel 'menuconfig' tool that
many users are used to, including keyboard-oriented control and textual
representation of menus with fixed-width font.
Usage
=====
The pymenuconfig module is executable and parses command-line args, so the
most simple way to run menuconfig is to execute script directly:
python pymenuconfig.py --kconfig Kconfig
As with most command-line tools list of options can be obtained with '--help':
python pymenuconfig.py --help
If installed with setuptools, one can run it like this:
python -m pymenuconfig --kconfig Kconfig
In case you're making a wrapper around menuconfig, you can either call main():
import pymenuconfig
pymenuconfig.main(['--kconfig', 'Kconfig'])
Or import MenuConfig class, instantiate it and manually run Tkinter's mainloop:
import tkinter
import kconfiglib
from pymenuconfig import MenuConfig
kconfig = kconfiglib.Kconfig()
mconf = MenuConfig(kconfig)
tkinter.mainloop()
"""
from __future__ import print_function
import os
import sys
import argparse
import kconfiglib
# Tk is imported differently depending on python major version
if sys.version_info[0] < 3:
import Tkinter as tk
import tkFont as font
import tkFileDialog as filedialog
import tkMessageBox as messagebox
else:
import tkinter as tk
from tkinter import font
from tkinter import filedialog
from tkinter import messagebox
class ListEntry(object):
"""
Represents visible menu node and holds all information related to displaying
menu node in a Listbox.
Instances of this class also handle all interaction with main window.
A node is displayed as a single line of text:
PREFIX INDENT BODY POSTFIX
- The PREFIX is always 3 characters or more and can take following values:
' ' comment, menu, bool choice, etc.
Inside menus:
'< >' bool symbol has value 'n'
'<*>' bool symbol has value 'y'
'[ ]' tristate symbol has value 'n'
'[M]' tristate symbol has value 'm'
'[*]' tristate symbol has value 'y'
'- -' symbol has value 'n' that's not editable
'-M-' symbol has value 'm' that's not editable
'-*-' symbol has value 'y' that's not editable
'(M)' tristate choice has value 'm'
'(*)' tristate choice has value 'y'
'(some value)' value of non-bool/tristate symbols
Inside choices:
'( )' symbol has value 'n'
'(M)' symbol has value 'm'
'(*)' symbol has value 'y'
- INDENT is a sequence of space characters. It's used in implicit menus, and
adds 2 spaces for each nesting level
- BODY is a menu node prompt. '***' is added if node is a comment
- POSTFIX adds '(NEW)', '--->' and selected choice symbol where applicable
Attributes:
node:
MenuNode instance this ListEntry is created for.
visible:
Whether entry should be shown in main window.
text:
String to display in a main window's Listbox.
refresh():
Updates .visible and .text attribute values.
set_tristate_value():
Set value for bool/tristate symbols, value should be one of 0,1,2 or None.
Usually it's called when user presses 'y', 'n', 'm' key.
set_str_value():
Set value for non-bool/tristate symbols, value is a string. Usually called
with a value returned by one of MenuConfig.ask_for_* methods.
toggle():
Toggle bool/tristate symbol value. Called when '<Space>' key is pressed in
a main window. Also selects choice value.
select():
Called when '<Return>' key is pressed in a main window with 'SELECT'
action selected. Displays submenu, choice selection menu, or just selects
choice value. For non-bool/tristate symbols asks MenuConfig window to
handle value input via one of MenuConfig.ask_for_* methods.
show_help():
Called when '<Return>' key is pressed in a main window with 'HELP' action
selected. Prepares text help and calls MenuConfig.show_text() to display
text window.
"""
# How to display value of BOOL and TRISTATE symbols
TRI_TO_DISPLAY = {
0: ' ',
1: 'M',
2: '*'
}
    def __init__(self, mconf, node, indent):
        # type: (MenuConfig, kconfiglib.MenuNode, int) -> None
        """
        Create a list entry for one menu node.

        Args:
            mconf: owning MenuConfig window (used for callbacks).
            node: kconfiglib MenuNode this entry represents.
            indent: nesting level inside implicit menus (see class docstring).
        """
        self.indent = indent
        self.node = node
        self.menuconfig = mconf
        # Both attributes are (re)computed by refresh(); until then the
        # entry is hidden and has no display text.
        self.visible = False
        self.text = None
    def __str__(self):
        # The Tk Listbox stringifies inserted items, so return the cached
        # display text computed by refresh().
        return self.text
    def _is_visible(self):
        """
        Decide whether this entry should currently be shown in the list.

        An entry is visible when its node has a prompt whose condition
        evaluates true; menus additionally honor 'visible if', and symbols
        must be assignable (bool/tristate) or visible (other types).
        """
        node = self.node
        v = True
        # Short-circuits below keep node.prompt[1] from being evaluated
        # when prompt is None.
        v = v and node.prompt is not None
        # It should be enough to check if prompt expression is not false and
        # for menu nodes whether 'visible if' is not false
        v = v and kconfiglib.expr_value(node.prompt[1]) > 0
        if node.item == kconfiglib.MENU:
            v = v and kconfiglib.expr_value(node.visibility) > 0
        # If node references Symbol, then we also account for symbol visibility
        # TODO: need to re-think whether this is needed
        if isinstance(node.item, kconfiglib.Symbol):
            if node.item.type in (kconfiglib.BOOL, kconfiglib.TRISTATE):
                v = v and len(node.item.assignable) > 0
            else:
                v = v and node.item.visibility > 0
        return v
    def _get_text(self):
        """
        Compute textual representation of menu node (a line in ListView)

        The line is assembled as PREFIX + INDENT + BODY + POSTFIX; see the
        class docstring for the meaning of each bracket style.
        """
        node = self.node
        item = node.item
        # Determine prefix
        prefix = ' '
        if (isinstance(item, kconfiglib.Symbol) and item.choice is None or
                isinstance(item, kconfiglib.Choice) and item.type is kconfiglib.TRISTATE):
            # The node is for either a symbol outside of choice statement
            # or a tristate choice
            if item.type in (kconfiglib.BOOL, kconfiglib.TRISTATE):
                value = ListEntry.TRI_TO_DISPLAY[item.tri_value]
                if len(item.assignable) > 1:
                    # Symbol is editable
                    if 1 in item.assignable:
                        # 'm' is assignable -> module-capable bracket style
                        prefix = '<{}>'.format(value)
                    else:
                        prefix = '[{}]'.format(value)
                else:
                    # Symbol is not editable
                    prefix = '-{}-'.format(value)
            else:
                prefix = '({})'.format(item.str_value)
        elif isinstance(item, kconfiglib.Symbol) and item.choice is not None:
            # The node is for symbol inside choice statement
            if item.type in (kconfiglib.BOOL, kconfiglib.TRISTATE):
                value = ListEntry.TRI_TO_DISPLAY[item.tri_value]
                if len(item.assignable) > 0:
                    # Symbol is editable
                    prefix = '({})'.format(value)
                else:
                    # Symbol is not editable
                    prefix = '-{}-'.format(value)
            else:
                prefix = '({})'.format(item.str_value)
        # Prefix should be at least 3 chars long
        if len(prefix) < 3:
            prefix += ' ' * (3 - len(prefix))
        # Body
        body = ''
        if node.prompt is not None:
            if item is kconfiglib.COMMENT:
                body = '*** {} ***'.format(node.prompt[0])
            else:
                body = node.prompt[0]
        # Suffix
        is_menu = False
        is_new = False
        if (item is kconfiglib.MENU
                or isinstance(item, kconfiglib.Symbol) and node.is_menuconfig
                or isinstance(item, kconfiglib.Choice)):
            is_menu = True
        if isinstance(item, kconfiglib.Symbol) and item.user_value is None:
            is_new = True
        # For symbol inside choice that has 'y' value, '(NEW)' is not displayed
        if (isinstance(item, kconfiglib.Symbol)
                and item.choice and item.choice.tri_value == 2):
            is_new = False
        # Choice selection - displayed only for choices which have 'y' value
        choice_selection = None
        if isinstance(item, kconfiglib.Choice) and node.item.str_value == 'y':
            choice_selection = ''
            if item.selection is not None:
                sym = item.selection
                if sym.nodes and sym.nodes[0].prompt is not None:
                    choice_selection = sym.nodes[0].prompt[0]
        # NOTE(review): class docstring says INDENT adds 2 spaces per level;
        # the '  ' literal below is assumed to be two spaces — confirm against
        # upstream, the dump collapsed whitespace.
        text = ' {prefix} {indent}{body}{choice}{new}{menu}'.format(
            prefix=prefix,
            indent='  ' * self.indent,
            body=body,
            choice='' if choice_selection is None else ' ({})'.format(
                choice_selection
            ),
            new=' (NEW)' if is_new else '',
            menu=' --->' if is_menu else ''
        )
        return text
def refresh(self):
self.visible = self._is_visible()
self.text = self._get_text()
    def set_tristate_value(self, value):
        """
        Call to change value of BOOL, TRISTATE symbols

        It's preferred to use this instead of item.set_value as it handles
        all necessary interaction with MenuConfig window when symbol value
        changes

        None value is accepted but ignored
        """
        item = self.node.item
        if (isinstance(item, (kconfiglib.Symbol, kconfiglib.Choice))
                and item.type in (kconfiglib.BOOL, kconfiglib.TRISTATE)
                and value is not None):
            if value in item.assignable:
                item.set_value(value)
            elif value == 2 and 1 in item.assignable:
                # User asked for 'y' but dependencies cap the symbol at 'm';
                # warn and assign the best allowed value instead.
                print(
                    'Symbol {} value is limited to \'m\'. Setting value \'m\' instead of \'y\''.format(item.name),
                    file=sys.stderr
                )
                item.set_value(1)
            # NOTE(review): reached even when nothing was assigned above, so
            # the config is marked dirty on a no-op keypress — confirm intent.
            self.menuconfig.mark_as_changed()
            self.menuconfig.refresh_display()
    def set_str_value(self, value):
        """
        Call to change value of HEX, INT, STRING symbols

        It's preferred to use this instead of item.set_value as it handles
        all necessary interaction with MenuConfig window when symbol value
        changes

        None value is accepted but ignored
        """
        item = self.node.item
        if (isinstance(item, kconfiglib.Symbol)
                and item.type in (kconfiglib.INT, kconfiglib.HEX, kconfiglib.STRING)
                and value is not None):
            # kconfiglib takes string representations for these symbol types
            item.set_value(value)
            self.menuconfig.mark_as_changed()
            self.menuconfig.refresh_display()
def toggle(self):
"""
Called when <space> key is pressed
"""
item = self.node.item
if (isinstance(item, (kconfiglib.Symbol, kconfiglib.Choice))
and item.type in (kconfiglib.BOOL, kconfiglib.TRISTATE)):
value = item.tri_value
# Find next value in Symbol/Choice.assignable, or use assignable[0]
try:
it = iter(item.assignable)
while value != next(it):
pass
self.set_tristate_value(next(it))
except StopIteration:
self.set_tristate_value(item.assignable[0])
def select(self):
"""
Called when <Return> key is pressed and SELECT action is selected
"""
item = self.node.item
# - Menu: dive into submenu
# - INT, HEX, STRING symbol: raise prompt to enter symbol value
# - BOOL, TRISTATE symbol inside 'y'-valued Choice: set 'y' value
if (item is kconfiglib.MENU
or isinstance(item, kconfiglib.Symbol) and self.node.is_menuconfig
or isinstance(item, kconfiglib.Choice)):
# Dive into submenu
self.menuconfig.show_submenu(self.node)
elif (isinstance(item, kconfiglib.Symbol) and item.type in
(kconfiglib.INT, kconfiglib.HEX, kconfiglib.STRING)):
# Raise prompt to enter symbol value
ident = self.node.prompt[0] if self.node.prompt is not None else None
title = 'Symbol: {}'.format(item.name)
if item.type is kconfiglib.INT:
# Find enabled ranges
ranges = [
(int(start.str_value), int(end.str_value))
for start, end, expr in item.ranges
if kconfiglib.expr_value(expr) > 0
]
# Raise prompt
self.set_str_value(str(self.menuconfig.ask_for_int(
ident=ident,
title=title,
value=item.str_value,
ranges=ranges
)))
elif item.type is kconfiglib.HEX:
# Find enabled ranges
ranges = [
(int(start.str_value, base=16), int(end.str_value, base=16))
for start, end, expr in item.ranges
if kconfiglib.expr_value(expr) > 0
]
# Raise prompt
self.set_str_value(hex(self.menuconfig.ask_for_hex(
ident=ident,
title=title,
value=item.str_value,
ranges=ranges
)))
elif item.type is kconfiglib.STRING:
# Raise prompt
self.set_str_value(self.menuconfig.ask_for_string(
ident=ident,
title=title,
value=item.str_value
))
elif (isinstance(item, kconfiglib.Symbol)
and item.choice is not None and item.choice.tri_value == 2):
# Symbol inside choice -> set symbol value to 'y'
self.set_tristate_value(2)
def show_help(self):
node = self.node
item = self.node.item
if isinstance(item, (kconfiglib.Symbol, kconfiglib.Choice)):
title = 'Help for symbol: {}'.format(item.name)
if node.help:
help = node.help
else:
help = 'There is no help available for this option.\n'
lines = []
lines.append(help)
lines.append(
'Symbol: {} [={}]'.format(
item.name if item.name else '<UNNAMED>', item.str_value
)
)
lines.append('Type : {}'.format(kconfiglib.TYPE_TO_STR[item.type]))
for n in item.nodes:
lines.append('Prompt: {}'.format(n.prompt[0] if n.prompt else '<EMPTY>'))
lines.append(' Defined at {}:{}'.format(n.filename, n.linenr))
lines.append(' Depends on: {}'.format(kconfiglib.expr_str(n.dep)))
text = '\n'.join(lines)
else:
title = 'Help'
text = 'Help not available for this menu node.\n'
self.menuconfig.show_text(text, title)
self.menuconfig.refresh_display()
class EntryDialog(object):
    """
    Creates modal dialog (top-level Tk window) with labels, entry box and two
    buttons: OK and CANCEL.

    After construction, callers should block on ``root.wait_window(d.dlg)``;
    the result is in ``d.value`` (str on OK/<Enter>, None on cancel/<Esc>).

    Args:
        master: parent Tk widget the dialog is modal over.
        text: explanatory text shown above the entry field.
        title: dialog window title.
        ident: optional identifier rendered as a '# ...' line above the text.
        value: optional initial content of the entry field (pre-selected).
    """
    def __init__(self, master, text, title, ident=None, value=None):
        self.master = master
        dlg = self.dlg = tk.Toplevel(master)
        self.dlg.withdraw()  # hide window until it is fully built and centered
        dlg.title(title)
        # Identifier label
        if ident is not None:
            self.label_id = tk.Label(dlg, anchor=tk.W, justify=tk.LEFT)
            self.label_id['font'] = font.nametofont('TkFixedFont')
            self.label_id['text'] = '# {}'.format(ident)
            self.label_id.pack(fill=tk.X, padx=2, pady=2)
        # Label
        self.label = tk.Label(dlg, anchor=tk.W, justify=tk.LEFT)
        self.label['font'] = font.nametofont('TkFixedFont')
        self.label['text'] = text
        self.label.pack(fill=tk.X, padx=10, pady=4)
        # Entry box
        self.entry = tk.Entry(dlg)
        self.entry['font'] = font.nametofont('TkFixedFont')
        self.entry.pack(fill=tk.X, padx=2, pady=2)
        # Frame for buttons
        self.frame = tk.Frame(dlg)
        self.frame.pack(padx=2, pady=2)
        # Button
        self.btn_accept = tk.Button(self.frame, text='< Ok >', command=self.accept)
        self.btn_accept['font'] = font.nametofont('TkFixedFont')
        self.btn_accept.pack(side=tk.LEFT, padx=2)
        self.btn_cancel = tk.Button(self.frame, text='< Cancel >', command=self.cancel)
        self.btn_cancel['font'] = font.nametofont('TkFixedFont')
        self.btn_cancel.pack(side=tk.LEFT, padx=2)
        # Bind Enter and Esc keys
        self.dlg.bind('<Return>', self.accept)
        self.dlg.bind('<Escape>', self.cancel)
        # Dialog is resizable only by width
        self.dlg.resizable(1, 0)
        # Set supplied value (if any)
        if value is not None:
            self.entry.insert(0, value)
            self.entry.selection_range(0, tk.END)
        # By default returned value is None. To caller this means that entry
        # process was cancelled
        self.value = None
        # Modal dialog: route all input to this window until it is closed
        dlg.transient(master)
        dlg.grab_set()
        # Center dialog window
        _center_window_above_parent(master, dlg)
        self.dlg.deiconify()  # show window
        # Focus entry field
        self.entry.focus_set()
    def accept(self, ev=None):
        # OK / <Enter>: capture the entered string, then close
        self.value = self.entry.get()
        self.dlg.destroy()
    def cancel(self, ev=None):
        # Cancel / <Esc>: leave self.value as None, then close
        self.dlg.destroy()
class TextDialog(object):
    """
    Modal dialog showing read-only text with a single OK button.

    Used for help screens and informational messages. Callers block on
    ``root.wait_window(d.dlg)``; the dialog produces no result value.
    """
    def __init__(self, master, text, title):
        self.master = master
        dlg = self.dlg = tk.Toplevel(master)
        self.dlg.withdraw()  # hide window until it is fully built and centered
        dlg.title(title)
        dlg.minsize(600,400)
        # Text
        self.text = tk.Text(dlg, height=1)
        self.text['font'] = font.nametofont('TkFixedFont')
        self.text.insert(tk.END, text)
        # Make text read-only
        self.text['state'] = tk.DISABLED
        self.text.pack(fill=tk.BOTH, expand=1, padx=4, pady=4)
        # Frame for buttons
        self.frame = tk.Frame(dlg)
        self.frame.pack(padx=2, pady=2)
        # Button
        self.btn_accept = tk.Button(self.frame, text='< Ok >', command=self.accept)
        self.btn_accept['font'] = font.nametofont('TkFixedFont')
        self.btn_accept.pack(side=tk.LEFT, padx=2)
        # Bind Enter and Esc keys
        self.dlg.bind('<Return>', self.accept)
        self.dlg.bind('<Escape>', self.cancel)
        # Modal dialog: route all input to this window until it is closed
        dlg.transient(master)
        dlg.grab_set()
        # Center dialog window
        _center_window_above_parent(master, dlg)
        self.dlg.deiconify()  # show window
        # Focus entry field
        self.text.focus_set()
    def accept(self, ev=None):
        # OK / <Enter>: close the dialog
        self.dlg.destroy()
    def cancel(self, ev=None):
        # <Esc>: close the dialog (same as accept; there is no result)
        self.dlg.destroy()
class MenuConfig(object):
    """
    Main application window of the Tk-based Kconfig editor.

    Owns the Tk widget tree, the navigation stack through the menu
    hierarchy, and load/save state of the current .config file. When
    constructed with the silent flag, no widgets are created and the
    instance only loads/saves configuration files.
    """
    # Indices of the bottom-row actions. Order matches ACTIONS below.
    (
        ACTION_SELECT,
        ACTION_EXIT,
        ACTION_HELP,
        ACTION_LOAD,
        ACTION_SAVE,
        ACTION_SAVE_AS
    ) = range(6)
    # (button label, action id) pairs for the radio-button row
    ACTIONS = (
        ('Select', ACTION_SELECT),
        ('Exit', ACTION_EXIT),
        ('Help', ACTION_HELP),
        ('Load', ACTION_LOAD),
        ('Save', ACTION_SAVE),
        ('Save as', ACTION_SAVE_AS),
    )
    def __init__(self, kconfig, __silent=None):
        """
        Build the menuconfig UI, or skip it entirely in silent mode.

        Args:
            kconfig: parsed kconfiglib.Kconfig tree to browse and edit.
            __silent: when True, create no Tk widgets (batch load/save mode).
                Note: the double underscore makes Python name-mangle the
                attribute to _MenuConfig__silent; it is only read inside
                this class.
        """
        self.kconfig = kconfig
        self.__silent = __silent
        if self.__silent is True:
            # Silent mode: no UI at all; the remaining attributes used by
            # load/save paths are guarded by __silent checks.
            return
        # Instantiate Tk widgets
        self.root = tk.Tk()
        self.root.withdraw()  # hide window while widgets are being created
        dlg = self.root
        # Window title
        dlg.title('pymenuconfig')
        # Some empirical window size
        dlg.minsize(500, 300)
        dlg.geometry('800x600')
        # Label that shows position in menu tree
        self.label_position = tk.Label(
            dlg,
            anchor=tk.W,
            justify=tk.LEFT,
            font=font.nametofont('TkFixedFont')
        )
        self.label_position.pack(fill=tk.X, padx=2)
        # 'Tip' frame and text
        self.frame_tip = tk.LabelFrame(
            dlg,
            text='Tip'
        )
        self.label_tip = tk.Label(
            self.frame_tip,
            anchor=tk.W,
            justify=tk.LEFT,
            font=font.nametofont('TkFixedFont')
        )
        self.label_tip['text'] = '\n'.join([
            'Arrow keys navigate the menu. <Enter> performs selected operation (set of buttons at the bottom)',
            'Pressing <Y> includes, <N> excludes, <M> modularizes features',
            'Press <Esc> to go one level up. Press <Esc> at top level to exit',
            'Legend: [*] built-in [ ] excluded <M> module < > module capable'
        ])
        self.label_tip.pack(fill=tk.BOTH, expand=1, padx=4, pady=4)
        self.frame_tip.pack(fill=tk.X, padx=2)
        # Main ListBox where all the magic happens
        self.list = tk.Listbox(
            dlg,
            selectmode=tk.SINGLE,
            activestyle=tk.NONE,
            font=font.nametofont('TkFixedFont'),
            height=1,
        )
        self.list['foreground'] = 'Blue'
        self.list['background'] = 'Gray95'
        # Make selection invisible; the 'active' row is highlighted manually
        # by _invert_colors() instead.
        self.list['selectbackground'] = self.list['background']
        self.list['selectforeground'] = self.list['foreground']
        self.list.pack(fill=tk.BOTH, expand=1, padx=20, ipadx=2)
        # Frame with radio buttons
        self.frame_radio = tk.Frame(dlg)
        self.radio_buttons = []
        self.tk_selected_action = tk.IntVar()
        for text, value in MenuConfig.ACTIONS:
            btn = tk.Radiobutton(
                self.frame_radio,
                variable=self.tk_selected_action,
                value=value
            )
            btn['text'] = '< {} >'.format(text)
            btn['font'] = font.nametofont('TkFixedFont')
            btn['indicatoron'] = 0
            btn.pack(side=tk.LEFT)
            self.radio_buttons.append(btn)
        self.frame_radio.pack(anchor=tk.CENTER, pady=4)
        # Label with status information
        self.tk_status = tk.StringVar()
        self.label_status = tk.Label(
            dlg,
            textvariable=self.tk_status,
            anchor=tk.W,
            justify=tk.LEFT,
            font=font.nametofont('TkFixedFont')
        )
        self.label_status.pack(fill=tk.X, padx=4, pady=4)
        # Center window
        _center_window(self.root, dlg)
        self.root.deiconify()  # show window
        # Disable keyboard focus on all widgets ...
        self._set_option_to_all_children(dlg, 'takefocus', 0)
        # ... except for main ListBox
        self.list['takefocus'] = 1
        self.list.focus_set()
        # Bind keys
        dlg.bind('<Escape>', self.handle_keypress)
        dlg.bind('<space>', self.handle_keypress)
        dlg.bind('<Return>', self.handle_keypress)
        dlg.bind('<Right>', self.handle_keypress)
        dlg.bind('<Left>', self.handle_keypress)
        dlg.bind('<Up>', self.handle_keypress)
        dlg.bind('<Down>', self.handle_keypress)
        dlg.bind('n', self.handle_keypress)
        dlg.bind('m', self.handle_keypress)
        dlg.bind('y', self.handle_keypress)
        # Register callback that's called when window closes
        dlg.wm_protocol('WM_DELETE_WINDOW', self._close_window)
        # Init fields
        self.node = None
        self.node_stack = []
        self.all_entries = []
        self.shown_entries = []
        self.config_path = None
        self.unsaved_changes = False
        self.status_string = 'NEW CONFIG'
        self.update_status()
        # Display first child of top level node (the top level node is 'mainmenu')
        self.show_node(self.kconfig.top_node)
def _set_option_to_all_children(self, widget, option, value):
widget[option] = value
for n,c in widget.children.items():
self._set_option_to_all_children(c, option, value)
    def _invert_colors(self, idx):
        """Swap fg/bg of listbox row *idx* to mark it as the active row."""
        self.list.itemconfig(idx, {'bg' : self.list['foreground']})
        self.list.itemconfig(idx, {'fg' : self.list['background']})
    @property
    def _selected_entry(self):
        # type: (...) -> ListEntry
        """Return the ListEntry under the listbox cursor, or None if none."""
        active_idx = self.list.index(tk.ACTIVE)
        # Guard against an empty list or a stale active index
        if active_idx >= 0 and active_idx < len(self.shown_entries):
            return self.shown_entries[active_idx]
        return None
def _select_node(self, node):
# type: (kconfiglib.MenuNode) -> None
"""
Attempts to select entry that corresponds to given MenuNode in main listbox
"""
idx = None
for i, e in enumerate(self.shown_entries):
if e.node is node:
idx = i
break
if idx is not None:
self.list.activate(idx)
self.list.see(idx)
self._invert_colors(idx)
def handle_keypress(self, ev):
keysym = ev.keysym
if keysym == 'Left':
self._select_action(prev=True)
elif keysym == 'Right':
self._select_action(prev=False)
elif keysym == 'Up':
self.refresh_display(reset_selection=False)
elif keysym == 'Down':
self.refresh_display(reset_selection=False)
elif keysym == 'space':
self._selected_entry.toggle()
elif keysym in ('n', 'm', 'y'):
self._selected_entry.set_tristate_value(kconfiglib.STR_TO_TRI[keysym])
elif keysym == 'Return':
action = self.tk_selected_action.get()
if action == self.ACTION_SELECT:
self._selected_entry.select()
elif action == self.ACTION_EXIT:
self._action_exit()
elif action == self.ACTION_HELP:
self._selected_entry.show_help()
elif action == self.ACTION_LOAD:
if self.prevent_losing_changes():
self.open_config()
elif action == self.ACTION_SAVE:
self.save_config()
elif action == self.ACTION_SAVE_AS:
self.save_config(force_file_dialog=True)
elif keysym == 'Escape':
self._action_exit()
pass
    def _close_window(self):
        """
        Exit handler (window close button, <Esc> at top level, silent mode).

        Gives the user a chance to save unsaved changes; only destroys the
        Tk root when the user did not cancel. In silent mode there is no
        window to destroy, so it returns after the save check.
        """
        if self.prevent_losing_changes():
            print('Exiting..')
            if self.__silent is True:
                return
            self.root.destroy()
def _action_exit(self):
if self.node_stack:
self.show_parent()
else:
self._close_window()
def _select_action(self, prev=False):
# Determine the radio button that's activated
action = self.tk_selected_action.get()
if prev:
action -= 1
else:
action += 1
action %= len(MenuConfig.ACTIONS)
self.tk_selected_action.set(action)
    def _collect_list_entries(self, start_node, indent=0):
        """
        Given first MenuNode of nodes list at some level in menu hierarchy,
        collects nodes that may be displayed when viewing and editing that
        hierarchy level. Includes implicit menu nodes, i.e. the ones dependent
        on 'config' entry via 'if' statement which are internally represented
        as children of their dependency

        Args:
            start_node: first MenuNode of the sibling list to walk.
            indent: nesting level passed to the created ListEntry objects.

        Returns:
            list of ListEntry, in display order.
        """
        entries = []
        n = start_node
        # Walk the sibling chain via .next
        while n is not None:
            entries.append(ListEntry(self, n, indent))
            # If node refers to a symbol (X) and has children, it is either
            # 'config' or 'menuconfig'. The children are items inside 'if X'
            # block that immediately follows 'config' or 'menuconfig' entry.
            # If it's a 'menuconfig' then corresponding MenuNode is shown as a
            # regular menu entry. But if it's a 'config', then its children need
            # to be shown in the same list with their texts indented
            if (n.list is not None
                    and isinstance(n.item, kconfiglib.Symbol)
                    and n.is_menuconfig == False):
                entries.extend(
                    self._collect_list_entries(n.list, indent=indent + 1)
                )
            n = n.next
        return entries
    def refresh_display(self, reset_selection=False):
        """
        Re-render the listbox from self.all_entries.

        Recomputes every entry's text/visibility, repopulates the listbox
        with the visible ones, restores (or resets) cursor and scroll
        position, resets the bottom action to SELECT and updates the
        breadcrumb label showing the current position in the menu tree.

        Args:
            reset_selection: when True, select the topmost entry instead of
                trying to keep the previously selected node.
        """
        # Refresh list entries' attributes
        for e in self.all_entries:
            e.refresh()
        # Try to preserve selection upon refresh
        selected_entry = self._selected_entry
        # Also try to preserve listbox scroll offset
        # If not preserved, the see() method will make wanted item to appear
        # at the bottom of the list, even if previously it was in center
        scroll_offset = self.list.yview()[0]
        # Show only visible entries
        self.shown_entries = [e for e in self.all_entries if e.visible]
        # Refresh listbox contents
        self.list.delete(0, tk.END)
        self.list.insert(0, *self.shown_entries)
        if selected_entry and not reset_selection:
            # Restore scroll position
            self.list.yview_moveto(scroll_offset)
            # Activate previously selected node
            self._select_node(selected_entry.node)
        else:
            # Select the topmost entry
            self.list.activate(0)
            self._invert_colors(0)
        # Select ACTION_SELECT on each refresh (mimic C menuconfig)
        self.tk_selected_action.set(self.ACTION_SELECT)
        # Display current location in configuration tree
        pos = []
        for n in self.node_stack + [self.node]:
            pos.append(n.prompt[0] if n.prompt else '[none]')
        self.label_position['text'] = u'# ' + u' -> '.join(pos)
def show_node(self, node):
self.node = node
if node.list is not None:
self.all_entries = self._collect_list_entries(node.list)
else:
self.all_entries = []
self.refresh_display(reset_selection=True)
    def show_submenu(self, node):
        """Enter *node*'s submenu, remembering the current level for <Esc>."""
        self.node_stack.append(self.node)
        self.show_node(node)
    def show_parent(self):
        """
        Go one level up in the menu hierarchy; no-op at top level.

        After switching to the parent node, re-selects the submenu entry
        we just came from so the cursor position is preserved.
        """
        if self.node_stack:
            select_node = self.node
            parent_node = self.node_stack.pop()
            self.show_node(parent_node)
            # Restore previous selection
            self._select_node(select_node)
            self.refresh_display(reset_selection=False)
def ask_for_string(self, ident=None, title='Enter string', value=None):
"""
Raises dialog with text entry widget and asks user to enter string
Return:
- str - user entered string
- None - entry was cancelled
"""
text = 'Please enter a string value\n' \
'User <Enter> key to accept the value\n' \
'Use <Esc> key to cancel entry\n'
d = EntryDialog(self.root, text, title, ident=ident, value=value)
self.root.wait_window(d.dlg)
self.list.focus_set()
return d.value
def ask_for_int(self, ident=None, title='Enter integer value', value=None, ranges=()):
"""
Raises dialog with text entry widget and asks user to enter decimal number
Ranges should be iterable of tuples (start, end),
where 'start' and 'end' specify allowed value range (inclusively)
Return:
- int - when valid number that falls within any one of specified ranges is entered
- None - invalid number or entry was cancelled
"""
text = 'Please enter a decimal value. Fractions will not be accepted\n' \
'User <Enter> key to accept the value\n' \
'Use <Esc> key to cancel entry\n'
d = EntryDialog(self.root, text, title, ident=ident, value=value)
self.root.wait_window(d.dlg)
self.list.focus_set()
ivalue = None
if d.value:
try:
ivalue = int(d.value)
except ValueError:
messagebox.showerror('Bad value', 'Entered value \'{}\' is not an integer'.format(d.value))
if ivalue is not None and ranges:
allowed = False
for start, end in ranges:
allowed = allowed or start <= ivalue and ivalue <= end
if not allowed:
messagebox.showerror(
'Bad value',
'Entered value \'{:d}\' is out of range\n'
'Allowed:\n{}'.format(
ivalue,
'\n'.join([' {:d} - {:d}'.format(s,e) for s,e in ranges])
)
)
ivalue = None
return ivalue
def ask_for_hex(self, ident=None, title='Enter hexadecimal value', value=None, ranges=()):
"""
Raises dialog with text entry widget and asks user to enter decimal number
Ranges should be iterable of tuples (start, end),
where 'start' and 'end' specify allowed value range (inclusively)
Return:
- int - when valid number that falls within any one of specified ranges is entered
- None - invalid number or entry was cancelled
"""
text = 'Please enter a hexadecimal value\n' \
'User <Enter> key to accept the value\n' \
'Use <Esc> key to cancel entry\n'
d = EntryDialog(self.root, text, title, ident=ident, value=value)
self.root.wait_window(d.dlg)
self.list.focus_set()
hvalue = None
if d.value:
try:
hvalue = int(d.value, base=16)
except ValueError:
messagebox.showerror('Bad value', 'Entered value \'{}\' is not a hexadecimal value'.format(d.value))
if hvalue is not None and ranges:
allowed = False
for start, end in ranges:
allowed = allowed or start <= hvalue and hvalue <= end
if not allowed:
messagebox.showerror(
'Bad value',
'Entered value \'0x{:x}\' is out of range\n'
'Allowed:\n{}'.format(
hvalue,
'\n'.join([' 0x{:x} - 0x{:x}'.format(s,e) for s,e in ranges])
)
)
hvalue = None
return hvalue
    def show_text(self, text, title='Info'):
        """
        Raises dialog with read-only text view that contains supplied text

        Blocks until the dialog is dismissed, then returns keyboard focus
        to the main listbox.
        """
        d = TextDialog(self.root, text, title)
        self.root.wait_window(d.dlg)
        self.list.focus_set()
    def mark_as_changed(self):
        """
        Marks current config as having unsaved changes

        Should be called whenever config value is changed
        """
        self.unsaved_changes = True
        self.update_status()
    def set_status_string(self, status):
        """
        Sets status string displayed at the bottom of the window
        """
        self.status_string = status
        self.update_status()
    def update_status(self):
        """
        Updates status bar display

        Status bar displays:
        - unsaved status
        - current config path
        - status string (see set_status_string())
        """
        if self.__silent is True:
            # Silent mode has no widgets to update
            return
        self.tk_status.set('{} [{}] {}'.format(
            '<UNSAVED>' if self.unsaved_changes else '',
            self.config_path if self.config_path else '',
            self.status_string
        ))
    def _check_is_visible(self, node):
        """
        Return True when *node* would be shown in the menu.

        NOTE(review): this duplicates ListEntry._is_visible; keep the two in
        sync (or refactor to share) if the visibility rules ever change.
        """
        v = True
        # Short-circuit keeps node.prompt[1] from being read when prompt is None
        v = v and node.prompt is not None
        # It should be enough to check if prompt expression is not false and
        # for menu nodes whether 'visible if' is not false
        v = v and kconfiglib.expr_value(node.prompt[1]) > 0
        if node.item == kconfiglib.MENU:
            v = v and kconfiglib.expr_value(node.visibility) > 0
        # If node references Symbol, then we also account for symbol visibility
        # TODO: need to re-think whether this is needed
        if isinstance(node.item, kconfiglib.Symbol):
            if node.item.type in (kconfiglib.BOOL, kconfiglib.TRISTATE):
                v = v and len(node.item.assignable) > 0
            else:
                v = v and node.item.visibility > 0
        return v
    def config_is_changed(self):
        """
        Return True when some visible symbol has no user-assigned value,
        i.e. the loaded .config does not fully cover the Kconfig tree and
        should be (re)saved. Walks the whole menu tree iteratively.
        """
        is_changed = False
        node = self.kconfig.top_node.list
        if not node:
            # Empty configuration
            return is_changed
        while 1:
            item = node.item
            if isinstance(item, kconfiglib.Symbol) and item.user_value is None and self._check_is_visible(node):
                is_changed = True
                print("Config \"# {}\" has changed, need save config file\n".format(node.prompt[0]))
                break;
            # Iterative tree walk using parent pointers:
            # descend first, then go to next sibling, else climb until an
            # ancestor has a next sibling; while/else breaks at the root.
            if node.list:
                node = node.list
            elif node.next:
                node = node.next
            else:
                while node.parent:
                    node = node.parent
                    if node.next:
                        node = node.next
                        break
                else:
                    break
        return is_changed
    def prevent_losing_changes(self):
        """
        Checks if there are unsaved changes and asks user to save or discard them

        This routine should be called whenever current config is going to be discarded

        Raises the usual 'Yes', 'No', 'Cancel' prompt.

        Return:
            - True: caller may safely drop current config state
            - False: user needs to continue work on current config ('Cancel' pressed or saving failed)
        """
        # Incomplete configs (symbols without user values) count as changes
        if self.config_is_changed() == True:
            self.mark_as_changed()
        if not self.unsaved_changes:
            return True
        if self.__silent:
            # No UI to ask with: silently save and report the outcome
            saved = self.save_config()
            return saved
        res = messagebox.askyesnocancel(
            parent=self.root,
            title='Unsaved changes',
            message='Config has unsaved changes. Do you want to save them?'
        )
        if res is None:
            # 'Cancel' pressed: keep working on the current config
            return False
        elif res is False:
            # 'No' pressed: discard changes
            return True
        # Otherwise attempt to save config and succeed only if config has been saved successfully
        saved = self.save_config()
        return saved
    def open_config(self, path=None):
        """
        Load a .config file into the Kconfig tree.

        Args:
            path: file to load; when None an open-file dialog is shown.

        Returns:
            True on success (including a not-yet-existing path, which is
            treated as a new config), False on dialog cancel or load error.
        """
        if path is None:
            # Create open dialog. Either existing file is selected or no file is selected as a result
            path = filedialog.askopenfilename(
                parent=self.root,
                title='Open config..',
                initialdir=os.path.dirname(self.config_path) if self.config_path else os.getcwd(),
                filetypes=(('.config files', '*.config'), ('All files', '*.*'))
            )
            if not path or not os.path.isfile(path):
                return False
        path = os.path.abspath(path)
        print('Loading config: \'{}\''.format(path))
        # Try to open given path
        # If path does not exist, we still set current config path to it but don't load anything
        self.unsaved_changes = False
        self.config_path = path
        if not os.path.exists(path):
            self.set_status_string('New config')
            self.mark_as_changed()
            return True
        # Load config and set status accordingly
        try:
            self.kconfig.load_config(path)
        except IOError as e:
            self.set_status_string('Failed to load: \'{}\''.format(path))
            if not self.__silent:
                self.refresh_display()
            print('Failed to load config \'{}\': {}'.format(path, e))
            return False
        self.set_status_string('Opened config')
        if not self.__silent:
            self.refresh_display()
        return True
    def save_config(self, force_file_dialog=False):
        """
        Write the current configuration to disk.

        Args:
            force_file_dialog: always show a save-as dialog, even when a
                current config path is known.

        Returns:
            True when the file was written, False on dialog cancel or
            write error.
        """
        path = self.config_path
        if path is None or force_file_dialog:
            path = filedialog.asksaveasfilename(
                parent=self.root,
                title='Save config as..',
                initialdir=os.path.dirname(self.config_path) if self.config_path else os.getcwd(),
                initialfile=os.path.basename(self.config_path) if self.config_path else None,
                defaultextension='.config',
                filetypes=(('.config files', '*.config'), ('All files', '*.*'))
            )
            if not path:
                return False
        path = os.path.abspath(path)
        print('Saving config: \'{}\''.format(path))
        # Try to save config to selected path
        try:
            self.kconfig.write_config(path, header="#\n# Automatically generated file; DO NOT EDIT.\n")
            self.unsaved_changes = False
            self.config_path = path
            self.set_status_string('Saved config')
        except IOError as e:
            self.set_status_string('Failed to save: \'{}\''.format(path))
            print('Save failed: {}'.format(e), file=sys.stderr)
            return False
        return True
def _center_window(root, window):
    # type: (tk.Tk, tk.Toplevel) -> None
    """
    Attempts to center window on screen
    """
    # Make sure geometry information is up to date before measuring
    root.update_idletasks()
    width = window.winfo_width()
    height = window.winfo_height()
    screen_w = window.winfo_screenwidth()
    screen_h = window.winfo_screenheight()
    pos_x = int((screen_w / 2) - (width / 2))
    pos_y = int((screen_h / 2) - (height / 2))
    window.geometry('+{:d}+{:d}'.format(pos_x, pos_y))
    window.lift()
    window.focus_force()
def _center_window_above_parent(root, window):
    # type: (tk.Tk, tk.Toplevel) -> None
    """
    Attempts to center window above its parent window
    """
    # Make sure geometry information is up to date before measuring
    root.update_idletasks()
    parent = window.master
    width = window.winfo_width()
    height = window.winfo_height()
    parent_x = parent.winfo_rootx()
    parent_y = parent.winfo_rooty()
    parent_w = parent.winfo_width()
    parent_h = parent.winfo_height()
    pos_x = int(parent_x + (parent_w / 2) - (width / 2))
    pos_y = int(parent_y + (parent_h / 2) - (height / 2))
    window.geometry('+{:d}+{:d}'.format(pos_x, pos_y))
    window.lift()
    window.focus_force()
def main(argv=None):
    """
    Command-line entry point.

    Parses arguments, loads the Kconfig tree and either opens the
    interactive Tk window or, with --silent, only loads/saves the
    configuration without showing any UI.

    Args:
        argv: argument list to parse; defaults to sys.argv[1:].

    Raises:
        RuntimeError: when the root Kconfig file does not exist.
    """
    if argv is None:
        argv = sys.argv[1:]
    # Instantiate cmd options parser
    parser = argparse.ArgumentParser(
        description='Interactive Kconfig configuration editor'
    )
    parser.add_argument(
        '--kconfig',
        metavar='FILE',
        type=str,
        default='Kconfig',
        help='path to root Kconfig file'
    )
    parser.add_argument(
        '--config',
        metavar='FILE',
        type=str,
        help='path to .config file to load'
    )
    # BUGFIX: '--silent' used to be registered (conditionally) with type=str,
    # which made argparse demand a value after the flag, so the documented
    # bare '--silent' usage aborted with 'expected one argument'. Register it
    # unconditionally as a boolean flag instead; absence still means False.
    parser.add_argument(
        '--silent',
        dest='_silent_',
        action='store_true',
        help='silent mode, not show window'
    )
    args = parser.parse_args(argv)
    kconfig_path = args.kconfig
    config_path = args.config
    # Verify that Kconfig file exists
    if not os.path.isfile(kconfig_path):
        raise RuntimeError('\'{}\': no such file'.format(kconfig_path))
    # Parse Kconfig files
    kconf = kconfiglib.Kconfig(filename=kconfig_path)
    if not args._silent_:
        print("In normal mode. Will show menuconfig window.")
        mc = MenuConfig(kconf)
        # If config file was specified, load it
        if config_path:
            mc.open_config(config_path)
        print("Enter mainloop. Waiting...")
        tk.mainloop()
    else:
        print("In silent mode. Don`t show menuconfig window.")
        mc = MenuConfig(kconf, True)
        # If config file was specified, load it
        if config_path:
            mc.open_config(config_path)
        mc._close_window()
# Run the configuration editor when executed as a script
if __name__ == '__main__':
    main()
| {
"content_hash": "89aabb5cda8030e5e70a818a54523e7e",
"timestamp": "",
"source": "github",
"line_count": 1203,
"max_line_length": 116,
"avg_line_length": 37.05985037406484,
"alnum_prop": 0.5644303882645851,
"repo_name": "wolfgangz2013/rt-thread",
"id": "67893d72812b8ce320d2ea97b054f78917655dfd",
"size": "44639",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/pymenuconfig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "16995634"
},
{
"name": "Batchfile",
"bytes": "179660"
},
{
"name": "C",
"bytes": "705913788"
},
{
"name": "C++",
"bytes": "7764376"
},
{
"name": "CMake",
"bytes": "148026"
},
{
"name": "CSS",
"bytes": "9978"
},
{
"name": "DIGITAL Command Language",
"bytes": "13234"
},
{
"name": "GDB",
"bytes": "11796"
},
{
"name": "HTML",
"bytes": "6039932"
},
{
"name": "Lex",
"bytes": "7026"
},
{
"name": "Logos",
"bytes": "7078"
},
{
"name": "M4",
"bytes": "17515"
},
{
"name": "Makefile",
"bytes": "268009"
},
{
"name": "Module Management System",
"bytes": "1548"
},
{
"name": "Objective-C",
"bytes": "4093973"
},
{
"name": "Pawn",
"bytes": "1427"
},
{
"name": "Perl",
"bytes": "9520"
},
{
"name": "Python",
"bytes": "1203710"
},
{
"name": "RPC",
"bytes": "14162"
},
{
"name": "Roff",
"bytes": "4486"
},
{
"name": "Ruby",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "407723"
},
{
"name": "TeX",
"bytes": "3113"
},
{
"name": "Yacc",
"bytes": "16084"
}
],
"symlink_target": ""
} |
from django.db import models
class Proveedores(models.Model):
pass | {
"content_hash": "a5eb47cfbd727039efa7a82770872836",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 32,
"avg_line_length": 17.75,
"alnum_prop": 0.7746478873239436,
"repo_name": "urkh/erp",
"id": "9ffe42dfec4eaaa65256e1e2f05e166e7109c873",
"size": "71",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modulos/proveedores/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "367922"
},
{
"name": "JavaScript",
"bytes": "1780907"
},
{
"name": "PHP",
"bytes": "170634"
},
{
"name": "Python",
"bytes": "14758"
}
],
"symlink_target": ""
} |
from .creation import sparse_coo_tensor
from .creation import sparse_csr_tensor
from .unary import sin
from .unary import tan
from .unary import asin
from .unary import atan
from .unary import sinh
from .unary import tanh
from .unary import asinh
from .unary import atanh
from .unary import sqrt
from .unary import square
from .unary import log1p
from .unary import abs
from .unary import pow
from .unary import cast
from .unary import neg
from .unary import coalesce
from .unary import deg2rad
from .unary import rad2deg
from .unary import expm1
from .unary import transpose
from .unary import reshape
from .binary import mv
from .binary import matmul
from .binary import masked_matmul
from .binary import add
from .binary import divide
from .binary import multiply
from .binary import subtract
from .binary import is_same_shape
from .multiary import addmm
from . import nn
# Public names re-exported via `from paddle.sparse import *`.
# Note: the `nn` submodule is imported above but not listed here.
__all__ = [
    'sparse_coo_tensor',
    'sparse_csr_tensor',
    'sin',
    'tan',
    'asin',
    'atan',
    'sinh',
    'tanh',
    'asinh',
    'atanh',
    'sqrt',
    'square',
    'log1p',
    'abs',
    'pow',
    'cast',
    'neg',
    'deg2rad',
    'rad2deg',
    'expm1',
    'mv',
    'matmul',
    'masked_matmul',
    'addmm',
    'add',
    'subtract',
    'transpose',
    'multiply',
    'divide',
    'coalesce',
    'is_same_shape',
    'reshape',
]
| {
"content_hash": "8e1812d07aa729ec169756b6a9ab9924",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 39,
"avg_line_length": 18.76388888888889,
"alnum_prop": 0.6661732050333087,
"repo_name": "luotao1/Paddle",
"id": "9ca932ac46b6ada15d338b2018c2716a7a25efcb",
"size": "1964",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/sparse/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
} |
from Globals import *
import pygame.freetype as freetype
"""
the class used to encapsulate all needed elements of a tile
"""
class Tile:
    """Encapsulates one game tile: appearance, caption, and its spawn/move animations."""

    def __init__(self, _pieceInfo):
        """Initialise tile state from the _pieceInfo description dict."""
        self.lv = _pieceInfo["lv"]          # tile level
        self.text = _pieceInfo["text"]      # caption rendered inside the tile
        self.bcolor = _pieceInfo["bcolor"]  # background color of the tile
        self.tcolor = _pieceInfo["tcolor"]  # color of the caption text
        self.tsize = _pieceInfo["tsize"]    # caption font size
        self.position = (0, 0)              # pixel position, only needed for animation
        self.moveRate = (0, 0)              # pixels per second the tile moves
        self.font = freetype.Font(G_gameInfo["tileFont"], self.tsize)
        self.spawnTimer = 0                 # countdown timer for the spawn animation
        self.spawnTimerMax = 0.2            # how long the spawn animation lasts
        # NOTE: "sqaureSize" is the (misspelled) key used throughout the config
        self.size = G_gameInfo["sqaureSize"]
        self.sizeScale = 0.3                # max fraction added to size during the spawn pulse
        self.canDraw = False                # delays drawing while two tiles combine

    def _squareOrigin(self, _destSquare):
        """Pixel (left, top) of the board square at matrix indexes (row, col) _destSquare."""
        left = G_gameInfo["tablePos"][0] + G_gameInfo["tableBorderSize"] * (_destSquare[1]+1) + G_gameInfo["sqaureSize"] * _destSquare[1]
        top = G_gameInfo["tablePos"][1] + G_gameInfo["tableBorderSize"] * (_destSquare[0]+1) + G_gameInfo["sqaureSize"] * _destSquare[0]
        return (left, top)

    def Draw(self, _surface):
        """Render the tile square and its centred caption onto _surface each frame.

        _surface -> where the tile will be rendered, part of pygame module
        """
        # draw only after the spawn delay is over
        if self.canDraw:
            # find the top-left corner, compensating for the animated size change
            offset = (self.size - G_gameInfo["sqaureSize"]) * 0.5
            posX = self.position[0] - offset
            posY = self.position[1] - offset
            # the tile itself is a filled square
            rect = ((posX, posY), (self.size, self.size))
            pygame.draw.rect(_surface, self.bcolor, rect)
            # centre the caption inside the square
            label = self.font.render(self.text, self.tcolor)
            wLabel = label[1].width
            hLabel = label[1].height
            left = posX + self.size/2 - wLabel/2
            top = posY + self.size/2 - hLabel/2
            _surface.blit(label[0], (left, top))

    def Update(self, _deltaTime):
        """Advance the animation state (spawn pulse or movement).

        Only affects where/how the tile is rendered, not gameplay.
        _deltaTime -> seconds elapsed since the previous frame
        """
        # two cases: spawn animation (spawnTimer > 0) or move animation
        if self.spawnTimer <= 0:
            # move animation: slide toward the destination at the precomputed rate
            self.size = G_gameInfo["sqaureSize"]
            left = self.position[0] + self.moveRate[0] * _deltaTime
            top = self.position[1] + self.moveRate[1] * _deltaTime
            self.position = (left, top)
        else:
            # spawn animation: wait out the delay before becoming visible
            if (self.spawnTimer <= self.spawnTimerMax):
                self.canDraw = True
            if self.canDraw:
                if self.spawnTimer > self.spawnTimerMax/2:
                    # first half of the pulse: grow the tile
                    self.size += G_gameInfo["sqaureSize"] * (self.sizeScale / self.spawnTimerMax * _deltaTime)
                else:
                    # second half: shrink it back to normal
                    self.size -= G_gameInfo["sqaureSize"] * (self.sizeScale / self.spawnTimerMax * _deltaTime)
            # update the spawn timer
            self.spawnTimer -= _deltaTime

    def SetPosition(self, _destSquare):
        """Snap the tile's drawing position to the square at matrix indexes _destSquare."""
        self.position = self._squareOrigin(_destSquare)

    def MoveTo(self, _destSquare):
        """Compute the per-second move rate so the tile reaches _destSquare in animSeconds."""
        left, top = self._squareOrigin(_destSquare)
        # spread the whole distance over the configured animation duration
        onX = (left - self.position[0]) / G_gameInfo["animSeconds"]
        onY = (top - self.position[1]) / G_gameInfo["animSeconds"]
        self.moveRate = (onX, onY)

    def StartSpawn(self):
        """Arm the spawn animation, delayed until the move animation has finished."""
        # the extra animSeconds delays the spawn until any pending move completes
        self.spawnTimer = self.spawnTimerMax + G_gameInfo["animSeconds"]
        self.canDraw = False
| {
"content_hash": "338255f9a5509afc0e714e7fe508244e",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 131,
"avg_line_length": 39.401709401709404,
"alnum_prop": 0.6865509761388287,
"repo_name": "MorcoFreeCode/2014__2048-Game",
"id": "0391d20d4f631d5197e99c72eb62ca956390e27b",
"size": "4610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "full_project/source code/Tile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25114"
}
],
"symlink_target": ""
} |
import os
import re
import datetime
import logging
from copy import deepcopy
from collections import Mapping, MutableMapping
import json
import yaml
import rethinkdb
import shotgun_api3 as sg
# Public names exported via `from <module> import *`.
__all__ = [
    'ShotgunAPIWrapper',
    'convertStrToDatetime',
    'addNumberSign',
    'getBaseEntity',
    'prettyJson',
    'chunks',
    'combine_dict',
    'update_dict',
    'EncodedDict',
    'DeepDict',
    'sortMultiEntityFieldsByID',
    'get_dict_diff',
    'get_deep_keys',
    'has_deep_key',
    'get_deep_item',
    'set_deep_item',
    'del_deep_item',
    'Config',
    'History',
]

# Module-level logger named after the module's import path.
LOG = logging.getLogger(__name__)
class ShotgunAPIWrapper(sg.Shotgun):
    """
    Wrapper for shotgun that disables the date time instance creation
    Returning the raw data from json
    """
    def _transform_inbound(self, data):
        # Skip transforming inbound data so it correctly matches for our proxy
        # (the base class would otherwise convert values, e.g. timestamps).
        return data
def convertStrToDatetime(dateStr):
    """Parse a digit-delimited timestamp string into a datetime.

    Splits on every non-digit character and drops the final split element,
    so the string is expected to end with a non-digit suffix (e.g.
    '2014-03-13T22:23:22Z'); components are year, month, day, hour,
    minute, second.
    """
    # raw string avoids the invalid-escape-sequence deprecation for '\d'
    return datetime.datetime(*map(int, re.split(r'[^\d]', dateStr)[:-1]))
def sortMultiEntityFieldsByID(schema, entity):
    """
    Sort all multi-entity fields in an entity by their ID.

    Args:
        schema (dict): Shotgun Schema
        entity (dict): Entity dictionary

    Returns:
        dict: entity with fields sorted
    """
    entity_schema = schema[entity['type']]
    sorted_entity = {}
    for name, value in entity.items():
        # 'id' and 'type' are passthrough metadata, not schema fields
        if name not in ('id', 'type'):
            data_type = entity_schema[name]['data_type']['value']
            if data_type == 'multi_entity':
                value = sorted(value, key=lambda linked: linked['id'])
        sorted_entity[name] = value
    return sorted_entity
def addNumberSign(num):
    """Return the number as an explicitly signed string; zero passes through unchanged."""
    if num > 0:
        return '+' + str(num)
    if num < 0:
        return '-' + str(abs(num))
    # zero keeps its original (numeric) type
    return num
def getBaseEntity(entity):
    """
    Remove extra information from an entity dict
    keeping only type and id (None passes through unchanged)
    """
    if entity is None:
        return entity
    return {key: value for key, value in entity.items() if key in ('id', 'type')}
def get_dict_diff(a, b):
    """Return the entries of ``a`` whose values differ from ``b`` or are
    missing from ``b``.

    Nested dictionaries are compared recursively, so only the differing
    sub-keys appear in the result.
    """
    diff = {}
    for key, a_val in a.items():
        if key in b.keys():
            b_val = b[key]
            if a_val == b_val:
                # identical values contribute nothing to the diff
                continue
            if isinstance(a_val, MutableMapping):
                # recurse so only the changed sub-keys are reported
                nested = get_dict_diff(a_val, b_val)
                if nested:
                    diff[key] = nested
                continue
        # key absent from b, or a plain value that differs
        diff[key] = a_val
    return diff
def chunks(l, n):
    """
    Yield successive n-sized chunks from l.
    """
    # range() works on both Python 2 and 3; the original xrange is py2-only
    for i in range(0, len(l), n):
        yield l[i:i+n]
def combine_dict(a, b, copy=True):
    """
    Return a dict that is the result of recursively
    updating dict `a` with dict `b`. Performs a deep
    copy so neither input is altered.
    """
    merged = deepcopy(a)
    update_dict(merged, b, copy=copy)
    return merged
def prettyJson(obj):
    """Serialize *obj* as human-readable JSON (sorted keys, 4-space indent)."""
    return json.dumps(
        obj,
        sort_keys=True,
        indent=4,
        separators=(',', ': '),
    )
def update_dict(a, b, copy=True):
    """
    Update dictionary A with B recursively.
    This means that dictionary values are not
    simply replaced, but updated as well.

    `copy` - if True, uses a copy of B before updating
    so that if A is changed it will not affect any
    elements of B

    Returns ``a`` (updated in place).

    >>> a = dict(
    ...     myBool = True,
    ...     myDict = {1:'a', 2:'b'},
    ... )
    >>> b = dict(
    ...     myBool = False,
    ...     myDict = {3:'c'},
    ...     myString = 'hi'
    ... )
    >>> result = update_dict(a, b)
    >>> result is a
    True
    >>> a == {'myDict': {1: 'a', 2: 'b', 3: 'c'}, 'myBool': False, 'myString': 'hi'}
    True
    """
    if copy:
        b = deepcopy(b)
    for k in b.keys():
        if isinstance(b[k], Mapping) and k in a and isinstance(a[k], MutableMapping):
            # update existing key
            # NOTE: the recursive call re-copies nested values (copy defaults
            # to True), which is redundant but harmless when b was copied above
            update_dict(a[k], b[k])
        else:
            # assign new key
            a[k] = b[k]
    return a
class EncodedDict(MutableMapping):
    """Abstract mapping whose values pass through ``encode`` on read.

    Values are stored raw in ``_data`` and only transformed at access
    time; the ``data`` property exposes the raw backing dict (not a
    copy).  Subclasses override ``encode`` to implement their
    transformation.
    """
    def __init__(self, *args, **kwargs):
        self._data = dict(*args, **kwargs)

    def __getitem__(self, key):
        return self.encode(self._data[key])

    def __setitem__(self, key, value):
        self._data[key] = value

    def __delitem__(self, key):
        del self._data[key]

    def __contains__(self, key):
        return key in self._data

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)

    def encode(self, value):
        """Identity transform; override in subclasses for custom encoding."""
        return value

    def __repr__(self):
        # repr of the *encoded* view of the mapping
        return repr(dict(self))

    @property
    def data(self):
        """The raw backing dict (not a copy, values unencoded)."""
        return self._data
class DeepDict(EncodedDict):
    """A dict whose items can be addressed at any depth using dot-separated
    keys, e.g. ``d['my.deep.key']``.

    Keys must be non-empty strings that do not contain '.'.  Lookup,
    assignment, deletion and containment all traverse nested dictionaries.
    Assignment creates missing intermediate dicts, but when an existing
    intermediate value is not itself a dict, the written value ends up in
    a detached dict and the assignment is silently dropped (see
    ``set_deep_item``).
    """
    def __getitem__(self, k):
        return self.encode(get_deep_item(self.data, k))

    def __contains__(self, key):
        return has_deep_key(self._data, key)

    def __setitem__(self, k, v):
        set_deep_item(self.data, k, v)

    def __delitem__(self, k):
        del_deep_item(self.data, k)

    def has_key(self, key):
        # legacy Python-2 style alias for ``key in self``
        return key in self

    def deep_keys(self):
        """Dot-joined key paths of every leaf value."""
        return get_deep_keys(self)

    def get_raw(self, key):
        """Deep lookup that bypasses ``encode`` and returns the stored value."""
        return get_deep_item(self.data, key)
def get_deep_keys(dict):
    # NOTE: the parameter shadows the builtin `dict`; the name is kept for
    # backward compatibility with keyword callers.
    """Return dot-joined key paths for every leaf (non-mapping) value."""
    paths = []
    for key, value in dict.items():
        if isinstance(value, Mapping):
            # descend and prefix each nested path with this key
            paths.extend('{0}.{1}'.format(key, sub) for sub in get_deep_keys(value))
        else:
            paths.append(key)
    return paths
def has_deep_key(dict, key):
    # NOTE: the parameter shadows the builtin `dict`; the name is kept for
    # backward compatibility with keyword callers.
    """Return True if the dot-separated *key* path resolves within the
    nested mapping (descends only through Mapping values)."""
    parts = key.split('.', 1)
    if parts[0] not in dict:
        return False
    if len(parts) == 1:
        return True
    child = dict[parts[0]]
    return isinstance(child, Mapping) and has_deep_key(child, parts[1])
def get_deep_item(d, k, sep='.'):
    """
    Return the value for `k` from the dictionary `d`,
    by splitting the key on `sep` and searching recursively.
    Raises KeyError when any hop is missing, empty, or `k` is not a string.
    """
    # py2/py3 compatibility: basestring exists only on Python 2
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if not isinstance(k, string_types):
        raise KeyError('expected string, got {0}: {1}'.format(type(k).__name__, k))
    val = d
    # recursively look for dictionary values, then
    # return the last value
    for key in k.split(sep):
        if key and isinstance(val, Mapping) and key in val:
            val = val.__getitem__(key)
        else:
            raise KeyError(k)
    return val
def set_deep_item(d, k, v, sep='.'):
"""
Recurse into the dictionary `d` by splitting
key `k` by `sep` and setting dictionary values appropriately.
Will create or override intermediate key values if they
are not dictionaries
"""
if not isinstance(k, basestring):
raise KeyError('expected string, got {0}: {1}'.format(type(k).__name__, k))
# split and validate key
keys = k.split(sep)
for key in keys:
if not key:
raise KeyError(k)
# loop through and get/create dictionary
# items for all but the last key
val = d
for key in keys[:-1]:
if key not in val:
# create new dictionary item for key
val[key] = {}
val = dict.__getitem__(val, key)
# force into being a dictionary
if not isinstance(val, MutableMapping):
val = {}
val.__setitem__(keys[-1], v)
def del_deep_item(d, k, sep='.'):
    """
    Recurse into the dictionary `d` by splitting
    key `k` by `sep` and deleting the value at the last key.
    Raises KeyError when the path does not resolve or `k` is not a string.
    """
    # py2/py3 compatibility: basestring exists only on Python 2
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if not isinstance(k, string_types):
        raise KeyError('expected string, got {0}: {1}'.format(type(k).__name__, k))
    keys = k.split(sep)
    val = d
    for key in keys[:-1]:
        if isinstance(val, MutableMapping) and key in val:
            val = dict.__getitem__(val, key)
        else:
            raise KeyError(k)
    val.__delitem__(keys[-1])
class Config(DeepDict):
    """
    Main configuration dictionary for the shotgunCache

    Wraps the parsed YAML config and provides factory helpers for the
    Shotgun and RethinkDB connections plus lazily-loaded History state.
    """
    # Cached History instance, created on first access of `history`.
    _history = None

    @classmethod
    def loadFromYaml(cls, yamlPath):
        """Parse the YAML file at yamlPath and wrap the result in a Config."""
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; the config file appears to be trusted here.
        result = yaml.load(open(yamlPath, 'r').read())
        return cls(result)

    def createShotgunConnection(self, raw=True, **kwargs):
        """Build a Shotgun connection from the 'shotgun' config section.

        raw=True uses ShotgunAPIWrapper (skips inbound transforms);
        kwargs override the configured connection values.
        """
        cls = ShotgunAPIWrapper if raw else sg.Shotgun
        kw = self['shotgun'].copy()
        kw.update(kwargs)
        sgConn = cls(
            **kw
        )
        return sgConn

    def createRethinkConnection(self, **kwargs):
        """Open a RethinkDB connection from the 'rethink' config section."""
        kw = self['rethink'].copy()
        kw.update(kwargs)
        conn = rethinkdb.connect(**kw)
        return conn

    @property
    def history(self):
        # Lazily instantiate the History the first time it is requested.
        if self._history is None:
            self._history = History(self.historyPath)
        return self._history

    @history.setter
    def history(self, value):
        self._history = value

    @property
    def historyPath(self):
        """Absolute path of the history file inside the config directory."""
        # imported lazily here -- presumably to avoid a circular import with
        # the main module; TODO confirm
        import main
        configPath = os.environ.get(main.CONFIG_PATH_ENV_KEY)
        historyPath = os.path.join(configPath, self['history_filename'])
        return historyPath

    @property
    def entityConfigFolderPath(self):
        """Path of the folder holding the per-entity config files."""
        import main
        configPath = os.environ.get(main.CONFIG_PATH_ENV_KEY)
        historyPath = os.path.join(configPath, self['entity_config_foldername'])
        return historyPath
class History(DeepDict):
    """
    Used to track the history state of the cache
    Loads and saves to a yaml file
    """
    def __init__(self, historyFilePath):
        # Normalize the path (~ expansion, absolute) before any file access.
        path = os.path.expanduser(historyFilePath)
        path = os.path.abspath(path)
        self.historyFilePath = path
        super(History, self).__init__({})
        self.load()

    def load(self):
        """(Re)load history state from disk; a missing file yields empty state."""
        if os.path.exists(self.historyFilePath):
            # NOTE(review): yaml.load without a Loader is unsafe on untrusted
            # files; this file is normally written by save() below.
            result = yaml.load(open(self.historyFilePath, 'r').read())
        else:
            LOG.info("No existing history file at {0}".format(self.historyFilePath))
            result = {}
        # Replace the backing dict wholesale with the loaded state.
        self._data = result
        return result

    def save(self):
        """Write the current history state to disk as block-style YAML."""
        with open(self.historyFilePath, 'w') as f:
            yaml.dump(dict(self), f, default_flow_style=False, indent=4)
| {
"content_hash": "c53b186325442a63f2dac74528cac14a",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 85,
"avg_line_length": 26.041322314049587,
"alnum_prop": 0.5478419549349413,
"repo_name": "moonbot/shotgun-cache-server",
"id": "421fe9fd8cdfac20ea060a3a3a6d3ce3886c1fd9",
"size": "12604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shotgunCache/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "145018"
}
],
"symlink_target": ""
} |
"""
webtags.py
Created by QingFeng on 2008-03-23.
Copyright (c) 2008 xBayDNS Team. All rights reserved.
"""
from django import template
# Template tag library; the tag functions below are registered on it.
register = template.Library()
@register.simple_tag
def resultToHtml(results):
    """Render nested {record: {idc: [ips]}} results as nested HTML <ul> lists."""
    # removed a leftover debug `print results` (Python-2 print statement
    # that wrote to stdout on every template render)
    html = ''
    for record, idcs in results.items():
        s = '%s' % record
        for idc, ips in idcs.items():
            s += '<li>%s:%s</li>' % (idc, ','.join(ips))
        html += '<ul>%s</ul>' % s
    return '<ul>%s</ul>' % html
@register.simple_tag
def getResultTime():
    """Return the mtime of the current idcview config file as a ctime
    string, or '' when the file cannot be stat'ed."""
    import time, os
    from xbaydns.conf import sysconf
    CONF_FILE = '%s/idcview/idcview.current' % sysconf.xbaydnsdb
    try:
        time_str = time.ctime(os.stat(CONF_FILE)[8])
    # narrowed from a bare except: os.stat raises OSError/IOError when the
    # file is missing or unreadable; other exceptions should surface
    except (OSError, IOError):
        time_str = ''
    return time_str
| {
"content_hash": "65adbbf555c1d421e7355f85dc7459e8",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 60,
"avg_line_length": 22.96875,
"alnum_prop": 0.6204081632653061,
"repo_name": "changtailiang/xbaydns",
"id": "8187cf83ee77fbf01f03b31dc9e9763def8c8a2f",
"size": "753",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "xbaydnsweb/web/templatetags/webtags.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "9972"
},
{
"name": "Python",
"bytes": "159997"
},
{
"name": "Shell",
"bytes": "23361"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig


class ShortenerConfig(AppConfig):
    """Django AppConfig for the URL-shortener app."""

    # Full Python path of the application this config applies to.
    name = 'shortener'
| {
"content_hash": "435f8168d4f1768d81dc261d0dd7d800",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 18.6,
"alnum_prop": 0.7634408602150538,
"repo_name": "hemor/tursh",
"id": "84b0a39f2ade5b05b2dbce10d522455a619028a9",
"size": "93",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "shortener/apps.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4262"
},
{
"name": "JavaScript",
"bytes": "12187"
},
{
"name": "Nginx",
"bytes": "832"
},
{
"name": "Python",
"bytes": "18836"
},
{
"name": "Shell",
"bytes": "641"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import timeutils
import six
import nova.conf
from nova.i18n import _, _LI, _LW, _LE
from nova.servicegroup import api
from nova.servicegroup.drivers import base
# Global nova configuration options (provides service_down_time below).
CONF = nova.conf.CONF

# Module-level logger for this driver.
LOG = logging.getLogger(__name__)
class DbDriver(base.Driver):
    """ServiceGroup driver that tracks liveness through heartbeat rows in
    the database: each service periodically bumps its report_count, and
    liveness is judged from the age of the last heartbeat."""

    def __init__(self, *args, **kwargs):
        # Seconds without a heartbeat before a service is considered down.
        self.service_down_time = CONF.service_down_time

    def join(self, member, group, service=None):
        """Add a new member to a service group.

        :param member: the joined member ID/name
        :param group: the group ID/name, of the joined member
        :param service: a `nova.service.Service` object
        """
        LOG.debug('DB_Driver: join new ServiceGroup member %(member)s to '
                  'the %(group)s group, service = %(service)s',
                  {'member': member, 'group': group,
                   'service': service})
        if service is None:
            raise RuntimeError(_('service is a mandatory argument for DB based'
                                 ' ServiceGroup driver'))
        report_interval = service.report_interval
        if report_interval:
            # Schedule the periodic heartbeat on the service's thread group.
            service.tg.add_timer(report_interval, self._report_state,
                                 api.INITIAL_REPORTING_DELAY, service)

    def is_up(self, service_ref):
        """Moved from nova.utils
        Check whether a service is up based on last heartbeat.
        """
        # Fall back to the creation timestamp when no heartbeat was recorded.
        last_heartbeat = (service_ref.get('last_seen_up') or
                          service_ref['created_at'])
        if isinstance(last_heartbeat, six.string_types):
            # NOTE(russellb) If this service_ref came in over rpc via
            # conductor, then the timestamp will be a string and needs to be
            # converted back to a datetime.
            last_heartbeat = timeutils.parse_strtime(last_heartbeat)
        else:
            # Objects have proper UTC timezones, but the timeutils comparison
            # below does not (and will fail)
            last_heartbeat = last_heartbeat.replace(tzinfo=None)
        # Timestamps in DB are UTC.
        elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
        is_up = abs(elapsed) <= self.service_down_time
        if not is_up:
            LOG.debug('Seems service %(binary)s on host %(host)s is down. '
                      'Last heartbeat was %(lhb)s. Elapsed time is %(el)s',
                      {'binary': service_ref.get('binary'),
                       'host': service_ref.get('host'),
                       'lhb': str(last_heartbeat), 'el': str(elapsed)})
        return is_up

    def updated_time(self, service_ref):
        """Get the updated time from db"""
        return service_ref['updated_at']

    def _report_state(self, service):
        """Update the state of this service in the datastore."""
        try:
            service.service_ref.report_count += 1
            service.service_ref.save()

            # TODO(termie): make this pattern be more elegant.
            if getattr(service, 'model_disconnected', False):
                service.model_disconnected = False
                LOG.info(
                    _LI('Recovered from being unable to report status.'))
        except messaging.MessagingTimeout:
            # NOTE(johngarbutt) during upgrade we will see messaging timeouts
            # as nova-conductor is restarted, so only log this error once.
            if not getattr(service, 'model_disconnected', False):
                service.model_disconnected = True
                LOG.warning(_LW('Lost connection to nova-conductor '
                                'for reporting service status.'))
        except Exception:
            # NOTE(rpodolyaka): we'd like to avoid catching of all possible
            # exceptions here, but otherwise it would become possible for
            # the state reporting thread to stop abruptly, and thus leave
            # the service unusable until it's restarted.
            LOG.exception(
                _LE('Unexpected error while reporting service status'))
            # trigger the recovery log message, if this error goes away
            service.model_disconnected = True
| {
"content_hash": "cdc0f1f705b3c73646e86d04f5e528c7",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 79,
"avg_line_length": 43.10204081632653,
"alnum_prop": 0.5963541666666666,
"repo_name": "jianghuaw/nova",
"id": "02cc2ee0092dc7dc1d9ca681fee01e01caa611ff",
"size": "4799",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nova/servicegroup/drivers/db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1435"
},
{
"name": "PHP",
"bytes": "32515"
},
{
"name": "Python",
"bytes": "19932348"
},
{
"name": "Shell",
"bytes": "28290"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
} |
import re
import os
import json
import logging
import threading
from flask import Flask, request, jsonify
from werkzeug.exceptions import default_exceptions
from werkzeug.exceptions import HTTPException
from datetime import datetime
from rasa_nlu.model import Interpreter
from rasa_core.agent import Agent
from flask import Flask, request, jsonify
from datetime import datetime
from rasa_core.interpreter import RasaNLUInterpreter
from rasa_core.channels import CollectingOutputChannel
from rasa_core.utils import EndpointConfig
# Root logger: DEBUG level, tagged so NLU output is easy to grep.
logging.basicConfig(format="[NLULOGS] %(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s", level=logging.DEBUG)

application = Flask(__name__)

# Now load up the various interpreters and agents
opening_nlu = RasaNLUInterpreter('./nlu-opening/models/current/opening_nlu')
service_nlu = RasaNLUInterpreter('./core-services/models/current/services_nlu')
platform_nlu = RasaNLUInterpreter('./core-livewire/models/current/platform_nlu')

# Action-server webhook URLs; overridable through the environment.
services_actions_endpoint = os.getenv('SERVICE_ACTION_ENDPOINT_URL', 'http://localhost:5055/webhook')
platform_actions_endpoint = os.getenv('PLATFORM_ACTION_ENDPOINT_URL', 'http://localhost:5055/webhook')

# Maps a high-confidence opening intent to the domain agent that handles it.
# NOTE(review): 'request_knowledge' maps to 'knowledge', which has no agent
# in domain_agents below, so it falls into the invalid-domain branch.
intent_domain_map = {
    'find_services': 'service',
    'request_knowledge': 'knowledge',
    'call_meeting': 'action',
    'call_vote': 'action',
    'create_action_todo': 'action',
    'create_info_todo': 'action',
    'create_volunteer_todo': 'action',
    'create_validation_todo': 'action',
    'create_livewire': 'action',
    'take_action': 'action'
}

# One Rasa core agent per domain (the "knowledge" agent is commented out).
domain_agents = {
    "service": Agent.load('core-services/models/dialogue', interpreter = service_nlu, action_endpoint = EndpointConfig(services_actions_endpoint)),
    # "knowledge": Agent.load('core-knowledge/models/dialogue', interpreter = RasaNLUInterpreter('core-knowledge/models/current/knowledge_nlu')),
    "action" : Agent.load('core-livewire/models/dialogue', interpreter = platform_nlu, action_endpoint = EndpointConfig(platform_actions_endpoint))
}

# Minimum opening-NLU confidence before short-cutting into a domain agent.
CONFIDENCE_THRESHOLD = 0.7
def reset_all_agents(user_id):
    """Send the Rasa '/restart' event to every domain agent for this user."""
    for agent in domain_agents.values():
        agent.handle_text('/restart', sender_id=user_id)
"""
Common response format: {
'domain': what this belongs to (non-empty),
'intent': best guess of the intent
'responses': what should be sent back,
'intent_list': the list of guessed intents, in order,
'entities': any entities passed back
}
"""
def reshape_nlu_result(domain, nlu_result):
    """Convert a raw NLU parse into the common response format (no responses)."""
    reshaped = {}
    reshaped['domain'] = domain
    reshaped['intent'] = nlu_result['intent']
    reshaped['intent_list'] = nlu_result.get('intent_ranking', [])
    reshaped['entities'] = nlu_result.get('entities', [])
    reshaped['responses'] = []
    return reshaped
def empty_result(domain, error):
    """Common response format carrying only an error message (no intent data)."""
    result = {'domain': domain, 'intent': ''}
    for key in ('intent_list', 'entities', 'responses'):
        result[key] = []
    result['error'] = error
    return result
def reshape_core_result(domain, core_results):
    """Flatten Rasa core responses into {'domain', 'responses', 'menu'}.

    Returns False when the distress signal text is found, which tells the
    caller to reroute the message to another domain.
    """
    logging.info('reshaping core_result: {}'.format(core_results))
    texts = []
    menu = []
    for item in core_results:
        has_text = 'text' in item
        if has_text and item['text'] == 'DUM_SPIRO_SPERO':
            # the action server's distress signal: bail out so the caller reroutes
            logging.warning('Found distress signal from %s domain. Initialising domain rerouting.' % domain)
            return False
        if has_text and len(item['text']) > 0:
            texts.append(item['text'])
        if 'buttons' in item:
            # buttons become numbered text lines; the last menu seen wins
            menu = item['buttons']
            numbered = ['{}. {}'.format(pos + 1, choice['title'])
                        for pos, choice in enumerate(menu)]
            texts.extend(numbered)
    logging.info('Extracted response texts: {}'.format(texts))
    return {
        'domain': domain,
        'responses': texts,
        'menu': menu
    }
def error_catching_nlu_parse(user_message, interpreter):
    """Parse with the given NLU interpreter; on ValueError fall back to
    parsing the empty string."""
    logging.info('Parsing user message: %s', user_message)
    try:
        parsed = interpreter.parse(user_message)
    except ValueError as err:
        logging.error('Error parsing! Value error')
        logging.error('Error message: %s', err)
        parsed = interpreter.parse('')
    return parsed
@application.route('/status')
def say_hello():
    """Health-check endpoint reporting liveness and the action endpoint URLs."""
    template = ("Hello World! I am alive, on version 0-d. \n Service action URL is: {}, "
                "and platform action URL is: {}")
    return template.format(services_actions_endpoint, platform_actions_endpoint)
@application.route('/restart', methods=['POST'])
def reset_user_session():
    """Restart the conversation in every domain for one user.

    Query params:
        user_id (str): User ID to reset (required)
    """
    target_user = request.args.get('user_id')
    reset_all_agents(target_user)
    logging.info('Completed restart for {}'.format(target_user))
    return '', 200
@application.route('/province', methods=['GET'])
def parse_user_province():
    """Parse ?message=... with the services NLU.

    The services NLU is used because it is by far the heaviest user of
    province selection.
    """
    incoming = request.args.get('message')
    parsed = error_catching_nlu_parse(incoming, service_nlu)
    resp = jsonify(reshape_nlu_result('service', parsed))
    resp.status_code = 200
    return resp
"""
@application.route('/evaluate', methods=['GET'])
def evaluate():
user_message = request.args.get('message')
nlu_result = error_catching_nlu_parse(user_message, opening_nlu)
NLU_THRESHOLD = 0.1
if nlu_result['intent']['confidence'] > NLU_THRESHOLD:
intent = nlu_result['intent']['name']
return json.dumps({'intent': intent})
else:
return json.dumps({'intent': None})
"""
@application.route('/opening/parse', methods=['GET'])
def parse_unknown_domain(*rerouted_message):
    """Parser when nothing is known about the message, i.e., at the start of a conversation (or at restart)

    May also be called internally (with the message as a positional
    argument) when a domain agent signals rerouting.

    Query params:
        message (str): The user message

    Returns:
        The best-guess intent, ranked intents, and any guessed entities, except for high-confidence results, in which case, domain parse result
    """
    # rerouted_message is the *args tuple; non-empty means an internal reroute
    if rerouted_message:
        user_message = rerouted_message[0]
    else:
        user_message = request.args.get('message')
    nlu_result = error_catching_nlu_parse(user_message, opening_nlu)
    primary_result = nlu_result['intent']
    logging.info('NLU result on opening: %s', nlu_result)
    result_as_response = reshape_nlu_result('opening', nlu_result)
    # NOTE(review): query-string values are strings, so e.g. nlu_only=false
    # is truthy here; `default=False` only applies when the param is absent
    nlu_only = request.args.get('nlu_only', default=False)
    logging.info('Are we doing just an NLU parse? : %s', nlu_only)
    if (primary_result['confidence'] > CONFIDENCE_THRESHOLD and not nlu_only):
        # since we are now pretty sure of the result, check if we can skip straight into domain
        if (primary_result['name'] in intent_domain_map):
            domain = intent_domain_map[primary_result['name']]
            logging.info('Short cutting straight to domain: %s', domain)
            resp = parse_knowledge_domain(domain)
        else:
            resp = jsonify(result_as_response)
    else:
        resp = jsonify(result_as_response)
    resp.status_code = 200
    return resp
@application.route('/<domain>/parse', methods=['GET'])
def parse_knowledge_domain(domain):
    """Parser when it is known (or high-confidence guessed) that user is in the find knowledge domain

    Params:
        domain (str): The domain of the response (path variable)
        message (str): The user message
        user_id: The user ID, for tracking within Rasa core (can be any type of value, as long as consistent within session)
    """
    user_message = request.args.get('message')
    logging.info('Parsing {} in domain {}'.format(user_message, domain))
    if domain not in domain_agents:
        logging.error('Error! Sent invalid domain: {}'.format(domain))
        response = jsonify(empty_result(domain, 'Invalid domain sent to core'))
        # error would trigger a fail upstream, so return "don't know" instead
        response.status_code = 200
        return response
    if 'user_id' in request.args:
        user_id = request.args.get('user_id')
        responses_to_user = domain_agents[domain].handle_text(user_message, sender_id=user_id)
    else:
        responses_to_user = domain_agents[domain].handle_text(user_message)
    logging.info('raw response: {}'.format(responses_to_user))
    to_send = responses_to_user if (responses_to_user and len(responses_to_user) > 0) else {}
    reshaped_response = reshape_core_result(domain, to_send)
    if not reshaped_response:
        # BUG FIX: the rerouted response was previously computed but
        # discarded, so the client received JSON `false`; return the
        # opening-domain parse result instead.
        return parse_unknown_domain(user_message)
    logging.info('Newly reshaped response: {}'.format(reshaped_response))
    resp = jsonify(reshaped_response)
    resp.status_code = 200
    return resp
@application.errorhandler(Exception)
def make_json_error(ex):
    """Return all errors as JSON; HTTPExceptions keep their status code,
    everything else becomes a 500."""
    logging.error("Failure! : {}".format(ex))
    response = jsonify(error=str(ex))
    if isinstance(ex, HTTPException):
        response.status_code = ex.code
    else:
        response.status_code = 500
    return response
# Script entry point: start the Flask development server on all interfaces.
if __name__ == "__main__":
    logging.info("Starting up Grassroot Rasa components")
    # NOTE(review): debug=True and binding 0.0.0.0 are development settings —
    # confirm this path is not used for a production launch.
    application.debug = True
    application.run(host='0.0.0.0')
| {
"content_hash": "c2fac87cf582da6e185dfe5f6e954bf8",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 175,
"avg_line_length": 36.36538461538461,
"alnum_prop": 0.6652564780539397,
"repo_name": "grassrootza/grassroot-learning",
"id": "69f8c392acd0f57ad70226ea035a17d3996762ef",
"size": "9455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "844"
},
{
"name": "Makefile",
"bytes": "6311"
},
{
"name": "Python",
"bytes": "47791"
},
{
"name": "Shell",
"bytes": "695"
}
],
"symlink_target": ""
} |
import re
# Bazel to CMake target name conversions used by bazel_to_cmake.py.
# Maps a Bazel target label to the list of CMake target names it corresponds
# to. An empty list drops the Bazel target on the CMake side; targets not
# listed here fall back to the pattern-based conversions in convert_target().
EXPLICIT_TARGET_MAPPING = {
    # Internal utilities to emulate various binary/library options.
    "//build_tools:default_linkopts": [],
    "//build_tools:dl": ["${CMAKE_DL_LIBS}"],
    "//compiler/src:defs": [],
    "//compiler/src/iree/compiler/API:CAPI": ["IREECompilerCAPILib"],
    "//runtime/src:runtime_defines": [],
    # IREE llvm-external-projects
    "//llvm-external-projects/iree-dialects:CAPI": ["IREEDialectsCAPI"],
    # Disable all hard-coded codegen targets (they are expanded dynamically
    # in CMake).
    "@llvm-project//llvm:AArch64AsmParser": ["IREELLVMCPUTargetDeps"],
    "@llvm-project//llvm:AArch64CodeGen": ["IREELLVMCPUTargetDeps"],
    "@llvm-project//llvm:ARMAsmParser": ["IREELLVMCPUTargetDeps"],
    "@llvm-project//llvm:ARMCodeGen": ["IREELLVMCPUTargetDeps"],
    "@llvm-project//llvm:RISCVAsmParser": ["IREELLVMCPUTargetDeps"],
    "@llvm-project//llvm:RISCVCodeGen": ["IREELLVMCPUTargetDeps"],
    "@llvm-project//llvm:WebAssemblyAsmParser": ["IREELLVMCPUTargetDeps"],
    "@llvm-project//llvm:WebAssemblyCodeGen": ["IREELLVMCPUTargetDeps"],
    "@llvm-project//llvm:X86AsmParser": ["IREELLVMCPUTargetDeps"],
    "@llvm-project//llvm:X86CodeGen": ["IREELLVMCPUTargetDeps"],
    # LLD
    "@llvm-project//lld": ["${IREE_LLD_TARGET}"],
    "@llvm-project//lld:COFF": ["lldCOFF"],
    "@llvm-project//lld:Common": ["lldCommon"],
    "@llvm-project//lld:ELF": ["lldELF"],
    "@llvm-project//lld:MachO": ["lldMachO"],
    "@llvm-project//lld:Wasm": ["lldWasm"],
    # LLVM
    "@llvm-project//llvm:config": [],
    # Note the lowercase "ipo" in the CMake name.
    "@llvm-project//llvm:IPO": ["LLVMipo"],
    "@llvm-project//llvm:FileCheck": ["FileCheck"],
    "@llvm-project//llvm:not": ["not"],
    # MLIR
    "@llvm-project//mlir:AllPassesAndDialects": ["MLIRAllDialects"],
    "@llvm-project//mlir:DialectUtils": [""],
    "@llvm-project//mlir:GPUDialect": ["MLIRGPUOps"],
    "@llvm-project//mlir:GPUTransforms": ["MLIRGPUTransforms"],
    "@llvm-project//mlir:LinalgStructuredOpsIncGen": [
        "MLIRLinalgStructuredOpsIncGenLib"
    ],
    "@llvm-project//mlir:ShapeTransforms": ["MLIRShapeOpsTransforms"],
    "@llvm-project//mlir:ToLLVMIRTranslation": ["MLIRTargetLLVMIRExport"],
    "@llvm-project//mlir:mlir-translate": ["mlir-translate"],
    "@llvm-project//mlir:MlirLspServerLib": ["MLIRLspServerLib"],
    "@llvm-project//mlir:MlirTableGenMain": ["MLIRTableGen"],
    "@llvm-project//mlir:MlirOptLib": ["MLIROptLib"],
    "@llvm-project//mlir:VectorOps": ["MLIRVector"],
    # MHLO.
    # TODO: Rework this upstream so that Bazel and CMake rules match up
    # better.
    # All of these have to depend on tensorflow::external_mhlo_includes to
    # ensure that include directories are inherited.
    "@mlir-hlo//:chlo_legalize_to_hlo": [
        "tensorflow::external_mhlo_includes",
        "ChloPasses",
    ],
    "@mlir-hlo//:mlir_hlo": [
        "tensorflow::external_mhlo_includes",
        "MhloDialect",
        "MLIRMhloUtils",
    ],
    "@mlir-hlo//:map_chlo_to_hlo_op": [
        "ChloOps",
        "MhloDialect",
    ],
    "@mlir-hlo//:map_lmhlo_to_scalar_op": [
        "tensorflow::external_mhlo_includes",
        "LmhloDialect",  # Unfortunate.
        "MhloDialect",
    ],
    "@mlir-hlo//:map_mhlo_to_scalar_op": [
        "tensorflow::external_mhlo_includes",
        "MhloDialect",
    ],
    "@mlir-hlo//:mhlo_passes": [
        "tensorflow::external_mhlo_includes",
        "MhloPasses",
        "MhloShapeOpsToStandard",
        "MhloToArithmeticConversion",
        "MhloToLhloConversion",
        "MhloToLinalg",
        "MhloToMemrefConversion",
        "MhloToStandard",
        "StablehloToMhlo",
    ],
    "@mlir-hlo//:unfuse_batch_norm": [
        "tensorflow::external_mhlo_includes",
        "MhloPasses",
    ],
    "@mlir-hlo//stablehlo:chlo_ops": ["ChloOps",],
    "@mlir-hlo//:stablehlo_legalize_to_hlo_pass": ["StablehloToMhlo",],
    "@mlir-hlo//stablehlo:broadcast_utils": ["StablehloBroadcastUtils",],
    # Torch-MLIR.
    "@torch-mlir-dialects//:TorchMLIRTMTensorDialect": [
        "TorchMLIRTMTensorDialect"
    ],
    # Vulkan
    "@vulkan_headers": ["Vulkan::Headers"],
    # Misc single targets
    "@com_google_benchmark//:benchmark": ["benchmark"],
    "@com_github_dvidelabs_flatcc//:flatcc": ["flatcc"],
    "@com_github_dvidelabs_flatcc//:parsing": ["flatcc::parsing"],
    "@com_github_dvidelabs_flatcc//:runtime": ["flatcc::runtime"],
    "@com_github_yaml_libyaml//:yaml": ["yaml"],
    "@com_google_googletest//:gtest": ["gmock", "gtest"],
    "@spirv_cross//:spirv_cross_lib": ["spirv-cross-msl"],
    "@cpuinfo": ["${IREE_CPUINFO_TARGET}"],
    "@vulkan_memory_allocator//:impl_header_only": ["vulkan_memory_allocator"],
}
def _convert_mlir_target(target):
    """Map an @llvm-project//mlir Bazel target to its CMake library name.

    Uses the "MLIR" + <label> naming pattern, e.g.
    "@llvm-project//mlir:IR" -> "MLIRIR",
    "@llvm-project//mlir:Pass" -> "MLIRPass".

    MLIR has no header-only targets apart from the libraries, so any request
    for a CAPI{Name}Headers target is redirected to an IREE-side target that
    sets up the include directories instead.
    """
    _, _, label = target.rpartition(":")
    if label.startswith("CAPI") and label.endswith("Headers"):
        return ["IREELLVMIncludeSetup"]
    return ["MLIR" + label]
def _convert_llvm_target(target):
    """Map an @llvm-project//llvm Bazel target to its CMake name.

    Prepends "LLVM" to the label part, e.g.
    "@llvm-project//llvm:AsmParser" -> "LLVMAsmParser",
    "@llvm-project//llvm:Core" -> "LLVMCore".
    """
    _, _, label = target.rpartition(":")
    return ["LLVM" + label]
def _convert_iree_dialects_target(target):
    """iree-dialects targets keep their Bazel label as the CMake name."""
    return [target.rpartition(":")[2]]
def _convert_to_cmake_path(bazel_path_fragment: str) -> str:
    """Turn a Bazel path/label fragment into a CMake-style "::" name.

    Examples:
      Bazel `//iree/base`     -> CMake `iree::base`
      Bazel `//iree/base:foo` -> CMake `iree::base::foo`
    """
    stripped = bazel_path_fragment
    if stripped.startswith("//"):
        stripped = stripped[2:]
    # Both the label separator ":" and the package separator "/" become "::".
    return stripped.replace(":", "::").replace("/", "::")
def convert_target(target):
    """Converts a Bazel target to a list of CMake targets.

    IREE targets are expected to follow a standard form between Bazel and CMake
    that facilitates conversion. External targets *may* have their own patterns,
    or they may be purely special cases.

    Multiple targets in Bazel may map to a single target in CMake and a Bazel
    target may map to multiple CMake targets.

    Returns:
      A list of converted targets if it was successfully converted.

    Raises:
      KeyError: No conversion was found for the target.
    """
    # Explicit mappings win outright. Use an `is not None` check because a
    # mapped value may legitimately be the empty list.
    explicit = EXPLICIT_TARGET_MAPPING.get(target)
    if explicit is not None:
        return explicit
    if target.startswith("@llvm-project//llvm"):
        return _convert_llvm_target(target)
    if target.startswith("@llvm-project//mlir"):
        return _convert_mlir_target(target)
    if target.startswith("@"):
        # Any other external repository is unknown.
        raise KeyError(f"No conversion found for target '{target}'")
    if target.startswith("//llvm-external-projects/iree-dialects"):
        return _convert_iree_dialects_target(target)

    # IREE root paths map to package names based on explicit rules.
    # * src/iree/ directories (compiler/src/iree/ and runtime/src/iree/)
    #   creating their own root paths by trimming down to just "iree"
    # * tools/ uses an empty root, for binary targets names like "iree-compile"
    # * other top level directories add back an 'iree' prefix
    # If changing these, make the corresponding change in iree_macros.cmake
    # (iree_package_ns function).
    prefix_rules = (
        # //compiler/src/iree/(.*) -> iree::\1 (i.e. iree::compiler::\1)
        ("^//compiler/src/iree/(.+)", "iree::"),
        # //runtime/src/iree/(.*) -> iree::\1
        ("^//runtime/src/iree/(.+)", "iree::"),
        # //tools/(.*) -> \1
        ("^//tools[/|:](.+)", ""),
    )
    for pattern, prefix in prefix_rules:
        matched = re.match(pattern, target)
        if matched:
            return [prefix + _convert_to_cmake_path(matched.group(1))]

    # Package-relative targets (":target_name") and bare file names
    # ("file_name.txt") pass through.
    if target.startswith(":") or ":" not in target:
        return [_convert_to_cmake_path(target)]

    # Default rewrite: prefix with "iree::", without pruning the path.
    return ["iree::" + _convert_to_cmake_path(target)]
| {
"content_hash": "31015bdcaf1695c20d02dcd0ae0d2775",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 79,
"avg_line_length": 38.4,
"alnum_prop": 0.6508049242424242,
"repo_name": "iree-org/iree",
"id": "8b7b3ae55fcdc2ed3afd5056bd6d33681b0b124f",
"size": "8666",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "build_tools/bazel_to_cmake/bazel_to_cmake_targets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "23010"
},
{
"name": "Batchfile",
"bytes": "353"
},
{
"name": "C",
"bytes": "3830546"
},
{
"name": "C++",
"bytes": "8161374"
},
{
"name": "CMake",
"bytes": "899403"
},
{
"name": "Dockerfile",
"bytes": "28245"
},
{
"name": "GLSL",
"bytes": "2629"
},
{
"name": "HTML",
"bytes": "31018"
},
{
"name": "Java",
"bytes": "31697"
},
{
"name": "JavaScript",
"bytes": "18714"
},
{
"name": "MLIR",
"bytes": "5606822"
},
{
"name": "NASL",
"bytes": "3852"
},
{
"name": "PowerShell",
"bytes": "7893"
},
{
"name": "Python",
"bytes": "1143963"
},
{
"name": "Shell",
"bytes": "248374"
},
{
"name": "Starlark",
"bytes": "600260"
}
],
"symlink_target": ""
} |
import os
from setuptools import find_packages
from setuptools import setup
# Directory containing this setup.py, used to locate sibling files.
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
# The PyPI long description is taken verbatim from the README.
with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:
    README = file_obj.read()
# NOTE: This is duplicated throughout and we should try to
# consolidate.
# Metadata shared by every google-cloud-* package's setup.py.
SETUP_BASE = {
    'author': 'Google Cloud Platform',
    'author_email': 'jjg+google-cloud-python@google.com',
    'scripts': [],
    'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',
    'license': 'Apache 2.0',
    'platforms': 'Posix; MacOS X; Windows',
    'include_package_data': True,
    'zip_safe': False,
    'classifiers': [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Internet',
    ],
}
# This umbrella package pins each sub-package to the matching 0.22.x release
# line (upper bound excludes the next minor series).
REQUIREMENTS = [
    'google-cloud-bigquery >= 0.22.1, < 0.23dev',
    'google-cloud-bigtable >= 0.22.0, < 0.23dev',
    'google-cloud-core >= 0.22.1, < 0.23dev',
    'google-cloud-datastore >= 0.22.0, < 0.23dev',
    'google-cloud-dns >= 0.22.0, < 0.23dev',
    'google-cloud-error-reporting >= 0.22.0, < 0.23dev',
    'google-cloud-language >= 0.22.1, < 0.23dev',
    'google-cloud-logging >= 0.22.0, < 0.23dev',
    'google-cloud-monitoring >= 0.22.0, < 0.23dev',
    'google-cloud-pubsub >= 0.22.0, < 0.23dev',
    'google-cloud-resource-manager >= 0.22.0, < 0.23dev',
    'google-cloud-storage >= 0.22.0, < 0.23dev',
    'google-cloud-translate >= 0.22.0, < 0.23dev',
    'google-cloud-vision >= 0.22.0, < 0.23dev',
    'google-cloud-runtimeconfig >= 0.22.0, < 0.23dev',
]
setup(
    name='google-cloud',
    version='0.22.0',
    description='API Client library for Google Cloud',
    long_description=README,
    install_requires=REQUIREMENTS,
    **SETUP_BASE
)
| {
"content_hash": "0cb72c9a3c2afd37c45fb575abc4c447",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 72,
"avg_line_length": 33.53968253968254,
"alnum_prop": 0.6114529105537151,
"repo_name": "quom/google-cloud-python",
"id": "9937aabb97c69ead08058a489ba16496b3023a2f",
"size": "2689",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62009"
},
{
"name": "Python",
"bytes": "3388266"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.