from tastypie.resources import ModelResource, ALL
from gopher.models import RequestAirtimeSend, AirtimeApplication
from tastypie import fields
from tastypie.authorization import Authorization
class AirtimeApplicationResource(ModelResource):
"""
To get the required app id
GET:
/api/v1/request/application/?name=test_application
"""
class Meta:
resource_name = "request/application"
queryset = AirtimeApplication.objects.all()
include_resource_uri = True
always_return_data = True
list_allowed_methods = ['get']
filtering = {
'name': ALL}
class RequestAirtimeSendResource(ModelResource):
"""
POST:
url:
/api/v1/request/airtime/
data:
{
"msisdn": "27721231234",
"product_key": "AIRTIME",
"amount": 500,
"request_application": "/api/v1/request/application/1/"
}
"""
request_application = fields.ForeignKey(AirtimeApplicationResource,
'request_application',
full=True)
class Meta:
queryset = RequestAirtimeSend.objects.all()
resource_name = "request/airtime"
list_allowed_methods = ['post', 'get']
include_resource_uri = True
always_return_data = True
authorization = Authorization()
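# --- Illustrative client usage (not part of the original module) ---
# A minimal sketch, assuming the API is served at http://localhost:8000 and an
# AirtimeApplication named "test_application" already exists; it only needs
# the `requests` library and a running server.
if __name__ == "__main__":
    import requests

    base = "http://localhost:8000/api/v1"
    # Resolve the application's resource URI by name (see the docstring above).
    apps = requests.get(base + "/request/application/",
                        params={"name": "test_application"}).json()
    app_uri = apps["objects"][0]["resource_uri"]
    # POST an airtime request referencing that application.
    payload = {
        "msisdn": "27721231234",
        "product_key": "AIRTIME",
        "amount": 500,
        "request_application": app_uri,
    }
    resp = requests.post(base + "/request/airtime/", json=payload)
    print(resp.status_code, resp.json())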
| {
"content_hash": "952047caff934234401ec6a4c98ba966",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 71,
"avg_line_length": 29.74468085106383,
"alnum_prop": 0.6022889842632332,
"repo_name": "westerncapelabs/django-grs-gatewaycms",
"id": "68b37957e8e8f0d16355269343e2ef03c9a035c3",
"size": "1398",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "gopher/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "92765"
}
],
"symlink_target": ""
} |
from sklearn.utils.estimator_checks import check_estimator
from PLN import BasePLN, PLNR, PLNC
def test_estimator():
return check_estimator(PLNR)
def test_classifier():
return check_estimator(PLNC)
def test_base():
return check_estimator(BasePLN)
| {
"content_hash": "46edd9892af751139a12689f8e1272a1",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 58,
"avg_line_length": 18.928571428571427,
"alnum_prop": 0.7471698113207547,
"repo_name": "rohitrawat/PLN_NumPy",
"id": "0a95339811cd95bd75c08ee55a9519c717e9466b",
"size": "265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PLN/tests/test_common.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "Python",
"bytes": "10844"
},
{
"name": "Shell",
"bytes": "3145"
}
],
"symlink_target": ""
} |
"""Support for ZHA covers."""
import asyncio
import functools
import logging
from typing import List, Optional
from zigpy.zcl.foundation import Status
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_POSITION,
DEVICE_CLASS_DAMPER,
DEVICE_CLASS_SHADE,
DOMAIN,
CoverEntity,
)
from homeassistant.const import STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .core import discovery
from .core.const import (
CHANNEL_COVER,
CHANNEL_LEVEL,
CHANNEL_ON_OFF,
CHANNEL_SHADE,
DATA_ZHA,
DATA_ZHA_DISPATCHERS,
SIGNAL_ADD_ENTITIES,
SIGNAL_ATTR_UPDATED,
SIGNAL_SET_LEVEL,
)
from .core.registries import ZHA_ENTITIES
from .core.typing import ChannelType, ZhaDeviceType
from .entity import ZhaEntity
_LOGGER = logging.getLogger(__name__)
STRICT_MATCH = functools.partial(ZHA_ENTITIES.strict_match, DOMAIN)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Zigbee Home Automation cover from config entry."""
entities_to_create = hass.data[DATA_ZHA][DOMAIN]
unsub = async_dispatcher_connect(
hass,
SIGNAL_ADD_ENTITIES,
functools.partial(
discovery.async_add_entities, async_add_entities, entities_to_create
),
)
hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS].append(unsub)
@STRICT_MATCH(channel_names=CHANNEL_COVER)
class ZhaCover(ZhaEntity, CoverEntity):
"""Representation of a ZHA cover."""
def __init__(self, unique_id, zha_device, channels, **kwargs):
"""Init this sensor."""
super().__init__(unique_id, zha_device, channels, **kwargs)
self._cover_channel = self.cluster_channels.get(CHANNEL_COVER)
self._current_position = None
async def async_added_to_hass(self):
"""Run when about to be added to hass."""
await super().async_added_to_hass()
self.async_accept_signal(
self._cover_channel, SIGNAL_ATTR_UPDATED, self.async_set_position
)
@callback
def async_restore_last_state(self, last_state):
"""Restore previous state."""
self._state = last_state.state
if "current_position" in last_state.attributes:
self._current_position = last_state.attributes["current_position"]
@property
def is_closed(self):
"""Return if the cover is closed."""
if self.current_cover_position is None:
return None
return self.current_cover_position == 0
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return self._state == STATE_OPENING
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return self._state == STATE_CLOSING
@property
def current_cover_position(self):
"""Return the current position of ZHA cover.
None is unknown, 0 is closed, 100 is fully open.
"""
return self._current_position
@callback
def async_set_position(self, attr_id, attr_name, value):
"""Handle position update from channel."""
_LOGGER.debug("setting position: %s", value)
self._current_position = 100 - value
if self._current_position == 0:
self._state = STATE_CLOSED
elif self._current_position == 100:
self._state = STATE_OPEN
self.async_write_ha_state()
@callback
def async_update_state(self, state):
"""Handle state update from channel."""
_LOGGER.debug("state=%s", state)
self._state = state
self.async_write_ha_state()
async def async_open_cover(self, **kwargs):
"""Open the window cover."""
res = await self._cover_channel.up_open()
if isinstance(res, list) and res[1] is Status.SUCCESS:
self.async_update_state(STATE_OPENING)
async def async_close_cover(self, **kwargs):
"""Close the window cover."""
res = await self._cover_channel.down_close()
if isinstance(res, list) and res[1] is Status.SUCCESS:
self.async_update_state(STATE_CLOSING)
async def async_set_cover_position(self, **kwargs):
"""Move the roller shutter to a specific position."""
new_pos = kwargs[ATTR_POSITION]
res = await self._cover_channel.go_to_lift_percentage(100 - new_pos)
if isinstance(res, list) and res[1] is Status.SUCCESS:
self.async_update_state(
STATE_CLOSING if new_pos < self._current_position else STATE_OPENING
)
async def async_stop_cover(self, **kwargs):
"""Stop the window cover."""
res = await self._cover_channel.stop()
if isinstance(res, list) and res[1] is Status.SUCCESS:
self._state = STATE_OPEN if self._current_position > 0 else STATE_CLOSED
self.async_write_ha_state()
async def async_update(self):
"""Attempt to retrieve the open/close state of the cover."""
await super().async_update()
await self.async_get_state()
async def async_get_state(self, from_cache=True):
"""Fetch the current state."""
_LOGGER.debug("polling current state")
if self._cover_channel:
pos = await self._cover_channel.get_attribute_value(
"current_position_lift_percentage", from_cache=from_cache
)
_LOGGER.debug("read pos=%s", pos)
if pos is not None:
self._current_position = 100 - pos
self._state = (
STATE_OPEN if self.current_cover_position > 0 else STATE_CLOSED
)
else:
self._current_position = None
self._state = None
@STRICT_MATCH(channel_names={CHANNEL_LEVEL, CHANNEL_ON_OFF, CHANNEL_SHADE})
class Shade(ZhaEntity, CoverEntity):
"""ZHA Shade."""
def __init__(
self,
unique_id: str,
zha_device: ZhaDeviceType,
channels: List[ChannelType],
**kwargs,
):
"""Initialize the ZHA light."""
super().__init__(unique_id, zha_device, channels, **kwargs)
self._on_off_channel = self.cluster_channels[CHANNEL_ON_OFF]
self._level_channel = self.cluster_channels[CHANNEL_LEVEL]
self._position = None
self._is_open = None
@property
def current_cover_position(self):
"""Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
return self._position
@property
def device_class(self) -> Optional[str]:
"""Return the class of this device, from component DEVICE_CLASSES."""
return DEVICE_CLASS_SHADE
@property
def is_closed(self) -> Optional[bool]:
"""Return True if shade is closed."""
if self._is_open is None:
return None
return not self._is_open
async def async_added_to_hass(self):
"""Run when about to be added to hass."""
await super().async_added_to_hass()
self.async_accept_signal(
self._on_off_channel, SIGNAL_ATTR_UPDATED, self.async_set_open_closed
)
self.async_accept_signal(
self._level_channel, SIGNAL_SET_LEVEL, self.async_set_level
)
@callback
def async_restore_last_state(self, last_state):
"""Restore previous state."""
self._is_open = last_state.state == STATE_OPEN
if ATTR_CURRENT_POSITION in last_state.attributes:
self._position = last_state.attributes[ATTR_CURRENT_POSITION]
@callback
def async_set_open_closed(self, attr_id: int, attr_name: str, value: bool) -> None:
"""Set open/closed state."""
self._is_open = bool(value)
self.async_write_ha_state()
@callback
def async_set_level(self, value: int) -> None:
"""Set the reported position."""
value = max(0, min(255, value))
self._position = int(value * 100 / 255)
self.async_write_ha_state()
async def async_open_cover(self, **kwargs):
"""Open the window cover."""
res = await self._on_off_channel.on()
if not isinstance(res, list) or res[1] != Status.SUCCESS:
self.debug("couldn't open cover: %s", res)
return
self._is_open = True
self.async_write_ha_state()
async def async_close_cover(self, **kwargs):
"""Close the window cover."""
res = await self._on_off_channel.off()
if not isinstance(res, list) or res[1] != Status.SUCCESS:
self.debug("couldn't open cover: %s", res)
return
self._is_open = False
self.async_write_ha_state()
async def async_set_cover_position(self, **kwargs):
"""Move the roller shutter to a specific position."""
new_pos = kwargs[ATTR_POSITION]
res = await self._level_channel.move_to_level_with_on_off(
new_pos * 255 / 100, 1
)
if not isinstance(res, list) or res[1] != Status.SUCCESS:
self.debug("couldn't set cover's position: %s", res)
return
self._position = new_pos
self.async_write_ha_state()
async def async_stop_cover(self, **kwargs) -> None:
"""Stop the cover."""
res = await self._level_channel.stop()
if not isinstance(res, list) or res[1] != Status.SUCCESS:
self.debug("couldn't stop cover: %s", res)
return
@STRICT_MATCH(
channel_names={CHANNEL_LEVEL, CHANNEL_ON_OFF}, manufacturers="Keen Home Inc"
)
class KeenVent(Shade):
"""Keen vent cover."""
@property
def device_class(self) -> Optional[str]:
"""Return the class of this device, from component DEVICE_CLASSES."""
return DEVICE_CLASS_DAMPER
async def async_open_cover(self, **kwargs):
"""Open the cover."""
position = self._position or 100
tasks = [
self._level_channel.move_to_level_with_on_off(position * 255 / 100, 1),
self._on_off_channel.on(),
]
results = await asyncio.gather(*tasks, return_exceptions=True)
if any(isinstance(result, Exception) for result in results):
self.debug("couldn't open cover")
return
self._is_open = True
self._position = position
self.async_write_ha_state()
| {
"content_hash": "3c6e8683eef9e943d6d7ed400ed8aeaf",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 87,
"avg_line_length": 33.770967741935486,
"alnum_prop": 0.6102779635113191,
"repo_name": "partofthething/home-assistant",
"id": "e202def46c5ab7e807a38eb994c92573b7321b6c",
"size": "10469",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zha/cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None'] , ['Lag1Trend'] , ['Seasonal_Second'] , ['SVR'] );
| {
"content_hash": "b6b62d79f0c2212a5447b661022d7168",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 80,
"avg_line_length": 38.25,
"alnum_prop": 0.6993464052287581,
"repo_name": "antoinecarme/pyaf",
"id": "f7979e7be5370388176c3ffcc176c7996ad9a85a",
"size": "153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_None/model_control_one_enabled_None_Lag1Trend_Seasonal_Second_SVR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import math
import numpy as np
from bitbots_msgs.action import Kick
from geometry_msgs.msg import Quaternion
from tf_transformations import quaternion_from_euler
from dynamic_stack_decider.abstract_action_element import AbstractActionElement
class AbstractKickAction(AbstractActionElement):
def pop(self):
self.blackboard.world_model.forget_ball(own=True, team=True, reset_ball_filter=True)
super(AbstractKickAction, self).pop()
class KickBallStatic(AbstractKickAction):
def __init__(self, blackboard, dsd, parameters=None):
super(KickBallStatic, self).__init__(blackboard, dsd, parameters)
if 'foot' not in parameters.keys():
# usually, we kick with the right foot
self.kick = 'kick_right' # TODO get actual name of parameter from some config
elif 'right' == parameters['foot']:
self.kick = 'kick_right' # TODO get actual name of parameter from some config
elif 'left' == parameters['foot']:
self.kick = 'kick_left' # TODO get actual name of parameter from some config
else:
self.blackboard.node.get_logger().error(
'The parameter \'{}\' could not be used to decide which foot should kick'.format(parameters['foot']))
def perform(self, reevaluate=False):
if not self.blackboard.animation.is_animation_busy():
self.blackboard.animation.play_animation(self.kick)
class KickBallDynamic(AbstractKickAction):
"""
Kick the ball using bitbots_dynamic_kick
"""
def __init__(self, blackboard, dsd, parameters=None):
super(KickBallDynamic, self).__init__(blackboard, dsd, parameters)
if parameters.get('type', None) == 'penalty':
self.penalty_kick = True
else:
self.penalty_kick = False
self._goal_sent = False
self.kick_length = self.blackboard.config['kick_cost_kick_length']
self.angular_range = self.blackboard.config['kick_cost_angular_range']
self.max_kick_angle = self.blackboard.config['max_kick_angle']
self.num_kick_angles = self.blackboard.config['num_kick_angles']
self.penalty_kick_angle = self.blackboard.config['penalty_kick_angle']
# By default, don't reevaluate
self.never_reevaluate = parameters.get('r', True) and parameters.get('reevaluate', True)
def perform(self, reevaluate=False):
if not self.blackboard.kick.is_currently_kicking:
if not self._goal_sent:
goal = Kick.Goal()
goal.header.stamp = self.blackboard.node.get_clock().now().to_msg()
# currently we use a tested left or right kick
goal.header.frame_id = self.blackboard.world_model.base_footprint_frame # the ball position is stated in this frame
if self.penalty_kick:
goal.kick_speed = 6.7
goal.ball_position.x = 0.22
goal.ball_position.y = 0.0
goal.ball_position.z = 0
goal.unstable = True
# only check 2 directions, left and right
kick_direction = self.blackboard.world_model.get_best_kick_direction(
-self.penalty_kick_angle,
self.penalty_kick_angle,
2,
self.kick_length,
self.angular_range)
else:
ball_u, ball_v = self.blackboard.world_model.get_ball_position_uv()
goal.kick_speed = 1
goal.ball_position.x = ball_u
goal.ball_position.y = ball_v
goal.ball_position.z = 0
goal.unstable = False
kick_direction = self.blackboard.world_model.get_best_kick_direction(
-self.max_kick_angle,
self.max_kick_angle,
self.num_kick_angles,
self.kick_length,
self.angular_range)
goal.kick_direction = Quaternion(*quaternion_from_euler(0, 0, kick_direction))
self.blackboard.kick.kick(goal)
self._goal_sent = True
else:
self.pop()
class KickBallVeryHard(AbstractKickAction):
def __init__(self, blackboard, dsd, parameters=None):
super(KickBallVeryHard, self).__init__(blackboard, dsd, parameters)
if 'foot' not in parameters.keys():
# usually, we kick with the right foot
self.hard_kick = 'kick_right' # TODO get actual name of parameter from some config
elif 'right' == parameters['foot']:
self.hard_kick = 'kick_right' # TODO get actual name of parameter from some config
elif 'left' == parameters['foot']:
self.hard_kick = 'kick_left' # TODO get actual name of parameter from some config
else:
self.blackboard.node.get_logger().error(
'The parameter \'{}\' could not be used to decide which foot should kick'.format(parameters['foot']))
def perform(self, reevaluate=False):
if not self.blackboard.animation.is_animation_busy():
self.blackboard.animation.play_animation(self.hard_kick)
| {
"content_hash": "e7acd043eb9c677d3e24caf0aa96c2a2",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 132,
"avg_line_length": 45.134453781512605,
"alnum_prop": 0.5907652206293055,
"repo_name": "bit-bots/bitbots_behaviour",
"id": "7d7873eadcd32fb4cc84dbd18ab74ce1c8b900ce",
"size": "5371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bitbots_body_behavior/bitbots_body_behavior/actions/kick_ball.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "12619"
},
{
"name": "Python",
"bytes": "92540"
}
],
"symlink_target": ""
} |
def indentshell(context,allcontent):
'''
indentshell indents each line between <shell>...</shell> tags by 12 spaces
    Attention: this function only works if:
    1. the <shell> tag appears at most once in allcontent
    2. <shell> is not at the very start of allcontent
:param context:
:param allcontent: all output up to this time
:return:
'''
try:
allcontent = str(allcontent)
left = allcontent.split("<shell>")
right = left[1].split("</shell>")
master_bash = right[0]
master_bash_lines = master_bash.splitlines(True)
masterbash = ""
for line in master_bash_lines:
masterbash += ' '*12+line
if len(right)==1:
return str(left[0]+masterbash)
return str(left[0]+masterbash+right[1])
except:
return allcontent
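# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of what indentshell does: every line between <shell> and
# </shell> is indented by 12 spaces, the surrounding text is left untouched.
if __name__ == "__main__":
    sample = "header\n<shell>echo one\necho two\n</shell>footer"
    print(indentshell(None, sample))
    # -> "header", then "echo one" / "echo two" each indented by 12 spaces,
    #    then "footer" on the final line.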
| {
"content_hash": "1cbf7a2a1306de2e1bfb5938be2f6d8c",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 78,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.602803738317757,
"repo_name": "icclab/disco",
"id": "88e5f456424f5b77475b68e075b2073727e186e0",
"size": "1656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sm/managers/data/shell/shellfunctions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "175160"
},
{
"name": "XSLT",
"bytes": "23777"
}
],
"symlink_target": ""
} |
"""Script for registration against AzureAutomation agent service."""
import datetime as datetime
import hashlib
import base64
import json
import hmac
import socket
import argparse
import requests
import requests.packages.urllib3
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from OpenSSL import crypto
class AgentService:
def __init__(self, endpoint, account_key, cert_path, cert_key_path, worker_group_name, machine_id):
self.protocol_version = "2.0"
self.endpoint = endpoint
self.account_key = account_key
self.cert_path = cert_path
self.cert_key_path = cert_key_path
self.worker_group_name = worker_group_name
self.machine_id = machine_id.lower()
requests.packages.urllib3.disable_warnings(InsecureRequestWarning) # disable ssl warnings
requests.packages.urllib3.disable_warnings() # disable insecure platform warning
@staticmethod
def encode_base64_sha256(key, msg):
message = bytes(msg, encoding='utf-8')
secret = bytes(key, encoding='utf-8')
signature = base64.b64encode(hmac.new(secret, message, digestmod=hashlib.sha256).digest())
return signature.decode()
def compute_hmac(self, date, key, payload):
sha256_hash = hashlib.sha256()
sha256_hash.update(json.dumps(payload).encode())
encoded_payload = base64.b64encode(sha256_hash.digest())
str_to_sign = encoded_payload.decode() + "\n" + date # Based on AgentService contract
signature = self.encode_base64_sha256(key, str_to_sign)
return signature
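    # Signing sketch (derived from compute_hmac above): the payload is JSON
    # serialised, SHA-256 hashed, base64 encoded, joined with the x-ms-date
    # value by a newline, and then HMAC-SHA256 signed with the account key.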
def register_worker(self):
cert = crypto.load_certificate(crypto.FILETYPE_PEM, open(self.cert_path).read())
cert_thumbprint = cert.digest("sha1").decode().replace(":", "")
date = datetime.datetime.utcnow().isoformat() + "0-00:00"
payload = {'RunbookWorkerGroup': self.worker_group_name,
"MachineName": socket.gethostname(),
"IpAddress": socket.gethostbyname(socket.gethostname()),
"Thumbprint": cert_thumbprint,
"Issuer": str(cert.get_issuer()),
"Subject": str(cert.get_subject())}
signature = self.compute_hmac(date, self.account_key, payload)
header = {'Authorization': 'Shared ' + signature,
'ProtocolVersion': self.protocol_version,
'x-ms-date': date,
"Content-Type": "application/json"}
url = "{0}/HybridV2(MachineId='{1}')".format(self.endpoint, self.machine_id)
req = requests.put(url, cert=(self.cert_path, self.cert_key_path), verify=False, headers=header,
data=json.dumps(payload))
        if req.status_code != 200:
print ("Agentservice : Failed to register worker. Status [{0}], Reason [{1}]".format(req.status_code,
req.reason))
return
print ("Agentservice : Registration complete, status [" + str(req.status_code) + "]")
print ("Machine name : " + str(socket.gethostname()))
print ("Machine id : " + str(self.machine_id))
print ("Worker group name : " + str(self.worker_group_name))
print ("Certificate thumbprint : " + str(cert_thumbprint))
def deregister_worker(self):
cert = crypto.load_certificate(crypto.FILETYPE_PEM, open(self.cert_path).read())
cert_thumbprint = cert.digest("sha1").decode().replace(":", "")
date = datetime.datetime.utcnow().isoformat() + "0-00:00"
payload = {"Thumbprint": cert_thumbprint,
"Issuer": str(cert.get_issuer()),
"Subject": str(cert.get_subject())}
signature = self.compute_hmac(date, self.account_key, payload)
header = {'Authorization': 'Shared ' + signature,
'ProtocolVersion': self.protocol_version,
'x-ms-date': date,
"Content-Type": "application/json"}
url = "{0}/Hybrid(MachineId='{1}')".format(self.endpoint, self.machine_id)
req = requests.delete(url, cert=(self.cert_path, self.cert_key_path), verify=False, headers=header,
data=json.dumps(payload))
        if req.status_code != 200:
print ("Agent service : Failed to de-register worker. Status [{0}], Reason [{1}]".format(req.status_code,
req.reason))
return
print ("Agent service : De-registration complete, status [" + str(req.status_code) + "]")
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-url', help='Registration url', required=True)
parser.add_argument('-accountkey', help='Automation account key', required=True)
parser.add_argument('-workergroupname', help='worker group name', required=True)
parser.add_argument('-cert', help='Certificate file path', required=True)
parser.add_argument('-key', help='Private key file path', required=True)
parser.add_argument('-machineid', help='Machine id', required=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--register', action='store_true', help='Register the worker')
group.add_argument('--deregister', action='store_true', help='De-register the worker')
args = parser.parse_args()
agent = AgentService(args.url, args.accountkey, args.cert, args.key, args.workergroupname,
args.machineid)
if args.register:
agent.register_worker()
elif args.deregister:
agent.deregister_worker()
if __name__ == "__main__":
main()
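# Example invocation (illustrative values only; endpoint, paths and ids are
# placeholders, not taken from the original script):
#   python register.py -url https://<agent-service-endpoint> \
#       -accountkey <automation-account-key> -workergroupname <worker-group> \
#       -cert /path/to/worker.crt -key /path/to/worker.key \
#       -machineid <machine-guid> --register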
| {
"content_hash": "14db2292722be5722f2d1cc0843df0d0",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 117,
"avg_line_length": 43.343283582089555,
"alnum_prop": 0.6113980716253443,
"repo_name": "MSFTOSSMgmt/WPSDSCLinux",
"id": "76ebaf3a4903ed47507f54c43d907d58a1e66b0b",
"size": "5971",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Providers/nxOMSAutomationWorker/automationworker/3.x/scripts/register.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5870322"
},
{
"name": "C#",
"bytes": "98943"
},
{
"name": "C++",
"bytes": "670183"
},
{
"name": "CMake",
"bytes": "13826"
},
{
"name": "HTML",
"bytes": "166861"
},
{
"name": "Makefile",
"bytes": "164013"
},
{
"name": "Objective-C",
"bytes": "61644"
},
{
"name": "PowerShell",
"bytes": "40239"
},
{
"name": "Python",
"bytes": "1858427"
},
{
"name": "Shell",
"bytes": "8136"
},
{
"name": "SourcePawn",
"bytes": "60242"
},
{
"name": "Yacc",
"bytes": "35814"
}
],
"symlink_target": ""
} |
"""
Extend numpy's decorators to use nipy's gui and data labels.
"""
from numpy.testing.decorators import *
from nipy.utils import templates, example_data, DataError
def make_label_dec(label, ds=None):
"""Factory function to create a decorator that applies one or more labels.
Parameters
----------
label : str or sequence
One or more labels that will be applied by the decorator to the
functions it decorates. Labels are attributes of the decorated function
with their value set to True.
ds : str
An optional docstring for the resulting decorator. If not given, a
default docstring is auto-generated.
Returns
-------
ldec : function
A decorator.
Examples
--------
>>> slow = make_label_dec('slow')
>>> print slow.__doc__
Labels a test as 'slow'
>>> rare = make_label_dec(['slow','hard'],
... "Mix labels 'slow' and 'hard' for rare tests")
>>> @rare
... def f(): pass
...
>>>
>>> f.slow
True
>>> f.hard
True
"""
if isinstance(label,basestring):
labels = [label]
else:
labels = label
# Validate that the given label(s) are OK for use in setattr() by doing a
# dry run on a dummy function.
tmp = lambda : None
for label in labels:
setattr(tmp,label,True)
# This is the actual decorator we'll return
def decor(f):
for label in labels:
setattr(f,label,True)
return f
# Apply the user's docstring
if ds is None:
ds = "Labels a test as %r" % label
decor.__doc__ = ds
return decor
# Nipy specific labels
gui = make_label_dec('gui')
data = make_label_dec('data')
# For tests that need further review
def needs_review(msg):
""" Skip a test that needs further review.
Parameters
----------
msg : string
msg regarding the review that needs to be done
"""
def skip_func(func):
return skipif(True, msg)(func)
return skip_func
# Easier version of the numpy knownfailure
def knownfailure(f):
return knownfailureif(True)(f)
def if_datasource(ds, msg):
try:
ds.get_filename()
except DataError:
return skipif(True, msg)
return lambda f : f
def if_templates(f):
return if_datasource(templates, 'Cannot find template data')(f)
def if_example_data(f):
return if_datasource(example_data, 'Cannot find example data')(f)
def skip_doctest_if(condition):
"""Decorator - mark a function or method for skipping its doctest.
This decorator allows you to mark a function whose docstring you wish to
omit from testing, while preserving the docstring for introspection, help,
etc."""
if not condition:
return lambda f : f
return make_label_dec('skip_doctest')
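# Assumed usage sketch (not part of the original module): when the condition is
# true, the decorated function gets a ``skip_doctest`` attribute that the test
# collector can honour; otherwise it is returned unchanged.
#
#   @skip_doctest_if(sys.platform == 'win32')
#   def windows_unfriendly():
#       """Docstring with a doctest that should be skipped on Windows."""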
| {
"content_hash": "e0010079c60078ccae31d8d8bbc3315b",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 80,
"avg_line_length": 24.789473684210527,
"alnum_prop": 0.6245576786978061,
"repo_name": "bthirion/nipy",
"id": "f729adff411c08326f2854fc947ba36427d1bbb2",
"size": "2940",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nipy/testing/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6381240"
},
{
"name": "C++",
"bytes": "6189"
},
{
"name": "CSS",
"bytes": "8170"
},
{
"name": "M",
"bytes": "560"
},
{
"name": "Matlab",
"bytes": "4948"
},
{
"name": "Python",
"bytes": "3068962"
},
{
"name": "TeX",
"bytes": "238"
}
],
"symlink_target": ""
} |
from sahara import conductor
from sahara import context
from sahara.tests.unit import base
from sahara.utils import general as gu
SAMPLE_CLUSTER = {
'plugin_name': 'test_plugin',
'hadoop_version': 'test_version',
'tenant_id': 'tenant_1',
'name': 'test_cluster',
'user_keypair_id': 'my_keypair',
'node_groups': [
{
'name': 'ng_1',
'flavor_id': '42',
'node_processes': ['p1', 'p2'],
'count': 1
},
{
'name': 'ng_2',
'flavor_id': '42',
'node_processes': ['p3', 'p4'],
'count': 3
}
],
'cluster_configs': {
'service_1': {
'config_2': 'value_2'
},
'service_2': {
'config_1': 'value_1'
}
},
}
SAMPLE_NODE_GROUP = {
'name': 'ng_3',
'flavor_id': '42',
'node_processes': ['p5', 'p6'],
'count': 5
}
SAMPLE_INSTANCE = {
'instance_name': 'test-name',
'instance_id': '123456',
'management_ip': '1.2.3.1'
}
class TestConductorClusterApi(base.SaharaWithDbTestCase):
def setUp(self):
super(TestConductorClusterApi, self).setUp()
self.api = conductor.API
def _make_sample(self):
ctx = context.ctx()
cluster = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
return ctx, cluster
def test_update_by_id(self):
ctx, cluster = self._make_sample()
self.api.cluster_update(ctx, cluster.id, {'name': 'changed'})
updated_cluster = self.api.cluster_get(ctx, cluster.id)
self.assertEqual(updated_cluster['name'], 'changed')
self.api.cluster_destroy(ctx, updated_cluster.id)
cluster_list = self.api.cluster_get_all(ctx)
self.assertEqual(len(cluster_list), 0)
def test_add_node_group_to_cluster_id(self):
ctx, cluster = self._make_sample()
ng_id = self.api.node_group_add(ctx, cluster.id, SAMPLE_NODE_GROUP)
self.assertTrue(ng_id)
def test_update_node_group_by_id(self):
ctx, cluster = self._make_sample()
ng_id = cluster.node_groups[0].id
self.api.node_group_update(ctx, ng_id, {'name': 'changed_ng'})
cluster = self.api.cluster_get(ctx, cluster.id)
ng = gu.get_by_id(cluster.node_groups, ng_id)
self.assertEqual(ng.name, 'changed_ng')
def test_add_instance_to_node_group_id(self):
ctx, cluster = self._make_sample()
inst_id = self.api.instance_add(ctx, cluster.node_groups[0].id,
SAMPLE_INSTANCE)
self.assertTrue(inst_id)
def test_update_instance_by_id(self):
ctx, cluster = self._make_sample()
ng_id = cluster.node_groups[0].id
inst_id = self.api.instance_add(ctx, ng_id, SAMPLE_INSTANCE)
self.api.instance_update(ctx, inst_id, {'instance_name': 'tst123'})
cluster = self.api.cluster_get(ctx, cluster.id)
ng = gu.get_by_id(cluster.node_groups, ng_id)
self.assertEqual(ng.instances[0].instance_name, 'tst123')
| {
"content_hash": "53faed98d51c20641db5efc671ca3ef7",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 75,
"avg_line_length": 29.980392156862745,
"alnum_prop": 0.5676913015042512,
"repo_name": "citrix-openstack-build/sahara",
"id": "43205e0ce9be0cbb99f1c8cd747f92c99a7588e0",
"size": "3641",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sahara/tests/unit/conductor/test_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "PigLatin",
"bytes": "161"
},
{
"name": "Python",
"bytes": "2064299"
},
{
"name": "Shell",
"bytes": "16736"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_profile_protocol_options
short_description: Configure protocol options in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and profile_protocol_options category.
      Examples include all parameters and values, which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_profile_protocol_options:
description:
- Configure protocol options.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
comment:
description:
- Optional comments.
type: str
dns:
description:
- Configure DNS protocol options.
type: dict
suboptions:
ports:
description:
- Ports to scan for content (1 - 65535).
type: int
status:
description:
- Enable/disable the active status of scanning for this protocol.
type: str
choices:
- enable
- disable
ftp:
description:
- Configure FTP protocol options.
type: dict
suboptions:
comfort_amount:
description:
- Amount of data to send in a transmission for client comforting (1 - 10240 bytes).
type: int
comfort_interval:
description:
- Period of time between start, or last transmission, and the next client comfort transmission of data (1 - 900 sec).
type: int
inspect_all:
description:
- Enable/disable the inspection of all ports for the protocol.
type: str
choices:
- enable
- disable
options:
description:
- One or more options that can be applied to the session.
type: str
choices:
- clientcomfort
- oversize
- splice
- bypass-rest-command
- bypass-mode-command
oversize_limit:
description:
- Maximum in-memory file size that can be scanned (1 - 383 MB).
type: int
ports:
description:
- Ports to scan for content (1 - 65535).
type: int
scan_bzip2:
description:
- Enable/disable scanning of BZip2 compressed files.
type: str
choices:
- enable
- disable
status:
description:
- Enable/disable the active status of scanning for this protocol.
type: str
choices:
- enable
- disable
uncompressed_nest_limit:
description:
- Maximum nested levels of compression that can be uncompressed and scanned (2 - 100).
type: int
uncompressed_oversize_limit:
description:
- Maximum in-memory uncompressed file size that can be scanned (0 - 383 MB, 0 = unlimited).
type: int
http:
description:
- Configure HTTP protocol options.
type: dict
suboptions:
block_page_status_code:
description:
- Code number returned for blocked HTTP pages (non-FortiGuard only) (100 - 599).
type: int
comfort_amount:
description:
- Amount of data to send in a transmission for client comforting (1 - 10240 bytes).
type: int
comfort_interval:
description:
- Period of time between start, or last transmission, and the next client comfort transmission of data (1 - 900 sec).
type: int
fortinet_bar:
description:
- Enable/disable Fortinet bar on HTML content.
type: str
choices:
- enable
- disable
fortinet_bar_port:
description:
- Port for use by Fortinet Bar (1 - 65535).
type: int
http_policy:
description:
- Enable/disable HTTP policy check.
type: str
choices:
- disable
- enable
inspect_all:
description:
- Enable/disable the inspection of all ports for the protocol.
type: str
choices:
- enable
- disable
options:
description:
- One or more options that can be applied to the session.
type: str
choices:
- clientcomfort
- servercomfort
- oversize
- chunkedbypass
oversize_limit:
description:
- Maximum in-memory file size that can be scanned (1 - 383 MB).
type: int
ports:
description:
- Ports to scan for content (1 - 65535).
type: int
post_lang:
description:
- ID codes for character sets to be used to convert to UTF-8 for banned words and DLP on HTTP posts (maximum of 5 character sets).
type: str
choices:
- jisx0201
- jisx0208
- jisx0212
- gb2312
- ksc5601-ex
- euc-jp
- sjis
- iso2022-jp
- iso2022-jp-1
- iso2022-jp-2
- euc-cn
- ces-gbk
- hz
- ces-big5
- euc-kr
- iso2022-jp-3
- iso8859-1
- tis620
- cp874
- cp1252
- cp1251
range_block:
description:
- Enable/disable blocking of partial downloads.
type: str
choices:
- disable
- enable
retry_count:
description:
- Number of attempts to retry HTTP connection (0 - 100).
type: int
scan_bzip2:
description:
- Enable/disable scanning of BZip2 compressed files.
type: str
choices:
- enable
- disable
status:
description:
- Enable/disable the active status of scanning for this protocol.
type: str
choices:
- enable
- disable
streaming_content_bypass:
description:
- Enable/disable bypassing of streaming content from buffering.
type: str
choices:
- enable
- disable
strip_x_forwarded_for:
description:
- Enable/disable stripping of HTTP X-Forwarded-For header.
type: str
choices:
- disable
- enable
switching_protocols:
description:
- Bypass from scanning, or block a connection that attempts to switch protocol.
type: str
choices:
- bypass
- block
uncompressed_nest_limit:
description:
- Maximum nested levels of compression that can be uncompressed and scanned (2 - 100).
type: int
uncompressed_oversize_limit:
description:
- Maximum in-memory uncompressed file size that can be scanned (0 - 383 MB, 0 = unlimited).
type: int
imap:
description:
- Configure IMAP protocol options.
type: dict
suboptions:
inspect_all:
description:
- Enable/disable the inspection of all ports for the protocol.
type: str
choices:
- enable
- disable
options:
description:
- One or more options that can be applied to the session.
type: str
choices:
- fragmail
- oversize
oversize_limit:
description:
- Maximum in-memory file size that can be scanned (1 - 383 MB).
type: int
ports:
description:
- Ports to scan for content (1 - 65535).
type: int
scan_bzip2:
description:
- Enable/disable scanning of BZip2 compressed files.
type: str
choices:
- enable
- disable
status:
description:
- Enable/disable the active status of scanning for this protocol.
type: str
choices:
- enable
- disable
uncompressed_nest_limit:
description:
- Maximum nested levels of compression that can be uncompressed and scanned (2 - 100).
type: int
uncompressed_oversize_limit:
description:
- Maximum in-memory uncompressed file size that can be scanned (0 - 383 MB, 0 = unlimited).
type: int
mail_signature:
description:
- Configure Mail signature.
type: dict
suboptions:
signature:
description:
- Email signature to be added to outgoing email (if the signature contains spaces, enclose with quotation marks).
type: str
status:
description:
- Enable/disable adding an email signature to SMTP email messages as they pass through the FortiGate.
type: str
choices:
- disable
- enable
mapi:
description:
- Configure MAPI protocol options.
type: dict
suboptions:
options:
description:
- One or more options that can be applied to the session.
type: str
choices:
- fragmail
- oversize
oversize_limit:
description:
- Maximum in-memory file size that can be scanned (1 - 383 MB).
type: int
ports:
description:
- Ports to scan for content (1 - 65535).
type: int
scan_bzip2:
description:
- Enable/disable scanning of BZip2 compressed files.
type: str
choices:
- enable
- disable
status:
description:
- Enable/disable the active status of scanning for this protocol.
type: str
choices:
- enable
- disable
uncompressed_nest_limit:
description:
- Maximum nested levels of compression that can be uncompressed and scanned (2 - 100).
type: int
uncompressed_oversize_limit:
description:
- Maximum in-memory uncompressed file size that can be scanned (0 - 383 MB, 0 = unlimited).
type: int
name:
description:
- Name.
required: true
type: str
nntp:
description:
- Configure NNTP protocol options.
type: dict
suboptions:
inspect_all:
description:
- Enable/disable the inspection of all ports for the protocol.
type: str
choices:
- enable
- disable
options:
description:
- One or more options that can be applied to the session.
type: str
choices:
- oversize
- splice
oversize_limit:
description:
- Maximum in-memory file size that can be scanned (1 - 383 MB).
type: int
ports:
description:
- Ports to scan for content (1 - 65535).
type: int
scan_bzip2:
description:
- Enable/disable scanning of BZip2 compressed files.
type: str
choices:
- enable
- disable
status:
description:
- Enable/disable the active status of scanning for this protocol.
type: str
choices:
- enable
- disable
uncompressed_nest_limit:
description:
- Maximum nested levels of compression that can be uncompressed and scanned (2 - 100).
type: int
uncompressed_oversize_limit:
description:
- Maximum in-memory uncompressed file size that can be scanned (0 - 383 MB, 0 = unlimited).
type: int
oversize_log:
description:
- Enable/disable logging for antivirus oversize file blocking.
type: str
choices:
- disable
- enable
pop3:
description:
- Configure POP3 protocol options.
type: dict
suboptions:
inspect_all:
description:
- Enable/disable the inspection of all ports for the protocol.
type: str
choices:
- enable
- disable
options:
description:
- One or more options that can be applied to the session.
type: str
choices:
- fragmail
- oversize
oversize_limit:
description:
- Maximum in-memory file size that can be scanned (1 - 383 MB).
type: int
ports:
description:
- Ports to scan for content (1 - 65535).
type: int
scan_bzip2:
description:
- Enable/disable scanning of BZip2 compressed files.
type: str
choices:
- enable
- disable
status:
description:
- Enable/disable the active status of scanning for this protocol.
type: str
choices:
- enable
- disable
uncompressed_nest_limit:
description:
- Maximum nested levels of compression that can be uncompressed and scanned (2 - 100).
type: int
uncompressed_oversize_limit:
description:
- Maximum in-memory uncompressed file size that can be scanned (0 - 383 MB, 0 = unlimited).
type: int
replacemsg_group:
description:
                    - Name of the replacement message group to be used. Source system.replacemsg-group.name.
type: str
rpc_over_http:
description:
- Enable/disable inspection of RPC over HTTP.
type: str
choices:
- enable
- disable
smtp:
description:
- Configure SMTP protocol options.
type: dict
suboptions:
inspect_all:
description:
- Enable/disable the inspection of all ports for the protocol.
type: str
choices:
- enable
- disable
options:
description:
- One or more options that can be applied to the session.
type: str
choices:
- fragmail
- oversize
- splice
oversize_limit:
description:
- Maximum in-memory file size that can be scanned (1 - 383 MB).
type: int
ports:
description:
- Ports to scan for content (1 - 65535).
type: int
scan_bzip2:
description:
- Enable/disable scanning of BZip2 compressed files.
type: str
choices:
- enable
- disable
server_busy:
description:
- Enable/disable SMTP server busy when server not available.
type: str
choices:
- enable
- disable
status:
description:
- Enable/disable the active status of scanning for this protocol.
type: str
choices:
- enable
- disable
uncompressed_nest_limit:
description:
- Maximum nested levels of compression that can be uncompressed and scanned (2 - 100).
type: int
uncompressed_oversize_limit:
description:
- Maximum in-memory uncompressed file size that can be scanned (0 - 383 MB, 0 = unlimited).
type: int
switching_protocols_log:
description:
- Enable/disable logging for HTTP/HTTPS switching protocols.
type: str
choices:
- disable
- enable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure protocol options.
fortios_firewall_profile_protocol_options:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_profile_protocol_options:
comment: "Optional comments."
dns:
ports: "5"
status: "enable"
ftp:
comfort_amount: "8"
comfort_interval: "9"
inspect_all: "enable"
options: "clientcomfort"
oversize_limit: "12"
ports: "13"
scan_bzip2: "enable"
status: "enable"
uncompressed_nest_limit: "16"
uncompressed_oversize_limit: "17"
http:
block_page_status_code: "19"
comfort_amount: "20"
comfort_interval: "21"
fortinet_bar: "enable"
fortinet_bar_port: "23"
http_policy: "disable"
inspect_all: "enable"
options: "clientcomfort"
oversize_limit: "27"
ports: "28"
post_lang: "jisx0201"
range_block: "disable"
retry_count: "31"
scan_bzip2: "enable"
status: "enable"
streaming_content_bypass: "enable"
strip_x_forwarded_for: "disable"
switching_protocols: "bypass"
uncompressed_nest_limit: "37"
uncompressed_oversize_limit: "38"
imap:
inspect_all: "enable"
options: "fragmail"
oversize_limit: "42"
ports: "43"
scan_bzip2: "enable"
status: "enable"
uncompressed_nest_limit: "46"
uncompressed_oversize_limit: "47"
mail_signature:
signature: "<your_own_value>"
status: "disable"
mapi:
options: "fragmail"
oversize_limit: "53"
ports: "54"
scan_bzip2: "enable"
status: "enable"
uncompressed_nest_limit: "57"
uncompressed_oversize_limit: "58"
name: "default_name_59"
nntp:
inspect_all: "enable"
options: "oversize"
oversize_limit: "63"
ports: "64"
scan_bzip2: "enable"
status: "enable"
uncompressed_nest_limit: "67"
uncompressed_oversize_limit: "68"
oversize_log: "disable"
pop3:
inspect_all: "enable"
options: "fragmail"
oversize_limit: "73"
ports: "74"
scan_bzip2: "enable"
status: "enable"
uncompressed_nest_limit: "77"
uncompressed_oversize_limit: "78"
replacemsg_group: "<your_own_value> (source system.replacemsg-group.name)"
rpc_over_http: "enable"
smtp:
inspect_all: "enable"
options: "fragmail"
oversize_limit: "84"
ports: "85"
scan_bzip2: "enable"
server_busy: "enable"
status: "enable"
uncompressed_nest_limit: "89"
uncompressed_oversize_limit: "90"
switching_protocols_log: "disable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_profile_protocol_options_data(json):
option_list = ['comment', 'dns', 'ftp',
'http', 'imap', 'mail_signature',
'mapi', 'name', 'nntp',
'oversize_log', 'pop3', 'replacemsg_group',
'rpc_over_http', 'smtp', 'switching_protocols_log']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        # write converted elements back so list entries are actually updated
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
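# Illustrative behaviour of the helper above (assumed example, not from the
# generated module):
#   underscore_to_hyphen({'scan_bzip2': 'enable', 'ftp': {'comfort_amount': 8}})
#   -> {'scan-bzip2': 'enable', 'ftp': {'comfort-amount': 8}}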
def firewall_profile_protocol_options(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['firewall_profile_protocol_options'] and data['firewall_profile_protocol_options']:
state = data['firewall_profile_protocol_options']['state']
else:
state = True
firewall_profile_protocol_options_data = data['firewall_profile_protocol_options']
filtered_data = underscore_to_hyphen(filter_firewall_profile_protocol_options_data(firewall_profile_protocol_options_data))
if state == "present":
return fos.set('firewall',
'profile-protocol-options',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall',
'profile-protocol-options',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
if data['firewall_profile_protocol_options']:
resp = firewall_profile_protocol_options(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_profile_protocol_options": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"comment": {"required": False, "type": "str"},
"dns": {"required": False, "type": "dict",
"options": {
"ports": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"ftp": {"required": False, "type": "dict",
"options": {
"comfort_amount": {"required": False, "type": "int"},
"comfort_interval": {"required": False, "type": "int"},
"inspect_all": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"options": {"required": False, "type": "str",
"choices": ["clientcomfort", "oversize", "splice",
"bypass-rest-command", "bypass-mode-command"]},
"oversize_limit": {"required": False, "type": "int"},
"ports": {"required": False, "type": "int"},
"scan_bzip2": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"uncompressed_nest_limit": {"required": False, "type": "int"},
"uncompressed_oversize_limit": {"required": False, "type": "int"}
}},
"http": {"required": False, "type": "dict",
"options": {
"block_page_status_code": {"required": False, "type": "int"},
"comfort_amount": {"required": False, "type": "int"},
"comfort_interval": {"required": False, "type": "int"},
"fortinet_bar": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"fortinet_bar_port": {"required": False, "type": "int"},
"http_policy": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"inspect_all": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"options": {"required": False, "type": "str",
"choices": ["clientcomfort", "servercomfort", "oversize",
"chunkedbypass"]},
"oversize_limit": {"required": False, "type": "int"},
"ports": {"required": False, "type": "int"},
"post_lang": {"required": False, "type": "str",
"choices": ["jisx0201", "jisx0208", "jisx0212",
"gb2312", "ksc5601-ex", "euc-jp",
"sjis", "iso2022-jp", "iso2022-jp-1",
"iso2022-jp-2", "euc-cn", "ces-gbk",
"hz", "ces-big5", "euc-kr",
"iso2022-jp-3", "iso8859-1", "tis620",
"cp874", "cp1252", "cp1251"]},
"range_block": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"retry_count": {"required": False, "type": "int"},
"scan_bzip2": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"streaming_content_bypass": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"strip_x_forwarded_for": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"switching_protocols": {"required": False, "type": "str",
"choices": ["bypass", "block"]},
"uncompressed_nest_limit": {"required": False, "type": "int"},
"uncompressed_oversize_limit": {"required": False, "type": "int"}
}},
"imap": {"required": False, "type": "dict",
"options": {
"inspect_all": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"options": {"required": False, "type": "str",
"choices": ["fragmail", "oversize"]},
"oversize_limit": {"required": False, "type": "int"},
"ports": {"required": False, "type": "int"},
"scan_bzip2": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"uncompressed_nest_limit": {"required": False, "type": "int"},
"uncompressed_oversize_limit": {"required": False, "type": "int"}
}},
"mail_signature": {"required": False, "type": "dict",
"options": {
"signature": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["disable", "enable"]}
}},
"mapi": {"required": False, "type": "dict",
"options": {
"options": {"required": False, "type": "str",
"choices": ["fragmail", "oversize"]},
"oversize_limit": {"required": False, "type": "int"},
"ports": {"required": False, "type": "int"},
"scan_bzip2": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"uncompressed_nest_limit": {"required": False, "type": "int"},
"uncompressed_oversize_limit": {"required": False, "type": "int"}
}},
"name": {"required": True, "type": "str"},
"nntp": {"required": False, "type": "dict",
"options": {
"inspect_all": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"options": {"required": False, "type": "str",
"choices": ["oversize", "splice"]},
"oversize_limit": {"required": False, "type": "int"},
"ports": {"required": False, "type": "int"},
"scan_bzip2": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"uncompressed_nest_limit": {"required": False, "type": "int"},
"uncompressed_oversize_limit": {"required": False, "type": "int"}
}},
"oversize_log": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"pop3": {"required": False, "type": "dict",
"options": {
"inspect_all": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"options": {"required": False, "type": "str",
"choices": ["fragmail", "oversize"]},
"oversize_limit": {"required": False, "type": "int"},
"ports": {"required": False, "type": "int"},
"scan_bzip2": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"uncompressed_nest_limit": {"required": False, "type": "int"},
"uncompressed_oversize_limit": {"required": False, "type": "int"}
}},
"replacemsg_group": {"required": False, "type": "str"},
"rpc_over_http": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"smtp": {"required": False, "type": "dict",
"options": {
"inspect_all": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"options": {"required": False, "type": "str",
"choices": ["fragmail", "oversize", "splice"]},
"oversize_limit": {"required": False, "type": "int"},
"ports": {"required": False, "type": "int"},
"scan_bzip2": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"server_busy": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"uncompressed_nest_limit": {"required": False, "type": "int"},
"uncompressed_oversize_limit": {"required": False, "type": "int"}
}},
"switching_protocols_log": {"required": False, "type": "str",
"choices": ["disable", "enable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
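    # Illustrative example (values are made up): a task that supplies
    #   host: 192.0.2.10, username: admin, password: secret
    # takes the legacy fortiosapi branch below, while a play relying on the
    # httpapi connection plugin omits them and uses module._socket_path.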
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| {
"content_hash": "bb6de89c0e56acbb5ea69e3e872f3c83",
"timestamp": "",
"source": "github",
"line_count": 1082,
"max_line_length": 158,
"avg_line_length": 43.6460258780037,
"alnum_prop": 0.41511911064055057,
"repo_name": "thaim/ansible",
"id": "825ab88bb65b9ed4ba705baa91eee6cf869017b3",
"size": "47243",
"binary": false,
"copies": "13",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/network/fortios/fortios_firewall_profile_protocol_options.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import argparse
from pprint import pprint
import myhdl
from myhdl import Signal, intbv, ConcatSignal, always_comb, concat
from rhea.system import Global, Clock, Reset
from rhea.cores.misc import io_stub
from rhea.cores.comm import prbs_generate
from rhea.cores.comm import prbs_check
from rhea.vendor import Vendor
from rhea.vendor import input_diff_buffer
from rhea.vendor import output_diff_buffer
from rhea.vendor import device_serdes_input
from rhea.vendor import device_serdes_output
from rhea.build import get_board
@myhdl.block
def parallella_serdes(
clock,
# porcupine board breakout
serial_tx_p, serial_tx_n,
serial_rx_p, serial_rx_n,
led, reset=None
):
"""
"""
assert len(led) == 8
nbanks = len(serial_tx_p)
assert (len(serial_tx_p) == len(serial_tx_n) ==
len(serial_rx_p) == len(serial_rx_n) )
glbl = Global(clock, reset)
# signal interface to the prbs generate / check
locked = [Signal(bool(0)) for _ in range(nbanks)]
inject_error = [Signal(bool(0)) for _ in range(nbanks)]
word_count = [Signal(intbv(0)[64:]) for _ in range(nbanks)]
error_count = [Signal(intbv(0)[64:]) for _ in range(nbanks)]
prbsi = [Signal(intbv(0)[1:]) for _ in range(nbanks)]
prbso = [Signal(intbv(0)[1:]) for _ in range(nbanks)]
# diff buffers for the diff signals
obuf_inst = output_diff_buffer(prbso, serial_tx_p, serial_tx_n)
ibuf_inst = input_diff_buffer(serial_rx_p, serial_rx_n, prbsi)
insts = []
for bank in range(nbanks):
gen_inst = prbs_generate(
glbl, prbso[bank], inject_error[bank],
order=23
)
insts += [gen_inst]
chk_inst = prbs_check(
glbl, prbsi[bank], locked[bank],
word_count[bank], error_count[bank],
order=23
)
insts += [chk_inst]
locks = ConcatSignal(*reversed(locked))
@always_comb
def led_assign():
led.next = concat("1010", locks[4:])
return myhdl.instances()
def build(args):
# @todo: use parallella board, use an ISE support board for now ...
brd = get_board('parallella')
# @todo: temporary for existing board
# brd.add_reset('reset', active=1, isasync=True, pins=('N20',))
brd.add_port_name('serial_tx_p', 'gpio_p', slice(4, 8))
brd.add_port_name('serial_tx_n', 'gpio_n', slice(4, 8))
brd.add_port_name('serial_rx_p', 'gpio_p', slice(8, 12))
brd.add_port_name('serial_rx_n', 'gpio_n', slice(8, 12))
flow = brd.get_flow(parallella_serdes)
flow.run()
info = flow.get_utilization()
pprint(info)
def cliparse():
parser = argparse.ArgumentParser()
parser.add_argument("--build", action='store_true', default=False)
parser.add_argument("--program", action='store_true', default=False)
parser.add_argument("--trace", action='store_true', default=False)
args = parser.parse_args()
return args
def main():
args = cliparse()
build(args)
if __name__ == '__main__':
main()
| {
"content_hash": "0fc96ef8e92623015b967b6575d79863",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 74,
"avg_line_length": 28.34862385321101,
"alnum_prop": 0.6300970873786408,
"repo_name": "cfelton/rhea",
"id": "9e3c114050b4dd73e16f55753933e5b7cfd6bdd8",
"size": "3091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/boards/parallella/test_serdes/parallella_serdes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2185"
},
{
"name": "Python",
"bytes": "672143"
},
{
"name": "Shell",
"bytes": "1590"
},
{
"name": "VHDL",
"bytes": "10452"
},
{
"name": "Verilog",
"bytes": "22193"
}
],
"symlink_target": ""
} |
"""
This module holds bundled packages that are used as fallbacks when the
system-installed (non-bundled) versions are not available.
"""
from __future__ import absolute_import
try:
import ktlxml
except ImportError as e:
from . import ktlxml
def get_gui():
"""Function to delay import of GUI"""
try:
import GUI
except ImportError as e:
from . import GUI
return GUI
def install_WeakRef():
"""Install the weakref module."""
from . import WeakRef
import sys
sys.modules['WeakRef'] = WeakRef
install_WeakRef()
| {
"content_hash": "98e112003c71157ea5031f4c5e0caf6a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 89,
"avg_line_length": 21.04,
"alnum_prop": 0.6577946768060836,
"repo_name": "alexrudy/Cauldron",
"id": "4a3acfffdfc21d5f5b716e560a08a09f43f1f271",
"size": "526",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Cauldron/bundled/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "840330"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import pytest
from tests.utils.dockerized import dockerized_case
from tests.image_builder.distributions.ubuntu import UbuntuBuilder
@pytest.mark.usefixtures("agent_environment")
@dockerized_case(
UbuntuBuilder,
__file__,
file_paths_to_copy=["/scalyr-agent.deb"],
artifacts_use_subdirectory=False,
)
def test_build_deb_package(request):
"""
    Mock test function which is used to build the agent Debian package.
"""
pass
| {
"content_hash": "7bc00e2341d91d8014bf4b224860fcf1",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 66,
"avg_line_length": 24.434782608695652,
"alnum_prop": 0.7330960854092526,
"repo_name": "imron/scalyr-agent-2",
"id": "7d126ca05e5cac08cd31c14c34c76221da1c5303",
"size": "1141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/distribution/deb_package.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1297"
},
{
"name": "Dockerfile",
"bytes": "1461"
},
{
"name": "Python",
"bytes": "2093708"
}
],
"symlink_target": ""
} |
from distutils.core import setup
from setuptools import find_packages
setup(
name='spock',
description='A pure python framework that implements the 1.8 Minecraft '
'protocol for building Minecraft clients',
license='MIT',
long_description=open('README.rst').read(),
version='0.1.2',
url='https://github.com/SpockBotMC/SpockBot',
packages=find_packages(exclude=['tests', 'tests.*']),
install_requires=[
'PyCrypto >= 2.6.1',
],
keywords=['minecraft'],
classifiers=[
'License :: OSI Approved :: MIT License',
]
)
| {
"content_hash": "93f950c4c9303fd2fdf558de3576baee",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 28.19047619047619,
"alnum_prop": 0.6317567567567568,
"repo_name": "txomon/SpockBot",
"id": "d6c4ce465a502ab1fa931066e674bdc1750e39b4",
"size": "592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "231289"
}
],
"symlink_target": ""
} |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module connects to the EMDN as a subscriber and writes all the changes in the MongoDB database.
"""
import logging
import os
import simplejson
import time
import traceback
import zlib
from pwd import getpwnam
import pymongo
import zmq
from daemon import runner
#Settings
endpoint_url = 'tcp://firehose.elite-market-data.net:9500' # EMDN subscriber endpoint URL
dbhost = 'localhost' # MongoDB server
dbport = 27017 # MongoDB port
max_data_length = 800 # Maximum character length of a received message
expected_version = '0.1' # The version of EMDN the script was written for
# Get the UIDs to run the daemon
uinfo = getpwnam('feeder')
categories = {
'foods': 'Foods',
'textiles': 'Textiles',
'industrial_materials': 'Industrial Materials',
'chemicals': 'Chemicals',
'medicines': 'Medicines',
'drugs': 'Drugs',
'machinery': 'Machinery',
'technology': 'Technology',
'consumer_items': 'Consumer Items',
'waste': 'Waste',
'metals': 'Metals',
'minerals': 'Minerals',
'weapons': 'Weapons'
}
items = {
'grain': 'Grain',
'animalmeat': 'Animal Meat',
'fish': 'Fish',
'foodcartridges': 'Food Cartridges',
'syntheticmeat': 'Synthetic Meat',
'tea': 'Tea',
'coffee': 'Coffee',
'leather': 'Leather',
'naturalfabrics': 'Natural Fabrics',
'syntheticfabrics': 'Synthetic Fabrics',
'polymers': 'Polymers',
'semiconductors': 'Semiconductors',
'superconductors': 'Superconductors',
'hydrogenfuel': 'Hydrogen Fuel',
'performanceenhancers': 'Performance Enhancers',
'basicmedicines': 'Basic Medicines',
'beer': 'Beer',
'mineralextractors': 'Mineral Extractors',
'cropharvesters': 'Crop Harvesters',
'hazardousenvironmentsuits': 'Hazardous Environment Suits',
'robotics': 'Robotics',
'autofabricators': 'Auto Fabricators',
'domesticappliances': 'Domestic Appliances',
'consumertechnology': 'Consumer Technology',
'clothing': 'Clothing',
'biowaste': 'Biowaste',
'scrap': 'Scrap',
'progenitorcells': 'Progenitor Cells',
'gold': 'Gold',
'beryllium': 'Beryllium',
'indium': 'Indium',
'gallium': 'Gallium',
'tantalum': 'Tantalum',
'uranium': 'Uranium',
'lithium': 'Lithium',
'titanium': 'Titanium',
'copper': 'Copper',
'aluminium': 'Aluminium',
'algae': 'Algae',
'fruitandvegetables': 'Fruits and Vegetables',
'mineraloil': 'Mineral Oil',
'pesticides': 'Pesticides',
'agriculturalmedicines': 'Agricultural Medicines',
'tobacco': 'Tobacco',
'wine': 'Wine',
'liquor': 'Liquor',
'animalmonitors': 'Animal Monitors',
'terrainenrichmentsystems': 'Terrain Enrichment Systems',
'personalweapons': 'Personal Weapons',
'heliostaticfurnaces': 'Heliostatic Furnaces',
'marinesupplies': 'Marine Supplies',
'computercomponents': 'Computer Components',
'aquaponicsystems': 'Aquaponic Systems',
'palladium': 'Palladium',
'silver': 'Silver',
'gallite': 'Gallite',
'cobalt': 'Cobalt',
'rutile': 'Rutile',
'reactivearmour': 'Reactive Armour',
'nonlethalweapons': 'Non-Lethal Weapons',
'bertrandite': 'Bertrandite',
'coltan': 'Coltan',
'bauxite': 'Bauxite',
'explosives': 'Explosives',
'bioreducinglichen': 'Bio-Reducing Lichen',
'indite': 'Indite',
'lepidolite': 'Lepidolite',
'uraninite': 'Uraninite',
'advancedcatalysers': 'Advanced Catalysers',
'combatstabilisers': 'Combat Stabilisers',
'resonatingseparators': 'Resonating Separators',
'basicnarcotics': 'Basic Narcotics'
}
class Feeder(object):
def __init__(self):
piddir = '/var/run/feeder/'
self.stdin_path = '/dev/null'
self.stdout_path = '/dev/null'
self.stderr_path = '/dev/tty'
self.pidfile_path = os.path.join(piddir, 'feeder.pid')
self.pidfile_timeout = 5
#Check that the PID folder exists
if not os.path.isdir(piddir):
os.mkdir(piddir, 711)
os.chown(piddir, uinfo.pw_uid, uinfo.pw_gid)
def run(self):
""" Main loop. Connects and receives the data """
ctx = zmq.Context()
logger.info('Starting')
while(True):
updates = ctx.socket(zmq.SUB)
updates.linger = 0
updates.setsockopt(zmq.SUBSCRIBE, '')
try:
updates.connect(endpoint_url)
logger.info('Connected')
while(True):
market_json = zlib.decompress(updates.recv())
Feeder.process(market_json)
except Exception as ex:
                logger.warning('An error occurred: {0}. Reconnecting in 10 seconds.'.format(ex))
logger.debug(traceback.format_exc())
# Something went terribly wrong D:
# Wait 10 seconds and try again
time.sleep(10)
@staticmethod
def process(data):
""" Saves the specified JSON into the database """
if len(data) > max_data_length:
            Feeder.notify_dropped_frame(data, 'it is suspiciously long!')
return
try:
pdata = simplejson.loads(data)
except simplejson.JSONDecodeError as ex:
            Feeder.notify_dropped_frame(data, 'it could not be parsed by simplejson')
return
if 'version' not in pdata:
            Feeder.notify_dropped_frame(data, 'it does not contain a version number')
return
if pdata['version'] != expected_version:
            logger.info('A frame contains an unexpected version ({0}).'.format(pdata['version']))
if 'type' not in pdata:
            Feeder.notify_dropped_frame(data, 'it does not contain a type descriptor')
return
if pdata['type'] != 'marketquote':
            Feeder.notify_dropped_frame(data, 'it has an unknown type ({0})'.format(pdata['type']))
return
# Add display names
if 'categoryName' in pdata['message']:
if pdata['message']['categoryName'] in categories:
pdata['message']['categoryDisplayName'] = categories[pdata['message']['categoryName']]
else:
logger.warning('Missing category display name: ' + pdata['message']['categoryName'])
pdata['message']['categoryDisplayName'] = pdata['message']['categoryName']
if 'itemName' in pdata['message']:
if pdata['message']['itemName'] in items:
pdata['message']['itemDisplayName'] = items[pdata['message']['itemName']]
else:
logger.warning('Missing item display name: ' + pdata['message']['itemName'])
# Drop unknown item names !
return
else:
logger.warning('Received an item without a name !')
logger.debug('Frame contents: ' + data)
return
# Insert the data
mongo = pymongo.Connection(dbhost, dbport)
db = mongo.ccompanion
table = db.marketquote
table.ensure_index([('stationName', 1), ('itemName', 1)]) # TODO : Move this to do it only once ?
table.update({'stationName': pdata['message']['stationName'], 'itemName': pdata['message']['itemName']},
pdata['message'],
upsert=True)
@staticmethod
def notify_dropped_frame(data, reason):
logger.warning('A data frame was dropped because ' + reason)
logger.debug('Frame contents: ' + data)
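# Illustrative shape of an EMDN frame accepted by Feeder.process() (the field
# names come from the checks above; the concrete values are made up):
# {
#     "version": "0.1",
#     "type": "marketquote",
#     "message": {
#         "stationName": "Example Station",
#         "categoryName": "metals",
#         "itemName": "gold",
#         ...
#     }
# }
# Such a frame is upserted into the marketquote collection, keyed on
# (stationName, itemName).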
app = Feeder()
logger = logging.getLogger('FeederDaemonLog')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler = logging.FileHandler('/var/log/feeder/feeder.log')
handler.setFormatter(formatter)
logger.addHandler(handler)
runner = runner.DaemonRunner(app)
runner.daemon_context.files_preserve = [handler.stream]
runner.daemon_context.uid = uinfo.pw_uid
runner.daemon_context.gid = uinfo.pw_gid
runner.do_action()
| {
"content_hash": "0a19043c12265aa309045448a0cef220",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 112,
"avg_line_length": 34.436440677966104,
"alnum_prop": 0.6052663959640704,
"repo_name": "Shtong/CmdrCompanion",
"id": "117240e733f72db971fc188eade4a2d635639720",
"size": "8129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/feeder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "263532"
},
{
"name": "Python",
"bytes": "8909"
},
{
"name": "Ruby",
"bytes": "375"
},
{
"name": "Shell",
"bytes": "1009"
}
],
"symlink_target": ""
} |
import os
from unittest.mock import patch
import matplotlib.pyplot as plt
import pytest
from astropy import units as u
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.wcs import WCS
MSX_HEADER = fits.Header.fromtextfile(get_pkg_data_filename('data/msx_header'))
def teardown_function(function):
plt.close('all')
def test_getaxislabel(ignore_matplotlibrc):
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
ax.coords[0].set_axislabel("X")
ax.coords[1].set_axislabel("Y")
assert ax.coords[0].get_axislabel() == "X"
assert ax.coords[1].get_axislabel() == "Y"
@pytest.fixture
def ax():
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
fig.add_axes(ax)
return ax
def assert_label_draw(ax, x_label, y_label):
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
with patch.object(ax.coords[0].axislabels, 'set_position') as pos1:
with patch.object(ax.coords[1].axislabels, 'set_position') as pos2:
ax.figure.canvas.draw()
assert pos1.call_count == x_label
assert pos2.call_count == y_label
def test_label_visibility_rules_default(ignore_matplotlibrc, ax):
assert_label_draw(ax, True, True)
def test_label_visibility_rules_label(ignore_matplotlibrc, ax):
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999]*u.one)
assert_label_draw(ax, False, False)
def test_label_visibility_rules_ticks(ignore_matplotlibrc, ax):
ax.coords[0].set_axislabel_visibility_rule('ticks')
ax.coords[1].set_axislabel_visibility_rule('ticks')
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999]*u.one)
assert_label_draw(ax, True, False)
def test_label_visibility_rules_always(ignore_matplotlibrc, ax):
ax.coords[0].set_axislabel_visibility_rule('always')
ax.coords[1].set_axislabel_visibility_rule('always')
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999]*u.one)
assert_label_draw(ax, True, True)
def test_format_unit():
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=WCS(MSX_HEADER))
fig.add_axes(ax)
# Force a draw which is required for format_coord to work
ax.figure.canvas.draw()
ori_fu = ax.coords[1].get_format_unit()
assert ori_fu == "deg"
ax.coords[1].set_format_unit("arcsec")
fu = ax.coords[1].get_format_unit()
assert fu == "arcsec"
def test_set_separator():
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=WCS(MSX_HEADER))
fig.add_axes(ax)
# Force a draw which is required for format_coord to work
ax.figure.canvas.draw()
ax.coords[1].set_format_unit('deg')
assert ax.coords[1].format_coord(4) == '4\xb000\'00\"'
ax.coords[1].set_separator((':', ':', ''))
assert ax.coords[1].format_coord(4) == '4:00:00'
ax.coords[1].set_separator('abc')
assert ax.coords[1].format_coord(4) == '4a00b00c'
ax.coords[1].set_separator(None)
assert ax.coords[1].format_coord(4) == '4\xb000\'00\"'
| {
"content_hash": "2eb975cab8ed3e2f5a1ec2864302afc2",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 79,
"avg_line_length": 27.10924369747899,
"alnum_prop": 0.6636701797892126,
"repo_name": "lpsinger/astropy",
"id": "4a6933e5d3b2fe4955de43578f3688cc39cc9b8e",
"size": "3291",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/visualization/wcsaxes/tests/test_coordinate_helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040074"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78755"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12323563"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
"""Model for an Oppia exploration."""
import datetime
from constants import constants
import core.storage.base_model.gae_models as base_models
import core.storage.user.gae_models as user_models
import feconf
from google.appengine.ext import ndb
class ExplorationSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for an exploration snapshot."""
pass
class ExplorationSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of an exploration snapshot."""
pass
class ExplorationModel(base_models.VersionedModel):
"""Versioned storage model for an Oppia exploration.
This class should only be imported by the exploration domain file, the
exploration services file, and the Exploration model test file.
"""
SNAPSHOT_METADATA_CLASS = ExplorationSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = ExplorationSnapshotContentModel
ALLOW_REVERT = True
# What this exploration is called.
title = ndb.StringProperty(required=True)
# The category this exploration belongs to.
category = ndb.StringProperty(required=True, indexed=True)
# The objective of this exploration.
objective = ndb.TextProperty(default='', indexed=False)
# The ISO 639-1 code for the language this exploration is written in.
language_code = ndb.StringProperty(
default=constants.DEFAULT_LANGUAGE_CODE, indexed=True)
# Tags (topics, skills, concepts, etc.) associated with this
# exploration.
tags = ndb.StringProperty(repeated=True, indexed=True)
# A blurb for this exploration.
blurb = ndb.TextProperty(default='', indexed=False)
# 'Author notes' for this exploration.
author_notes = ndb.TextProperty(default='', indexed=False)
# The version of the states blob schema.
states_schema_version = ndb.IntegerProperty(
required=True, default=0, indexed=True)
# The name of the initial state of this exploration.
init_state_name = ndb.StringProperty(required=True, indexed=False)
# A dict representing the states of this exploration. This dict should
# not be empty.
states = ndb.JsonProperty(default={}, indexed=False)
# The dict of parameter specifications associated with this exploration.
# Each specification is a dict whose keys are param names and whose values
# are each dicts with a single key, 'obj_type', whose value is a string.
param_specs = ndb.JsonProperty(default={}, indexed=False)
# The list of parameter changes to be performed once at the start of a
# reader's encounter with an exploration.
param_changes = ndb.JsonProperty(repeated=True, indexed=False)
# A boolean indicating whether automatic text-to-speech is enabled in
# this exploration.
auto_tts_enabled = ndb.BooleanProperty(default=True, indexed=True)
# DEPRECATED in v2.0.0.rc.2. Do not use. Retaining it here because deletion
# caused GAE to raise an error on fetching a specific version of the
# exploration model.
# TODO(sll): Fix this error and remove this property.
skill_tags = ndb.StringProperty(repeated=True, indexed=True)
# DEPRECATED in v2.0.1. Do not use.
# TODO(sll): Remove this property from the model.
default_skin = ndb.StringProperty(default='conversation_v1')
# DEPRECATED in v2.5.4. Do not use.
skin_customizations = ndb.JsonProperty(indexed=False)
@classmethod
def get_exploration_count(cls):
"""Returns the total number of explorations."""
return cls.get_all().count()
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(ExplorationModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
committer_user_settings_model = (
user_models.UserSettingsModel.get_by_id(committer_id))
committer_username = (
committer_user_settings_model.username
if committer_user_settings_model else '')
exp_rights = ExplorationRightsModel.get_by_id(self.id)
# TODO(msl): test if put_async() leads to any problems (make
# sure summary dicts get updated correctly when explorations
# are changed)
ExplorationCommitLogEntryModel(
id=('exploration-%s-%s' % (self.id, self.version)),
user_id=committer_id,
username=committer_username,
exploration_id=self.id,
commit_type=commit_type,
commit_message=commit_message,
commit_cmds=commit_cmds,
version=self.version,
post_commit_status=exp_rights.status,
post_commit_community_owned=exp_rights.community_owned,
post_commit_is_private=(
exp_rights.status == feconf.ACTIVITY_STATUS_PRIVATE)
).put_async()
class ExplorationRightsSnapshotMetadataModel(
base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for an exploration rights snapshot."""
pass
class ExplorationRightsSnapshotContentModel(
base_models.BaseSnapshotContentModel):
"""Storage model for the content of an exploration rights snapshot."""
pass
class ExplorationRightsModel(base_models.VersionedModel):
"""Storage model for rights related to an exploration.
The id of each instance is the id of the corresponding exploration.
"""
SNAPSHOT_METADATA_CLASS = ExplorationRightsSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = ExplorationRightsSnapshotContentModel
ALLOW_REVERT = False
# The user_ids of owners of this exploration.
owner_ids = ndb.StringProperty(indexed=True, repeated=True)
# The user_ids of users who are allowed to edit this exploration.
editor_ids = ndb.StringProperty(indexed=True, repeated=True)
# The user_ids of users who are allowed to view this exploration.
viewer_ids = ndb.StringProperty(indexed=True, repeated=True)
# Whether this exploration is owned by the community.
community_owned = ndb.BooleanProperty(indexed=True, default=False)
# The exploration id which this exploration was cloned from. If None, this
# exploration was created from scratch.
cloned_from = ndb.StringProperty()
# For private explorations, whether this exploration can be viewed
# by anyone who has the URL. If the exploration is not private, this
# setting is ignored.
viewable_if_private = ndb.BooleanProperty(indexed=True, default=False)
# Time, in milliseconds, when the exploration was first published.
first_published_msec = ndb.FloatProperty(indexed=True, default=None)
# The publication status of this exploration.
status = ndb.StringProperty(
default=feconf.ACTIVITY_STATUS_PRIVATE, indexed=True,
choices=[
feconf.ACTIVITY_STATUS_PRIVATE,
feconf.ACTIVITY_STATUS_PUBLIC
]
)
def save(self, committer_id, commit_message, commit_cmds):
super(ExplorationRightsModel, self).commit(
committer_id, commit_message, commit_cmds)
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(ExplorationRightsModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
# Create and delete events will already be recorded in the
# ExplorationModel.
if commit_type not in ['create', 'delete']:
committer_user_settings_model = (
user_models.UserSettingsModel.get_by_id(committer_id))
committer_username = (
committer_user_settings_model.username
if committer_user_settings_model else '')
# TODO(msl): test if put_async() leads to any problems (make
# sure summary dicts get updated correctly when explorations
# are changed)
ExplorationCommitLogEntryModel(
id=('rights-%s-%s' % (self.id, self.version)),
user_id=committer_id,
username=committer_username,
exploration_id=self.id,
commit_type=commit_type,
commit_message=commit_message,
commit_cmds=commit_cmds,
version=None,
post_commit_status=self.status,
post_commit_community_owned=self.community_owned,
post_commit_is_private=(
self.status == feconf.ACTIVITY_STATUS_PRIVATE)
).put_async()
class ExplorationCommitLogEntryModel(base_models.BaseModel):
"""Log of commits to explorations.
A new instance of this model is created and saved every time a commit to
ExplorationModel or ExplorationRightsModel occurs.
The id for this model is of the form
'exploration-{{EXP_ID}}-{{EXP_VERSION}}'.
"""
# Update superclass model to make these properties indexed.
created_on = ndb.DateTimeProperty(auto_now_add=True, indexed=True)
last_updated = ndb.DateTimeProperty(auto_now=True, indexed=True)
# The id of the user.
user_id = ndb.StringProperty(indexed=True, required=True)
# The username of the user, at the time of the edit.
username = ndb.StringProperty(indexed=True, required=True)
# The id of the exploration being edited.
exploration_id = ndb.StringProperty(indexed=True, required=True)
# The type of the commit: 'create', 'revert', 'edit', 'delete'.
commit_type = ndb.StringProperty(indexed=True, required=True)
# The commit message.
commit_message = ndb.TextProperty(indexed=False)
# The commit_cmds dict for this commit.
commit_cmds = ndb.JsonProperty(indexed=False, required=True)
# The version number of the exploration after this commit. Only populated
# for commits to an exploration (as opposed to its rights, etc.)
version = ndb.IntegerProperty()
# The status of the exploration after the edit event ('private', 'public').
post_commit_status = ndb.StringProperty(indexed=True, required=True)
# Whether the exploration is community-owned after the edit event.
post_commit_community_owned = ndb.BooleanProperty(indexed=True)
# Whether the exploration is private after the edit event. Having a
# separate field for this makes queries faster, since an equality query
# on this property is faster than an inequality query on
# post_commit_status.
post_commit_is_private = ndb.BooleanProperty(indexed=True)
@classmethod
def get_all_commits(cls, page_size, urlsafe_start_cursor):
"""Fetches a list of all the commits sorted by their last updated
attribute.
Args:
page_size: int. The maximum number of entities to be returned.
urlsafe_start_cursor: str or None. If provided, the list of
returned entities starts from this datastore cursor.
Otherwise, the returned entities start from the beginning
of the full list of entities.
Returns:
3-tuple of (results, cursor, more) as described in fetch_page() at:
https://developers.google.com/appengine/docs/python/ndb/queryclass,
where:
results: List of query results.
cursor: str or None. A query cursor pointing to the next
batch of results. If there are no more results, this will
be None.
more: bool. If True, there are (probably) more results after
this batch. If False, there are no further results after
this batch.
"""
return cls._fetch_page_sorted_by_last_updated(
cls.query(), page_size, urlsafe_start_cursor)
@classmethod
def get_all_non_private_commits(
cls, page_size, urlsafe_start_cursor, max_age=None):
"""Fetches a list of all the non-private commits sorted by their
last updated attribute.
Args:
page_size: int. The maximum number of entities to be returned.
urlsafe_start_cursor: str or None. If provided, the list of
returned entities starts from this datastore cursor.
Otherwise, the returned entities start from the beginning
of the full list of entities.
max_age: datetime.timedelta. The maximum time duration within which
commits are needed.
Returns:
            3-tuple of (results, cursor, more), containing the results created
            no earlier than max_age before the current time, where:
results: List of query results.
cursor: str or None. A query cursor pointing to the next
batch of results. If there are no more results, this will
be None.
more: bool. If True, there are (probably) more results after
this batch. If False, there are no further results after
this batch.
"""
if not isinstance(max_age, datetime.timedelta) and max_age is not None:
raise ValueError(
'max_age must be a datetime.timedelta instance or None.')
query = cls.query(cls.post_commit_is_private == False) # pylint: disable=singleton-comparison
if max_age:
query = query.filter(
cls.last_updated >= datetime.datetime.utcnow() - max_age)
return cls._fetch_page_sorted_by_last_updated(
query, page_size, urlsafe_start_cursor)
class ExpSummaryModel(base_models.BaseModel):
"""Summary model for an Oppia exploration.
This should be used whenever the content blob of the exploration is not
needed (e.g. in search results, etc).
A ExpSummaryModel instance stores the following information:
id, title, category, objective, language_code, tags,
last_updated, created_on, status (private, public),
community_owned, owner_ids, editor_ids,
viewer_ids, version.
The key of each instance is the exploration id.
"""
# What this exploration is called.
title = ndb.StringProperty(required=True)
# The category this exploration belongs to.
category = ndb.StringProperty(required=True, indexed=True)
# The objective of this exploration.
objective = ndb.TextProperty(required=True, indexed=False)
# The ISO 639-1 code for the language this exploration is written in.
language_code = ndb.StringProperty(required=True, indexed=True)
# Tags associated with this exploration.
tags = ndb.StringProperty(repeated=True, indexed=True)
# Aggregate user-assigned ratings of the exploration
ratings = ndb.JsonProperty(default=None, indexed=False)
# Scaled average rating for the exploration.
scaled_average_rating = ndb.FloatProperty(indexed=True)
# Time when the exploration model was last updated (not to be
# confused with last_updated, which is the time when the
# exploration *summary* model was last updated)
exploration_model_last_updated = ndb.DateTimeProperty(indexed=True)
# Time when the exploration model was created (not to be confused
# with created_on, which is the time when the exploration *summary*
# model was created)
exploration_model_created_on = ndb.DateTimeProperty(indexed=True)
# Time when the exploration was first published.
first_published_msec = ndb.FloatProperty(indexed=True)
# The publication status of this exploration.
status = ndb.StringProperty(
default=feconf.ACTIVITY_STATUS_PRIVATE, indexed=True,
choices=[
feconf.ACTIVITY_STATUS_PRIVATE,
feconf.ACTIVITY_STATUS_PUBLIC
]
)
# Whether this exploration is owned by the community.
community_owned = ndb.BooleanProperty(required=True, indexed=True)
# The user_ids of owners of this exploration.
owner_ids = ndb.StringProperty(indexed=True, repeated=True)
# The user_ids of users who are allowed to edit this exploration.
editor_ids = ndb.StringProperty(indexed=True, repeated=True)
# The user_ids of users who are allowed to view this exploration.
viewer_ids = ndb.StringProperty(indexed=True, repeated=True)
# The user_ids of users who have contributed (humans who have made a
# positive (not just a revert) change to the exploration's content)
contributor_ids = ndb.StringProperty(indexed=True, repeated=True)
# A dict representing the contributors of non-trivial commits to this
# exploration. Each key of this dict is a user_id, and the corresponding
# value is the number of non-trivial commits that the user has made.
contributors_summary = ndb.JsonProperty(default={}, indexed=False)
# The version number of the exploration after this commit. Only populated
# for commits to an exploration (as opposed to its rights, etc.)
version = ndb.IntegerProperty()
@classmethod
def get_non_private(cls):
"""Returns an iterable with non-private ExpSummary models.
Returns:
iterable. An iterable with non-private ExpSummary models.
"""
return ExpSummaryModel.query().filter(
ExpSummaryModel.status != feconf.ACTIVITY_STATUS_PRIVATE
).filter(
ExpSummaryModel.deleted == False # pylint: disable=singleton-comparison
).fetch(feconf.DEFAULT_QUERY_LIMIT)
@classmethod
def get_top_rated(cls, limit):
"""Fetches the top-rated exp summaries that are public in descending
order of scaled_average_rating.
Args:
limit: int. The maximum number of results to return.
Returns:
iterable. An iterable with the top rated exp summaries that are
public in descending order of scaled_average_rating.
"""
return ExpSummaryModel.query().filter(
ExpSummaryModel.status == feconf.ACTIVITY_STATUS_PUBLIC
).filter(
ExpSummaryModel.deleted == False # pylint: disable=singleton-comparison
).order(
-ExpSummaryModel.scaled_average_rating
).fetch(limit)
@classmethod
def get_private_at_least_viewable(cls, user_id):
"""Fetches private exp summaries that are at least viewable by the
given user.
Args:
user_id: The id of the given user.
Returns:
iterable. An iterable with private exp summaries that are at least
viewable by the given user.
"""
return ExpSummaryModel.query().filter(
ExpSummaryModel.status == feconf.ACTIVITY_STATUS_PRIVATE
).filter(
ndb.OR(ExpSummaryModel.owner_ids == user_id,
ExpSummaryModel.editor_ids == user_id,
ExpSummaryModel.viewer_ids == user_id)
).filter(
ExpSummaryModel.deleted == False # pylint: disable=singleton-comparison
).fetch(feconf.DEFAULT_QUERY_LIMIT)
@classmethod
def get_at_least_editable(cls, user_id):
"""Fetches exp summaries that are at least editable by the given user.
Args:
user_id: The id of the given user.
Returns:
iterable. An iterable with exp summaries that are at least
editable by the given user.
"""
return ExpSummaryModel.query().filter(
ndb.OR(ExpSummaryModel.owner_ids == user_id,
ExpSummaryModel.editor_ids == user_id)
).filter(
ExpSummaryModel.deleted == False # pylint: disable=singleton-comparison
).fetch(feconf.DEFAULT_QUERY_LIMIT)
@classmethod
def get_recently_published(cls, limit):
"""Fetches exp summaries that are recently published.
Args:
limit: int. The maximum number of results to return.
Returns:
An iterable with exp summaries that are recently published. The
returned list is sorted by the time of publication with latest
being first in the list.
"""
return ExpSummaryModel.query().filter(
ExpSummaryModel.status == feconf.ACTIVITY_STATUS_PUBLIC
).filter(
ExpSummaryModel.deleted == False # pylint: disable=singleton-comparison
).order(
-ExpSummaryModel.first_published_msec
).fetch(limit)
class StateIdMappingModel(base_models.BaseModel):
"""State ID model for Oppia explorations.
This model maps each exploration version's state to a unique id.
    Note: use the state id only for derived data, but not for data that's
    regarded as the source of truth, as the rules for assigning state ids may
    change in the future.
The key of each instance is a combination of exploration id and version.
"""
# The exploration id whose states are mapped.
exploration_id = ndb.StringProperty(indexed=True, required=True)
# The version of the exploration.
exploration_version = ndb.IntegerProperty(indexed=True, required=True)
# A dict which maps each state name to a unique id.
state_names_to_ids = ndb.JsonProperty(required=True)
    # Latest state id that has been assigned to any of the states in any of
    # the versions of the given exploration. New state IDs should be assigned
    # from this value + 1.
largest_state_id_used = ndb.IntegerProperty(indexed=True, required=True)
@classmethod
def _generate_instance_id(cls, exp_id, exp_version):
"""Generates ID of the state id mapping model instance.
Args:
exp_id: str. The exploration id whose states are mapped.
exp_version: int. The version of the exploration.
"""
return '%s.%d' % (exp_id, exp_version)
@classmethod
def create(
cls, exp_id, exp_version, state_names_to_ids,
largest_state_id_used):
"""Creates a new instance of state id mapping model.
Args:
exp_id: str. The exploration id whose states are mapped.
exp_version: int. The version of that exploration.
state_names_to_ids: dict. A dict storing state name to ids mapping.
largest_state_id_used: int. The largest integer so far that has been
used as a state ID for this exploration.
Returns:
StateIdMappingModel. Instance of the state id mapping model.
"""
instance_id = cls._generate_instance_id(exp_id, exp_version)
if cls.get_by_id(instance_id):
raise Exception(
'State id mapping model already exists for exploration %s,'
' version %d' % (exp_id, exp_version))
model = cls(
id=instance_id, exploration_id=exp_id,
exploration_version=exp_version,
state_names_to_ids=state_names_to_ids,
largest_state_id_used=largest_state_id_used)
model.put()
return model
@classmethod
def get_state_id_mapping_model(cls, exp_id, exp_version):
"""Retrieve state id mapping model from the datastore.
Args:
exp_id: str. The exploration id.
exp_version: int. The exploration version.
Returns:
StateIdMappingModel. The model retrieved from the datastore.
"""
instance_id = cls._generate_instance_id(exp_id, exp_version)
instance = cls.get(instance_id)
return instance
@classmethod
def delete_state_id_mapping_models(cls, exp_id, exp_versions):
"""Removes state id mapping models present in state_id_mapping_models.
Args:
exp_id: The id of the exploration.
exp_versions: list(int). A list of exploration versions for which
the state id mapping model is to be deleted.
"""
keys = [
ndb.Key(cls, cls._generate_instance_id(exp_id, exp_version))
for exp_version in exp_versions]
ndb.delete_multi(keys)
| {
"content_hash": "fb133a7d81d7d338c720f8f328c3044b",
"timestamp": "",
"source": "github",
"line_count": 601,
"max_line_length": 102,
"avg_line_length": 42.818635607321134,
"alnum_prop": 0.6600217610942721,
"repo_name": "himanshu-dixit/oppia",
"id": "fc6904492f8c74bfaacecc708cc13e25be9adf5f",
"size": "26359",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/storage/exploration/gae_models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "101439"
},
{
"name": "HTML",
"bytes": "899603"
},
{
"name": "JavaScript",
"bytes": "2950299"
},
{
"name": "Python",
"bytes": "3818679"
},
{
"name": "Shell",
"bytes": "47818"
}
],
"symlink_target": ""
} |
import sys
import json
import socket
import board
import game_state
#
# Create and parse messages for passing to and from the server
#
# in_XXX: parses a message from the server
# out_XXX: creates a message to send to the server
#
name = "This is an albatrocity!"
def out_handshake():
return {'me' : name}
def in_handshake(msg):
pass
def in_setup(msg):
b = board.Board.from_json(msg['map'])
gs = game_state.GameState1.from_board(b,
players = msg['punters'],
me = msg['punter'],
settings = msg.get('settings', {}))
return (b, gs)
def out_setup_online(gs):
msg = {'ready' : gs.me}
if gs.futures:
msg['futures'] = []
return msg
def out_setup_offline(gs):
msg = out_setup_online(gs)
msg['state'] = gs.serialize()
return msg
def apply_moves(gs, moves):
for move in moves['moves']:
m = move.get('claim', move.get('pass', move.get('splurge', None)))
who = m['punter']
gs.apply_move(who, game_state.Move.from_json(gs, move))
# gs is None for offline mode, or a valid game state for online mode
# Returns new game state and None if game is continuing or scores if game is done.
def in_move(msg, gs = None):
if gs is None:
gs = game_state.GameState1.deserialize(msg['state'])
if 'move' in msg:
apply_moves(gs, msg['move'])
return (gs, None)
if 'stop' in msg:
apply_moves(gs, msg['stop'])
return (gs, msg['stop']['scores'])
def out_move_online(move):
return move.to_json()
def out_move_offline(gs, move):
msg = out_move_online(move)
msg['state'] = gs.serialize()
return msg
def wrap(msg):
s = json.dumps(msg)
return str(len(s)) + ':' + s
def send_offline(msg):
sys.stdout.write(wrap(msg))
sys.stdout.flush()
def read_offline(nbytes):
return sys.stdin.read(nbytes)
url = 'punter.inf.ed.ac.uk'
buffer_size = 4096
class PipeOnline:
def __init__(self, port):
print("Opening socket to port {}".format(port))
self.socket = socket.create_connection((url, port))
self.buffer = bytes()
def send(self, msg):
self.socket.sendall(wrap(msg).encode('ascii'))
def read(self, nbytes):
result = bytes()
while len(result) + len(self.buffer) < nbytes:
result = result + self.buffer
self.buffer = self.socket.recv(buffer_size)
k = nbytes - len(result)
result = result + self.buffer[:k]
self.buffer = self.buffer[k:]
return result.decode('ascii')
def receive(read):
size = 0
while True:
c = read(1)
if c == ':':
break
else:
size = 10 * size + int(c)
return json.loads(read(size))
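# Example of the length-prefixed framing shared by wrap() and receive()
# (illustrative): wrap({'me': 'x'}) produces '11:{"me": "x"}', and receive()
# reads digits up to ':' to learn how many bytes of JSON to consume.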
# Returns game state
def client_offline(choose_move):
# Handshake
send_offline(out_handshake())
in_handshake(receive(read_offline))
msg = receive(read_offline)
if 'map' in msg:
# Setup
b, gs = in_setup(msg)
send_offline(out_setup_offline(gs))
return gs
if ('move' in msg) or ('stop' in msg):
gs, scores = in_move(msg)
if scores is None:
send_offline(out_move_offline(gs, choose_move(gs)))
return gs
class ClientOnline:
def __init__(self, port):
self.pipe = PipeOnline(port)
# Handshake
print("Handshake...")
self.pipe.send(out_handshake())
in_handshake(receive(self.pipe.read))
# Setup
print("Setup...")
b, gs = in_setup(receive(self.pipe.read))
self.pipe.send(out_setup_online(gs))
print("Board summary", b.summary())
self.gs = gs
self.listen()
def listen(self):
print("Receiving moves...")
gs, scores = in_move(receive(self.pipe.read), self.gs)
self.gs = gs
self.waiting_for_move = (scores is None)
if scores is not None:
self.scores = scores
def make_move(self, move):
self.pipe.send(out_move_online(move))
self.listen()
| {
"content_hash": "c86aabcfa5527e5ce88bfbe3f324859d",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 82,
"avg_line_length": 25.01851851851852,
"alnum_prop": 0.5845053047125586,
"repo_name": "estansifer/icfpc2017",
"id": "61eba281000091f01b2a82ae139226cb8be2d85b",
"size": "4053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40122"
},
{
"name": "Shell",
"bytes": "268"
}
],
"symlink_target": ""
} |
import zagoload
def download(source):
def info(ss):
        import sys
        sys.stdout.write(ss + u'\n')
def onDownload(fileSize,downSize, downSpeed):
info( u'{0:3}% - {1:8}/{2}, {3:4.0f}kb/s'.format(int(100*downSize/fileSize), downSize, fileSize, downSpeed/1024))
ff = zagoload.load(source,onDownload=onDownload)
if ff.valid:
info( u'{0} => {1}'.format(ff.source, ff.target) )
else:
info( 'Failed to download {0}'.format(ff.source) )
download('http://download.thinkbroadband.com/5MB.zip')
| {
"content_hash": "165ecaa73f148a57f032539290710f2c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 117,
"avg_line_length": 42,
"alnum_prop": 0.6726190476190477,
"repo_name": "napuzba/zagoload",
"id": "afb176df6f12587a768d893c26fad58592554500",
"size": "504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/sample_05.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25910"
}
],
"symlink_target": ""
} |
'''
Created on Mar 16, 2011
@author: cgueret
'''
from Resources import Museum, Painting
if __name__ == '__main__':
#a = Museum('altesnational')
#print a.to_rdfxml()
b = Painting('altesnational/woman-at-a-window')
print b.to_rdfxml() | {
"content_hash": "16ca2ded790d775db64efaad4768b9b6",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 51,
"avg_line_length": 20.833333333333332,
"alnum_prop": 0.632,
"repo_name": "cgueret/GoogleArt-wrapper",
"id": "535dff94450db55d653f2a96215821cd92ba98fc",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "15770"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('SscData', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Host',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=127)),
('info', models.CharField(max_length=255)),
('mail', models.CharField(max_length=255)),
('imgUrl', models.CharField(max_length=255)),
],
options={
'ordering': ['id'],
},
),
migrations.AlterModelOptions(
name='timetable',
options={'ordering': ['id']},
),
]
| {
"content_hash": "bd2265863817b5d6f89d09dcb846de2e",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 114,
"avg_line_length": 28.966666666666665,
"alnum_prop": 0.5132336018411968,
"repo_name": "jakdor/SSCAndroidApp",
"id": "58bcf2569d9f50b7bcae8481bcd147da960aed6b",
"size": "942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "API/ssc/SscData/migrations/0002_auto_20170815_1529.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "228"
},
{
"name": "Java",
"bytes": "113374"
},
{
"name": "Python",
"bytes": "15471"
}
],
"symlink_target": ""
} |
"""Generate a PDF using the DocRaptor url style."""
from docraptor import DocRaptor
def main():
"""Generate a PDF with specified url."""
docraptor = DocRaptor()
print("Create test_basic_url.pdf")
with open("test_basic_url.pdf", "wb") as pdf_file:
pdf_file.write(
docraptor.create(
{"document_url": "http://docraptor.com", "test": True}
).content
)
if __name__ == "__main__":
main()
| {
"content_hash": "22e857af1a2a6edaeab9469e991019f0",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 70,
"avg_line_length": 24.36842105263158,
"alnum_prop": 0.5637149028077754,
"repo_name": "jkeyes/python-docraptor",
"id": "a40765656659541fa8c4225d56bf3745ee6dff1b",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/basic_url.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15360"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
console_scripts = ['eth=pyethereum.eth:main',
'pyethtool=tools.pyethtool_cli:main']
setup(name="pyethereum",
version='0.0.1',
packages=find_packages("."),
install_requires=[
'six', 'leveldb', 'bitcoin', 'pysha3',
'miniupnpc',
'bottle', 'waitress'],
entry_points=dict(console_scripts=console_scripts))
| {
"content_hash": "f4282e9910cf37146ec59ec5568ac892",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 57,
"avg_line_length": 32.30769230769231,
"alnum_prop": 0.6047619047619047,
"repo_name": "ebuchman/daoist_protocol",
"id": "d94b7d908e3695f906e172e8829983e3f5184206",
"size": "421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1317"
},
{
"name": "JavaScript",
"bytes": "25740"
},
{
"name": "Python",
"bytes": "235303"
}
],
"symlink_target": ""
} |
"""Tests common to all coder implementations."""
from __future__ import absolute_import
import logging
import math
import sys
import unittest
from builtins import range
import pytest
from apache_beam.coders import proto2_coder_test_messages_pb2 as test_message
from apache_beam.coders import coders
from apache_beam.internal import pickler
from apache_beam.runners import pipeline_context
from apache_beam.transforms import window
from apache_beam.transforms.window import GlobalWindow
from apache_beam.utils import timestamp
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from . import observable
# Defined out of line for picklability.
class CustomCoder(coders.Coder):
def encode(self, x):
return str(x+1).encode('utf-8')
def decode(self, encoded):
return int(encoded) - 1
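# For instance, CustomCoder().encode(1) == b'2' and
# CustomCoder().decode(b'2') == 1 (round-tripping via the +1/-1 shift).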
# These tests need to all be run in the same process due to the asserts
# in tearDownClass.
@pytest.mark.no_xdist
class CodersTest(unittest.TestCase):
# These class methods ensure that we test each defined coder in both
# nested and unnested context.
@classmethod
def setUpClass(cls):
cls.seen = set()
cls.seen_nested = set()
# Method has been renamed in Python 3
if sys.version_info[0] < 3:
cls.assertCountEqual = cls.assertItemsEqual
@classmethod
def tearDownClass(cls):
standard = set(c
for c in coders.__dict__.values()
if isinstance(c, type) and issubclass(c, coders.Coder) and
'Base' not in c.__name__)
standard -= set([coders.Coder,
coders.AvroGenericCoder,
coders.DeterministicProtoCoder,
coders.FastCoder,
coders.ProtoCoder,
coders.RunnerAPICoderHolder,
coders.ToStringCoder])
assert not standard - cls.seen, standard - cls.seen
assert not standard - cls.seen_nested, standard - cls.seen_nested
@classmethod
def _observe(cls, coder):
cls.seen.add(type(coder))
cls._observe_nested(coder)
@classmethod
def _observe_nested(cls, coder):
if isinstance(coder, coders.TupleCoder):
for c in coder.coders():
cls.seen_nested.add(type(c))
cls._observe_nested(c)
def check_coder(self, coder, *values, **kwargs):
context = kwargs.pop('context', pipeline_context.PipelineContext())
test_size_estimation = kwargs.pop('test_size_estimation', True)
assert not kwargs
self._observe(coder)
for v in values:
self.assertEqual(v, coder.decode(coder.encode(v)))
if test_size_estimation:
self.assertEqual(coder.estimate_size(v),
len(coder.encode(v)))
self.assertEqual(coder.estimate_size(v),
coder.get_impl().estimate_size(v))
self.assertEqual(coder.get_impl().get_estimated_size_and_observables(v),
(coder.get_impl().estimate_size(v), []))
copy1 = pickler.loads(pickler.dumps(coder))
copy2 = coders.Coder.from_runner_api(coder.to_runner_api(context), context)
for v in values:
self.assertEqual(v, copy1.decode(copy2.encode(v)))
if coder.is_deterministic():
self.assertEqual(copy1.encode(v), copy2.encode(v))
def test_custom_coder(self):
self.check_coder(CustomCoder(), 1, -10, 5)
self.check_coder(coders.TupleCoder((CustomCoder(), coders.BytesCoder())),
(1, b'a'), (-10, b'b'), (5, b'c'))
def test_pickle_coder(self):
self.check_coder(coders.PickleCoder(), 'a', 1, 1.5, (1, 2, 3))
def test_deterministic_coder(self):
coder = coders.FastPrimitivesCoder()
deterministic_coder = coders.DeterministicFastPrimitivesCoder(coder, 'step')
self.check_coder(deterministic_coder, 'a', 1, 1.5, (1, 2, 3))
with self.assertRaises(TypeError):
self.check_coder(deterministic_coder, dict())
with self.assertRaises(TypeError):
self.check_coder(deterministic_coder, [1, dict()])
self.check_coder(coders.TupleCoder((deterministic_coder, coder)),
(1, dict()), ('a', [dict()]))
def test_dill_coder(self):
cell_value = (lambda x: lambda: x)(0).__closure__[0]
self.check_coder(coders.DillCoder(), 'a', 1, cell_value)
self.check_coder(
coders.TupleCoder((coders.VarIntCoder(), coders.DillCoder())),
(1, cell_value))
def test_fast_primitives_coder(self):
coder = coders.FastPrimitivesCoder(coders.SingletonCoder(len))
self.check_coder(coder, None, 1, -1, 1.5, b'str\0str', u'unicode\0\u0101')
self.check_coder(coder, (), (1, 2, 3))
self.check_coder(coder, [], [1, 2, 3])
self.check_coder(coder, dict(), {'a': 'b'}, {0: dict(), 1: len})
self.check_coder(coder, set(), {'a', 'b'})
self.check_coder(coder, True, False)
self.check_coder(coder, len)
self.check_coder(coders.TupleCoder((coder,)), ('a',), (1,))
def test_fast_primitives_coder_large_int(self):
coder = coders.FastPrimitivesCoder()
self.check_coder(coder, 10 ** 100)
def test_bytes_coder(self):
self.check_coder(coders.BytesCoder(), b'a', b'\0', b'z' * 1000)
def test_bool_coder(self):
self.check_coder(coders.BooleanCoder(), True, False)
def test_varint_coder(self):
# Small ints.
self.check_coder(coders.VarIntCoder(), *range(-10, 10))
# Multi-byte encoding starts at 128
self.check_coder(coders.VarIntCoder(), *range(120, 140))
# Large values
MAX_64_BIT_INT = 0x7fffffffffffffff
self.check_coder(coders.VarIntCoder(),
*[int(math.pow(-1, k) * math.exp(k))
for k in range(0, int(math.log(MAX_64_BIT_INT)))])
def test_float_coder(self):
self.check_coder(coders.FloatCoder(),
*[float(0.1 * x) for x in range(-100, 100)])
self.check_coder(coders.FloatCoder(),
*[float(2 ** (0.1 * x)) for x in range(-100, 100)])
self.check_coder(coders.FloatCoder(), float('-Inf'), float('Inf'))
self.check_coder(
coders.TupleCoder((coders.FloatCoder(), coders.FloatCoder())),
(0, 1), (-100, 100), (0.5, 0.25))
def test_singleton_coder(self):
a = 'anything'
b = 'something else'
self.check_coder(coders.SingletonCoder(a), a)
self.check_coder(coders.SingletonCoder(b), b)
self.check_coder(coders.TupleCoder((coders.SingletonCoder(a),
coders.SingletonCoder(b))), (a, b))
def test_interval_window_coder(self):
self.check_coder(coders.IntervalWindowCoder(),
*[window.IntervalWindow(x, y)
for x in [-2**52, 0, 2**52]
for y in range(-100, 100)])
self.check_coder(
coders.TupleCoder((coders.IntervalWindowCoder(),)),
(window.IntervalWindow(0, 10),))
def test_timestamp_coder(self):
self.check_coder(coders.TimestampCoder(),
*[timestamp.Timestamp(micros=x) for x in (-1000, 0, 1000)])
self.check_coder(coders.TimestampCoder(),
timestamp.Timestamp(micros=-1234567000),
timestamp.Timestamp(micros=1234567000))
self.check_coder(coders.TimestampCoder(),
timestamp.Timestamp(micros=-1234567890123456000),
timestamp.Timestamp(micros=1234567890123456000))
self.check_coder(
coders.TupleCoder((coders.TimestampCoder(), coders.BytesCoder())),
(timestamp.Timestamp.of(27), b'abc'))
def test_timer_coder(self):
self.check_coder(coders._TimerCoder(coders.BytesCoder()),
*[{'timestamp': timestamp.Timestamp(micros=x),
'payload': b'xyz'}
for x in (-3000, 0, 3000)])
self.check_coder(
coders.TupleCoder((coders._TimerCoder(coders.VarIntCoder()),)),
({'timestamp': timestamp.Timestamp.of(37000), 'payload': 389},))
def test_tuple_coder(self):
kv_coder = coders.TupleCoder((coders.VarIntCoder(), coders.BytesCoder()))
# Verify cloud object representation
self.assertEqual(
{
'@type': 'kind:pair',
'is_pair_like': True,
'component_encodings': [
coders.VarIntCoder().as_cloud_object(),
coders.BytesCoder().as_cloud_object()],
},
kv_coder.as_cloud_object())
# Test binary representation
self.assertEqual(
b'\x04abc',
kv_coder.encode((4, b'abc')))
# Test unnested
self.check_coder(
kv_coder,
(1, b'a'),
(-2, b'a' * 100),
(300, b'abc\0' * 5))
# Test nested
self.check_coder(
coders.TupleCoder(
(coders.TupleCoder((coders.PickleCoder(), coders.VarIntCoder())),
coders.StrUtf8Coder(),
coders.BooleanCoder())),
((1, 2), 'a', True),
((-2, 5), u'a\u0101' * 100, False),
((300, 1), 'abc\0' * 5, True))
def test_tuple_sequence_coder(self):
int_tuple_coder = coders.TupleSequenceCoder(coders.VarIntCoder())
self.check_coder(int_tuple_coder, (1, -1, 0), (), tuple(range(1000)))
self.check_coder(
coders.TupleCoder((coders.VarIntCoder(), int_tuple_coder)),
(1, (1, 2, 3)))
def test_base64_pickle_coder(self):
self.check_coder(coders.Base64PickleCoder(), 'a', 1, 1.5, (1, 2, 3))
def test_utf8_coder(self):
self.check_coder(coders.StrUtf8Coder(), 'a', u'ab\u00FF', u'\u0101\0')
def test_iterable_coder(self):
iterable_coder = coders.IterableCoder(coders.VarIntCoder())
# Verify cloud object representation
self.assertEqual(
{
'@type': 'kind:stream',
'is_stream_like': True,
'component_encodings': [coders.VarIntCoder().as_cloud_object()]
},
iterable_coder.as_cloud_object())
# Test unnested
self.check_coder(iterable_coder,
[1], [-1, 0, 100])
# Test nested
self.check_coder(
coders.TupleCoder((coders.VarIntCoder(),
coders.IterableCoder(coders.VarIntCoder()))),
(1, [1, 2, 3]))
def test_iterable_coder_unknown_length(self):
# Empty
self._test_iterable_coder_of_unknown_length(0)
# Single element
self._test_iterable_coder_of_unknown_length(1)
# Multiple elements
self._test_iterable_coder_of_unknown_length(100)
# Multiple elements with underlying stream buffer overflow.
self._test_iterable_coder_of_unknown_length(80000)
def _test_iterable_coder_of_unknown_length(self, count):
def iter_generator(count):
for i in range(count):
yield i
iterable_coder = coders.IterableCoder(coders.VarIntCoder())
self.assertCountEqual(list(iter_generator(count)),
iterable_coder.decode(
iterable_coder.encode(iter_generator(count))))
def test_windowedvalue_coder_paneinfo(self):
coder = coders.WindowedValueCoder(coders.VarIntCoder(),
coders.GlobalWindowCoder())
test_paneinfo_values = [
windowed_value.PANE_INFO_UNKNOWN,
windowed_value.PaneInfo(
True, True, windowed_value.PaneInfoTiming.EARLY, 0, -1),
windowed_value.PaneInfo(
True, False, windowed_value.PaneInfoTiming.ON_TIME, 0, 0),
windowed_value.PaneInfo(
True, False, windowed_value.PaneInfoTiming.ON_TIME, 10, 0),
windowed_value.PaneInfo(
False, True, windowed_value.PaneInfoTiming.ON_TIME, 0, 23),
windowed_value.PaneInfo(
False, True, windowed_value.PaneInfoTiming.ON_TIME, 12, 23),
windowed_value.PaneInfo(
False, False, windowed_value.PaneInfoTiming.LATE, 0, 123),]
test_values = [windowed_value.WindowedValue(123, 234, (GlobalWindow(),), p)
for p in test_paneinfo_values]
# Test unnested.
self.check_coder(coder, windowed_value.WindowedValue(
123, 234, (GlobalWindow(),), windowed_value.PANE_INFO_UNKNOWN))
for value in test_values:
self.check_coder(coder, value)
# Test nested.
for value1 in test_values:
for value2 in test_values:
self.check_coder(coders.TupleCoder((coder, coder)), (value1, value2))
def test_windowed_value_coder(self):
coder = coders.WindowedValueCoder(coders.VarIntCoder(),
coders.GlobalWindowCoder())
# Verify cloud object representation
self.assertEqual(
{
'@type': 'kind:windowed_value',
'is_wrapper': True,
'component_encodings': [
coders.VarIntCoder().as_cloud_object(),
coders.GlobalWindowCoder().as_cloud_object(),
],
},
coder.as_cloud_object())
# Test binary representation
self.assertEqual(b'\x7f\xdf;dZ\x1c\xac\t\x00\x00\x00\x01\x0f\x01',
coder.encode(window.GlobalWindows.windowed_value(1)))
# Test decoding large timestamp
self.assertEqual(
coder.decode(b'\x7f\xdf;dZ\x1c\xac\x08\x00\x00\x00\x01\x0f\x00'),
windowed_value.create(0, MIN_TIMESTAMP.micros, (GlobalWindow(),)))
# Test unnested
self.check_coder(
coders.WindowedValueCoder(coders.VarIntCoder()),
windowed_value.WindowedValue(3, -100, ()),
windowed_value.WindowedValue(-1, 100, (1, 2, 3)))
# Test Global Window
self.check_coder(
coders.WindowedValueCoder(coders.VarIntCoder(),
coders.GlobalWindowCoder()),
window.GlobalWindows.windowed_value(1))
# Test nested
self.check_coder(
coders.TupleCoder((
coders.WindowedValueCoder(coders.FloatCoder()),
coders.WindowedValueCoder(coders.StrUtf8Coder()))),
(windowed_value.WindowedValue(1.5, 0, ()),
windowed_value.WindowedValue("abc", 10, ('window',))))
def test_proto_coder(self):
    # For instructions on how these test proto messages were generated,
# see coders_test.py
ma = test_message.MessageA()
mab = ma.field2.add()
mab.field1 = True
ma.field1 = u'hello world'
mb = test_message.MessageA()
mb.field1 = u'beam'
proto_coder = coders.ProtoCoder(ma.__class__)
self.check_coder(proto_coder, ma)
self.check_coder(coders.TupleCoder((proto_coder, coders.BytesCoder())),
(ma, b'a'), (mb, b'b'))
def test_global_window_coder(self):
coder = coders.GlobalWindowCoder()
value = window.GlobalWindow()
# Verify cloud object representation
self.assertEqual({'@type': 'kind:global_window'},
coder.as_cloud_object())
# Test binary representation
self.assertEqual(b'', coder.encode(value))
self.assertEqual(value, coder.decode(b''))
# Test unnested
self.check_coder(coder, value)
# Test nested
self.check_coder(coders.TupleCoder((coder, coder)),
(value, value))
def test_length_prefix_coder(self):
coder = coders.LengthPrefixCoder(coders.BytesCoder())
# Verify cloud object representation
self.assertEqual(
{
'@type': 'kind:length_prefix',
'component_encodings': [coders.BytesCoder().as_cloud_object()]
},
coder.as_cloud_object())
# Test binary representation
self.assertEqual(b'\x00', coder.encode(b''))
self.assertEqual(b'\x01a', coder.encode(b'a'))
self.assertEqual(b'\x02bc', coder.encode(b'bc'))
self.assertEqual(b'\xff\x7f' + b'z' * 16383, coder.encode(b'z' * 16383))
# Test unnested
self.check_coder(coder, b'', b'a', b'bc', b'def')
# Test nested
self.check_coder(coders.TupleCoder((coder, coder)),
(b'', b'a'),
(b'bc', b'def'))
def test_nested_observables(self):
class FakeObservableIterator(observable.ObservableMixin):
def __iter__(self):
return iter([1, 2, 3])
# Coder for elements from the observable iterator.
elem_coder = coders.VarIntCoder()
iter_coder = coders.TupleSequenceCoder(elem_coder)
# Test nested WindowedValue observable.
coder = coders.WindowedValueCoder(iter_coder)
observ = FakeObservableIterator()
value = windowed_value.WindowedValue(observ, 0, ())
self.assertEqual(
coder.get_impl().get_estimated_size_and_observables(value)[1],
[(observ, elem_coder.get_impl())])
# Test nested tuple observable.
coder = coders.TupleCoder((coders.StrUtf8Coder(), iter_coder))
value = (u'123', observ)
self.assertEqual(
coder.get_impl().get_estimated_size_and_observables(value)[1],
[(observ, elem_coder.get_impl())])
def test_state_backed_iterable_coder(self):
# pylint: disable=global-variable-undefined
# required for pickling by reference
global state
state = {}
def iterable_state_write(values, element_coder_impl):
token = b'state_token_%d' % len(state)
state[token] = [element_coder_impl.encode(e) for e in values]
return token
def iterable_state_read(token, element_coder_impl):
return [element_coder_impl.decode(s) for s in state[token]]
coder = coders.StateBackedIterableCoder(
coders.VarIntCoder(),
read_state=iterable_state_read,
write_state=iterable_state_write,
write_state_threshold=1)
context = pipeline_context.PipelineContext(
iterable_state_read=iterable_state_read,
iterable_state_write=iterable_state_write)
self.check_coder(
coder, [1, 2, 3], context=context, test_size_estimation=False)
# Ensure that state was actually used.
self.assertNotEqual(state, {})
self.check_coder(coders.TupleCoder((coder, coder)),
([1], [2, 3]),
context=context,
test_size_estimation=False)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| {
"content_hash": "e62f0841380ca337d032d8efe6f36942",
"timestamp": "",
"source": "github",
"line_count": 479,
"max_line_length": 80,
"avg_line_length": 37.46764091858038,
"alnum_prop": 0.6197136011589681,
"repo_name": "RyanSkraba/beam",
"id": "122cbfd2842b9449bd5430ee29f5f4f94ea17319",
"size": "18732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/coders/coders_test_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1597"
},
{
"name": "CSS",
"bytes": "40963"
},
{
"name": "Dockerfile",
"bytes": "16638"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2683402"
},
{
"name": "Groovy",
"bytes": "517560"
},
{
"name": "HTML",
"bytes": "183330"
},
{
"name": "Java",
"bytes": "28609011"
},
{
"name": "JavaScript",
"bytes": "16595"
},
{
"name": "Jupyter Notebook",
"bytes": "56365"
},
{
"name": "Python",
"bytes": "6191025"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "235061"
},
{
"name": "TSQL",
"bytes": "841"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import permute.data as data
from numpy.testing import assert_equal
def test_botulinum():
""" Test that "Botulinum" data can be loaded. """
botulinum = data.botulinum()
assert_equal((botulinum.size, len(botulinum.dtype)), (80, 28))
def test_chrom17m():
""" Test that "chrom17m" data can be loaded. """
chrom17m = data.chrom17m()
assert_equal((chrom17m.size, len(chrom17m.dtype)), (10, 3))
def test_clinical_trial():
""" Test that "rb_clinical_trial" data can be loaded. """
clin = data.clinical_trial()
assert_equal((clin.size, len(clin.dtype)), (272, 15))
def test_confocal():
""" Test that "confocal" data can be loaded. """
confocal = data.confocal()
assert_equal((confocal.size, len(confocal.dtype)), (112, 17))
def test_germina():
""" Test that "germina" data can be loaded. """
germina = data.germina()
assert_equal((germina.size, len(germina.dtype)), (40, 5))
def test_kenya():
""" Test that "Kenya" data can be loaded. """
kenya = data.kenya()
assert_equal((kenya.size, len(kenya.dtype)), (16, 3))
def test_massaro_blair():
""" Test that "massaro_blair" data can be loaded. """
massaro_blair = data.massaro_blair()
assert_equal((massaro_blair.size, len(massaro_blair.dtype)), (29, 2))
def test_monachus():
""" Test that "monachus" data can be loaded. """
monachus = data.monachus()
assert_equal(monachus.size, 12)
assert_equal(len(monachus.dtype), 17)
def test_mult():
""" Test that "mult" data can be loaded. """
mult = data.mult()
assert_equal(mult.size, 16)
assert_equal(len(mult.dtype), 4)
def test_perch():
""" Test that "perch" data can be loaded. """
perch = data.perch()
assert_equal(perch.size, 108)
assert_equal(len(perch.dtype), 31)
def test_rats():
""" Test that "rats" data can be loaded. """
rats = data.rats()
assert_equal(rats.size, 36)
assert_equal(len(rats.dtype), 19)
def test_setig():
""" Test that "setig" data can be loaded. """
setig = data.setig()
assert_equal(setig.size, 334)
assert_equal(len(setig.dtype), 6)
def test_urology():
""" Test that "urology" data can be loaded. """
urology = data.urology()
assert_equal(urology.size, 481)
assert_equal(len(urology.dtype), 31)
def test_washing_test():
""" Test that "washing_test" data can be loaded. """
washing_test = data.washing_test()
assert_equal(washing_test.size, 800)
assert_equal(len(washing_test.dtype), 4)
def test_waterfalls():
""" Test that "waterfalls" data can be loaded. """
waterfalls = data.waterfalls()
assert_equal(waterfalls.size, 42)
assert_equal(len(waterfalls.dtype), 17)
def test_ipat():
""" Test that "ipat" data can be loaded. """
ipat = data.ipat()
assert_equal(ipat.size, 20)
assert_equal(len(ipat.dtype), 2)
def test_job():
""" Test that "job" data can be loaded. """
job = data.job()
assert_equal(job.size, 20)
assert_equal(len(job.dtype), 2)
def test_fly():
""" Test that "fly" data can be loaded. """
fly = data.fly()
assert_equal(fly.size, 70)
assert_equal(len(fly.dtype), 8)
def test_testosterone():
""" Test that "testosterone" data can be loaded. """
testosterone = data.testosterone()
assert_equal(testosterone.size, 11)
assert_equal(len(testosterone.dtype), 5)
def test_worms():
""" Test that "worms" data can be loaded. """
worms = data.worms()
assert_equal(worms.size, 18)
assert_equal(len(worms.dtype), 2)
| {
"content_hash": "f5ae8a752d405f0ee50ec8d38efa673c",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 73,
"avg_line_length": 26.773722627737225,
"alnum_prop": 0.6303162486368593,
"repo_name": "kellieotto/permute",
"id": "5a2abe2c343b2dfb19572415620b28fdc2ffd8ee",
"size": "3668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "permute/data/tests/test_data.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "591"
},
{
"name": "Python",
"bytes": "150992"
},
{
"name": "Shell",
"bytes": "620"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.core.management import call_command
from django.core.management.base import CommandError
from ..utils import writeParamsToJson
class TestManagementCommands(TestCase):
def test_print_map(self):
params = {
'bbox': [-2, -2, 2, 2], 'layout': 'test_layout',
'map_file': './sunlumo_mapserver/test_data/test_sunlumo.qgs',
'layers': ['polygons', 'lines', 'points'],
'transparencies': [50, 0, 0]
}
fileName = writeParamsToJson(params)
call_command(
'print_map', fileName
)
with open(fileName + '.pdf', 'rb') as pdfFile:
            # we just want to test that the PDF file is not blank
data = pdfFile.read()
self.assertEqual(len(data), 426652)
def test_print_map_missing_args(self):
with self.assertRaises(CommandError):
call_command('print_map')
| {
"content_hash": "826b2bbc699ca08146803875c4e9e0ac",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 73,
"avg_line_length": 29.6875,
"alnum_prop": 0.5989473684210527,
"repo_name": "candela-it/sunlumo",
"id": "dbea98fb5dd5f6783b66739f00340973a604464e",
"size": "974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_project/sunlumo_mapserver/tests/test_management_commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "394728"
},
{
"name": "HTML",
"bytes": "2964"
},
{
"name": "JavaScript",
"bytes": "364730"
},
{
"name": "Python",
"bytes": "99692"
},
{
"name": "Ruby",
"bytes": "900"
},
{
"name": "Shell",
"bytes": "446"
}
],
"symlink_target": ""
} |
import pytest
from tornado import web
from tornadose.handlers import EventSource, WebSocketSubscriber
@pytest.fixture
def app():
return web.Application()
def test_add_eventsource_handler(app, dummy_store):
app.add_handlers(".*$", [(r"/stream", EventSource, dict(store=dummy_store))])
def test_add_websocket_subscriber(app, dummy_store):
app.add_handlers(
".*$", [(r"/socket", WebSocketSubscriber, dict(store=dummy_store))]
)
| {
"content_hash": "c150725a43bde20a0ac0d8ed9634999e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 81,
"avg_line_length": 24,
"alnum_prop": 0.7039473684210527,
"repo_name": "mivade/tornadose",
"id": "546e4bf479a7628fb0fc5916a7bce1d916917a47",
"size": "456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "471"
},
{
"name": "Python",
"bytes": "15250"
}
],
"symlink_target": ""
} |
'''
'''
import os
import re
import sys
import phpass
import MySQLdb
import argparse
def auth(username,password):
UserDict = GetWPuser()
try:
password_hash = UserDict[username][2]
except KeyError:
return None
if len(password_hash) <= 32:
return None
else:
wp_hasher = phpass.PasswordHash(8, True)
check = wp_hasher.check_password(password, password_hash)
if check:
return [username,UserDict[username][1],True]
else:
return [username,UserDict[username][1],False]
def GetWPuser():
    cred = GetWPDBcredentials()
    con = None
    try:
con = MySQLdb.connect(cred['DB_HOST'], cred['DB_USER'], cred['DB_PASSWORD'], cred['DB_NAME'])
cur = con.cursor()
cur.execute("""
SELECT ID,user_login,user_pass FROM wp_users
""")
users = cur.fetchall()
cur.execute("""
SELECT user_id,meta_value FROM wordpress.wp_usermeta where meta_key="wp_capabilities";
""")
cap = cur.fetchall()
except MySQLdb.Error, e:
print "Error %d: %s" % (e.args[0], e.args[1])
finally:
if con:
con.close()
u_dict = {}
for (u_id,u_name,u_hash) in users:
for (cap_id,u_cap) in cap:
if cap_id == u_id:
break;
u_cap = u_cap.split('"')[1]
u_dict[u_name] = [u_id,u_cap,u_hash]
return u_dict
def GetWPDBcredentials(config_file="/var/www/wp-config.php"):
cred = {}
h_file = open(config_file,"r")
cred = {}
for l in h_file:
if l.find("DB_") != -1:
parts = l.split("'")
cred[parts[1]] = parts[3]
return cred
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-u', dest='user', help='Username')
parser.add_argument('-p', dest='password', default="", help='Password')
parser.add_argument('-c', dest='configfile', default="/var/www/wp-config.php", help='Wordpress config file.')
args = parser.parse_args()
#Where am I
path = os.path.abspath(os.path.dirname(sys.argv[0]))
#print "USER:" + args.user
#print "PASS:" + args.password
try:
res = auth(args.user,args.password)
for i in res:
print i
except:
print None
print None
print False
| {
"content_hash": "ac3d64dc9897cf3c91ef82c0335e8a72",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 113,
"avg_line_length": 22.08695652173913,
"alnum_prop": 0.5232283464566929,
"repo_name": "interactiveinstitute/NoderedWordpressAuth",
"id": "525b25fa6fc85a405613f878438858ca9fdc6391",
"size": "2558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WordpressAuth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "4131"
},
{
"name": "Python",
"bytes": "2558"
}
],
"symlink_target": ""
} |
from .fetchers import NUDeploymentFailuresFetcher
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUBGPNeighbor(NURESTObject):
""" Represents a BGPNeighbor in the VSD
Notes:
            For Virtual Cloud Services (VCS) in the data center, BGP PE-CE is configured at the vPort level. For Network Service Gateways (NSG), BGP is configured at the subnet level.
"""
__rest_name__ = "bgpneighbor"
__resource_name__ = "bgpneighbors"
## Constants
CONST_IP_TYPE_IPV6 = "IPV6"
CONST_IP_TYPE_IPV4 = "IPV4"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a BGPNeighbor instance
Notes:
                You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> bgpneighbor = NUBGPNeighbor(id=u'xxxx-xxx-xxx-xxx', name=u'BGPNeighbor')
>>> bgpneighbor = NUBGPNeighbor(data=my_dict)
"""
super(NUBGPNeighbor, self).__init__()
# Read/Write Attributes
self._bfd_enabled = None
self._ip_type = None
self._ipv6_address = None
self._name = None
self._dampening_enabled = None
self._peer_as = None
self._peer_configuration = None
self._peer_ip = None
self._description = None
self._session = None
self._embedded_metadata = None
self._entity_scope = None
self._domain_service_label = None
self._associated_export_routing_policy_id = None
self._associated_import_routing_policy_id = None
self._external_id = None
self.expose_attribute(local_name="bfd_enabled", remote_name="BFDEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="ip_type", remote_name="IPType", attribute_type=str, is_required=False, is_unique=False, choices=[u'IPV4', u'IPV6'])
self.expose_attribute(local_name="ipv6_address", remote_name="IPv6Address", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="dampening_enabled", remote_name="dampeningEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="peer_as", remote_name="peerAS", attribute_type=int, is_required=True, is_unique=False)
self.expose_attribute(local_name="peer_configuration", remote_name="peerConfiguration", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="peer_ip", remote_name="peerIP", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="session", remote_name="session", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="domain_service_label", remote_name="domainServiceLabel", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_export_routing_policy_id", remote_name="associatedExportRoutingPolicyID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_import_routing_policy_id", remote_name="associatedImportRoutingPolicyID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.deployment_failures = NUDeploymentFailuresFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def bfd_enabled(self):
""" Get bfd_enabled value.
Notes:
Enable or disable Bidirectional Forwarding Detection for this BGP neighbor. Not Applicable for third-party Netconf Gateways.
This attribute is named `BFDEnabled` in VSD API.
"""
return self._bfd_enabled
@bfd_enabled.setter
def bfd_enabled(self, value):
""" Set bfd_enabled value.
Notes:
Enable or disable Bidirectional Forwarding Detection for this BGP neighbor. Not Applicable for third-party Netconf Gateways.
This attribute is named `BFDEnabled` in VSD API.
"""
self._bfd_enabled = value
@property
def ip_type(self):
""" Get ip_type value.
Notes:
It can be either IPv4 or IPv6
This attribute is named `IPType` in VSD API.
"""
return self._ip_type
@ip_type.setter
def ip_type(self, value):
""" Set ip_type value.
Notes:
It can be either IPv4 or IPv6
This attribute is named `IPType` in VSD API.
"""
self._ip_type = value
@property
def ipv6_address(self):
""" Get ipv6_address value.
Notes:
Peer IPv6 address
This attribute is named `IPv6Address` in VSD API.
"""
return self._ipv6_address
@ipv6_address.setter
def ipv6_address(self, value):
""" Set ipv6_address value.
Notes:
Peer IPv6 address
This attribute is named `IPv6Address` in VSD API.
"""
self._ipv6_address = value
@property
def name(self):
""" Get name value.
Notes:
Name of the peer
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the peer
"""
self._name = value
@property
def dampening_enabled(self):
""" Get dampening_enabled value.
Notes:
Enable/disable route flap damping.
This attribute is named `dampeningEnabled` in VSD API.
"""
return self._dampening_enabled
@dampening_enabled.setter
def dampening_enabled(self, value):
""" Set dampening_enabled value.
Notes:
Enable/disable route flap damping.
This attribute is named `dampeningEnabled` in VSD API.
"""
self._dampening_enabled = value
@property
def peer_as(self):
""" Get peer_as value.
Notes:
Autonomous System (AS) value to be used when establishing a session with the remote peer if it is different from the global BGP router autonomous system number.
This attribute is named `peerAS` in VSD API.
"""
return self._peer_as
@peer_as.setter
def peer_as(self, value):
""" Set peer_as value.
Notes:
Autonomous System (AS) value to be used when establishing a session with the remote peer if it is different from the global BGP router autonomous system number.
This attribute is named `peerAS` in VSD API.
"""
self._peer_as = value
@property
def peer_configuration(self):
""" Get peer_configuration value.
Notes:
BGP Peer session configuration and default policies.
This attribute is named `peerConfiguration` in VSD API.
"""
return self._peer_configuration
@peer_configuration.setter
def peer_configuration(self, value):
""" Set peer_configuration value.
Notes:
BGP Peer session configuration and default policies.
This attribute is named `peerConfiguration` in VSD API.
"""
self._peer_configuration = value
@property
def peer_ip(self):
""" Get peer_ip value.
Notes:
IP Address of the neighbor. If the neighbor is attached to a host vPort this is optional or must be the same as the host's IP. For uplink or bridge vPort neighbors the IP address must be specified
This attribute is named `peerIP` in VSD API.
"""
return self._peer_ip
@peer_ip.setter
def peer_ip(self, value):
""" Set peer_ip value.
Notes:
IP Address of the neighbor. If the neighbor is attached to a host vPort this is optional or must be the same as the host's IP. For uplink or bridge vPort neighbors the IP address must be specified
This attribute is named `peerIP` in VSD API.
"""
self._peer_ip = value
@property
def description(self):
""" Get description value.
Notes:
Short description for this peer
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
Short description for this peer
"""
self._description = value
@property
def session(self):
""" Get session value.
Notes:
neighbor session yang blob
"""
return self._session
@session.setter
def session(self, value):
""" Set session value.
Notes:
neighbor session yang blob
"""
self._session = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a limited number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a limited number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def domain_service_label(self):
""" Get domain_service_label value.
Notes:
Service ID or external label given to Domain
This attribute is named `domainServiceLabel` in VSD API.
"""
return self._domain_service_label
@domain_service_label.setter
def domain_service_label(self, value):
""" Set domain_service_label value.
Notes:
Service ID or external label given to Domain
This attribute is named `domainServiceLabel` in VSD API.
"""
self._domain_service_label = value
@property
def associated_export_routing_policy_id(self):
""" Get associated_export_routing_policy_id value.
Notes:
export policy ID
This attribute is named `associatedExportRoutingPolicyID` in VSD API.
"""
return self._associated_export_routing_policy_id
@associated_export_routing_policy_id.setter
def associated_export_routing_policy_id(self, value):
""" Set associated_export_routing_policy_id value.
Notes:
export policy ID
This attribute is named `associatedExportRoutingPolicyID` in VSD API.
"""
self._associated_export_routing_policy_id = value
@property
def associated_import_routing_policy_id(self):
""" Get associated_import_routing_policy_id value.
Notes:
import routing policy ID
This attribute is named `associatedImportRoutingPolicyID` in VSD API.
"""
return self._associated_import_routing_policy_id
@associated_import_routing_policy_id.setter
def associated_import_routing_policy_id(self, value):
""" Set associated_import_routing_policy_id value.
Notes:
import routing policy ID
This attribute is named `associatedImportRoutingPolicyID` in VSD API.
"""
self._associated_import_routing_policy_id = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
| {
"content_hash": "d71dd31b391825705ee453548ee10365",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 296,
"avg_line_length": 30.147169811320754,
"alnum_prop": 0.5724120665915634,
"repo_name": "nuagenetworks/vspk-python",
"id": "c8bcd1e52ab5c10cce69332fabdd37ce8485aa90",
"size": "17591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vspk/v6/nubgpneighbor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12909327"
}
],
"symlink_target": ""
} |
import os
import re
from . import product
from .. import shell
from .. import targets
def get_os_spelling(os):
return {
'macosx': 'macOS',
'iphonesimulator': 'iOS',
'appletvsimulator': 'tvOS',
}[os]
class PlaygroundSupport(product.Product):
@classmethod
def product_source_name(cls):
return "swift-xcode-playground-support"
@classmethod
def is_build_script_impl_product(cls):
return False
def should_build(self, host_target):
return self.args.build_playgroundsupport
def build(self, host_target):
root = os.path.dirname(os.path.dirname(self.toolchain.swiftc))
swift_lib_dir = os.path.join(root, 'lib', 'swift')
(host_os, host_arch) = host_target.split('-')
with shell.pushd(self.source_dir):
shell.call([
"xcodebuild",
"-configuration", self.args.build_variant,
"-workspace", "swift-xcode-playground-support.xcworkspace",
"-scheme", "BuildScript-{}".format(get_os_spelling(host_os)),
"-sdk", host_os,
"-arch", host_arch,
"-derivedDataPath", os.path.join(self.build_dir, "DerivedData"),
"SWIFT_EXEC={}".format(self.toolchain.swiftc),
"SWIFT_LIBRARY_PATH={}/$(PLATFORM_NAME)".format(swift_lib_dir),
"ONLY_ACTIVE_ARCH=NO",
])
def should_test(self, host_target):
return re.match('macosx', host_target) and \
self.args.test_playgroundsupport
def test(self, host_target):
root = os.path.dirname(os.path.dirname(self.toolchain.swiftc))
swift_lib_dir = os.path.join(root, 'lib', 'swift')
(host_os, host_arch) = host_target.split('-')
with shell.pushd(self.source_dir):
shell.call([
"xcodebuild",
"test",
# NOTE: this *always* needs to run in Debug configuration
"-configuration", "Debug",
"-workspace", "swift-xcode-playground-support.xcworkspace",
"-scheme", "BuildScript-Test-PlaygroundLogger-{}".format(
get_os_spelling(host_os)),
"-sdk", host_os,
"-arch", host_arch,
"-derivedDataPath", os.path.join(self.build_dir, "DerivedData"),
"SWIFT_EXEC={}".format(self.toolchain.swiftc),
"SWIFT_LIBRARY_PATH={}/$(PLATFORM_NAME)".format(swift_lib_dir),
"ONLY_ACTIVE_ARCH=NO",
])
def should_install(self, host_target):
return self.args.install_playgroundsupport
def install(self, host_target):
root = os.path.dirname(os.path.dirname(self.toolchain.swiftc))
swift_lib_dir = os.path.join(root, 'lib', 'swift')
(host_os, host_arch) = host_target.split('-')
toolchain_prefix = \
targets.darwin_toolchain_prefix(self.args.install_prefix)
with shell.pushd(self.source_dir):
shell.call([
"xcodebuild",
"install",
"-configuration", self.args.build_variant,
"-workspace", "swift-xcode-playground-support.xcworkspace",
"-scheme", "BuildScript-{}".format(get_os_spelling(host_os)),
"-sdk", host_os,
"-arch", host_arch,
"-derivedDataPath", os.path.join(self.build_dir, "DerivedData"),
"SWIFT_EXEC={}".format(self.toolchain.swiftc),
"SWIFT_LIBRARY_PATH={}/$(PLATFORM_NAME)".format(swift_lib_dir),
"ONLY_ACTIVE_ARCH=NO",
"DSTROOT={}".format(self.args.install_destdir),
"TOOLCHAIN_INSTALL_DIR={}".format(toolchain_prefix),
"BUILD_PLAYGROUND_LOGGER_TESTS=NO",
])
| {
"content_hash": "d7e82074e796de43f807efb5442925ea",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 80,
"avg_line_length": 38.55,
"alnum_prop": 0.5546044098573282,
"repo_name": "aschwaighofer/swift",
"id": "81d53d841664b2b8e928a19e8509f2404e91f91b",
"size": "4362",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils/swift_build_support/swift_build_support/products/playgroundsupport.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13516"
},
{
"name": "C",
"bytes": "251927"
},
{
"name": "C++",
"bytes": "37266151"
},
{
"name": "CMake",
"bytes": "592998"
},
{
"name": "D",
"bytes": "1107"
},
{
"name": "DTrace",
"bytes": "2593"
},
{
"name": "Emacs Lisp",
"bytes": "57302"
},
{
"name": "LLVM",
"bytes": "70652"
},
{
"name": "MATLAB",
"bytes": "2576"
},
{
"name": "Makefile",
"bytes": "1841"
},
{
"name": "Objective-C",
"bytes": "446918"
},
{
"name": "Objective-C++",
"bytes": "251237"
},
{
"name": "Python",
"bytes": "1713191"
},
{
"name": "Roff",
"bytes": "3495"
},
{
"name": "Ruby",
"bytes": "2117"
},
{
"name": "Shell",
"bytes": "177887"
},
{
"name": "Swift",
"bytes": "33804432"
},
{
"name": "Vim Script",
"bytes": "19683"
},
{
"name": "sed",
"bytes": "1050"
}
],
"symlink_target": ""
} |
"""This module contains various utility functions regarding unit conversion and
solc integration."""
import binascii
import json
import sys
import os
from pathlib import Path
from subprocess import PIPE, Popen
from ethereum.abi import encode_abi, encode_int, method_id
from ethereum.utils import zpad
from mythril.exceptions import CompilerError
if sys.version_info[1] >= 6:
import solcx
def safe_decode(hex_encoded_string):
"""
:param hex_encoded_string:
:return:
"""
if hex_encoded_string.startswith("0x"):
return bytes.fromhex(hex_encoded_string[2:])
else:
return bytes.fromhex(hex_encoded_string)
def get_solc_json(file, solc_binary="solc", solc_settings_json=None):
"""
:param file:
:param solc_binary:
:param solc_settings_json:
:return:
"""
cmd = [solc_binary, "--optimize", "--standard-json", "--allow-paths", "."]
settings = json.loads(solc_settings_json) if solc_settings_json else {}
settings.update(
{
"outputSelection": {
"*": {
"": ["ast"],
"*": [
"metadata",
"evm.bytecode",
"evm.deployedBytecode",
"evm.methodIdentifiers",
],
}
}
}
)
input_json = json.dumps(
{
"language": "Solidity",
"sources": {file: {"urls": [file]}},
"settings": settings,
}
)
try:
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate(bytes(input_json, "utf8"))
except FileNotFoundError:
raise CompilerError(
"Compiler not found. Make sure that solc is installed and in PATH, or set the SOLC environment variable."
)
out = stdout.decode("UTF-8")
result = json.loads(out)
for error in result.get("errors", []):
if error["severity"] == "error":
raise CompilerError(
"Solc experienced a fatal error.\n\n%s" % error["formattedMessage"]
)
return result
def encode_calldata(func_name, arg_types, args):
"""
:param func_name:
:param arg_types:
:param args:
:return:
"""
mid = method_id(func_name, arg_types)
function_selector = zpad(encode_int(mid), 4)
args = encode_abi(arg_types, args)
return "0x" + function_selector.hex() + args.hex()
def get_random_address():
"""
:return:
"""
return binascii.b2a_hex(os.urandom(20)).decode("UTF-8")
def get_indexed_address(index):
"""
:param index:
:return:
"""
return "0x" + (hex(index)[2:] * 40)
def solc_exists(version):
"""
:param version:
:return:
"""
solc_binaries = []
if version.startswith("0.4"):
solc_binaries = [
os.path.join(
os.environ.get("HOME", str(Path.home())),
".py-solc/solc-v" + version,
"bin/solc",
) # py-solc setup
]
elif sys.version_info[1] >= 6:
        # we are using solcx for 0.5 and higher versions
solc_binaries = [os.path.join(solcx.__path__[0], "bin", "solc-v" + version)]
for solc_path in solc_binaries:
if os.path.exists(solc_path):
return solc_path
# Last resort is to use the system installation
default_binary = "/usr/bin/solc"
if os.path.exists(default_binary):
return default_binary
| {
"content_hash": "6c86a8f1a28f759cc7c330679975ab33",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 117,
"avg_line_length": 24.555555555555557,
"alnum_prop": 0.5523190045248869,
"repo_name": "b-mueller/mythril",
"id": "0ec4e645262e4a3741bc9129040d04025f265928",
"size": "3536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mythril/ethereum/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "688"
},
{
"name": "HTML",
"bytes": "3582"
},
{
"name": "JavaScript",
"bytes": "531"
},
{
"name": "Python",
"bytes": "329678"
},
{
"name": "Shell",
"bytes": "1766"
}
],
"symlink_target": ""
} |
r"""
speaklater
~~~~~~~~~~
A module that provides lazy strings for translations. Basically you
get an object that appears to be a string but changes the value every
time the value is evaluated based on a callable you provide.
For example you can have a global `lazy_gettext` function that returns
a lazy string with the value of the current set language.
Example:
>>> from speaklater import make_lazy_string
>>> sval = u'Hello World'
>>> string = make_lazy_string(lambda: sval)
This lazy string will evaluate to the value of the `sval` variable.
>>> string
lu'Hello World'
>>> unicode(string)
u'Hello World'
>>> string.upper()
u'HELLO WORLD'
If you change the value, the lazy string will change as well:
>>> sval = u'Hallo Welt'
>>> string.upper()
u'HALLO WELT'
This is especially handy when combined with a thread local and gettext
translations or dicts of translatable strings:
>>> from speaklater import make_lazy_gettext
>>> from threading import local
>>> l = local()
>>> l.translations = {u'Yes': 'Ja'}
>>> lazy_gettext = make_lazy_gettext(lambda: l.translations.get)
>>> yes = lazy_gettext(u'Yes')
>>> print yes
Ja
>>> l.translations[u'Yes'] = u'Si'
>>> print yes
Si
Lazy strings are not real strings, so if you pass this sort of string to
a function that performs an instance check, it will fail. In that case
you have to explicitly convert it with `unicode` and/or `string` depending
on what string type the lazy string encapsulates.
To check if a string is lazy, you can use the `is_lazy_string` function:
>>> from speaklater import is_lazy_string
>>> is_lazy_string(u'yes')
False
>>> is_lazy_string(yes)
True
New in version 1.2: It's now also possible to pass keyword arguments to
the callback used with `make_lazy_string`.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
def is_lazy_string(obj):
"""Checks if the given object is a lazy string."""
return isinstance(obj, _LazyString)
def make_lazy_string(__func, *args, **kwargs):
"""Creates a lazy string by invoking func with args."""
return _LazyString(__func, args, kwargs)
def make_lazy_gettext(lookup_func):
"""Creates a lazy gettext function dispatches to a gettext
function as returned by `lookup_func`.
Example:
>>> translations = {u'Yes': u'Ja'}
>>> lazy_gettext = make_lazy_gettext(lambda: translations.get)
>>> x = lazy_gettext(u'Yes')
>>> x
lu'Ja'
>>> translations[u'Yes'] = u'Si'
>>> x
lu'Si'
"""
def lazy_gettext(string):
if is_lazy_string(string):
return string
return make_lazy_string(lookup_func(), string)
return lazy_gettext
class _LazyString(object):
"""Class for strings created by a function call.
The proxy implementation attempts to be as complete as possible, so that
the lazy objects should mostly work as expected, for example for sorting.
"""
__slots__ = ('_func', '_args', '_kwargs')
def __init__(self, func, args, kwargs):
self._func = func
self._args = args
self._kwargs = kwargs
value = property(lambda x: x._func(*x._args, **x._kwargs))
def __contains__(self, key):
return key in self.value
def __nonzero__(self):
return bool(self.value)
def __dir__(self):
return dir(unicode)
def __iter__(self):
return iter(self.value)
def __len__(self):
return len(self.value)
def __str__(self):
return str(self.value)
def __unicode__(self):
return unicode(self.value)
def __add__(self, other):
return self.value + other
def __radd__(self, other):
return other + self.value
def __mod__(self, other):
return self.value % other
def __rmod__(self, other):
return other % self.value
def __mul__(self, other):
return self.value * other
def __rmul__(self, other):
return other * self.value
def __lt__(self, other):
return self.value < other
def __le__(self, other):
return self.value <= other
def __eq__(self, other):
return self.value == other
def __hash__(self):
return hash(self.value)
def __ne__(self, other):
return self.value != other
def __gt__(self, other):
return self.value > other
def __ge__(self, other):
return self.value >= other
def __getattr__(self, name):
if name == '__members__':
return self.__dir__()
return getattr(self.value, name)
def __getstate__(self):
return self._func, self._args, self._kwargs
def __setstate__(self, tup):
self._func, self._args, self._kwargs = tup
def __getitem__(self, key):
return self.value[key]
def __copy__(self):
return self
def __repr__(self):
try:
return 'l' + repr(self.value)
except Exception:
return '<%s broken>' % self.__class__.__name__
if __name__ == '__main__':
import doctest
doctest.testmod()
| {
"content_hash": "753c7a938a6e4ab93bf08151fdfe6159",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 78,
"avg_line_length": 25.985148514851485,
"alnum_prop": 0.5999237950085731,
"repo_name": "quokkaproject/speaklater",
"id": "249359e3127232ca1f3175e72d3455c9197ff148",
"size": "5273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "speaklater.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6423"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy.sparse as sp
import cvxpy.interface as intf
import cvxpy.settings as s
from cvxpy.error import SolverError
from cvxpy.reductions.solution import Solution, failure_solution
from cvxpy.reductions.solvers.qp_solvers.qp_solver import QpSolver
class OSQP(QpSolver):
"""QP interface for the OSQP solver"""
# Map of OSQP status to CVXPY status.
STATUS_MAP = {1: s.OPTIMAL,
2: s.OPTIMAL_INACCURATE,
-2: s.SOLVER_ERROR, # Maxiter reached
-3: s.INFEASIBLE,
3: s.INFEASIBLE_INACCURATE,
-4: s.UNBOUNDED,
4: s.UNBOUNDED_INACCURATE,
-6: s.USER_LIMIT,
-5: s.SOLVER_ERROR, # Interrupted by user
-10: s.SOLVER_ERROR} # Unsolved
def name(self):
return s.OSQP
def import_solver(self) -> None:
import osqp
osqp
def invert(self, solution, inverse_data):
attr = {s.SOLVE_TIME: solution.info.run_time}
attr[s.EXTRA_STATS] = solution
# Map OSQP statuses back to CVXPY statuses
status = self.STATUS_MAP.get(solution.info.status_val, s.SOLVER_ERROR)
if status in s.SOLUTION_PRESENT:
opt_val = solution.info.obj_val + inverse_data[s.OFFSET]
primal_vars = {
OSQP.VAR_ID:
intf.DEFAULT_INTF.const_to_matrix(np.array(solution.x))
}
dual_vars = {OSQP.DUAL_VAR_ID: solution.y}
attr[s.NUM_ITERS] = solution.info.iter
sol = Solution(status, opt_val, primal_vars, dual_vars, attr)
else:
sol = failure_solution(status, attr)
return sol
def solve_via_data(self, data, warm_start: bool, verbose: bool, solver_opts,
solver_cache=None):
import osqp
P = data[s.P]
q = data[s.Q]
A = sp.vstack([data[s.A], data[s.F]]).tocsc()
data['Ax'] = A
uA = np.concatenate((data[s.B], data[s.G]))
data['u'] = uA
lA = np.concatenate([data[s.B], -np.inf*np.ones(data[s.G].shape)])
data['l'] = lA
# Overwrite defaults eps_abs=eps_rel=1e-3, max_iter=4000
solver_opts['eps_abs'] = solver_opts.get('eps_abs', 1e-5)
solver_opts['eps_rel'] = solver_opts.get('eps_rel', 1e-5)
solver_opts['max_iter'] = solver_opts.get('max_iter', 10000)
# Use cached data
if warm_start and solver_cache is not None and self.name() in solver_cache:
solver, old_data, results = solver_cache[self.name()]
new_args = {}
for key in ['q', 'l', 'u']:
if any(data[key] != old_data[key]):
new_args[key] = data[key]
factorizing = False
if P.data.shape != old_data[s.P].data.shape or any(
P.data != old_data[s.P].data):
P_triu = sp.triu(P).tocsc()
new_args['Px'] = P_triu.data
factorizing = True
if A.data.shape != old_data['Ax'].data.shape or any(
A.data != old_data['Ax'].data):
new_args['Ax'] = A.data
factorizing = True
if new_args:
solver.update(**new_args)
# Map OSQP statuses back to CVXPY statuses
status = self.STATUS_MAP.get(results.info.status_val, s.SOLVER_ERROR)
if status == s.OPTIMAL:
solver.warm_start(results.x, results.y)
# Polish if factorizing.
solver_opts['polish'] = solver_opts.get('polish', factorizing)
solver.update_settings(verbose=verbose, **solver_opts)
else:
# Initialize and solve problem
solver_opts['polish'] = solver_opts.get('polish', True)
solver = osqp.OSQP()
try:
solver.setup(P, q, A, lA, uA, verbose=verbose, **solver_opts)
except ValueError as e:
raise SolverError(e)
results = solver.solve()
if solver_cache is not None:
solver_cache[self.name()] = (solver, data, results)
return results
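# Editorial sketch, not part of cvxpy: illustrates how solve_via_data() above
# folds equality constraints (A x = b) and inequality constraints (F x <= g)
# into OSQP's single two-sided form l <= [A; F] x <= u. The numbers below are
# hypothetical.
def _example_osqp_bounds():
    b = np.array([1.0])                # right-hand sides of the equality rows
    g = np.array([2.0, 3.0])           # right-hand sides of the inequality rows
    upper = np.concatenate((b, g))                            # [1., 2., 3.]
    lower = np.concatenate((b, -np.inf * np.ones(g.shape)))   # [1., -inf, -inf]
    return lower, upper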
| {
"content_hash": "d1f1309ee735c84e6eb7310808ceeb3e",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 83,
"avg_line_length": 38.38181818181818,
"alnum_prop": 0.5378967314069162,
"repo_name": "merraksh/cvxpy",
"id": "cdbcb5b759d3ff294b0f87596a2d3d81e9efe99a",
"size": "4222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cvxpy/reductions/solvers/qp_solvers/osqp_qpif.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "120010"
},
{
"name": "C++",
"bytes": "5687983"
},
{
"name": "CMake",
"bytes": "694"
},
{
"name": "Makefile",
"bytes": "6320"
},
{
"name": "Python",
"bytes": "2149670"
},
{
"name": "SWIG",
"bytes": "2403"
},
{
"name": "Shell",
"bytes": "3117"
}
],
"symlink_target": ""
} |
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This module is used to stress test Impala by running queries concurrently.
#
# Stress test outline (and notes):
# 1) Get a set of queries as requested by the user from the CLI options.
# 2) For each query, run it individually to find:
# a) Minimum mem limit to avoid spilling
# b) Minimum mem limit to successfully run the query (spilling allowed)
# c) Runtime when no mem was spilled
# d) Runtime when mem was spilled
# e) A row order independent hash of the result set.
# This is a slow process so the results will be written to disk for reuse.
# 3) Find the memory available to Impalad. This will be done by finding the minimum
# memory available across all impalads (-mem_limit startup option). Ideally, for
# maximum stress, all impalads will have the same memory configuration but this is
# not required.
# 4) Optionally, set an amount of memory that can be overcommitted. Overcommitting
# memory can increase memory pressure which can result in memory being spilled to
# disk or queries failing with out-of-memory.
# 5) Start submitting queries. There are two modes for throttling the number of
# concurrent queries, depending on --test-admission-control.
# a) test-admission-control=false: Submit queries until all available memory (as
# determined by items 3 and 4) is used. Before running the query a query mem
# limit is set between 2a and 2b. (There is a runtime option to increase the
# likelihood that a query will be given the full 2a limit to avoid spilling.)
# b) test-admission-control=true: Submit enough queries to achieve the desired
# level of overcommit, but expect that Impala's admission control will throttle
# queries. In this mode mem_limit is not set per query.
# 6) Randomly cancel queries to test cancellation. There is a runtime option to control
# the likelihood that a query will be randomly canceled.
# 7) If a query errored, verify that the error is expected. Errors are expected in the
# following cases:
# a) Memory-based admission control is not being tested (i.e.
# --test-admission-control=false), the error is an out-of-memory error and memory
# on the cluster is overcommitted.
# b) The error is an admission control rejection or timeout.
# 8) Verify the result set hash of successful queries if there are no DML queries in the
# current run.
from __future__ import print_function
import json
import logging
import os
import re
import signal
import sys
import threading
import traceback
from Queue import Empty # Must be before Queue below
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace, SUPPRESS
from collections import defaultdict
from contextlib import contextmanager
from copy import copy
from datetime import datetime
from multiprocessing import Lock, Process, Queue, Value
from random import choice, random, randrange, shuffle
from sys import exit, maxint
from tempfile import gettempdir
from textwrap import dedent
from threading import current_thread, Thread
from time import sleep, time
import tests.comparison.cli_options as cli_options
import tests.util.test_file_parser as test_file_parser
from tests.comparison.cluster import Timeout
from tests.comparison.db_types import Int, TinyInt, SmallInt, BigInt
from tests.comparison.model_translator import SqlWriter
from tests.comparison.query_generator import QueryGenerator
from tests.comparison.query_profile import DefaultProfile
from tests.util.parse_util import (
EXPECTED_TPCDS_QUERIES_COUNT, EXPECTED_TPCH_NESTED_QUERIES_COUNT,
EXPECTED_TPCH_STRESS_QUERIES_COUNT, match_memory_estimate, parse_mem_to_mb)
from tests.util.thrift_util import op_handle_to_query_id
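# Editorial sketch, not part of the stress test: a minimal illustration of the
# memory-based throttling described in step 5a of the outline at the top of
# this file. All names and numbers are hypothetical; the real logic lives in
# the classes below.
def _example_can_submit(cluster_mem_mb, overcommit_mb, in_use_mb, query_mem_limit_mb):
  # A query is admitted only while its mem limit still fits within the cluster
  # memory plus the amount we are willing to overcommit.
  return in_use_mb + query_mem_limit_mb <= cluster_mem_mb + overcommit_mb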
LOG = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
PROFILES_DIR = "profiles"
RESULT_HASHES_DIR = "result_hashes"
# The version of the file format containing the collected query runtime info.
RUNTIME_INFO_FILE_VERSION = 3
# Metrics collected during the stress test run.
NUM_QUERIES_DEQUEUED = "num_queries_dequeued"
# The number of queries that were submitted to a query runner.
NUM_QUERIES_SUBMITTED = "num_queries_submitted"
# The number of queries that have entered the RUNNING state (i.e. got through Impala's
# admission control and started executing) or were cancelled or hit an error.
NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED = "num_queries_started_running_or_cancelled"
NUM_QUERIES_FINISHED = "num_queries_finished"
NUM_QUERIES_EXCEEDED_MEM_LIMIT = "num_queries_exceeded_mem_limit"
NUM_QUERIES_AC_REJECTED = "num_queries_ac_rejected"
NUM_QUERIES_AC_TIMEDOUT = "num_queries_ac_timedout"
NUM_QUERIES_CANCELLED = "num_queries_cancelled"
NUM_RESULT_MISMATCHES = "num_result_mismatches"
NUM_OTHER_ERRORS = "num_other_errors"
class StressArgConverter(object):
def __init__(self, args):
"""
    Convert arguments as returned from argparse parse_args() into internal forms.
The purpose of this object is to do any conversions needed from the type given by
    parse_args() into internal forms. For example, if a commandline option takes in a
complicated string that needs to be converted into a list or dictionary, this is the
place to do it. Access works the same as on the object returned by parse_args(),
i.e., object.option_attribute.
In most cases, simple arguments needn't be converted, because argparse handles the
type conversion already, and in most cases, type conversion (e.g., "8" <str> to 8
<int>) is all that's needed. If a property getter below doesn't exist, it means the
argument value is just passed along unconverted.
Params:
args: argparse.Namespace object (from argparse.ArgumentParser().parse_args())
"""
assert isinstance(args, Namespace), "expected Namespace, got " + str(type(args))
self._args = args
self._common_query_options = None
def __getattr__(self, attr):
# This "proxies through" all the attributes from the Namespace object that are not
# defined in this object via property getters below.
return getattr(self._args, attr)
@property
def common_query_options(self):
# Memoize this, as the integrity checking of --common-query-options need only
# happen once.
if self._common_query_options is not None:
return self._common_query_options
# The stress test sets these, so callers cannot override them.
IGNORE_QUERY_OPTIONS = frozenset([
'ABORT_ON_ERROR',
'MEM_LIMIT',
])
common_query_options = {}
if self._args.common_query_options is not None:
for query_option_and_value in self._args.common_query_options:
try:
query_option, value = query_option_and_value.split('=')
except ValueError:
LOG.error(
"Could not parse --common-query-options: '{common_query_options}'".format(
common_query_options=self._args.common_query_options))
exit(1)
query_option = query_option.upper()
if query_option in common_query_options:
LOG.error(
"Query option '{query_option}' already defined in --common-query-options: "
"'{common_query_options}'".format(
query_option=query_option,
common_query_options=self._args.common_query_options))
exit(1)
elif query_option in IGNORE_QUERY_OPTIONS:
LOG.warn(
"Ignoring '{query_option}' in common query options: '{opt}': "
"The stress test algorithm needs control of this option.".format(
query_option=query_option, opt=self._args.common_query_options))
else:
common_query_options[query_option] = value
LOG.debug("Common query option '{query_option}' set to '{value}'".format(
query_option=query_option, value=value))
self._common_query_options = common_query_options
return self._common_query_options
@property
def runtime_info_path(self):
runtime_info_path = self._args.runtime_info_path
if "{cm_host}" in runtime_info_path:
runtime_info_path = runtime_info_path.format(cm_host=self._args.cm_host)
return runtime_info_path
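def _example_stress_arg_converter_usage():
  """Editorial sketch, not part of the stress test: demonstrates the access
  pattern described in the StressArgConverter docstring above. The options and
  values used here are hypothetical."""
  parser = ArgumentParser()
  parser.add_argument("--common-query-options", nargs="*")
  parser.add_argument("--runtime-info-path", default="{cm_host}_runtime_info.json")
  parser.add_argument("--cm-host", default="localhost")
  raw_args = parser.parse_args(["--common-query-options", "RUNTIME_FILTER_MODE=OFF"])
  args = StressArgConverter(raw_args)
  # Unknown attributes proxy through to the underlying Namespace; the two
  # properties accessed below return converted values.
  return args.common_query_options, args.runtime_info_path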
def create_and_start_daemon_thread(fn, name):
thread = Thread(target=fn, name=name)
thread.error = None
thread.daemon = True
thread.start()
return thread
def increment(counter):
with counter.get_lock():
counter.value += 1
def print_stacks(*_):
"""Print the stacks of all threads from this script to stderr."""
thread_names = dict([(t.ident, t.name) for t in threading.enumerate()])
stacks = list()
for thread_id, stack in sys._current_frames().items():
stacks.append(
"\n# Thread: %s(%d)"
% (thread_names.get(thread_id, "No name"), thread_id))
for filename, lineno, name, line in traceback.extract_stack(stack):
stacks.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
stacks.append(" %s" % (line.strip(), ))
print("\n".join(stacks), file=sys.stderr)
# To help debug hangs, the stacks of all threads can be printed by sending signal USR1
# to each process.
signal.signal(signal.SIGUSR1, print_stacks)
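# Hedged usage note: to dump the stacks of a hung run, send USR1 to the relevant
# process(es) from a shell, for example
#   kill -USR1 <pid>        # or: pkill -USR1 -f <name of this script>
# print_stacks() above then writes every thread's stack to stderr.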
def print_crash_info_if_exists(impala, start_time):
"""If any impalads are found not running, they will assumed to have crashed and an
error message will be printed to stderr for each stopped impalad. Returns a value
that evaluates to True if any impalads are stopped.
"""
max_attempts = 5
for remaining_attempts in xrange(max_attempts - 1, -1, -1):
try:
crashed_impalads = impala.find_crashed_impalads(start_time)
break
except Timeout as e:
LOG.info(
"Timeout checking if impalads crashed: %s."
% e + (" Will retry." if remaining_attempts else ""))
else:
LOG.error(
"Aborting after %s failed attempts to check if impalads crashed", max_attempts)
raise e
for message in crashed_impalads.itervalues():
print(message, file=sys.stderr)
return crashed_impalads
class QueryReport(object):
"""Holds information about a single query run."""
def __init__(self, query):
self.query = query
self.result_hash = None
self.runtime_secs = None
self.mem_was_spilled = False
# not_enough_memory includes conditions like "Memory limit exceeded", admission
# control rejecting because not enough memory, etc.
self.not_enough_memory = False
# ac_rejected is true if the query was rejected by admission control.
# It is mutually exclusive with not_enough_memory - if the query is rejected by
# admission control because the memory limit is too low, it is counted as
# not_enough_memory.
# TODO: reconsider whether they should be mutually exclusive
self.ac_rejected = False
self.ac_timedout = False
self.other_error = None
self.timed_out = False
self.was_cancelled = False
self.profile = None
self.query_id = None
def __str__(self):
return dedent("""
<QueryReport
result_hash: %(result_hash)s
runtime_secs: %(runtime_secs)s
mem_was_spilled: %(mem_was_spilled)s
not_enough_memory: %(not_enough_memory)s
ac_rejected: %(ac_rejected)s
ac_timedout: %(ac_timedout)s
other_error: %(other_error)s
timed_out: %(timed_out)s
was_cancelled: %(was_cancelled)s
query_id: %(query_id)s
>
""".strip() % self.__dict__)
def has_query_error(self):
"""Return true if any kind of error status was returned from the query (i.e.
the query didn't run to completion, time out or get cancelled)."""
return (self.not_enough_memory or self.ac_rejected or self.ac_timedout
or self.other_error)
def write_query_profile(self, directory, prefix=None):
"""
Write out the query profile bound to this object to a given directory.
The file name is generated and will contain the query ID. Use the optional prefix
parameter to set a prefix on the filename.
Example return:
tpcds_300_decimal_parquet_q21_00000001_a38c8331_profile.txt
Parameters:
directory (str): Directory to write profile.
prefix (str): Prefix for filename.
"""
if not (self.profile and self.query_id):
return
if prefix is not None:
file_name = prefix + '_'
else:
file_name = ''
file_name += self.query.logical_query_id + '_'
file_name += self.query_id.replace(":", "_") + "_profile.txt"
profile_log_path = os.path.join(directory, file_name)
with open(profile_log_path, "w") as profile_log:
profile_log.write(self.profile)
class MemBroker(object):
"""Provides memory usage coordination for clients running in different processes.
The broker fulfills reservation requests by blocking as needed so total memory
used by clients never exceeds the total available memory (including an
'overcommitable' amount).
The lock built in to _available is also used to protect access to other members.
The state stored in this class is actually an encapsulation of part of the state
of the StressRunner class below. The state here is separated for clarity.
"""
def __init__(self, real_mem_mb, overcommitable_mem_mb):
"""'real_mem_mb' memory should be the amount of memory that each impalad is able
to use. 'overcommitable_mem_mb' is the amount of memory that will be dispensed
over the 'real' amount.
"""
self._total_mem_mb = real_mem_mb + overcommitable_mem_mb
self._available = Value("i", self._total_mem_mb)
self._max_overcommitment = overcommitable_mem_mb
# Each reservation will be assigned an id. Ids are monotonically increasing. When
# a reservation crosses the overcommitment threshold, the corresponding reservation
# id will be stored in '_last_overcommitted_reservation_id' so clients can check
# to see if memory was overcommitted since their reservation was made (this is a race
# but an incorrect result will be on the conservative side).
self._next_reservation_id = Value("L", 0)
self._last_overcommitted_reservation_id = Value("L", 0)
@property
def total_mem_mb(self):
return self._total_mem_mb
@property
def overcommitted_mem_mb(self):
return max(self._max_overcommitment - self._available.value, 0)
@property
def available_mem_mb(self):
return self._available.value
@property
def last_overcommitted_reservation_id(self):
return self._last_overcommitted_reservation_id.value
@contextmanager
def reserve_mem_mb(self, mem_mb):
"""Blocks until the requested amount of memory is available and taken for the caller.
This function should be used in a 'with' block. The taken memory will
automatically be released when the 'with' context exits. A numeric id is returned
so clients can compare against 'last_overcommitted_reservation_id' to see if
memory was overcommitted since the reservation was obtained.
with broker.reserve_mem_mb(100) as reservation_id:
# Run query using 100 MB of memory
if <query failed>:
# Immediately check broker.was_overcommitted(reservation_id) to see if
# memory was overcommitted.
"""
reservation_id = self._wait_until_reserved(mem_mb)
try:
yield reservation_id
finally:
self._release(mem_mb)
def _wait_until_reserved(self, req):
while True:
with self._available.get_lock():
if req <= self._available.value:
self._available.value -= req
LOG.debug(
"Reserved %s MB; %s MB available; %s MB overcommitted",
req, self._available.value, self.overcommitted_mem_mb)
reservation_id = self._next_reservation_id.value
increment(self._next_reservation_id)
if self.overcommitted_mem_mb > 0:
self._last_overcommitted_reservation_id.value = reservation_id
return reservation_id
sleep(0.1)
def _release(self, req):
with self._available.get_lock():
self._available.value += req
LOG.debug(
"Released %s MB; %s MB available; %s MB overcommitted",
req, self._available.value, self.overcommitted_mem_mb)
def was_overcommitted(self, reservation_id):
"""Returns True if memory was overcommitted since the given reservation was made.
For an accurate return value, this should be called just after the query ends
or while the query is still running.
"""
return reservation_id <= self._last_overcommitted_reservation_id.value
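# Minimal usage sketch of MemBroker (the amounts and helper names are hypothetical):
# with a 4096 MB impalad and roughly 10% overcommit,
#   broker = MemBroker(real_mem_mb=4096, overcommitable_mem_mb=409)
#   with broker.reserve_mem_mb(1024) as reservation_id:
#     run_query_somehow()                      # hypothetical helper
#     if query_failed and broker.was_overcommitted(reservation_id):
#       pass  # the failure may be explained by overcommitted memory rather than a bug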
class StressRunner(object):
"""This class contains functionality related to producing/consuming queries for the
purpose of stress testing Impala.
Queries will be executed in separate processes since python threading is limited
to the use of a single CPU.
"""
# This is the point at which the work queue will block because it is full.
WORK_QUEUE_CAPACITY = 10
def __init__(self):
self.use_kerberos = False
self.common_query_options = {}
self.test_admission_control = False
self._mem_broker = None
self._verify_results = True
self._select_probability = None
# Synchronized blocking work queue for producer/consumers.
self._query_queue = Queue(self.WORK_QUEUE_CAPACITY)
# The Value class provides cross-process shared memory.
self._mem_mb_needed_for_next_query = Value("i", 0)
# This lock provides a way to stop new queries from running. This lock must be
# acquired before writing to the NUM_QUERIES_SUBMITTED metric for the query_runner,
# which is incremented before every query submission. Reading NUM_QUERIES_SUBMITTED is
# allowed without taking this lock.
self._submit_query_lock = Lock()
self.leak_check_interval_mins = None
self._next_leak_check_unix_time = Value("i", 0)
self._max_mem_mb_reported_usage = Value("i", -1) # -1 => Unknown
self._max_mem_mb_usage = Value("i", -1) # -1 => Unknown
self.cancel_probability = 0
self.spill_probability = 0
self.startup_queries_per_sec = 1.0
self.num_successive_errors_needed_to_abort = 1
self._num_successive_errors = Value("i", 0)
self.results_dir = gettempdir()
self._status_headers = [
"Done", "Active", "Executing", "Mem Lmt Ex", "AC Reject", "AC Timeout",
"Cancel", "Err", "Incorrect", "Next Qry Mem Lmt",
"Tot Qry Mem Lmt", "Tracked Mem", "RSS Mem"]
self._num_queries_to_run = None
self._query_producer_thread = None
# This lock is used to synchronize access to the '_query_runners' list and also to all
# the '_past_runners*' members.
self._query_runners_lock = Lock()
self._query_runners = []
# These are the cumulative values of all the queries that have started, finished,
# dequeued, etc. on runners that have already died. Every time we notice that a query
# runner has died, we update these values.
self._past_runner_metrics = defaultdict(lambda: Value("i", 0))
self._query_consumer_thread = None
self._mem_polling_thread = None
def _record_runner_metrics_before_evict(self, query_runner):
""" Before removing 'query_runner' from the self._query_runners list, record its
metrics. Must only be called if 'query_runner' is to be removed from the list.
MUST hold '_query_runners_lock' before calling.
"""
for key, synchronized_val in query_runner._metrics.iteritems():
self._past_runner_metrics[key].value += synchronized_val.value
def _calc_total_runner_metrics(self):
""" Calculate the total of metrics across past and active query runners. """
totals = defaultdict(lambda: 0)
with self._query_runners_lock:
for key in self._past_runner_metrics:
totals[key] = self._past_runner_metrics[key].value
for query_runner in self._query_runners:
for key, synchronized_val in query_runner._metrics.iteritems():
totals[key] += synchronized_val.value
return totals
def _calc_total_runner_metric(self, key):
""" Calculate the total of metric 'key' across past and active query runners. """
with self._query_runners_lock:
return self._calc_total_runner_metric_no_lock(key)
def _calc_total_runner_metric_no_lock(self, key):
""" TODO: Get rid of this function after reformatting how we obtain query indices.
_query_runners_lock MUST be taken before calling this function.
"""
total = self._past_runner_metrics[key].value
for runner in self._query_runners:
total += runner._metrics[key].value
return total
def _total_num_queries_submitted(self):
return self._calc_total_runner_metric(NUM_QUERIES_SUBMITTED)
def _total_num_queries_active(self):
"""The number of queries that are currently active (i.e. submitted to a query runner
and haven't yet completed)."""
metrics = self._calc_total_runner_metrics()
num_running = metrics[NUM_QUERIES_SUBMITTED] - metrics[NUM_QUERIES_FINISHED]
assert num_running >= 0, "The number of running queries is negative"
return num_running
def _num_runners_remaining(self):
return len(self._query_runners)
def run_queries(
self, queries, impala, num_queries_to_run, mem_overcommit_pct, should_print_status,
verify_results, select_probability
):
"""Runs queries randomly chosen from 'queries' and stops after 'num_queries_to_run'
queries have completed. 'select_probability' should be float between 0 and 1, it
determines the likelihood of choosing a select query (as opposed to a DML query,
for example).
Before a query is run, a mem limit will be chosen. 'spill_probability' determines
the likelihood of choosing a mem limit that will cause spilling. To induce
spilling, a value is randomly chosen below the min memory needed to avoid spilling
but above the min memory needed with spilling. So the min/max query memory
requirements must be determined before calling this method.
If 'mem_overcommit_pct' is zero, an exception will be raised if any queries
fail for any reason other than cancellation (controlled by the 'cancel_probability'
property), since each query should have enough memory to run successfully. If
non-zero, failures due to insufficient memory will be ignored if memory was
overcommitted at any time during execution.
If a query completes without error, the result will be verified if 'verify_results'
is True. An error will be raised upon a result mismatch. 'verify_results' should be
False when the expected results are not known in advance, for example when running
DML queries.
"""
# TODO: The state from a previous run should be cleared out. This isn't really a
# problem now because the one caller (main()) never calls a second time.
if self.startup_queries_per_sec <= 0:
raise Exception("Startup queries per second must be positive")
if self.leak_check_interval_mins is not None and self.leak_check_interval_mins <= 0:
raise Exception("Memory leak check interval must be positive")
# If there is a crash, start looking for errors starting from this time.
self.start_time = datetime.now()
self._mem_broker = MemBroker(
impala.min_impalad_mem_mb,
int(impala.min_impalad_mem_mb * mem_overcommit_pct / 100))
self._verify_results = verify_results
self._select_probability = select_probability
# Print the status to show the state before starting.
if should_print_status:
self._print_status(print_header=True)
self._num_queries_to_run = num_queries_to_run
self._start_polling_mem_usage(impala)
self._start_producing_queries(queries)
self._start_consuming_queries(impala)
# Wait for everything to finish.
self._wait_for_test_to_finish(impala, should_print_status)
# And print the final state.
if should_print_status:
self._print_status()
self._check_for_test_failure()
self.print_duration()
def _start_producing_queries(self, queries):
def enqueue_queries():
# Generate a dict(query type -> list of queries).
queries_by_type = {}
for query in queries:
if query.query_type not in queries_by_type:
queries_by_type[query.query_type] = []
queries_by_type[query.query_type].append(query)
try:
for _ in xrange(self._num_queries_to_run):
# First randomly determine a query type, then choose a random query of that
# type.
if (
QueryType.SELECT in queries_by_type and
(len(queries_by_type.keys()) == 1 or random() < self._select_probability)
):
result = choice(queries_by_type[QueryType.SELECT])
else:
query_type = choice([
key for key in queries_by_type if key != QueryType.SELECT])
result = choice(queries_by_type[query_type])
self._query_queue.put(result)
except Exception as e:
LOG.error("Error producing queries: %s", e)
current_thread().error = e
raise e
LOG.info("Producing thread completed job. Exiting...")
self._query_producer_thread = create_and_start_daemon_thread(
enqueue_queries, "Query Producer")
def _start_consuming_queries(self, impala):
def start_additional_runners_if_needed():
try:
while self._total_num_queries_submitted() < self._num_queries_to_run:
# TODO: sleeping for the below amount leads to slower submission than the goal,
# because it does not factor in the time spent by this thread outside of the
# sleep() call.
sleep(1.0 / self.startup_queries_per_sec)
# Remember num dequeued/started are cumulative.
with self._submit_query_lock:
metrics = self._calc_total_runner_metrics()
num_dequeued = metrics[NUM_QUERIES_DEQUEUED]
num_submitted = metrics[NUM_QUERIES_SUBMITTED]
LOG.debug("Submitted {0} queries. Dequeued {1} queries".format(
num_submitted, num_dequeued))
if num_dequeued != num_submitted:
# Assume dequeued queries are stuck waiting for cluster resources so there
# is no point in starting an additional runner.
continue
num_coordinators = len(impala.impalads)
if self.max_coordinators > 0:
num_coordinators = min(num_coordinators, self.max_coordinators)
impalad = impala.impalads[len(self._query_runners) % num_coordinators]
query_runner = QueryRunner()
query_runner.impalad = impalad
query_runner.results_dir = self.results_dir
query_runner.use_kerberos = self.use_kerberos
query_runner.common_query_options = self.common_query_options
query_runner.test_admission_control = self.test_admission_control
query_runner.proc = \
Process(target=self._start_single_runner, args=(query_runner, ))
query_runner.proc.daemon = True
with self._query_runners_lock:
self._query_runners.append(query_runner)
query_runner.proc.start()
LOG.info("Consuming thread completed job. Exiting...")
except Exception as e:
LOG.error("Error consuming queries: %s", e)
current_thread().error = e
raise e
self._query_consumer_thread = create_and_start_daemon_thread(
start_additional_runners_if_needed, "Query Consumer")
def _start_polling_mem_usage(self, impala):
def poll_mem_usage():
if self.leak_check_interval_mins:
self._next_leak_check_unix_time.value = int(
time() + 60 * self.leak_check_interval_mins)
query_submission_is_locked = False
# Query submission will be unlocked after a memory report has been collected twice
# while no queries were running.
ready_to_unlock = None
try:
while self._total_num_queries_submitted() < self._num_queries_to_run:
if ready_to_unlock:
assert query_submission_is_locked, "Query submission not yet locked"
assert not self._total_num_queries_active(), "Queries are still running"
LOG.debug("Resuming query submission")
self._next_leak_check_unix_time.value = int(
time() + 60 * self.leak_check_interval_mins)
self._submit_query_lock.release()
query_submission_is_locked = False
ready_to_unlock = None
if (
not query_submission_is_locked and
self.leak_check_interval_mins and
time() > self._next_leak_check_unix_time.value
):
assert self._total_num_queries_active() <= self._num_runners_remaining(), \
"Each running query should belong to a runner"
LOG.debug("Stopping query submission")
self._submit_query_lock.acquire()
query_submission_is_locked = True
max_reported, max_actual = self._get_mem_usage_values()
if max_reported != -1 and max_actual != -1:
# Values were already retrieved but haven't been used yet. Assume newer
# values aren't wanted and check again later.
sleep(1)
continue
try:
max_reported = max(impala.find_impalad_mem_mb_reported_usage())
except Timeout:
LOG.debug("Timeout collecting reported mem usage")
max_reported = -1
try:
max_actual = max(impala.find_impalad_mem_mb_actual_usage())
except Timeout:
LOG.debug("Timeout collecting reported actual usage")
max_actual = -1
self._set_mem_usage_values(max_reported, max_actual)
if query_submission_is_locked and not self._total_num_queries_active():
if ready_to_unlock is None:
ready_to_unlock = False
else:
ready_to_unlock = True
except Exception:
LOG.debug("Error collecting impalad mem usage", exc_info=True)
if query_submission_is_locked:
LOG.debug("Resuming query submission")
self._submit_query_lock.release()
self._mem_polling_thread = create_and_start_daemon_thread(
poll_mem_usage, "Mem Usage Poller")
def _get_mem_usage_values(self, reset=False):
reported = None
actual = None
with self._max_mem_mb_reported_usage.get_lock():
with self._max_mem_mb_usage.get_lock():
reported = self._max_mem_mb_reported_usage.value
actual = self._max_mem_mb_usage.value
if reset:
self._max_mem_mb_reported_usage.value = -1
self._max_mem_mb_usage.value = -1
return reported, actual
def _set_mem_usage_values(self, reported, actual):
with self._max_mem_mb_reported_usage.get_lock():
with self._max_mem_mb_usage.get_lock():
self._max_mem_mb_reported_usage.value = reported
self._max_mem_mb_usage.value = actual
def _start_single_runner(self, query_runner):
"""Consumer function to take a query of the queue and run it. This is intended to
run in a separate process so validating the result set can use a full CPU.
"""
LOG.debug("New query runner started")
# The query runner should already be set up. We just need to connect() before using
# the runner.
query_runner.connect()
while not self._query_queue.empty():
try:
query = self._query_queue.get(True, 1)
except Empty:
continue
except EOFError:
LOG.debug("Query running aborting due to closed query queue")
break
LOG.debug("Getting query_idx")
with self._query_runners_lock:
query_idx = self._calc_total_runner_metric_no_lock(NUM_QUERIES_DEQUEUED)
increment(query_runner._metrics[NUM_QUERIES_DEQUEUED])
LOG.debug("Query_idx: {0} | PID: {1}".format(query_idx, query_runner.proc.pid))
if not query.required_mem_mb_without_spilling:
mem_limit = query.required_mem_mb_with_spilling
solo_runtime = query.solo_runtime_secs_with_spilling
elif self.spill_probability < random():
mem_limit = query.required_mem_mb_without_spilling
solo_runtime = query.solo_runtime_secs_without_spilling
else:
mem_limit = randrange(
query.required_mem_mb_with_spilling,
query.required_mem_mb_without_spilling + 1)
solo_runtime = query.solo_runtime_secs_with_spilling
LOG.debug("Waiting for other query runners to start their queries")
while query_idx > self._total_num_queries_submitted():
sleep(0.1)
self._mem_mb_needed_for_next_query.value = mem_limit
LOG.debug("Requesting memory reservation")
with self._mem_broker.reserve_mem_mb(mem_limit) as reservation_id:
LOG.debug("Received memory reservation")
with self._submit_query_lock:
increment(query_runner._metrics[NUM_QUERIES_SUBMITTED])
should_cancel = self.cancel_probability > random()
if should_cancel:
timeout = randrange(1, max(int(solo_runtime), 2))
else:
# Let the query run as long as necessary - it is nearly impossible to pick a
# good value that won't have false positives under load - see IMPALA-8222.
timeout = maxint
report = query_runner.run_query(query, mem_limit, timeout_secs=timeout,
should_cancel=should_cancel)
LOG.debug("Got execution report for query")
if report.timed_out and should_cancel:
report.was_cancelled = True
query_runner.update_from_query_report(report)
if report.other_error:
error_msg = str(report.other_error)
# There is a possible race during cancellation. If a fetch request fails (for
# example due to hitting a mem limit), just before the cancellation request, the
# server may have already unregistered the query as part of the fetch failure.
# In that case the server gives an error response saying the handle is invalid.
if "Invalid query handle" in error_msg and report.timed_out:
self._num_successive_errors.value = 0
continue
# Occasionally the network connection will fail, and depending on when the
# failure occurred during run_query(), an attempt to get the profile may be
# made which results in "Invalid session id" since the server destroyed the
# session upon disconnect.
if "Invalid session id" in error_msg:
self._num_successive_errors.value = 0
continue
# The server may fail to respond to clients if the load is high. An error
# message with "connect()...Connection timed out" comes from the impalad so
# that will not be ignored.
if (
("Connection timed out" in error_msg and "connect()" not in error_msg) or
"ECONNRESET" in error_msg or
"couldn't get a client" in error_msg or
"timeout: timed out" in error_msg
):
self._num_successive_errors.value = 0
continue
increment(self._num_successive_errors)
increment(query_runner._metrics[NUM_OTHER_ERRORS])
self._write_query_profile(report, PROFILES_DIR, prefix='error')
raise Exception("Query {query} ID {id} failed: {mesg}".format(
query=query.logical_query_id,
id=report.query_id,
mesg=error_msg))
if (
report.not_enough_memory and (self.test_admission_control or
not self._mem_broker.was_overcommitted(reservation_id))
):
increment(self._num_successive_errors)
self._write_query_profile(
report, PROFILES_DIR, prefix='unexpected_mem_exceeded')
raise Exception("Unexpected mem limit exceeded; mem was not overcommitted. "
"Query ID: {0}".format(report.query_id))
if (
not report.timed_out and not report.has_query_error() and
(self._verify_results and report.result_hash != query.result_hash)
):
increment(self._num_successive_errors)
increment(query_runner._metrics[NUM_RESULT_MISMATCHES])
self._write_query_profile(report, PROFILES_DIR, prefix='incorrect_results')
raise Exception(dedent("""\
Result hash mismatch; expected {expected}, got {actual}
Query ID: {id}
Query: {query}""".format(expected=query.result_hash,
actual=report.result_hash,
id=report.query_id,
query=query.logical_query_id)))
if report.timed_out and not should_cancel:
self._write_query_profile(report, PROFILES_DIR, prefix='timed_out')
raise Exception(
"Query {query} unexpectedly timed out. Query ID: {id}".format(
query=query.logical_query_id, id=report.query_id))
self._num_successive_errors.value = 0
LOG.debug("Query runner completed...")
def _print_status_header(self):
print(" | ".join(self._status_headers))
def _print_status(self, print_header=False):
if print_header:
self._print_status_header()
metrics = self._calc_total_runner_metrics()
reported_mem, actual_mem = self._get_mem_usage_values(reset=True)
status_format = " | ".join(["%%%ss" % len(header) for header in self._status_headers])
print(status_format % (
# Done
metrics[NUM_QUERIES_FINISHED],
# Active
metrics[NUM_QUERIES_SUBMITTED] - metrics[NUM_QUERIES_FINISHED],
# Executing
metrics[NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED] -
metrics[NUM_QUERIES_FINISHED],
# Mem Lmt Ex
metrics[NUM_QUERIES_EXCEEDED_MEM_LIMIT],
# AC Rejected
metrics[NUM_QUERIES_AC_REJECTED],
# AC Timed Out
metrics[NUM_QUERIES_AC_TIMEDOUT],
# Cancel
metrics[NUM_QUERIES_CANCELLED],
# Err
metrics[NUM_OTHER_ERRORS],
# Incorrect
metrics[NUM_RESULT_MISMATCHES],
# Next Qry Mem Lmt
self._mem_mb_needed_for_next_query.value,
# Total Qry Mem Lmt
self._mem_broker.total_mem_mb - self._mem_broker.available_mem_mb,
# Tracked Mem
"" if reported_mem == -1 else reported_mem,
# RSS Mem
"" if actual_mem == -1 else actual_mem))
def _write_query_profile(self, report, subdir, prefix=None):
report.write_query_profile(
os.path.join(self.results_dir, subdir),
prefix)
def _check_successive_errors(self):
if (self._num_successive_errors.value >= self.num_successive_errors_needed_to_abort):
print(
"Aborting due to %s successive errors encountered"
% self._num_successive_errors.value, file=sys.stderr)
self.print_duration()
sys.exit(1)
def _check_for_test_failure(self):
metrics = self._calc_total_runner_metrics()
if metrics[NUM_OTHER_ERRORS] > 0 or metrics[NUM_RESULT_MISMATCHES] > 0:
LOG.error("Failing the stress test due to unexpected errors, incorrect results, or "
"timed out queries. See the report line above for details.")
self.print_duration()
sys.exit(1)
def _wait_for_test_to_finish(self, impala, should_print_status):
last_report_secs = 0
lines_printed = 1
sleep_secs = 0.1
num_runners_remaining = self._num_runners_remaining()
while (
self._query_producer_thread.is_alive() or
self._query_consumer_thread.is_alive() or
num_runners_remaining
):
if self._query_producer_thread.error or self._query_consumer_thread.error:
# This is bad enough to abort early. A failure here probably means there's a
# bug in this script. The mem poller could be checked for an error too. It is
# not critical, so it is ignored.
LOG.error("Aborting due to error in producer/consumer")
sys.exit(1)
do_check_for_impala_crashes = False
with self._query_runners_lock:
for idx, runner in enumerate(self._query_runners):
if runner.proc.exitcode is not None:
if runner.proc.exitcode != 0:
# Since at least one query runner process failed, make sure to check for
# crashed impalads.
do_check_for_impala_crashes = True
# TODO: Handle case for num_queries_dequeued != num_queries_submitted
num_submitted = runner._metrics[NUM_QUERIES_SUBMITTED].value
num_started_or_cancelled = \
runner._metrics[NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED]
num_finished = runner._metrics[NUM_QUERIES_FINISHED].value
if num_submitted != num_finished:
# The query runner process may have crashed before updating the number
# of finished queries but after it incremented the number of queries
# submitted.
assert num_submitted - num_finished == 1
increment(runner._metrics[NUM_QUERIES_FINISHED])
if num_submitted != num_started_or_cancelled:
assert num_submitted - num_started_or_cancelled == 1
increment(runner._metrics[NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED])
# Since we know that the runner crashed while trying to run a query, we
# count it as an 'other error'
increment(runner._metrics[NUM_OTHER_ERRORS])
self._check_successive_errors()
assert runner._metrics[NUM_QUERIES_SUBMITTED].value == \
runner._metrics[NUM_QUERIES_FINISHED].value, \
str([(k, v.value) for k, v in runner._metrics.iteritems()])
# Make sure to record all the metrics before removing this runner from the
# list.
print("Query runner ({0}) exited with exit code {1}".format(
runner.proc.pid, runner.proc.exitcode))
self._record_runner_metrics_before_evict(self._query_runners[idx])
# Remove the query runner from the list.
del self._query_runners[idx]
if do_check_for_impala_crashes:
# Since we know that at least one query runner failed, check if any of the Impala
# daemons themselves crashed.
LOG.info("Checking for Impala crashes")
if print_crash_info_if_exists(impala, self.start_time):
self.print_duration()
sys.exit(runner.proc.exitcode)
do_check_for_impala_crashes = False
LOG.info("No Impala crashes detected")
sleep(sleep_secs)
num_runners_remaining = self._num_runners_remaining()
if should_print_status:
last_report_secs += sleep_secs
if last_report_secs > 5:
if (
not self._query_producer_thread.is_alive() or
not self._query_consumer_thread.is_alive() or
not num_runners_remaining
):
LOG.debug("Producer is alive: %s" % self._query_producer_thread.is_alive())
LOG.debug("Consumer is alive: %s" % self._query_consumer_thread.is_alive())
LOG.debug("Queue size: %s" % self._query_queue.qsize())
LOG.debug("Runners: %s" % num_runners_remaining)
last_report_secs = 0
lines_printed %= 50
self._print_status(print_header=(lines_printed == 0))
lines_printed += 1
def print_duration(self):
duration = datetime.now() - self.start_time
LOG.info("Test Duration: {0:.0f} seconds".format(duration.total_seconds()))
class QueryTimeout(Exception):
pass
class QueryType(object):
COMPUTE_STATS, DELETE, INSERT, SELECT, UPDATE, UPSERT = range(6)
class Query(object):
"""Contains a SQL statement along with expected runtime information."""
def __init__(self):
self.name = None
self.sql = None
# In order to be able to make good estimates for DML queries in the binary search,
# we need to bring the table to a good initial state before executing the sql. Running
# set_up_sql accomplishes this task.
self.set_up_sql = None
self.db_name = None
self.result_hash = None
self.required_mem_mb_with_spilling = None
self.required_mem_mb_without_spilling = None
self.solo_runtime_profile_with_spilling = None
self.solo_runtime_profile_without_spilling = None
self.solo_runtime_secs_with_spilling = None
self.solo_runtime_secs_without_spilling = None
# Query options to set before running the query.
self.options = {}
# Determines the order in which we will populate query runtime info. Queries with the
# lowest population_order property will be handled first.
self.population_order = 0
# Type of query. Can have the following values: SELECT, COMPUTE_STATS, INSERT, UPDATE,
# UPSERT, DELETE.
self.query_type = QueryType.SELECT
self._logical_query_id = None
def __repr__(self):
return dedent("""
<Query
Mem: %(required_mem_mb_with_spilling)s
Mem no-spilling: %(required_mem_mb_without_spilling)s
Solo Runtime: %(solo_runtime_secs_with_spilling)s
Solo Runtime no-spilling: %(solo_runtime_secs_without_spilling)s
DB: %(db_name)s
Options: %(options)s
Set up SQL: %(set_up_sql)s
SQL: %(sql)s
Population order: %(population_order)r>
""".strip() % self.__dict__)
@property
def logical_query_id(self):
"""
Return a meaningful unique str identifier for the query.
Example: "tpcds_300_decimal_parquet_q21"
"""
if self._logical_query_id is None:
self._logical_query_id = '{0}_{1}'.format(self.db_name, self.name)
return self._logical_query_id
def write_runtime_info_profiles(self, directory):
"""Write profiles for spilling and non-spilling into directory (str)."""
profiles_to_write = [
(self.logical_query_id + "_profile_with_spilling.txt",
self.solo_runtime_profile_with_spilling),
(self.logical_query_id + "_profile_without_spilling.txt",
self.solo_runtime_profile_without_spilling),
]
for filename, profile in profiles_to_write:
if profile is None:
LOG.debug("No profile recorded for {0}".format(filename))
continue
with open(os.path.join(directory, filename), "w") as fh:
fh.write(profile)
class QueryRunner(object):
"""Encapsulates functionality to run a query and provide a runtime report."""
SPILLED_PATTERNS = [re.compile("ExecOption:.*Spilled"), re.compile("SpilledRuns: [^0]")]
BATCH_SIZE = 1024
def __init__(self, stress_runner=None):
"""Creates a new instance. The caller must fill in the below fields. stress_runner
must be provided if this is running in the context of a stress run, so that statistics
can be updated."""
self.stress_runner = stress_runner
self.impalad = None
self.impalad_conn = None
self.use_kerberos = False
self.results_dir = gettempdir()
self.check_if_mem_was_spilled = False
self.common_query_options = {}
self.proc = None
# All these values are shared values between processes. We want these to be accessible
# by the parent process that started this QueryRunner, for operational purposes.
self._metrics = {
NUM_QUERIES_DEQUEUED: Value("i", 0),
NUM_QUERIES_SUBMITTED: Value("i", 0),
NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED: Value("i", 0),
NUM_QUERIES_FINISHED: Value("i", 0),
NUM_QUERIES_EXCEEDED_MEM_LIMIT: Value("i", 0),
NUM_QUERIES_AC_REJECTED: Value("i", 0),
NUM_QUERIES_AC_TIMEDOUT: Value("i", 0),
NUM_QUERIES_CANCELLED: Value("i", 0),
NUM_RESULT_MISMATCHES: Value("i", 0),
NUM_OTHER_ERRORS: Value("i", 0)}
def connect(self):
self.impalad_conn = self.impalad.impala.connect(impalad=self.impalad)
def disconnect(self):
if self.impalad_conn:
self.impalad_conn.close()
self.impalad_conn = None
def run_query(self, query, mem_limit_mb, run_set_up=False,
timeout_secs=maxint, should_cancel=False, retain_profile=False):
"""Run a query and return an execution report. If 'run_set_up' is True, set up sql
will be executed before the main query. This should be the case during the binary
search phase of the stress test.
If 'should_cancel' is True, don't get the query profile for timed out queries because
the query was purposely cancelled by setting the query timeout too short to complete,
rather than having some problem that needs to be investigated.
"""
if not self.impalad_conn:
raise Exception("connect() must first be called")
timeout_unix_time = time() + timeout_secs
report = QueryReport(query)
try:
with self.impalad_conn.cursor() as cursor:
start_time = time()
self._set_db_and_options(cursor, query, run_set_up, mem_limit_mb, timeout_secs)
error = None
try:
cursor.execute_async(
"/* Mem: %s MB. Coordinator: %s. */\n"
% (mem_limit_mb, self.impalad.host_name) + query.sql)
report.query_id = op_handle_to_query_id(cursor._last_operation.handle if
cursor._last_operation else None)
LOG.debug("Query id is %s", report.query_id)
if not self._wait_until_fetchable(cursor, report, timeout_unix_time,
should_cancel):
return report
if query.query_type == QueryType.SELECT:
try:
report.result_hash = self._hash_result(cursor, timeout_unix_time, query)
if retain_profile or \
query.result_hash and report.result_hash != query.result_hash:
fetch_and_set_profile(cursor, report)
except QueryTimeout:
self._cancel(cursor, report)
return report
else:
# If query is in error state, this will raise an exception
cursor._wait_to_finish()
except Exception as error:
report.query_id = op_handle_to_query_id(cursor._last_operation.handle if
cursor._last_operation else None)
LOG.debug("Error running query with id %s: %s", report.query_id, error)
self._check_for_memory_errors(report, cursor, error)
if report.has_query_error():
return report
report.runtime_secs = time() - start_time
if cursor.execution_failed() or self.check_if_mem_was_spilled:
fetch_and_set_profile(cursor, report)
report.mem_was_spilled = any([
pattern.search(report.profile) is not None
for pattern in QueryRunner.SPILLED_PATTERNS])
report.not_enough_memory = "Memory limit exceeded" in report.profile
except Exception as error:
# A mem limit error would have been caught above, no need to check for that here.
report.other_error = error
return report
def _set_db_and_options(self, cursor, query, run_set_up, mem_limit_mb, timeout_secs):
"""Set up a new cursor for running a query by switching to the correct database and
setting query options."""
if query.db_name:
LOG.debug("Using %s database", query.db_name)
cursor.execute("USE %s" % query.db_name)
if run_set_up and query.set_up_sql:
LOG.debug("Running set up query:\n%s", query.set_up_sql)
cursor.execute(query.set_up_sql)
for query_option, value in self.common_query_options.iteritems():
cursor.execute(
"SET {query_option}={value}".format(query_option=query_option, value=value))
for query_option, value in query.options.iteritems():
cursor.execute(
"SET {query_option}={value}".format(query_option=query_option, value=value))
cursor.execute("SET ABORT_ON_ERROR=1")
if self.test_admission_control:
LOG.debug(
"Running query without mem limit at %s with timeout secs %s:\n%s",
self.impalad.host_name, timeout_secs, query.sql)
else:
LOG.debug("Setting mem limit to %s MB", mem_limit_mb)
cursor.execute("SET MEM_LIMIT=%sM" % mem_limit_mb)
LOG.debug(
"Running query with %s MB mem limit at %s with timeout secs %s:\n%s",
mem_limit_mb, self.impalad.host_name, timeout_secs, query.sql)
def _wait_until_fetchable(self, cursor, report, timeout_unix_time, should_cancel):
"""Wait up until timeout_unix_time until the query results can be fetched (if it's
a SELECT query) or until it has finished executing (if it's a different query type
like DML). If the timeout expires we either cancel the query or report the timeout.
Return True in the first case or False in the second (timeout) case."""
# Loop until the query gets to the right state or a timeout expires.
sleep_secs = 0.1
secs_since_log = 0
# True if we incremented num_queries_started_running_or_cancelled for this query.
started_running_or_cancelled = False
while True:
query_state = cursor.status()
# Check if the query got past the PENDING/INITIALIZED states, either because
# it's executing or hit an error.
if (not started_running_or_cancelled and query_state not in ('PENDING_STATE',
'INITIALIZED_STATE')):
started_running_or_cancelled = True
increment(self._metrics[NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED])
# Return if we're ready to fetch results (in the FINISHED state) or we are in
# another terminal state like EXCEPTION.
if query_state not in ('PENDING_STATE', 'INITIALIZED_STATE', 'RUNNING_STATE'):
return True
if time() > timeout_unix_time:
if not should_cancel:
fetch_and_set_profile(cursor, report)
self._cancel(cursor, report)
if not started_running_or_cancelled:
increment(self._metrics[NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED])
return False
if secs_since_log > 5:
secs_since_log = 0
LOG.debug("Waiting for query to execute")
sleep(sleep_secs)
secs_since_log += sleep_secs
def update_from_query_report(self, report):
LOG.debug("Updating runtime stats (Query Runner PID: {0})".format(self.proc.pid))
increment(self._metrics[NUM_QUERIES_FINISHED])
if report.not_enough_memory:
increment(self._metrics[NUM_QUERIES_EXCEEDED_MEM_LIMIT])
if report.ac_rejected:
increment(self._metrics[NUM_QUERIES_AC_REJECTED])
if report.ac_timedout:
increment(self._metrics[NUM_QUERIES_AC_TIMEDOUT])
if report.was_cancelled:
increment(self._metrics[NUM_QUERIES_CANCELLED])
def _cancel(self, cursor, report):
report.timed_out = True
if not report.query_id:
return
try:
LOG.debug("Attempting cancellation of query with id %s", report.query_id)
cursor.cancel_operation()
LOG.debug("Sent cancellation request for query with id %s", report.query_id)
except Exception as e:
LOG.debug("Error cancelling query with id %s: %s", report.query_id, e)
try:
LOG.debug("Attempting to cancel query through the web server.")
self.impalad.cancel_query(report.query_id)
except Exception as e:
LOG.debug("Error cancelling query %s through the web server: %s",
report.query_id, e)
def _check_for_memory_errors(self, report, cursor, caught_exception):
"""To be called after a query failure to check for signs of failed due to a
mem limit or admission control rejection/timeout. The report will be updated
accordingly.
"""
fetch_and_set_profile(cursor, report)
caught_msg = str(caught_exception).lower().strip()
# Distinguish error conditions based on string fragments. The AC rejection and
# out-of-memory conditions actually overlap (since some memory checks happen in
# admission control) so check the out-of-memory conditions first.
if "memory limit exceeded" in caught_msg or \
"repartitioning did not reduce the size of a spilled partition" in caught_msg or \
"failed to get minimum memory reservation" in caught_msg or \
"minimum memory reservation is greater than" in caught_msg or \
"minimum memory reservation needed is greater than" in caught_msg:
report.not_enough_memory = True
return
if "rejected query from pool" in caught_msg:
report.ac_rejected = True
return
if "admission for query exceeded timeout" in caught_msg:
report.ac_timedout = True
return
LOG.debug("Non-mem limit error for query with id %s: %s", report.query_id,
caught_exception, exc_info=True)
report.other_error = caught_exception
def _hash_result(self, cursor, timeout_unix_time, query):
"""Returns a hash that is independent of row order. 'query' is only used for debug
logging purposes (if the result is not as expected a log file will be left for
investigations).
"""
query_id = op_handle_to_query_id(cursor._last_operation.handle if
cursor._last_operation else None)
# A value of 1 indicates that the hash thread should continue to work.
should_continue = Value("i", 1)
def hash_result_impl():
result_log = None
try:
file_name = '_'.join([query.logical_query_id, query_id.replace(":", "_")])
if query.result_hash is None:
file_name += "_initial"
file_name += "_results.txt"
result_log = open(os.path.join(self.results_dir, RESULT_HASHES_DIR, file_name),
"w")
result_log.write(query.sql)
result_log.write("\n")
current_thread().result = 1
while should_continue.value:
LOG.debug(
"Fetching result for query with id %s",
op_handle_to_query_id(
cursor._last_operation.handle if cursor._last_operation else None))
rows = cursor.fetchmany(self.BATCH_SIZE)
if not rows:
LOG.debug(
"No more results for query with id %s",
op_handle_to_query_id(
cursor._last_operation.handle if cursor._last_operation else None))
return
for row in rows:
for idx, val in enumerate(row):
if val is None:
# The hash() of None can change from run to run since it's based on
# a memory address. A chosen value will be used instead.
val = 38463209
elif isinstance(val, float):
# Floats returned by Impala may not be deterministic; the trailing
# insignificant digits may differ. Only the first 6 digits will be used
# after rounding.
sval = "%f" % val
dot_idx = sval.find(".")
val = round(val, 6 - dot_idx)
current_thread().result += (idx + 1) * hash(val)
# Modulo the result to keep it "small" otherwise the math ops can be slow
# since python does infinite precision math.
current_thread().result %= maxint
if result_log:
result_log.write(str(val))
result_log.write("\t")
result_log.write(str(current_thread().result))
result_log.write("\n")
except Exception as e:
current_thread().error = e
finally:
if result_log is not None:
result_log.close()
if (
current_thread().error is not None and
current_thread().result == query.result_hash
):
os.remove(result_log.name)
hash_thread = create_and_start_daemon_thread(
hash_result_impl, "Fetch Results %s" % query_id)
hash_thread.join(max(timeout_unix_time - time(), 0))
if hash_thread.is_alive():
should_continue.value = 0
raise QueryTimeout()
if hash_thread.error:
raise hash_thread.error
return hash_thread.result
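# Standalone sketch of the row-order-independent hash computed above (a hypothetical
# helper; the real code streams batches from the cursor and also normalizes None and
# float values before hashing):
#   def hash_rows(rows):
#     result = 1
#     for row in rows:
#       for idx, val in enumerate(row):
#         result += (idx + 1) * hash(val)
#         result %= maxint  # keep the running value bounded
#     return result
# Because each value's contribution depends only on its column index, permuting the
# rows leaves the final hash unchanged.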
def load_tpc_queries(workload):
"""Returns a list of TPC queries. 'workload' should either be 'tpch' or 'tpcds'."""
LOG.info("Loading %s queries", workload)
queries = []
for query_name, query_sql in test_file_parser.load_tpc_queries(workload,
include_stress_queries=True).iteritems():
query = Query()
query.name = query_name
query.sql = query_sql
queries.append(query)
return queries
def load_queries_from_test_file(file_path, db_name=None):
LOG.debug("Loading queries from %s", file_path)
test_cases = test_file_parser.parse_query_test_file(file_path)
queries = list()
for test_case in test_cases:
query = Query()
query.sql = test_file_parser.remove_comments(test_case["QUERY"])
query.db_name = db_name
queries.append(query)
return queries
def load_random_queries_and_populate_runtime_info(
query_generator, model_translator, tables, impala, converted_args
):
"""Returns a list of random queries. Each query will also have its runtime info
populated. The runtime info population also serves to validate the query.
"""
LOG.info("Generating random queries")
def generate_candidates():
while True:
query_model = query_generator.generate_statement(tables)
sql = model_translator.write_query(query_model)
query = Query()
query.sql = sql
query.db_name = converted_args.random_db
yield query
return populate_runtime_info_for_random_queries(
impala, generate_candidates(), converted_args)
def populate_runtime_info_for_random_queries(impala, candidate_queries, converted_args):
"""Returns a list of random queries. Each query will also have its runtime info
populated. The runtime info population also serves to validate the query.
"""
start_time = datetime.now()
queries = list()
# TODO(IMPALA-4632): Consider running reset_databases() here if we want to extend DML
# functionality to random stress queries as well.
for query in candidate_queries:
try:
populate_runtime_info(
query, impala, converted_args,
timeout_secs=converted_args.random_query_timeout_seconds)
queries.append(query)
except Exception as e:
# Ignore any non-fatal errors. These could be query timeouts or bad queries
# (query generator bugs).
if print_crash_info_if_exists(impala, start_time):
raise e
LOG.warn(
"Error running query (the test will continue)\n%s\n%s",
e, query.sql, exc_info=True)
if len(queries) == converted_args.random_query_count:
break
return queries
def populate_runtime_info(query, impala, converted_args, timeout_secs=maxint):
"""Runs the given query by itself repeatedly until the minimum memory is determined
with and without spilling. Potentially all fields in the Query class (except
'sql') will be populated by this method. 'required_mem_mb_without_spilling' and
the corresponding runtime field may still be None if the query could not be run
without spilling.
converted_args.samples and converted_args.max_conflicting_samples control the
reliability of the collected information. The problem is that memory spilling or usage
may differ (by a large amount) from run to run due to races during execution. The
parameters provide a way to express "X out of Y runs must have resulted in the same
outcome". Increasing the number of samples and decreasing the tolerance (max conflicts)
increases confidence but also increases the time to collect the data.
"""
LOG.info("Collecting runtime info for query %s: \n%s", query.name, query.sql)
samples = converted_args.samples
max_conflicting_samples = converted_args.max_conflicting_samples
results_dir = converted_args.results_dir
mem_limit_eq_threshold_mb = converted_args.mem_limit_eq_threshold_mb
mem_limit_eq_threshold_percent = converted_args.mem_limit_eq_threshold_percent
runner = QueryRunner()
runner.check_if_mem_was_spilled = True
runner.common_query_options = converted_args.common_query_options
runner.test_admission_control = converted_args.test_admission_control
runner.impalad = impala.impalads[0]
runner.results_dir = results_dir
runner.use_kerberos = converted_args.use_kerberos
runner.connect()
limit_exceeded_mem = 0
non_spill_mem = None
spill_mem = None
report = None
mem_limit = None
old_required_mem_mb_without_spilling = query.required_mem_mb_without_spilling
old_required_mem_mb_with_spilling = query.required_mem_mb_with_spilling
profile_error_prefix = query.logical_query_id + "_binsearch_error"
# TODO: This method is complicated enough now that breaking it out into a class may be
# helpful to understand the structure.
def update_runtime_info():
required_mem = min(mem_limit, impala.min_impalad_mem_mb)
if report.mem_was_spilled:
if (
query.required_mem_mb_with_spilling is None or
required_mem < query.required_mem_mb_with_spilling
):
query.required_mem_mb_with_spilling = required_mem
query.solo_runtime_secs_with_spilling = report.runtime_secs
query.solo_runtime_profile_with_spilling = report.profile
elif (
query.required_mem_mb_without_spilling is None or
required_mem < query.required_mem_mb_without_spilling
):
query.required_mem_mb_without_spilling = required_mem
query.solo_runtime_secs_without_spilling = report.runtime_secs
assert report.runtime_secs is not None, report
query.solo_runtime_profile_without_spilling = report.profile
def get_report(desired_outcome=None):
reports_by_outcome = defaultdict(list)
leading_outcome = None
for remaining_samples in xrange(samples - 1, -1, -1):
report = runner.run_query(query, mem_limit, run_set_up=True,
timeout_secs=timeout_secs, retain_profile=True)
if report.timed_out:
report.write_query_profile(
os.path.join(results_dir, PROFILES_DIR), profile_error_prefix)
raise QueryTimeout(
"query {0} timed out during binary search".format(query.logical_query_id))
if report.other_error:
report.write_query_profile(
os.path.join(results_dir, PROFILES_DIR), profile_error_prefix)
raise Exception(
"query {0} errored during binary search: {1}".format(
query.logical_query_id, str(report.other_error)))
LOG.debug("Spilled: %s" % report.mem_was_spilled)
if not report.has_query_error():
if query.result_hash is None:
query.result_hash = report.result_hash
elif query.result_hash != report.result_hash:
report.write_query_profile(
os.path.join(results_dir, PROFILES_DIR), profile_error_prefix)
raise Exception(
"Result hash mismatch for query %s; expected %s, got %s" %
(query.logical_query_id, query.result_hash, report.result_hash))
if report.not_enough_memory:
outcome = "EXCEEDED"
elif report.mem_was_spilled:
outcome = "SPILLED"
else:
outcome = "NOT_SPILLED"
reports_by_outcome[outcome].append(report)
if not leading_outcome:
leading_outcome = outcome
continue
if len(reports_by_outcome[outcome]) > len(reports_by_outcome[leading_outcome]):
leading_outcome = outcome
if len(reports_by_outcome[leading_outcome]) + max_conflicting_samples == samples:
break
if (
len(reports_by_outcome[leading_outcome]) + remaining_samples <
samples - max_conflicting_samples
):
return
if desired_outcome \
and len(reports_by_outcome[desired_outcome]) + remaining_samples \
< samples - max_conflicting_samples:
return
reports = reports_by_outcome[leading_outcome]
reports.sort(key=lambda r: r.runtime_secs)
return reports[len(reports) / 2]
if not any((old_required_mem_mb_with_spilling, old_required_mem_mb_without_spilling)):
mem_estimate = estimate_query_mem_mb_usage(query, runner)
LOG.info("Finding a starting point for binary search")
mem_limit = min(mem_estimate, impala.min_impalad_mem_mb) or impala.min_impalad_mem_mb
while True:
LOG.info("Next mem_limit: {0}".format(mem_limit))
report = get_report()
if not report or report.not_enough_memory:
if report and report.not_enough_memory:
limit_exceeded_mem = mem_limit
if mem_limit == impala.min_impalad_mem_mb:
LOG.warn(
"Query couldn't be run even when using all available memory\n%s", query.sql)
return
mem_limit = min(2 * mem_limit, impala.min_impalad_mem_mb)
continue
update_runtime_info()
if report.mem_was_spilled:
spill_mem = mem_limit
else:
non_spill_mem = mem_limit
break
LOG.info("Finding minimum memory required to avoid spilling")
lower_bound = max(limit_exceeded_mem, spill_mem)
upper_bound = min(non_spill_mem or maxint, impala.min_impalad_mem_mb)
while True:
if old_required_mem_mb_without_spilling:
mem_limit = old_required_mem_mb_without_spilling
old_required_mem_mb_without_spilling = None
else:
mem_limit = (lower_bound + upper_bound) / 2
LOG.info("Next mem_limit: {0}".format(mem_limit))
should_break = mem_limit / float(upper_bound) > 1 - mem_limit_eq_threshold_percent \
or upper_bound - mem_limit < mem_limit_eq_threshold_mb
report = get_report(desired_outcome=("NOT_SPILLED" if spill_mem else None))
if not report:
lower_bound = mem_limit
elif report.not_enough_memory:
lower_bound = mem_limit
limit_exceeded_mem = mem_limit
else:
update_runtime_info()
if report.mem_was_spilled:
lower_bound = mem_limit
spill_mem = min(spill_mem, mem_limit)
else:
upper_bound = mem_limit
non_spill_mem = mem_limit
if mem_limit == impala.min_impalad_mem_mb:
break
if should_break:
if non_spill_mem:
break
lower_bound = upper_bound = impala.min_impalad_mem_mb
# This value may be updated during the search for the absolute minimum.
LOG.info(
"Minimum memory to avoid spilling: %s MB" % query.required_mem_mb_without_spilling)
LOG.info("Finding absolute minimum memory required")
lower_bound = limit_exceeded_mem
upper_bound = min(
spill_mem or maxint, non_spill_mem or maxint, impala.min_impalad_mem_mb)
while True:
if old_required_mem_mb_with_spilling:
mem_limit = old_required_mem_mb_with_spilling
old_required_mem_mb_with_spilling = None
else:
mem_limit = (lower_bound + upper_bound) / 2
LOG.info("Next mem_limit: {0}".format(mem_limit))
should_break = mem_limit / float(upper_bound) > 1 - mem_limit_eq_threshold_percent \
or upper_bound - mem_limit < mem_limit_eq_threshold_mb
report = get_report(desired_outcome="SPILLED")
if not report or report.not_enough_memory:
lower_bound = mem_limit
else:
update_runtime_info()
upper_bound = mem_limit
if should_break:
if not query.required_mem_mb_with_spilling:
if upper_bound - mem_limit < mem_limit_eq_threshold_mb:
# IMPALA-6604: A fair amount of queries go down this path.
LOG.info(
"Unable to find a memory limit with spilling within the threshold of {0} "
"MB. Using the same memory limit for both.".format(
mem_limit_eq_threshold_mb))
query.required_mem_mb_with_spilling = query.required_mem_mb_without_spilling
query.solo_runtime_secs_with_spilling = query.solo_runtime_secs_without_spilling
query.solo_runtime_profile_with_spilling = \
query.solo_runtime_profile_without_spilling
break
LOG.info("Minimum memory is %s MB" % query.required_mem_mb_with_spilling)
if (
query.required_mem_mb_without_spilling is not None and
query.required_mem_mb_with_spilling is not None and
query.required_mem_mb_without_spilling < query.required_mem_mb_with_spilling
):
# Query execution is not deterministic and sometimes a query will run without spilling
# at a lower mem limit than it did with spilling. In that case, just use the lower
# value.
LOG.info(
"A lower memory limit to avoid spilling was found while searching for"
" the absolute minimum memory.")
query.required_mem_mb_with_spilling = query.required_mem_mb_without_spilling
query.solo_runtime_secs_with_spilling = query.solo_runtime_secs_without_spilling
query.solo_runtime_profile_with_spilling = query.solo_runtime_profile_without_spilling
LOG.debug("Query after populating runtime info: %s", query)
def estimate_query_mem_mb_usage(query, query_runner):
"""Runs an explain plan then extracts and returns the estimated memory needed to run
the query.
"""
with query_runner.impalad_conn.cursor() as cursor:
LOG.debug("Using %s database", query.db_name)
if query.db_name:
cursor.execute('USE ' + query.db_name)
if query.query_type == QueryType.COMPUTE_STATS:
# Running "explain" on compute stats is not supported by Impala.
return
LOG.debug("Explaining query\n%s", query.sql)
cursor.execute('EXPLAIN ' + query.sql)
explain_rows = cursor.fetchall()
explain_lines = [row[0] for row in explain_rows]
mem_limit, units = match_memory_estimate(explain_lines)
return parse_mem_to_mb(mem_limit, units)
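# Hedged example of the extraction above: match_memory_estimate() is expected to find
# the planner's per-host memory estimate in the EXPLAIN output (in recent Impala
# versions a line resembling "Per-Host Resource Estimates: Memory=512MB"), and
# parse_mem_to_mb() normalizes the number/unit pair to megabytes. The exact line
# format depends on the Impala version, so this is illustrative only.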
def save_runtime_info(path, query, impala):
"""Updates the file at 'path' with the given query information."""
store = None
if os.path.exists(path):
with open(path) as file:
store = json.load(file)
_check_store_version(store)
if not store:
store = {
"host_names": list(), "db_names": dict(), "version": RUNTIME_INFO_FILE_VERSION}
with open(path, "w+") as file:
store["host_names"] = sorted([i.host_name for i in impala.impalads])
queries = store["db_names"].get(query.db_name, dict())
query_by_options = queries.get(query.sql, dict())
query_by_options[str(sorted(query.options.items()))] = query
queries[query.sql] = query_by_options
store["db_names"][query.db_name] = queries
class JsonEncoder(json.JSONEncoder):
def default(self, obj):
data = dict(obj.__dict__)
# Queries are stored by sql, so remove the duplicate data. Also don't store
# profiles as JSON values, but instead separately.
for k in ("sql", "solo_runtime_profile_with_spilling",
"solo_runtime_profile_without_spilling"):
if k in data:
del data[k]
return data
json.dump(
store, file, cls=JsonEncoder, sort_keys=True, indent=2, separators=(',', ': '))
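# Sketch of the on-disk layout produced above (names and values are hypothetical;
# the SQL text and profiles inside each query entry are stripped by JsonEncoder):
# {
#   "version": <RUNTIME_INFO_FILE_VERSION>,
#   "host_names": ["host-1", "host-2"],
#   "db_names": {
#     "tpch_300_parquet": {
#       "select ...": {
#         "[('MT_DOP', '4')]": {"name": "q1", "required_mem_mb_with_spilling": 512, ...}
#       }
#     }
#   }
# }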
def load_runtime_info(path, impala=None):
"""Reads the query runtime information at 'path' and returns a
dict<db_name, dict<sql, Query>>. Returns an empty dict if the hosts in the 'impala'
instance do not match the data in 'path'.
"""
queries_by_db_and_sql = defaultdict(lambda: defaultdict(dict))
if not os.path.exists(path):
return queries_by_db_and_sql
with open(path) as file:
store = json.load(file)
_check_store_version(store)
if (
impala and
store.get("host_names") != sorted([i.host_name for i in impala.impalads])
):
return queries_by_db_and_sql
for db_name, queries_by_sql in store["db_names"].iteritems():
for sql, queries_by_options in queries_by_sql.iteritems():
for options, json_query in queries_by_options.iteritems():
query = Query()
query.__dict__.update(json_query)
query.sql = sql
queries_by_db_and_sql[db_name][sql][options] = query
return queries_by_db_and_sql
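# Illustrative shape of the runtime-info JSON handled by save_runtime_info() and
# load_runtime_info() above (structure inferred from the code; the values shown are
# made-up examples, not real measurements):
#
# {
#   "version": <RUNTIME_INFO_FILE_VERSION>,
#   "host_names": ["host-1", "host-2"],
#   "db_names": {
#     "<db_name>": {
#       "<query sql>": {
#         "[('MT_DOP', '4')]": {
#           "name": "...",
#           "db_name": "<db_name>",
#           "options": {"MT_DOP": "4"},
#           "required_mem_mb_with_spilling": 512,
#           "required_mem_mb_without_spilling": 1024
#         }
#       }
#     }
#   }
# }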
def _check_store_version(store):
"""Clears 'store' if the version is too old or raises an error if the version is too
new.
"""
if store["version"] < RUNTIME_INFO_FILE_VERSION:
LOG.warn("Runtime file info version is old and will be ignored")
store.clear()
elif store["version"] > RUNTIME_INFO_FILE_VERSION:
raise Exception(
"Unexpected runtime file info version %s expected %s"
% (store["version"], RUNTIME_INFO_FILE_VERSION))
def print_runtime_info_comparison(old_runtime_info, new_runtime_info):
# TODO: Provide a way to call this from the CLI. This was hard coded to run from main()
# when it was used.
print(",".join([
"Database", "Query",
"Old Mem MB w/Spilling",
"New Mem MB w/Spilling",
"Diff %",
"Old Runtime w/Spilling",
"New Runtime w/Spilling",
"Diff %",
"Old Mem MB wout/Spilling",
"New Mem MB wout/Spilling",
"Diff %",
"Old Runtime wout/Spilling",
"New Runtime wout/Spilling",
"Diff %"]))
for db_name, old_queries in old_runtime_info.iteritems():
new_queries = new_runtime_info.get(db_name)
if not new_queries:
continue
for sql, old_query in old_queries.iteritems():
new_query = new_queries.get(sql)
if not new_query:
continue
sys.stdout.write(old_query["db_name"])
sys.stdout.write(",")
sys.stdout.write(old_query["name"])
sys.stdout.write(",")
for attr in [
"required_mem_mb_with_spilling", "solo_runtime_secs_with_spilling",
"required_mem_mb_without_spilling", "solo_runtime_secs_without_spilling"
]:
old_value = old_query[attr]
sys.stdout.write(str(old_value))
sys.stdout.write(",")
new_value = new_query[attr]
sys.stdout.write(str(new_value))
sys.stdout.write(",")
if old_value and new_value is not None:
sys.stdout.write("%0.2f%%" % (100 * float(new_value - old_value) / old_value))
else:
sys.stdout.write("N/A")
sys.stdout.write(",")
print()
def generate_DML_queries(cursor, dml_mod_values):
"""Generate insert, upsert, update, delete DML statements.
For each table in the database that cursor is connected to, create 4 DML queries
(insert, upsert, update, delete) for each mod value in 'dml_mod_values'. This value
controls which rows will be affected. The generated queries assume that for each table
in the database, there exists a table with a '_original' suffix that is never modified.
This function has some limitations:
1. Only generates DML statements against Kudu tables, and ignores non-Kudu tables.
2. Requires that the type of the first column of the primary key is an integer type.
"""
LOG.info("Generating DML queries")
tables = [cursor.describe_table(t) for t in cursor.list_table_names()
if not t.endswith("_original")]
result = []
for table in tables:
if not table.primary_keys:
# Skip non-Kudu tables. If a table has no primary keys, then it cannot be a Kudu
# table.
LOG.debug("Skipping table '{0}' because it has no primary keys.".format(table.name))
continue
if len(table.primary_keys) > 1:
# TODO(IMPALA-4665): Add support for tables with multiple primary keys.
LOG.debug("Skipping table '{0}' because it has more than "
"1 primary key column.".format(table.name))
continue
primary_key = table.primary_keys[0]
if primary_key.exact_type not in (Int, TinyInt, SmallInt, BigInt):
# We want to be able to apply the modulo operation on the primary key. If the
# first primary key column happens to not be an integer, we will skip
# generating queries for this table
LOG.debug("Skipping table '{0}' because the first column '{1}' in the "
"primary key is not an integer.".format(table.name, primary_key.name))
continue
for mod_value in dml_mod_values:
# Insert
insert_query = Query()
# Populate runtime info for Insert and Upsert queries before Update and Delete
# queries because tables remain in original state after running the Insert and
# Upsert queries. During the binary search in runtime info population for the
# Insert query, we first delete some rows and then reinsert them, so the table
# remains in the original state. For the delete, the order is reversed, so the table
# is not in the original state after running the delete (or update) query. This
# is why population_order is smaller for Insert and Upsert queries.
insert_query.population_order = 1
insert_query.query_type = QueryType.INSERT
insert_query.name = "insert_{0}".format(table.name)
insert_query.db_name = cursor.db_name
insert_query.sql = (
"INSERT INTO TABLE {0} SELECT * FROM {0}_original "
"WHERE {1} % {2} = 0").format(table.name, primary_key.name, mod_value)
# Upsert
upsert_query = Query()
upsert_query.population_order = 1
upsert_query.query_type = QueryType.UPSERT
upsert_query.name = "upsert_{0}".format(table.name)
upsert_query.db_name = cursor.db_name
upsert_query.sql = (
"UPSERT INTO TABLE {0} SELECT * "
"FROM {0}_original WHERE {1} % {2} = 0").format(
table.name, primary_key.name, mod_value)
# Update
update_query = Query()
update_query.population_order = 2
update_query.query_type = QueryType.UPDATE
update_query.name = "update_{0}".format(table.name)
update_query.db_name = cursor.db_name
update_list = ', '.join(
'a.{0} = b.{0}'.format(col.name)
for col in table.cols if not col.is_primary_key)
update_query.sql = (
"UPDATE a SET {update_list} FROM {table_name} a JOIN {table_name}_original b "
"ON a.{pk} = b.{pk} + 1 WHERE a.{pk} % {mod_value} = 0").format(
table_name=table.name, pk=primary_key.name, mod_value=mod_value,
update_list=update_list)
# Delete
delete_query = Query()
delete_query.population_order = 2
delete_query.query_type = QueryType.DELETE
delete_query.name = "delete_{0}".format(table.name)
delete_query.db_name = cursor.db_name
delete_query.sql = ("DELETE FROM {0} WHERE {1} % {2} = 0").format(
table.name, primary_key.name, mod_value)
if table.name + "_original" in set(table.name for table in tables):
insert_query.set_up_sql = "DELETE FROM {0} WHERE {1} % {2} = 0".format(
table.name, primary_key.name, mod_value)
upsert_query.set_up_sql = insert_query.set_up_sql
update_query.set_up_sql = (
"UPSERT INTO TABLE {0} SELECT * FROM {0}_original "
"WHERE {1} % {2} = 0").format(table.name, primary_key.name, mod_value)
delete_query.set_up_sql = update_query.set_up_sql
result.append(insert_query)
LOG.debug("Added insert query: {0}".format(insert_query))
result.append(update_query)
LOG.debug("Added update query: {0}".format(update_query))
result.append(upsert_query)
LOG.debug("Added upsert query: {0}".format(upsert_query))
result.append(delete_query)
LOG.debug("Added delete query: {0}".format(delete_query))
assert len(result) > 0, "No DML queries were added."
return result
def generate_compute_stats_queries(cursor):
"""For each table in the database that cursor is connected to, generate several compute
stats queries. Each query will have a different value for the MT_DOP query option.
"""
LOG.info("Generating Compute Stats queries")
tables = [cursor.describe_table(t) for t in cursor.list_table_names()
if not t.endswith("_original")]
result = []
mt_dop_values = [str(2**k) for k in range(5)]
for table in tables:
for mt_dop_value in mt_dop_values:
compute_query = Query()
compute_query.population_order = 1
compute_query.query_type = QueryType.COMPUTE_STATS
compute_query.sql = "COMPUTE STATS {0}".format(table.name)
compute_query.options["MT_DOP"] = mt_dop_value
compute_query.db_name = cursor.db_name
compute_query.name = "compute_stats_{0}_mt_dop_{1}".format(
table.name, compute_query.options["MT_DOP"])
result.append(compute_query)
LOG.debug("Added compute stats query: {0}".format(compute_query))
return result
def prepare_database(cursor):
"""For each table in the database that cursor is connected to, create an identical copy
with '_original' suffix. This function is idempotent.
Note: At this time we only support Kudu tables with a simple hash partitioning based on
the primary key. (SHOW CREATE TABLE would not work otherwise.)
"""
tables = dict((t, cursor.describe_table(t)) for t in cursor.list_table_names())
for table_name in tables:
if not table_name.endswith("_original") and table_name + "_original" not in tables:
LOG.debug("Creating original table: {0}".format(table_name))
cursor.execute("SHOW CREATE TABLE " + table_name)
create_sql = cursor.fetchone()[0]
search_pattern = r"CREATE TABLE (\w*)\.(.*) \("
replacement = "CREATE TABLE {tbl} (".format(tbl=table_name + "_original")
create_original_sql = re.sub(
search_pattern, replacement, create_sql, count=1)
LOG.debug("Create original SQL:\n{0}".format(create_original_sql))
cursor.execute(create_original_sql)
cursor.execute("INSERT INTO {0}_original SELECT * FROM {0}".format(table_name))
cursor.execute("COMPUTE STATS {0}".format(table_name + "_original"))
def reset_databases(cursor):
"""Reset the database to the initial state. This is done by overwriting tables which
don't have the _original suffix with data from tables with the _original suffix.
Note: At this time we only support Kudu tables with a simple hash partitioning based on
the primary key. (SHOW CREATE TABLE would not work otherwise.)
"""
LOG.info("Resetting {0} database".format(cursor.db_name))
tables = dict((t, cursor.describe_table(t)) for t in cursor.list_table_names())
for table_name in tables:
if not table_name.endswith("_original"):
if table_name + "_original" in tables:
cursor.execute("SHOW CREATE TABLE " + table_name)
create_table_command = cursor.fetchone()[0]
cursor.execute("DROP TABLE {0}".format(table_name))
cursor.execute(create_table_command)
cursor.execute("INSERT INTO {0} SELECT * FROM {0}_original".format(table_name))
cursor.execute("COMPUTE STATS {0}".format(table_name))
else:
LOG.debug("Table '{0}' cannot be reset because '{0}_original' does not"
" exist in '{1}' database.".format(table_name, cursor.db_name))
def populate_all_queries(
queries, impala, converted_args, queries_with_runtime_info_by_db_sql_and_options
):
"""Populate runtime info for all queries, ordered by the population_order property."""
result = []
queries_by_order = {}
for query in queries:
if query.population_order not in queries_by_order:
queries_by_order[query.population_order] = []
queries_by_order[query.population_order].append(query)
for population_order in sorted(queries_by_order.keys()):
for query in queries_by_order[population_order]:
if (
query.sql in
queries_with_runtime_info_by_db_sql_and_options[query.db_name] and
str(sorted(query.options.items())) in
queries_with_runtime_info_by_db_sql_and_options[query.db_name][query.sql]
):
LOG.debug("Reusing previous runtime data for query: " + query.sql)
result.append(queries_with_runtime_info_by_db_sql_and_options[
query.db_name][query.sql][str(sorted(query.options.items()))])
else:
populate_runtime_info(query, impala, converted_args)
save_runtime_info(converted_args.runtime_info_path, query, impala)
query.write_runtime_info_profiles(
os.path.join(converted_args.results_dir, PROFILES_DIR))
result.append(query)
return result
def fetch_and_set_profile(cursor, report):
"""Set the report's query profile using the given cursor.
Producing a query profile can be somewhat expensive. A v-tune profile of
impalad showed 10% of cpu time spent generating query profiles.
"""
if not report.profile and cursor._last_operation:
try:
report.profile = cursor.get_profile()
except Exception as e:
LOG.debug("Error getting profile for query with id %s: %s", report.query_id, e)
def print_version(cluster):
"""
Print the cluster impalad version info to the console sorted by hostname.
"""
def _sorter(i1, i2):
return cmp(i1.host_name, i2.host_name)
version_info = cluster.impala.get_version_info()
print("Cluster Impalad Version Info:")
for impalad in sorted(version_info.keys(), cmp=_sorter):
print("{0}: {1}".format(impalad.host_name, version_info[impalad]))
def main():
parser = ArgumentParser(
epilog=dedent("""
Before running this script a CM cluster must be setup and any needed data
such as TPC-H/DS must be loaded. The first time this script is run it will
find memory limits and runtimes for each query and save the data to disk (since
collecting the data is slow) at --runtime-info-path then run the stress test.
Later runs will reuse the saved memory limits and timings. If the cluster changes
significantly the memory limits should be re-measured (deleting the file at
--runtime-info-path will cause re-measuring to happen).""").strip(),
formatter_class=ArgumentDefaultsHelpFormatter)
cli_options.add_logging_options(parser)
cli_options.add_cluster_options(parser)
cli_options.add_kerberos_options(parser)
cli_options.add_ssl_options(parser)
parser.add_argument(
"--runtime-info-path",
default=os.path.join(gettempdir(), "{cm_host}_query_runtime_info.json"),
help="The path to store query runtime info at. '{cm_host}' will be replaced with"
" the actual host name from --cm-host.")
parser.add_argument(
"--samples", default=1, type=int,
help='Used when collecting "runtime info" - the number of samples to collect when'
' testing a particular mem limit value.')
parser.add_argument(
"--max-conflicting-samples", default=0, type=int,
help='Used when collecting "runtime info" - the number of samples outcomes that'
' can disagree when deciding to accept a particular mem limit. Ex, when trying to'
' determine the mem limit that avoids spilling with samples=5 and'
' max-conflicting-samples=1, then 4/5 queries must not spill at a particular mem'
' limit.')
parser.add_argument(
"--mem-limit-eq-threshold-percent", default=0.025,
type=float, help='Used when collecting "runtime info". If the difference between'
' two memory limits is less than this percentage, we consider the two limits to'
' be equal and stop the memory binary search.')
parser.add_argument(
"--mem-limit-eq-threshold-mb", default=50,
type=int, help='Used when collecting "runtime info". If the difference between'
' two memory limits is less than this value in MB, we consider the two limits to'
' be equal and stop the memory binary search.')
parser.add_argument(
"--results-dir", default=gettempdir(),
help="Directory under which the profiles and result_hashes directories are created."
" Query hash results are written in the result_hashes directory. If query results"
" do not match, a log file will be left in that dir. The log file is also created"
" during the first run when runtime info is collected for each query. Unexpected"
" query timeouts, exceeded memory, failures or result mismatches will result in a"
" profile written in the profiles directory.")
parser.add_argument(
"--no-status", action="store_true", help="Do not print the status table.")
parser.add_argument(
"--cancel-current-queries", action="store_true",
help="Cancel any queries running on the cluster before beginning.")
parser.add_argument(
"--filter-query-mem-ratio", type=float, default=0.333,
help="Queries that require this ratio of total available memory will be filtered.")
parser.add_argument(
"--startup-queries-per-second", type=float, default=2.0,
help="Adjust this depending on the cluster size and workload. This determines"
" the minimum amount of time between successive query submissions when"
" the workload is initially ramping up.")
parser.add_argument(
"--fail-upon-successive-errors", type=int, default=1,
help="Continue running until N query errors are encountered in a row. Set"
" this to a high number to only stop when something catastrophic happens. A"
" value of 1 stops upon the first error.")
parser.add_argument(
"--mem-limit-padding-pct", type=int, default=25,
help="Pad query mem limits found by solo execution with this percentage when"
" running concurrently. After padding queries will not be expected to fail"
" due to mem limit exceeded.")
parser.add_argument(
"--mem-limit-padding-abs", type=int, default=0,
help="Pad query mem limits found by solo execution with this value (in megabytes)"
" running concurrently. After padding queries will not be expected to fail"
" due to mem limit exceeded. This is useful if we want to be able to add the same"
" amount of memory to smaller queries as to the big ones.")
parser.add_argument(
"--timeout-multiplier", type=float, default=1.0,
help="Deprecated - has no effect.")
parser.add_argument("--max-queries", type=int, default=100)
parser.add_argument(
"--reset-databases-before-binary-search", action="store_true",
help="If True, databases will be reset to their original state before the binary"
" search.")
parser.add_argument(
"--reset-databases-after-binary-search", action="store_true",
help="If True, databases will be reset to their original state after the binary"
" search and before starting the stress test. The primary intent of this option is"
" to undo the changes made to the databases by the binary search. This option can"
" also be used to reset the databases before running other (non stress) tests on"
" the same data.")
parser.add_argument(
"--generate-dml-queries", action="store_true",
help="If True, DML queries will be generated for Kudu databases.")
parser.add_argument(
"--dml-mod-values", nargs="+", type=int, default=[11],
help="List of mod values to use for the DML queries. There will be 4 DML (delete,"
" insert, update, upsert) queries generated per mod value per table. The smaller"
" the value, the more rows the DML query would touch (the query should touch about"
" 1/mod_value rows.)")
parser.add_argument(
"--generate-compute-stats-queries", action="store_true",
help="If True, Compute Stats queries will be generated.")
parser.add_argument(
"--select-probability", type=float, default=0.5,
help="Probability of choosing a select query (as opposed to a DML query).")
parser.add_argument("--tpcds-db", help="If provided, TPC-DS queries will be used.")
parser.add_argument("--tpch-db", help="If provided, TPC-H queries will be used.")
parser.add_argument(
"--tpch-nested-db", help="If provided, nested TPC-H queries will be used.")
parser.add_argument(
"--tpch-kudu-db", help="If provided, TPC-H queries for Kudu will be used.")
parser.add_argument(
"--tpcds-kudu-db", help="If provided, TPC-DS queries for Kudu will be used.")
parser.add_argument(
"--random-db", help="If provided, random queries will be used.")
parser.add_argument(
"--random-query-count", type=int, default=50,
help="The number of random queries to generate.")
parser.add_argument(
"--random-query-timeout-seconds", type=int, default=(5 * 60),
help="A random query that runs longer than this time when running alone will"
" be discarded.")
parser.add_argument(
"--query-file-path", help="Use queries in the given file. The file"
" format must be the same as standard test case format. Queries are expected to "
" be randomly generated and will be validated before running in stress mode.")
parser.add_argument(
"--query-file-db",
help="The name of the database to use with the queries from --query-file-path.")
parser.add_argument("--mem-overcommit-pct", type=float, default=0)
parser.add_argument(
"--mem-spill-probability", type=float, default=0.33, dest="spill_probability",
help="The probability that a mem limit will be set low enough to induce spilling.")
parser.add_argument(
"--mem-leak-check-interval-mins", type=int, default=None,
help="Periodically stop query execution and check that memory levels have reset.")
parser.add_argument(
"--cancel-probability", type=float, default=0.1,
help="The probability a query will be cancelled.")
parser.add_argument("--nlj-filter", help=SUPPRESS) # Made a no-op by IMPALA-7440.
parser.add_argument(
"--common-query-options", default=None, nargs="*",
help="Space-delimited string of query options and values. This is a freeform "
"string with little regard to whether you've spelled the query options correctly "
"or set valid values. Example: --common-query-options "
"DISABLE_CODEGEN=true RUNTIME_FILTER_MODE=1")
parser.add_argument(
"--test-admission-control", type=bool, default=False,
help="If true, assume that the Impala cluster under test is using memory-based "
"admission control and should not admit queries that cannot be run to completion. "
"In this mode the stress runner does not set mem_limit on queries and "
"out-of-memory errors are not expected in this mode so will fail the stress test "
"if encountered. The stress runner still tracks the 'admitted' memory so that "
"it can try to submit more queries than there is available memory for.")
parser.add_argument(
"--max-coordinators", default=0, type=int, metavar="max coordinators",
help="If > 0, submit queries to at most this number of coordinators."
"This is useful in conjunction with --test-admission-control to test behaviour "
"with a smaller number of admission controller instances.")
args = parser.parse_args()
converted_args = StressArgConverter(args)
cli_options.configure_logging(
args.log_level, debug_log_file=args.debug_log_file, log_thread_name=True,
log_process_id=True)
LOG.debug("CLI args: %s" % (args, ))
if (
not args.tpcds_db and not args.tpch_db and not args.random_db and not
args.tpch_nested_db and not args.tpch_kudu_db and not
args.tpcds_kudu_db and not args.query_file_path
):
raise Exception(
"At least one of --tpcds-db, --tpch-db, --tpch-kudu-db,"
"--tpcds-kudu-db, --tpch-nested-db, --random-db, --query-file-path is required")
result_hashes_path = os.path.join(args.results_dir, RESULT_HASHES_DIR)
if not os.path.isdir(result_hashes_path):
os.makedirs(result_hashes_path)
results_dir_path = os.path.join(args.results_dir, PROFILES_DIR)
if not os.path.isdir(results_dir_path):
os.makedirs(results_dir_path)
cluster = cli_options.create_cluster(args)
impala = cluster.impala
if impala.find_stopped_impalads():
impala.restart()
print_version(cluster)
impala.find_and_set_path_to_running_impalad_binary()
if args.cancel_current_queries and impala.queries_are_running():
impala.cancel_queries()
sleep(10)
if impala.queries_are_running():
raise Exception("Queries are currently running on the cluster")
impala.min_impalad_mem_mb = min(impala.find_impalad_mem_mb_limit())
queries_with_runtime_info_by_db_sql_and_options = load_runtime_info(
converted_args.runtime_info_path, impala)
# Start loading the test queries.
queries = list()
# If random queries were requested, those will be handled later. Unlike random queries,
# the TPC queries are expected to always complete successfully.
if args.tpcds_db:
tpcds_queries = load_tpc_queries("tpcds")
assert len(tpcds_queries) == EXPECTED_TPCDS_QUERIES_COUNT
for query in tpcds_queries:
query.db_name = args.tpcds_db
queries.extend(tpcds_queries)
if args.generate_compute_stats_queries:
with impala.cursor(db_name=args.tpcds_db) as cursor:
queries.extend(generate_compute_stats_queries(cursor))
if args.tpch_db:
tpch_queries = load_tpc_queries("tpch")
assert len(tpch_queries) == EXPECTED_TPCH_STRESS_QUERIES_COUNT
for query in tpch_queries:
query.db_name = args.tpch_db
queries.extend(tpch_queries)
if args.generate_compute_stats_queries:
with impala.cursor(db_name=args.tpch_db) as cursor:
queries.extend(generate_compute_stats_queries(cursor))
if args.tpch_nested_db:
tpch_nested_queries = load_tpc_queries("tpch_nested")
assert len(tpch_nested_queries) == EXPECTED_TPCH_NESTED_QUERIES_COUNT
for query in tpch_nested_queries:
query.db_name = args.tpch_nested_db
queries.extend(tpch_nested_queries)
if args.generate_compute_stats_queries:
with impala.cursor(db_name=args.tpch_nested_db) as cursor:
queries.extend(generate_compute_stats_queries(cursor))
if args.tpch_kudu_db:
tpch_kudu_queries = load_tpc_queries("tpch")
assert len(tpch_kudu_queries) == EXPECTED_TPCH_STRESS_QUERIES_COUNT
for query in tpch_kudu_queries:
query.db_name = args.tpch_kudu_db
queries.extend(tpch_kudu_queries)
if args.generate_compute_stats_queries:
with impala.cursor(db_name=args.tpch_kudu_db) as cursor:
queries.extend(generate_compute_stats_queries(cursor))
if args.generate_dml_queries:
with impala.cursor(db_name=args.tpch_kudu_db) as cursor:
prepare_database(cursor)
queries.extend(generate_DML_queries(cursor, args.dml_mod_values))
if args.tpcds_kudu_db:
tpcds_kudu_queries = load_tpc_queries("tpcds")
assert len(tpcds_kudu_queries) == EXPECTED_TPCDS_QUERIES_COUNT
for query in tpcds_kudu_queries:
query.db_name = args.tpcds_kudu_db
queries.extend(tpcds_kudu_queries)
if args.generate_compute_stats_queries:
with impala.cursor(db_name=args.tpcds_kudu_db) as cursor:
queries.extend(generate_compute_stats_queries(cursor))
if args.generate_dml_queries:
with impala.cursor(db_name=args.tpcds_kudu_db) as cursor:
prepare_database(cursor)
queries.extend(generate_DML_queries(cursor, args.dml_mod_values))
if args.reset_databases_before_binary_search:
for database in set(query.db_name for query in queries):
with impala.cursor(db_name=database) as cursor:
reset_databases(cursor)
queries = populate_all_queries(
queries, impala, converted_args, queries_with_runtime_info_by_db_sql_and_options)
# A particular random query may either fail (due to a generator or Impala bug) or
# take a really long time to complete. So the queries need to be validated. Since the
# runtime info also needs to be collected, that will serve as validation.
if args.random_db:
query_generator = QueryGenerator(DefaultProfile())
with impala.cursor(db_name=args.random_db) as cursor:
tables = [cursor.describe_table(t) for t in cursor.list_table_names()]
queries.extend(load_random_queries_and_populate_runtime_info(
query_generator, SqlWriter.create(), tables, impala, converted_args))
if args.query_file_path:
file_queries = load_queries_from_test_file(
args.query_file_path, db_name=args.query_file_db)
shuffle(file_queries)
queries.extend(populate_runtime_info_for_random_queries(
impala, file_queries, converted_args))
# Apply tweaks to the query's runtime info as requested by CLI options.
for idx in xrange(len(queries) - 1, -1, -1):
query = queries[idx]
if query.required_mem_mb_with_spilling:
query.required_mem_mb_with_spilling += int(
query.required_mem_mb_with_spilling * args.mem_limit_padding_pct / 100.0) + \
args.mem_limit_padding_abs
if query.required_mem_mb_without_spilling:
query.required_mem_mb_without_spilling += int(
query.required_mem_mb_without_spilling * args.mem_limit_padding_pct / 100.0) + \
args.mem_limit_padding_abs
# Remove any queries that would use "too many" resources. This way a larger number
# of queries will run concurrently.
if query.required_mem_mb_without_spilling is not None and \
query.required_mem_mb_without_spilling / float(impala.min_impalad_mem_mb) \
> args.filter_query_mem_ratio:
LOG.debug(
"Filtering non-spilling query that exceeds "
"--filter-query-mem-ratio: " + query.sql)
query.required_mem_mb_without_spilling = None
if query.required_mem_mb_with_spilling is None \
or query.required_mem_mb_with_spilling / float(impala.min_impalad_mem_mb) \
> args.filter_query_mem_ratio:
LOG.debug("Filtering query that exceeds --filter-query-mem-ratio: " + query.sql)
del queries[idx]
if len(queries) == 0:
raise Exception("All queries were filtered")
print("Using %s queries" % len(queries))
# After the binary search phase finishes, it may be a good idea to reset the database
# again to start the stress test from a clean state.
if args.reset_databases_after_binary_search:
for database in set(query.db_name for query in queries):
with impala.cursor(db_name=database) as cursor:
reset_databases(cursor)
LOG.info("Number of queries in the list: {0}".format(len(queries)))
stress_runner = StressRunner()
stress_runner.results_dir = args.results_dir
stress_runner.startup_queries_per_sec = args.startup_queries_per_second
stress_runner.num_successive_errors_needed_to_abort = args.fail_upon_successive_errors
stress_runner.use_kerberos = args.use_kerberos
stress_runner.cancel_probability = args.cancel_probability
stress_runner.spill_probability = args.spill_probability
stress_runner.leak_check_interval_mins = args.mem_leak_check_interval_mins
stress_runner.common_query_options = converted_args.common_query_options
stress_runner.test_admission_control = converted_args.test_admission_control
stress_runner.max_coordinators = converted_args.max_coordinators
stress_runner.run_queries(
queries, impala, args.max_queries, args.mem_overcommit_pct,
should_print_status=not args.no_status,
verify_results=not args.generate_dml_queries,
select_probability=args.select_probability)
if __name__ == "__main__":
main()
| {
"content_hash": "38762bcdf4f3c62758d0dd9323210c3b",
"timestamp": "",
"source": "github",
"line_count": 2423,
"max_line_length": 90,
"avg_line_length": 44.59141560049525,
"alnum_prop": 0.6645564348188255,
"repo_name": "cloudera/Impala",
"id": "b400650973a2e479835e02c8b0c24cc201c1fb80",
"size": "108045",
"binary": false,
"copies": "1",
"ref": "refs/heads/cdh6.3.0",
"path": "tests/stress/concurrent_select.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "442143"
},
{
"name": "C++",
"bytes": "13783954"
},
{
"name": "CMake",
"bytes": "203812"
},
{
"name": "CSS",
"bytes": "148115"
},
{
"name": "HTML",
"bytes": "56"
},
{
"name": "Java",
"bytes": "5599852"
},
{
"name": "JavaScript",
"bytes": "754881"
},
{
"name": "Lex",
"bytes": "26483"
},
{
"name": "PLpgSQL",
"bytes": "3459"
},
{
"name": "Python",
"bytes": "3078349"
},
{
"name": "Roff",
"bytes": "1633"
},
{
"name": "Shell",
"bytes": "147776"
},
{
"name": "TSQL",
"bytes": "9978"
},
{
"name": "Thrift",
"bytes": "287362"
}
],
"symlink_target": ""
} |
import FWCore.ParameterSet.Config as cms
process = cms.Process("ANA")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("Configuration.StandardSequences.Services_cff")
process.load("GeneratorInterface.HydjetInterface.hydjetDefault_cfi")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1)
)
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
#process.SimpleMemoryCheck = cms.Service('SimpleMemoryCheck',
# ignoreTotal=cms.untracked.int32(0),
# oncePerEventMode = cms.untracked.bool(False)
# )
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(),
duplicateCheckMode = cms.untracked.string("noDuplicateCheck")
)
import FWCore.Utilities.FileUtils as FileUtils
mylist = FileUtils.loadListFromFile ('filelist.txt')
for fname in mylist:
process.source.fileNames.append('file:%s' % (fname))
process.ana = cms.EDAnalyzer('AMPTAnalyzer'
)
process.TFileService = cms.Service('TFileService',
fileName = cms.string('outtreefile_ampt.root')
)
process.p = cms.Path(process.ana)
| {
"content_hash": "79101381754efdc837043eb6a8fe729d",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 89,
"avg_line_length": 35.375,
"alnum_prop": 0.6127208480565371,
"repo_name": "tuos/FlowAndCorrelations",
"id": "54a23fe2abf8338a6192ca2ee9111bb9624d719d",
"size": "1415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/ampt/production/tree/v1/testAMPT.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1659934"
},
{
"name": "C++",
"bytes": "492449"
},
{
"name": "Fortran",
"bytes": "241100"
},
{
"name": "Io",
"bytes": "128"
},
{
"name": "Python",
"bytes": "196718"
},
{
"name": "Shell",
"bytes": "32809"
}
],
"symlink_target": ""
} |
from compiler.ast import flatten
from transformers import camxes_morphology
from structures.gensuha import BuLetteral, ZeiLujvo, Tosmabru, Slinkuhi
class Transformer(object):
def transform(self, parsed):
return Visitor().visit(parsed)
def default_serializer(self):
return lambda x: x.as_json()
class Visitor(camxes_morphology.Visitor):
def visit_vlatai(self, node, visited_children):
return visited_children[1]
def visit_tosmabru(self, node, visited_children):
return Tosmabru(flatten(visited_children))
def visit_slinkuhi(self, node, visited_children):
return Slinkuhi(flatten(visited_children))
def visit_vlatai_bu_clause(self, node, visited_children):
return BuLetteral(flatten(visited_children))
def visit_vlatai_zei_clause(self, node, visited_children):
return ZeiLujvo(flatten(visited_children))
| {
"content_hash": "7cecf9c2161dad3b2258e4b2450e3adf",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 71,
"avg_line_length": 29.9,
"alnum_prop": 0.7224080267558528,
"repo_name": "teleological/camxes-py",
"id": "7156532135acafdff5bd05b580b83560d961b957",
"size": "960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transformers/vlatai.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "110863"
}
],
"symlink_target": ""
} |
r"""Code to clone the github repository, download the checkpoint and generate the frozen graph.
The frozen VGGish checkpoint will be saved to `/tmp/mediapipe/vggish_new.pb`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl import app
import tensorflow.compat.v1 as tf
from tensorflow.python.tools import freeze_graph
BASE_DIR = '/tmp/mediapipe/'
def create_vggish_frozen_graph():
"""Create the VGGish frozen graph."""
os.system('git clone https://github.com/tensorflow/models.git')
sys.path.append('models/research/audioset/vggish/')
import vggish_slim
os.system('curl -O https://storage.googleapis.com/audioset/vggish_model.ckpt')
ckpt_path = 'vggish_model.ckpt'
with tf.Graph().as_default(), tf.Session() as sess:
vggish_slim.define_vggish_slim(training=False)
vggish_slim.load_vggish_slim_checkpoint(sess, ckpt_path)
saver = tf.train.Saver(tf.all_variables())
freeze_graph.freeze_graph_with_def_protos(
sess.graph_def,
saver.as_saver_def(),
ckpt_path,
'vggish/fc2/BiasAdd',
restore_op_name=None,
filename_tensor_name=None,
output_graph='/tmp/mediapipe/vggish_new.pb',
clear_devices=True,
initializer_nodes=None)
os.system('rm -rf models/')
os.system('rm %s' % ckpt_path)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
create_vggish_frozen_graph()
if __name__ == '__main__':
app.run(main)
| {
"content_hash": "d804d1bc414153955a98ab4b4fa60ae5",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 95,
"avg_line_length": 27.403508771929825,
"alnum_prop": 0.6901408450704225,
"repo_name": "google/mediapipe",
"id": "786e2a6e7894ed6c81c6feb441c78fc22b532405",
"size": "2149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mediapipe/examples/desktop/youtube8m/generate_vggish_frozen_graph.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "514"
},
{
"name": "C",
"bytes": "76928"
},
{
"name": "C++",
"bytes": "10897312"
},
{
"name": "Dockerfile",
"bytes": "2659"
},
{
"name": "HTML",
"bytes": "4090"
},
{
"name": "Java",
"bytes": "1151252"
},
{
"name": "JavaScript",
"bytes": "6380"
},
{
"name": "Makefile",
"bytes": "1625"
},
{
"name": "Objective-C",
"bytes": "125458"
},
{
"name": "Objective-C++",
"bytes": "131706"
},
{
"name": "Python",
"bytes": "1272093"
},
{
"name": "Shell",
"bytes": "19580"
},
{
"name": "Starlark",
"bytes": "1277085"
},
{
"name": "TypeScript",
"bytes": "169026"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
# Load the MNIST data and apply one-hot encoding.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./mnist/data/", one_hot=True)
# Parameter setting
total_epoch = 30
batch_size = 100
learning_rate = 0.0002
# Number of hidden units
n_hidden = 256
# The size of inputs
n_input = 28 * 28
# Size of the noise vector used as the generator's input
n_noise = 128
# Step 1 Neural network
X = tf.placeholder(tf.float32,[None,n_input])
Z = tf.placeholder(tf.float32,[None,n_noise])
# Generator variables
W1 = tf.Variable(tf.random_normal([n_noise,n_hidden],stddev= 0.01))
B1 = tf.Variable(tf.zeros([n_hidden]))
W2 = tf.Variable(tf.random_normal([n_hidden,n_input],stddev = 0.01))
B2 = tf.Variable(tf.zeros([n_input]))
# Discriminator
W3 = tf.Variable(tf.random_normal([n_input,n_hidden],stddev =0.01))
B3 = tf.Variable(tf.zeros([n_hidden]))
W4 = tf.Variable(tf.random_normal([n_hidden,1],stddev= 0.01))
B4 = tf.Variable(tf.zeros([1]))
# Generator neural network
def generator(noise):
hidden_layer = tf.nn.relu(tf.matmul(noise,W1)+B1)
generated_outputs = tf.sigmoid(tf.matmul(hidden_layer,W2)+B2)
return generated_outputs
def discriminator(inputs):
hidden_layer = tf.nn.relu(tf.matmul(inputs,W3)+B3)
discrimination = tf.sigmoid(tf.matmul(hidden_layer,W4)+B4)
return discrimination
def gen_noise(batch_size):
return np.random.normal(size=[batch_size,n_noise])
# Generate random image
G = generator(Z)
# Discriminator output for an image generated from noise
D_gene = discriminator(G)
# Discriminator output for a real image
D_real = discriminator(X)
# Optimization: train D to maximize loss_D and G to maximize loss_G.
# AdamOptimizer minimizes, so the negated losses are minimized below.
loss_D = tf.reduce_mean(tf.log(D_real)+tf.log(1-D_gene))
loss_G = tf.reduce_mean(tf.log(D_gene))
G_var_list = [W1,B1,W2,B2]
D_var_list = [W3,B3,W4,B4]
train_D = tf.train.AdamOptimizer(learning_rate).minimize(-loss_D, var_list=D_var_list)
train_G = tf.train.AdamOptimizer(learning_rate).minimize(-loss_G, var_list=G_var_list)
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
total_batch = int(mnist.train.num_examples/batch_size)
loss_val_D,loss_val_G = 0, 0
for epoch in range(total_epoch):
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
noise = gen_noise(batch_size)
_, loss_val_D = sess.run([train_D,loss_D],feed_dict={X:batch_xs,Z:noise})
_, loss_val_G = sess.run([train_G,loss_G],feed_dict={Z:noise})
print("Epoch:",epoch,"D loss:",loss_val_D,"G loss:",loss_val_G)
k_noise = gen_noise(10)
pred = sess.run(G,feed_dict={Z:k_noise})
figure, axis = plt.subplots(1, 10, figsize=(10,1))
for i in range(10):
axis[i].set_axis_off()
axis[i].imshow(np.reshape(pred[i],(28,28)))
plt.savefig("./samples/{}.png".format(str(epoch).zfill(3)), bbox_inches='tight')
plt.close(figure)
print("Optimization finished")
| {
"content_hash": "2c414eec7daa60396be3d69fe04af10c",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 88,
"avg_line_length": 29.10377358490566,
"alnum_prop": 0.6722852512155592,
"repo_name": "Wonjuseo/Project101",
"id": "4bdcfd49bd6e063aa391584887265180018ef82e",
"size": "3223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "1/1-7. GAN.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "48063"
},
{
"name": "Python",
"bytes": "92597"
}
],
"symlink_target": ""
} |
from main import utils
from main import utils_coordinates
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system and doing at the same
# cruise other data management and system administration tasks.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017.
def find_locations(ship_date_time, ship_date_times):
ship_date_times = ship_date_times.replace("\r", "")
# The single location
single_location = ship_location(ship_date_time)
latitude_decimal_degrees_single_location = single_location['latitude']
longitude_decimal_degrees_single_location = single_location['longitude']
date_time = single_location['date_time']
if latitude_decimal_degrees_single_location is None or longitude_decimal_degrees_single_location is None or date_time is None:
latitude_decimal_degrees_single_location = longitude_decimal_degrees_single_location = "Unknown"
latitude_degrees_decimal_minutes_single_location = longitude_degrees_decimal_minutes_single_location = "Unknown"
latitude_degrees_minutes_decimal_seconds_single_location = longitude_degrees_minutes_decimal_seconds_single_location = "Unknown"
try:
decimal_degrees_single_location = (float(latitude_decimal_degrees_single_location), float(longitude_decimal_degrees_single_location))
(latitude_degrees_decimal_minutes_single_location,
longitude_degrees_decimal_minutes_single_location) = utils_coordinates.convert_decimal_degrees_to(decimal_degrees_single_location,
"decimal_degrees_minutes")
(latitude_degrees_minutes_decimal_seconds_single_location,
longitude_degrees_minutes_decimal_seconds_single_location) = utils_coordinates.convert_decimal_degrees_to(decimal_degrees_single_location,
"decimal_degrees_minutes_seconds")
except ValueError:
# Sadly the ship_location() can return strings - in the try code it tries to convert them to float,
# if it fails we return this to the next level.
pass
message = single_location['message']
# List of locations
list_of_locations = []
if len(ship_date_times) > 10:
for ship_date_time in ship_date_times.split("\n"):
location = ship_location(ship_date_time)
if location['latitude'] is None or location['longitude'] is None:
location['latitude'] = ""
location['longitude'] = ""
location['date_time'] = "(It was empty)"
if ship_date_time != "":
location['date_time'] = ship_date_time
(latitude_decimal_degrees, longitude_decimal_degrees) = (location['latitude'], location['longitude'])
(latitude_degrees_decimal_minutes, longitude_degrees_decimal_minutes) = (latitude_decimal_degrees, longitude_decimal_degrees)
(latitude_degrees_minutes_decimal_seconds, longitude_degrees_minutes_decimal_seconds) = (latitude_decimal_degrees, longitude_decimal_degrees)
try:
decimal_degrees = (float(latitude_decimal_degrees), float(longitude_decimal_degrees))
(latitude_degrees_decimal_minutes, longitude_degrees_decimal_minutes) = utils_coordinates.convert_decimal_degrees_to(decimal_degrees, "decimal_degrees_minutes")
(latitude_degrees_minutes_decimal_seconds, longitude_degrees_minutes_decimal_seconds) = utils_coordinates.convert_decimal_degrees_to(decimal_degrees, "decimal_degrees_minutes_seconds")
except ValueError:
# Sadly the ship_location() can return strings - in the try code it tries to convert them to float,
# if it fails we return this to the next level.
pass
information = {'date_time': location['date_time'],
'latitude_decimal_degrees': latitude_decimal_degrees,
'longitude_decimal_degrees': longitude_decimal_degrees,
'latitude_degrees_decimal_minutes': latitude_degrees_decimal_minutes,
'longitude_degrees_decimal_minutes': longitude_degrees_decimal_minutes,
'latitude_degrees_minutes_decimal_seconds': latitude_degrees_minutes_decimal_seconds,
'longitude_degrees_minutes_decimal_seconds': longitude_degrees_minutes_decimal_seconds
}
list_of_locations.append(information)
template_information = {
'ship_date_time': date_time,
'latitude_decimal_degrees_single_location': latitude_decimal_degrees_single_location,
'longitude_decimal_degrees_single_location': longitude_decimal_degrees_single_location,
'latitude_degrees_decimal_minutes_single_location': latitude_degrees_decimal_minutes_single_location,
'longitude_degrees_decimal_minutes_single_location': longitude_degrees_decimal_minutes_single_location,
'latitude_degrees_minutes_decimal_seconds_single_location': latitude_degrees_minutes_decimal_seconds_single_location,
'longitude_degrees_minutes_decimal_seconds_single_location': longitude_degrees_minutes_decimal_seconds_single_location,
'list_of_locations': list_of_locations,
'message': message
}
return template_information
def ship_location(str_datetime):
ship_date_time = utils.string_to_date_time(str_datetime)
if ship_date_time is None:
date_time = None
latitude = longitude = None
message = "Invalid date time (format has to be YYYY-MM-DD HH:mm:SS) (or without the secs)"
elif ship_date_time > utils.now_with_timezone():
date_time = "FUTURE"
latitude = longitude = None
message = "The date time seems to be in the future. We don't know where we are going to be!"
else:
location = utils.ship_location(ship_date_time)
latitude = location.latitude
longitude = location.longitude
date_time = location.date_time
message = ''
if latitude is None or longitude is None or date_time is None:
latitude = longitude = None
else:
latitude = "{0:.4f}".format(latitude)
longitude = "{0:.4f}".format(longitude)
return {'latitude': latitude, 'longitude': longitude, 'message': message, 'date_time': date_time}
| {
"content_hash": "80a35d9500c4a316f350ef6cf88c6a63",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 200,
"avg_line_length": 55.895161290322584,
"alnum_prop": 0.6696003462703795,
"repo_name": "cpina/science-cruise-data-management",
"id": "c7b26a020e4f1c113154ecbca9620ae4131e8e54",
"size": "6931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ScienceCruiseDataManagement/main/find_locations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "59966"
},
{
"name": "HTML",
"bytes": "50774"
},
{
"name": "JavaScript",
"bytes": "106205"
},
{
"name": "Python",
"bytes": "548151"
},
{
"name": "Shell",
"bytes": "106"
}
],
"symlink_target": ""
} |
"""
Mapping intervals between pairs of congruent segments
The IntervalMapper class is at the heart of mapping between aligned sequences. An instance
of :class:`uta.tools.intervalmapper.IntervalMapper` is constructed with an ordered list of
:class:`uta.tools.intervalmapper.IntervalPair` instances, each of which consists of two
:class:`uta.tools.intervalmapper.Interval` instances. The IntervalMapper class is unaware
of strand/orientation; that issue is handled by the
:class:`uta.tools.transcriptmapper.TranscriptMapper` class.
NOTE: Mapping at the boundaries around indels requires a choice. If seq B
has an insertion relative to seq A, then mapping coordinate at the
boundaries can either be minimal or maximal for both the start and
end. Consider this alignment::
0 15 20 35 50
|==========|====|==========|==========|
| | __/ __/| |
| |/ / | |
|==========|==========|====|==========|
0 15 30 35 50
15M 5D 15M 5I 15M
segment 1: [ 0,15] ~ [ 0,15]
segment 2: [15,20] ~ [15,15]
segment 3: [20,35] ~ [15,30]
segment 4: [35,35] ~ [30,35]
segment 5: [35,50] ~ [35,50]
and these intervals around reference position 35::
interval 1: 34,36 -> 29,36 (no ambiguity)
interval 2: 35,35 -> 30,35 (reasonable)
interval 3: 34,35 -> 29,30 (minimal) or 29,35 (maximal)
interval 4: 35,36 -> 35,36 (minimal) or 30,36 (maximal)
So, for interval 3, end_i=35 matches segment 3 and segment 4. Analogously
for interval 4, start_i=35 matches segment 4 and segment 5.
Currently, this code matches an interval <start_i,end_i> using the maximal
start_i and minimal end_i.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
import re
from six.moves import range
from hgvs.exceptions import HGVSInvalidIntervalError
_logger = logging.getLogger(__name__)
_logger.warning("This module is deprecated and will be removed in a future release")
# N.B. This Interval is internal to intervalmapper.py. It is *NOT* the
# same as the Interval defined in location.py.
class Interval(object):
"""Represents a segment of a sequence in interbase
coordinates (0-based, right-open).
"""
__slots__ = ("start_i", "end_i")
def __init__(self, start_i, end_i):
if not (start_i <= end_i):
raise HGVSInvalidIntervalError("start_i must be less than or equal to end_i")
self.start_i = start_i
self.end_i = end_i
@property
def len(self):
return self.end_i - self.start_i
def __repr__(self):
return "{self.__class__.__name__}(start_i={self.start_i},end_i={self.end_i})".format(self=self)
class IntervalPair(object):
"""Represents a match, insertion, or deletion segment of an
alignment. If a match, the lengths must be equal; if an insertion or
deletion, the length of the ref or tgt must be zero respectively."""
__slots__ = ("ref", "tgt")
def __init__(self, ref, tgt):
if not ((ref.len == tgt.len) or (ref.len == 0 and tgt.len != 0) or (ref.len != 0 and tgt.len == 0)):
raise HGVSInvalidIntervalError("IntervalPair doesn't represent a match, insertion, or deletion")
self.ref = ref
self.tgt = tgt
def __repr__(self):
return "{self.__class__.__name__}(ref={self.ref},tgt={self.tgt})".format(self=self)
class IntervalMapper(object):
"""Provides mapping between sequence coordinates according to an
ordered set of IntervalPairs."""
__slots__ = ("interval_pairs", "ref_intervals", "tgt_intervals", "ref_len", "tgt_len")
def __init__(self, interval_pairs):
"""
:param interval_pairs: an ordered list of IntervalPair instances
:type interval_pairs: list (of IntervalPair instances).
:returns: an IntervalMapper instance
"""
def _validate_intervals(ivs):
for i in range(1, len(ivs)):
# check for adjacency/non-overlap
# This constraint, combined with the start_i <= end_i constraint
# of Intervals, guarantees that intervals are ordered
assert ivs[i - 1].end_i == ivs[i].start_i, "intervals must be adjacent"
self.interval_pairs = interval_pairs
self.ref_intervals = [ip.ref for ip in self.interval_pairs]
self.tgt_intervals = [ip.tgt for ip in self.interval_pairs]
_validate_intervals(self.ref_intervals)
_validate_intervals(self.tgt_intervals)
self.ref_len = sum([iv.len for iv in self.ref_intervals])
self.tgt_len = sum([iv.len for iv in self.tgt_intervals])
@staticmethod
def from_cigar(cigar):
"""
:param cigar: a Compact Idiosyncratic Gapped Alignment Report string
:type cigar: str.
:returns: an IntervalMapper instance from the CIGAR string
"""
return IntervalMapper(cigar_to_intervalpairs(cigar))
def map_ref_to_tgt(self, start_i, end_i, max_extent=False):
return self._map(self.ref_intervals, self.tgt_intervals, start_i, end_i, max_extent)
def map_tgt_to_ref(self, start_i, end_i, max_extent=False):
return self._map(self.tgt_intervals, self.ref_intervals, start_i, end_i, max_extent)
@staticmethod
def _map(from_ivs, to_ivs, from_start_i, from_end_i, max_extent):
def iv_map(from_ivs, to_ivs, from_start_i, from_end_i, max_extent):
"""returns the <start,end> intervals indexes in which from_start_i and from_end_i occur"""
# first look for 0-width interval that matches
seil = [i for i, iv in enumerate(from_ivs) if iv.start_i == from_start_i and iv.end_i == from_end_i]
if len(seil) > 0:
si = ei = seil[0]
else:
sil = [i for i, iv in enumerate(from_ivs) if iv.start_i <= from_start_i <= iv.end_i]
eil = [i for i, iv in enumerate(from_ivs) if iv.start_i <= from_end_i <= iv.end_i]
if len(sil) == 0 or len(eil) == 0:
raise HGVSInvalidIntervalError("start or end or both are beyond the bounds of transcript record")
si, ei = (sil[0], eil[-1]) if max_extent else (sil[-1], eil[0])
return si, ei
def clip_to_iv(iv, pos):
return max(iv.start_i, min(iv.end_i, pos))
assert from_start_i <= from_end_i, "expected from_start_i <= from_end_i"
try:
si, ei = iv_map(from_ivs, to_ivs, from_start_i, from_end_i, max_extent)
except ValueError:
raise HGVSInvalidIntervalError("start_i,end_i interval out of bounds")
to_start_i = clip_to_iv(to_ivs[si], to_ivs[si].start_i + (from_start_i - from_ivs[si].start_i))
to_end_i = clip_to_iv(to_ivs[ei], to_ivs[ei].end_i - (from_ivs[ei].end_i - from_end_i))
return to_start_i, to_end_i
class CIGARElement(object):
"""represents elements of a CIGAR string and provides methods for
determining the number of ref and tgt bases consumed by the
operation"""
__slots__ = ("len", "op")
def __init__(self, len, op):
self.len = len
self.op = op
@property
def ref_len(self):
"""returns number of nt/aa consumed in reference sequence for this edit"""
return self.len if self.op in "=INX" else 0
@property
def tgt_len(self):
"""returns number of nt/aa consumed in target sequence for this edit"""
return self.len if self.op in "=DX" else 0
def cigar_to_intervalpairs(cigar):
"""For a given CIGAR string, return a list of (Interval,Interval)
pairs. The length of the returned list will be equal to the
number of CIGAR operations
"""
cigar_elem_re = re.compile(r"(?P<len>\d+)(?P<op>[=DIMNX])")
ces = [
CIGARElement(op=md["op"], len=int(md["len"])) for md in [m.groupdict() for m in cigar_elem_re.finditer(cigar)]
]
ips = [None] * len(ces)
ref_pos = tgt_pos = 0
for i, ce in enumerate(ces):
ips[i] = IntervalPair(Interval(ref_pos, ref_pos + ce.ref_len), Interval(tgt_pos, tgt_pos + ce.tgt_len))
ref_pos += ce.ref_len
tgt_pos += ce.tgt_len
return ips
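# Minimal usage sketch (added for illustration; not part of the original module). It
# assumes the CIGAR conventions implemented by CIGARElement above: "=" consumes both
# sequences, "I" consumes only the reference, and "D" consumes only the target.
if __name__ == "__main__":
    im = IntervalMapper.from_cigar("10=5D10=")
    assert (im.ref_len, im.tgt_len) == (20, 25)
    # Reference positions past the 5-base gap shift by 5 when mapped to the target.
    assert im.map_ref_to_tgt(12, 15) == (17, 20)
    # The zero-width reference interval at the gap expands to the full target gap.
    assert im.map_ref_to_tgt(10, 10) == (10, 15)
    assert im.map_tgt_to_ref(10, 15) == (10, 10)
    # Boundary mapping: minimal extent (default) vs. maximal extent across the gap.
    assert im.map_ref_to_tgt(9, 10) == (9, 10)
    assert im.map_ref_to_tgt(9, 10, max_extent=True) == (9, 15)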
# <LICENSE>
# Copyright 2018 HGVS Contributors (https://github.com/biocommons/hgvs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# </LICENSE>
| {
"content_hash": "eaea60c9488f9324be3f843a48f5190c",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 118,
"avg_line_length": 39.83111111111111,
"alnum_prop": 0.6226288774827048,
"repo_name": "biocommons/hgvs",
"id": "240e91c7f6cfd3bc7fb2687ab5666c00b4399317",
"size": "8986",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/hgvs/intervalmapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "212160"
},
{
"name": "Makefile",
"bytes": "9856"
},
{
"name": "Perl",
"bytes": "2704"
},
{
"name": "Python",
"bytes": "530044"
},
{
"name": "Shell",
"bytes": "2461"
}
],
"symlink_target": ""
} |
from .dates import *
from .structs import *
from .logging import *
from .stopwatch import *
from .growl import *
from .text import *
from .shell import *
from .network import *
from .reflection import Reflector
from fuze.utilities.clipboard import Clipboard | {
"content_hash": "8438e56a001da4aece74881046400984",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 46,
"avg_line_length": 25.7,
"alnum_prop": 0.77431906614786,
"repo_name": "MagicWishMonkey/fuze",
"id": "e1fd0f67d86f518d2a2b0aa46a457924d2a4fd59",
"size": "257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuze/utilities/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "312954"
},
{
"name": "Shell",
"bytes": "107"
}
],
"symlink_target": ""
} |
from berth.job.models import Job
from berth.job import constants
from berth.utils.test import TestCase
class TestJobNumbers(TestCase):
def test_job_number_increments(self):
project = self.project
job1 = Job.objects.create(
project=self.project,
state=constants.BUILD_STATUS_QUEUED,
)
job2 = Job.objects.create(
project=self.project,
state=constants.BUILD_STATUS_QUEUED,
)
job3 = Job.objects.create(
project=self.create_project(),
state=constants.BUILD_STATUS_QUEUED,
)
job4 = Job.objects.create(
project=self.project,
state=constants.BUILD_STATUS_QUEUED,
)
# I've hacked support for `insert...returning`
# into Django. If these are failing, it's broken
assert job1.number == 1
assert job2.number == 2
assert job3.number == 1
assert job4.number == 3
job1 = Job.objects.get(pk=job1.pk)
job2 = Job.objects.get(pk=job2.pk)
job3 = Job.objects.get(pk=job3.pk)
job4 = Job.objects.get(pk=job4.pk)
assert job1.number == 1
assert job2.number == 2
assert job3.number == 1
assert job4.number == 3
| {
"content_hash": "6eb26dd9747943d7e586607252bfc499",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 56,
"avg_line_length": 29.022727272727273,
"alnum_prop": 0.586530931871574,
"repo_name": "joealcorn/berth.cc",
"id": "1dfdb786b53c0eda4d680917e90be8dbd937cefc",
"size": "1277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "berth/tests/job/test_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1007"
},
{
"name": "Python",
"bytes": "42875"
}
],
"symlink_target": ""
} |
def remove_adjacent(nums):
uniq = []
for num in nums:
if num not in uniq:
uniq.append(num)
return uniq
# +++your code here+++
##return
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
list1.extend(list2)
list1.sort()
return list1
# +++your code here+++
##return
# Note: the solution above is kind of cute, but unfortunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
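# A possible sketch of the linear-time variant described above (illustrative only;
# the helper name below is not part of the original exercise): pop from the tails of
# both lists, building the merged result backwards, then reverse it once at the end.
# Like linear_merge, it may modify the passed-in lists.
def linear_merge_linear_time(list1, list2):
    result = []
    while list1 and list2:
        # Take the larger of the two tail elements so the backwards result stays sorted.
        if list1[-1] >= list2[-1]:
            result.append(list1.pop())
        else:
            result.append(list2.pop())
    # One list is exhausted; the remainder of the other is already sorted.
    result.extend(reversed(list1 or list2))
    result.reverse()
    return result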
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print(prefix, 'got:', repr(got), 'expected:', repr(expected))
# Calls the above functions with interesting inputs.
def main():
print('remove_adjacent')
test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
test(remove_adjacent([]), [])
print()
print('linear_merge')
test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
main()
| {
"content_hash": "00127803a9a8f99445959ec8dc184d3f",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 79,
"avg_line_length": 31.305084745762713,
"alnum_prop": 0.6253383865728208,
"repo_name": "apcrumbleton/google-python-exercises",
"id": "330ae06d16e393c43ba2511576273f66cfc9429f",
"size": "2325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basic/list2.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "HTML",
"bytes": "1295556"
},
{
"name": "Python",
"bytes": "99148"
}
],
"symlink_target": ""
} |
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'numpydoc',
]
# numpydoc setting
numpydoc_show_class_members = False
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
source_suffix = ['.rst']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'dodocs'
copyright = '2015, Francesco Montesano'
author = 'Francesco Montesano'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
import dodocs
# The short X.Y version.
version = '.'.join(dodocs.__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = dodocs.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
import alabaster
extensions.append('alabaster')
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"github_button": True,
"github_user": "montefra",
"github_repo": "dodocs",
"description": "Build documentation"}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'dodocsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'dodocs.tex', 'dodocs Documentation',
'Francesco Montesano', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dodocs', 'dodocs Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'dodocs', 'dodocs Documentation',
author, 'dodocs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/3': None}
| {
"content_hash": "baacdad428d7ba2d18dcc62edb8caae9",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 80,
"avg_line_length": 31.245283018867923,
"alnum_prop": 0.6970324361628709,
"repo_name": "montefra/dodocs",
"id": "7e1368588d106ef1d9e1b2c945e6b67dfb04ca92",
"size": "12034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63531"
}
],
"symlink_target": ""
} |
"""Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta
from abc import abstractmethod
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
import numbers
import numpy as np
from scipy import stats
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from time import time
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE
from ..tree._tree import TREE_LEAF
from ..utils import check_random_state
from ..utils import check_array
from ..utils import check_X_y
from ..utils import column_or_1d
from ..utils import check_consistent_length
from ..utils import deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit
from ..utils.fixes import bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted
from ..utils.validation import NotFittedError
from ..utils.multiclass import check_classification_targets
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
----------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,)
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=0.1
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
Quantile regression allows one to estimate the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
If the loss does not support probabilities, this raises an AttributeError.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
Our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
We take advantage of the fact that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
# weight the entire per-sample deviance term, not only its first part
return np.sum(sample_weight * (-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1)))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
verbose_mod is zero); if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.presort = presort
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
random_state, X_idx_sorted, X_csc=None, X_csr=None):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion='friedman_mse',
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
presort=self.presort)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
if X_csc is not None:
tree.fit(X_csc, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
else:
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
# update tree leaves
if X_csr is not None:
loss.update_terminal_regions(tree.tree_, X_csr, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
else:
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
(total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions.")
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
computing held-out estimates, early stopping, model introspection, and
snapshotting.
Returns
-------
self : object
Returns self.
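Examples
--------
A hedged, illustrative ``monitor`` callback (the helper name and the
stage threshold below are made up) that stops boosting once the
out-of-bag improvement turns negative; this assumes ``subsample < 1``
so that ``oob_improvement_`` is being tracked::

    def stop_when_oob_degrades(i, est, locals_):
        return i >= 10 and est.oob_improvement_[i] < 0

    # est.fit(X, y, monitor=stop_when_oob_degrades)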
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
X_idx_sorted = None
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if presort == 'auto' and issparse(X):
presort = False
elif presort == 'auto':
presort = True
if presort:
if issparse(X):
raise ValueError("Presorting is not supported for sparse matrices.")
else:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor, X_idx_sorted)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None, X_idx_sorted=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, random_state, X_idx_sorted,
X_csc, X_csr)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
self._check_initialized()
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in in each estimator.
In the case of binary classification n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
.. versionadded:: 0.17
*presort* parameter.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
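Examples
--------
A minimal illustrative fit/predict round trip on synthetic data (the
parameter values below are arbitrary, not recommendations)::

    >>> from sklearn.datasets import make_classification
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> X, y = make_classification(n_samples=100, random_state=0)
    >>> clf = GradientBoostingClassifier(n_estimators=50,
    ...                                  random_state=0).fit(X, y)
    >>> clf.predict(X[:2, :]).shape
    (2,)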
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False,
presort='auto'):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
presort=presort)
def _validate_y(self, y):
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : generator of array of shape = [n_samples, n_classes]
The predicted class probabilities of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
.. versionadded:: 0.17
optional parameter *presort*.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
presort=presort)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in in each estimator.
"""
leaves = super(GradientBoostingRegressor, self).apply(X)
leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
return leaves
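# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module): a minimal
# fit/predict walk-through for the two estimators defined above. The helper
# datasets (make_classification / make_regression) and every parameter value
# below are arbitrary choices for demonstration, not recommendations.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from sklearn.datasets import make_classification, make_regression

    # Classification: fit the ensemble, then inspect hard predictions and
    # class probabilities for a few samples.
    X_clf, y_clf = make_classification(n_samples=200, n_features=10,
                                       random_state=0)
    clf = GradientBoostingClassifier(n_estimators=50, learning_rate=0.1,
                                     max_depth=3, random_state=0)
    clf.fit(X_clf, y_clf)
    print(clf.predict(X_clf[:5]))
    print(clf.predict_proba(X_clf[:5]))

    # Regression: staged_predict yields one prediction array per boosting
    # stage, which is handy for monitoring error as the ensemble grows.
    X_reg, y_reg = make_regression(n_samples=200, n_features=10,
                                   random_state=0)
    reg = GradientBoostingRegressor(n_estimators=50, loss='ls', random_state=0)
    reg.fit(X_reg, y_reg)
    staged = list(reg.staged_predict(X_reg[:5]))
    print(staged[-1])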
| {
"content_hash": "083bdd75b25ddae8c4fff8d4ffaccfd1",
"timestamp": "",
"source": "github",
"line_count": 1824,
"max_line_length": 91,
"avg_line_length": 38.422697368421055,
"alnum_prop": 0.5829944494385229,
"repo_name": "ryfeus/lambda-packs",
"id": "ff324e286fa73b06aabb47140451c9e62acc0410",
"size": "70083",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Sklearn_scipy_numpy/source/sklearn/ensemble/gradient_boosting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_raw_fpu_cvt_paired(BaseBERITestCase):
@attr('floatpaired')
def test_convert_ps_to_s(self):
'''Test we can convert paired single to single'''
self.assertRegisterEqual(self.MIPS.a0, 0x33333333, "Didn't extract lower single.")
self.assertRegisterEqual(self.MIPS.a1, 0xDDDDDDDD, "Didn't extract upper single.")
| {
"content_hash": "1d6e0d7a2df2e4662cac70e30fbe4aa0",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 90,
"avg_line_length": 43.3,
"alnum_prop": 0.7297921478060047,
"repo_name": "8l/beri",
"id": "aabee0dfd2e6d9cf8743c9ec69f36a891835f21f",
"size": "1752",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cheritest/trunk/tests/fpu/test_raw_fpu_cvt_paired.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1629022"
},
{
"name": "Bluespec",
"bytes": "2336405"
},
{
"name": "C",
"bytes": "1058899"
},
{
"name": "C++",
"bytes": "1864"
},
{
"name": "Groff",
"bytes": "14381"
},
{
"name": "Haskell",
"bytes": "11711"
},
{
"name": "Lex",
"bytes": "2894"
},
{
"name": "Makefile",
"bytes": "242450"
},
{
"name": "Mathematica",
"bytes": "291"
},
{
"name": "Objective-C",
"bytes": "2387"
},
{
"name": "OpenEdge ABL",
"bytes": "568"
},
{
"name": "Perl",
"bytes": "19159"
},
{
"name": "Python",
"bytes": "1491002"
},
{
"name": "Shell",
"bytes": "91130"
},
{
"name": "SystemVerilog",
"bytes": "12058"
},
{
"name": "Tcl",
"bytes": "132818"
},
{
"name": "TeX",
"bytes": "4996"
},
{
"name": "Verilog",
"bytes": "125674"
},
{
"name": "Yacc",
"bytes": "5871"
}
],
"symlink_target": ""
} |
print 'I could have code like this.' # and the comment after is ignored
# You can also use a comment to "disable" or comment out a piece of code:
# print ("This won't run")
print 'This will run.'
| {
"content_hash": "b0f5060f5704ce11cd2e3db4fa8c005b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 73,
"avg_line_length": 29.428571428571427,
"alnum_prop": 0.6796116504854369,
"repo_name": "Herne/pythonplayground",
"id": "d578160a63f2920d8dc09e96b9b25fad4d1a57f3",
"size": "309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LPTHW_ex2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55091"
}
],
"symlink_target": ""
} |
from ..structure import *
from ..utils import *
class GibbsAll(MCMC):
def __init__(self, problem, verbose_int = 100, N = 1000, T = 10000, record_start = 3000):
MCMC.__init__(self, problem, "Gibbs (All)", verbose_int, N, T, record_start)
def particle_to_tuple(self, p):
return p
def init_particle(self):
return tuple((np.random.choice(self.problem.net[rv].values) for rv in self.problem.rvs))
def update_particle(self, particle):
d = self.tuple_to_dict(particle)
for rv in self.problem.rvs:
d[rv] = self.problem.net[rv].sample_blanket(d)
return self.dict_to_tuple(d)
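# Illustrative usage sketch (assumed interface, inferred from the calls made
# above rather than documented in this file): a concrete `problem` is expected
# to expose `rvs` plus a `net` whose nodes provide `values` and
# `sample_blanket`, and the MCMC base class is assumed to drive the
# init_particle / update_particle loop. The driver call below is hypothetical.
#
#   sampler = GibbsAll(problem, verbose_int=100, N=1000, T=10000,
#                      record_start=3000)
#   sampler.run()  # hypothetical MCMC driver method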
| {
"content_hash": "0ee148d8957dd8e08028eea6b6c56d61",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 96,
"avg_line_length": 34.05263157894737,
"alnum_prop": 0.624420401854714,
"repo_name": "SsnL/amcmc",
"id": "a8fe2cb901963a709e25a297771b0c0fc58b93a8",
"size": "647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inference/gibbs_all.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53129"
}
],
"symlink_target": ""
} |
import os
import re
import sys
import unittest
from importlib import import_module
import settings
from base import Schema
from parser import PlSqlParser
from dbgate import OraConnection
sys.path.append(settings.PLSQL_SCHEMA_ROOT)
TEST_PACKAGE = 'plsqlparsertestpackage'
class TestParser(unittest.TestCase):
"""
Class implements tests for PLSQL parser
"""
def setUp(self):
self.fixtures = {}
for item in os.listdir(os.path.dirname(__file__) + '/fixture'):
match = re.match('(\w+)\.pkg', item)
if match:
f = open(os.path.dirname(__file__) + '/fixture/' + item)
self.fixtures[match.group(1)] = f.read()
f.close()
self.parser = PlSqlParser()
def test_parse_plsqlparsertestpackage(self):
"""
Tests to parse PL/SQL package specification
"""
functions, procedures, constants = self.parser.get_package_members(
self.fixtures[TEST_PACKAGE]
)
self.assertEquals(len(functions), 12)
self.assertEquals(len(procedures), 1)
self.assertEquals(len(constants), 3)
class TestCreator(unittest.TestCase):
"""
Class implements test to call functions and procedures
"""
def setUp(self):
self.connection = OraConnection.connect(
settings.ORA_SID,
settings.ORA_SCHEMA,
settings.ORA_PASSWORD
)
self._create_package()
# dump packages
self.schema = Schema(self.connection)
self.schema.generate_packages()
# import packages modules to runtime
module = import_module(settings.ORA_SCHEMA + '.' + TEST_PACKAGE)
self.package = getattr(module, TEST_PACKAGE)
self.package.connection = self.connection
def _create_package(self):
"""
Function to create test package in the schema
"""
f = open(os.path.dirname(__file__) + '/fixture/' + TEST_PACKAGE + '.pkg')
package_spec = f.read()
f.close()
f = open(os.path.dirname(__file__) + '/fixture/' + TEST_PACKAGE + '_body.pkg')
package_body = f.read()
f.close()
cursor = self.connection.cursor()
cursor.execute(package_spec)
cursor.execute(package_body)
def test_package_Empty_Arguments_Return_Clob(self):
result = self.package.Empty_Arguments_Return_Clob()
self.assertEqual(result, self.package.GC_CLOB_FOR_RETURN)
def test_package_Empty_Arguments_Return_Number(self):
result = self.package.Empty_Arguments_Return_Number()
self.assertEqual(result, self.package.GC_NUMBER_FOR_RETURN)
def test_package_Empty_Arguments_Return_String(self):
result = self.package.Empty_Arguments_Return_String()
self.assertEqual(result, self.package.GC_VARCHAR2_FOR_RETURN)
def test_package_Empty_Arguments_Return_Cursor(self):
cursor = self.package.Empty_Arguments_Return_Cursor()
row = cursor.next()
self.assertIsNotNone(row)
self.assertEqual(row['val_number'], self.package.GC_NUMBER_FOR_RETURN)
self.assertEqual(row['val_varchar'], self.package.GC_VARCHAR2_FOR_RETURN)
def test_package_Return_Empty_Cursor(self):
cursor = self.package.Return_Empty_Cursor()
row = cursor.next()
self.assertIsNone(row)
def test_package_Return_Big_Cursor(self):
cursor = self.package.Return_Big_Cursor()
row = cursor.next()
self.assertIsNotNone(row)
while row:
self.assertEqual(row['val_number'], self.package.GC_NUMBER_FOR_RETURN)
self.assertEqual(row['val_varchar'], self.package.GC_VARCHAR2_FOR_RETURN)
row = cursor.next()
def test_package_In_Number_Return_Number(self):
result = self.package.In_Number_Return_Number(1)
self.assertEqual(result, 1)
def test_package_In_Varchar2_Return_Varchar2(self):
result = self.package.In_Varchar2_Return_Varchar2('test')
self.assertEqual(result, 'test')
def test_package_In_Complex_Return_Cursor(self):
result = self.package.In_Complex_Return_Cursor(1, 'test', 'test')
row = result.next()
self.assertEqual(row['val_number'], 1)
self.assertEqual(row['val_varchar'], 'test')
self.assertEqual(row['val_clob'], 'test')
def test_package_Out_Number_Return_Number(self):
o_number, o_varchar2, o_clob, o_cursor = None, None, None, None
result, o_number = self.package.Out_Number_Return_Number(o_number)
self.assertEqual(result, self.package.GC_NUMBER_FOR_RETURN)
self.assertEqual(o_number, self.package.GC_NUMBER_FOR_RETURN)
def test_package_Out_Arguments_Return_Cursor(self):
o_number, o_varchar2, o_clob, o_cursor = None, None, None, None
result, o_number, o_varchar2, o_clob, o_cursor = \
self.package.Out_Arguments_Return_Cursor(o_number, o_varchar2, o_clob, o_cursor)
self.assertEqual(o_number, self.package.GC_NUMBER_FOR_RETURN)
self.assertEqual(o_varchar2, self.package.GC_VARCHAR2_FOR_RETURN)
self.assertEqual(o_clob, self.package.GC_CLOB_FOR_RETURN)
row = o_cursor.next()
self.assertEqual(row['val_number'], self.package.GC_NUMBER_FOR_RETURN)
@unittest.case.skip("Don't know how to bind in out varchar2")
def test_package_In_Out_Arguments_Return_Cursor(self):
o_number, o_varchar2, o_clob, o_cursor = None, None, None, None
result, o_number, o_varchar2, o_clob =\
self.package.In_Out_Arguments_Return_Cursor(1, 'a', 'a')
self.assertEqual(o_number, 1 + self.package.GC_NUMBER_FOR_RETURN)
self.assertEqual(o_varchar2, 'a' + self.package.GC_VARCHAR2_FOR_RETURN)
self.assertEqual(o_clob, 'a' + self.package.GC_CLOB_FOR_RETURN)
def test_procedure_In_Args_Out_Cursor(self):
o_cursor = self.package.In_Args_Out_Cursor(1, 'a', 'b')
row = o_cursor.next()
self.assertIsNotNone(row)
if __name__ == "__main__":
unittest.main() | {
"content_hash": "8a346266078d6179387c65a39b96028e",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 92,
"avg_line_length": 32.54010695187166,
"alnum_prop": 0.637633525061627,
"repo_name": "mshogin/django-plsql",
"id": "cad9cfbd0c15ca726ac4d826c18a9b3595902338",
"size": "6085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plsql/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28174"
}
],
"symlink_target": ""
} |
""" Cisco_IOS_XR_subscriber_ipsub_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR subscriber\-ipsub package operational data.
This module contains definitions
for the following management objects\:
ip\-subscriber\: IP subscriber operational data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class IpsubMaIntfInitiatorDataEnum(Enum):
"""
IpsubMaIntfInitiatorDataEnum
Ipsub ma intf initiator data
.. data:: dhcp = 0
Session creation via DHCP discover packet
.. data:: packet_trigger = 1
Session creation via unclassified IPv4 packet
.. data:: invalid_trigger = 2
Invalid Trigger
"""
dhcp = 0
packet_trigger = 1
invalid_trigger = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpsubMaIntfInitiatorDataEnum']
class IpsubMaIntfStateDataEnum(Enum):
"""
IpsubMaIntfStateDataEnum
Interface states
.. data:: invalid = 0
Invalid state
.. data:: initialized = 1
Initial state
.. data:: session_creation_started = 2
Interface creation started
.. data:: control_policy_executing = 3
Interface created in IM, AAA session start
called
.. data:: control_policy_executed = 4
AAA session created
.. data:: session_features_applied = 5
Interface config activated
.. data:: vrf_configured = 6
Interface address and VRF information received
from IPv4
.. data:: adding_adjacency = 7
VRF configuration received and interface config
activated
.. data:: adjacency_added = 8
Subscriber AIB adjacency added
.. data:: up = 9
Session up
.. data:: down = 10
Session down
.. data:: address_family_down = 11
Session down in progress
.. data:: address_family_down_complete = 12
Session down complete
.. data:: disconnecting = 13
Session teardown in progress
.. data:: disconnected = 14
Session disconnected
.. data:: error = 15
Session in error state
"""
invalid = 0
initialized = 1
session_creation_started = 2
control_policy_executing = 3
control_policy_executed = 4
session_features_applied = 5
vrf_configured = 6
adding_adjacency = 7
adjacency_added = 8
up = 9
down = 10
address_family_down = 11
address_family_down_complete = 12
disconnecting = 13
disconnected = 14
error = 15
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpsubMaIntfStateDataEnum']
class IpsubMaParentIntfStateDataEnum(Enum):
"""
IpsubMaParentIntfStateDataEnum
Parent interface state
.. data:: deleted = 0
Interface being deleted
.. data:: down = 1
Interface operationally down
.. data:: up = 2
Interface up
"""
deleted = 0
down = 1
up = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpsubMaParentIntfStateDataEnum']
class IpsubMaParentIntfVlanEnum(Enum):
"""
IpsubMaParentIntfVlanEnum
Access interface VLAN type
.. data:: plain = 0
Plain
.. data:: ambiguous = 1
Ambiguous
"""
plain = 0
ambiguous = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpsubMaParentIntfVlanEnum']
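# Illustrative usage sketch (not part of the generated model): with ydk-py,
# this operational data is normally retrieved through a service provider and
# the CRUD service. The device address and credentials below are placeholders,
# and error handling is omitted.
#
#   from ydk.services import CRUDService
#   from ydk.providers import NetconfServiceProvider
#
#   provider = NetconfServiceProvider(address='10.0.0.1', port=830,
#                                     username='admin', password='admin')
#   crud = CRUDService()
#   ip_sub = crud.read(provider, IpSubscriber())  # IpSubscriber is defined below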
class IpSubscriber(object):
"""
IP subscriber operational data
.. attribute:: nodes
IP subscriber operational data for a particular location
**type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.nodes = IpSubscriber.Nodes()
self.nodes.parent = self
class Nodes(object):
"""
IP subscriber operational data for a particular
location
.. attribute:: node
Location. For eg., 0/1/CPU0
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.node = YList()
self.node.parent = self
self.node.name = 'node'
class Node(object):
"""
Location. For eg., 0/1/CPU0
.. attribute:: node_name <key>
The node ID to filter on. For eg., 0/1/CPU0
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: access_interfaces
IP subscriber access interface table
**type**\: :py:class:`AccessInterfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.AccessInterfaces>`
.. attribute:: interfaces
IP subscriber interface table
**type**\: :py:class:`Interfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Interfaces>`
.. attribute:: summary
IP subscriber interface summary
**type**\: :py:class:`Summary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.node_name = None
self.access_interfaces = IpSubscriber.Nodes.Node.AccessInterfaces()
self.access_interfaces.parent = self
self.interfaces = IpSubscriber.Nodes.Node.Interfaces()
self.interfaces.parent = self
self.summary = IpSubscriber.Nodes.Node.Summary()
self.summary.parent = self
class Summary(object):
"""
IP subscriber interface summary
.. attribute:: access_interface_summary
Access interface summary statistics
**type**\: :py:class:`AccessInterfaceSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary>`
.. attribute:: interface_counts
Initiator interface counts
**type**\: :py:class:`InterfaceCounts <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary.InterfaceCounts>`
.. attribute:: vrf
Array of VRFs with IPSUB interfaces
**type**\: list of :py:class:`Vrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary.Vrf>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_interface_summary = IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary()
self.access_interface_summary.parent = self
self.interface_counts = IpSubscriber.Nodes.Node.Summary.InterfaceCounts()
self.interface_counts.parent = self
self.vrf = YList()
self.vrf.parent = self
self.vrf.name = 'vrf'
class AccessInterfaceSummary(object):
"""
Access interface summary statistics
.. attribute:: initiators
Summary counts per initiator
**type**\: :py:class:`Initiators <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators>`
.. attribute:: interfaces
Number of interfaces with subscriber configuration
**type**\: int
**range:** 0..4294967295
.. attribute:: ipv6_initiators
Summary counts per initiator for ipv6 session
**type**\: :py:class:`Ipv6Initiators <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.initiators = IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators()
self.initiators.parent = self
self.interfaces = None
self.ipv6_initiators = IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators()
self.ipv6_initiators.parent = self
class Initiators(object):
"""
Summary counts per initiator
.. attribute:: dhcp
DHCP summary statistics
**type**\: :py:class:`Dhcp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators.Dhcp>`
.. attribute:: packet_trigger
Packet trigger summary statistics
**type**\: :py:class:`PacketTrigger <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators.PacketTrigger>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.dhcp = IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators.Dhcp()
self.dhcp.parent = self
self.packet_trigger = IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators.PacketTrigger()
self.packet_trigger.parent = self
class Dhcp(object):
"""
DHCP summary statistics
.. attribute:: fsol_bytes
Number of first sign of life bytes received for initiating protocol
**type**\: int
**range:** 0..4294967295
**units**\: byte
.. attribute:: fsol_packets
Number of first sign of life packets received for initiating protocol
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.fsol_bytes = None
self.fsol_packets = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:dhcp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.fsol_bytes is not None:
return True
if self.fsol_packets is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators.Dhcp']['meta_info']
class PacketTrigger(object):
"""
Packet trigger summary statistics
.. attribute:: fsol_bytes
Number of first sign of life bytes received for initiating protocol
**type**\: int
**range:** 0..4294967295
**units**\: byte
.. attribute:: fsol_packets
Number of first sign of life packets received for initiating protocol
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.fsol_bytes = None
self.fsol_packets = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:packet-trigger'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.fsol_bytes is not None:
return True
if self.fsol_packets is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators.PacketTrigger']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:initiators'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.dhcp is not None and self.dhcp._has_data():
return True
if self.packet_trigger is not None and self.packet_trigger._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators']['meta_info']
class Ipv6Initiators(object):
"""
Summary counts per initiator for ipv6 session
.. attribute:: dhcp
DHCP summary statistics
**type**\: :py:class:`Dhcp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators.Dhcp>`
.. attribute:: packet_trigger
Packet trigger summary statistics
**type**\: :py:class:`PacketTrigger <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators.PacketTrigger>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.dhcp = IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators.Dhcp()
self.dhcp.parent = self
self.packet_trigger = IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators.PacketTrigger()
self.packet_trigger.parent = self
class Dhcp(object):
"""
DHCP summary statistics
.. attribute:: fsol_bytes
Number of first sign of life bytes received for initiating protocol
**type**\: int
**range:** 0..4294967295
**units**\: byte
.. attribute:: fsol_packets
Number of first sign of life packets received for initiating protocol
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.fsol_bytes = None
self.fsol_packets = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:dhcp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.fsol_bytes is not None:
return True
if self.fsol_packets is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators.Dhcp']['meta_info']
class PacketTrigger(object):
"""
Packet trigger summary statistics
.. attribute:: fsol_bytes
Number of first sign of life bytes received for initiating protocol
**type**\: int
**range:** 0..4294967295
**units**\: byte
.. attribute:: fsol_packets
Number of first sign of life packets received for initiating protocol
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.fsol_bytes = None
self.fsol_packets = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:packet-trigger'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.fsol_bytes is not None:
return True
if self.fsol_packets is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators.PacketTrigger']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:ipv6-initiators'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.dhcp is not None and self.dhcp._has_data():
return True
if self.packet_trigger is not None and self.packet_trigger._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:access-interface-summary'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.initiators is not None and self.initiators._has_data():
return True
if self.interfaces is not None:
return True
if self.ipv6_initiators is not None and self.ipv6_initiators._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary']['meta_info']
class InterfaceCounts(object):
"""
Initiator interface counts
.. attribute:: initiators
Initiators
**type**\: :py:class:`Initiators <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators>`
.. attribute:: ipv6_initiators
IPv6 Initiators
**type**\: :py:class:`Ipv6Initiators <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.initiators = IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators()
self.initiators.parent = self
self.ipv6_initiators = IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators()
self.ipv6_initiators.parent = self
class Initiators(object):
"""
Initiators
.. attribute:: dhcp
DHCP
**type**\: :py:class:`Dhcp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators.Dhcp>`
.. attribute:: packet_trigger
Packet trigger
**type**\: :py:class:`PacketTrigger <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators.PacketTrigger>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.dhcp = IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators.Dhcp()
self.dhcp.parent = self
self.packet_trigger = IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators.PacketTrigger()
self.packet_trigger.parent = self
class Dhcp(object):
"""
DHCP
.. attribute:: adding_adjacency
Adding adjacency
**type**\: int
**range:** 0..4294967295
.. attribute:: adjacency_added
Adjacency added
**type**\: int
**range:** 0..4294967295
.. attribute:: control_policy_executed
Control policy executed
**type**\: int
**range:** 0..4294967295
.. attribute:: control_policy_executing
Control policy executing
**type**\: int
**range:** 0..4294967295
.. attribute:: disconnected
Disconnected
**type**\: int
**range:** 0..4294967295
.. attribute:: disconnecting
Disconnecting
**type**\: int
**range:** 0..4294967295
.. attribute:: down
Down
**type**\: int
**range:** 0..4294967295
.. attribute:: error
Error
**type**\: int
**range:** 0..4294967295
.. attribute:: initialized
Initialized
**type**\: int
**range:** 0..4294967295
.. attribute:: invalid
Invalid
**type**\: int
**range:** 0..4294967295
.. attribute:: session_creation_started
Session creation started
**type**\: int
**range:** 0..4294967295
.. attribute:: session_features_applied
Session features applied
**type**\: int
**range:** 0..4294967295
.. attribute:: total_interfaces
Total number of interfaces in all states
**type**\: int
**range:** 0..4294967295
.. attribute:: up
Up
**type**\: int
**range:** 0..4294967295
.. attribute:: vrf_configured
VRF configured
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.adding_adjacency = None
self.adjacency_added = None
self.control_policy_executed = None
self.control_policy_executing = None
self.disconnected = None
self.disconnecting = None
self.down = None
self.error = None
self.initialized = None
self.invalid = None
self.session_creation_started = None
self.session_features_applied = None
self.total_interfaces = None
self.up = None
self.vrf_configured = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:dhcp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.adding_adjacency is not None:
return True
if self.adjacency_added is not None:
return True
if self.control_policy_executed is not None:
return True
if self.control_policy_executing is not None:
return True
if self.disconnected is not None:
return True
if self.disconnecting is not None:
return True
if self.down is not None:
return True
if self.error is not None:
return True
if self.initialized is not None:
return True
if self.invalid is not None:
return True
if self.session_creation_started is not None:
return True
if self.session_features_applied is not None:
return True
if self.total_interfaces is not None:
return True
if self.up is not None:
return True
if self.vrf_configured is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators.Dhcp']['meta_info']
class PacketTrigger(object):
"""
Packet trigger
.. attribute:: adding_adjacency
Adding adjacency
**type**\: int
**range:** 0..4294967295
.. attribute:: adjacency_added
Adjacency added
**type**\: int
**range:** 0..4294967295
.. attribute:: control_policy_executed
Control policy executed
**type**\: int
**range:** 0..4294967295
.. attribute:: control_policy_executing
Control policy executing
**type**\: int
**range:** 0..4294967295
.. attribute:: disconnected
Disconnected
**type**\: int
**range:** 0..4294967295
.. attribute:: disconnecting
Disconnecting
**type**\: int
**range:** 0..4294967295
.. attribute:: down
Down
**type**\: int
**range:** 0..4294967295
.. attribute:: error
Error
**type**\: int
**range:** 0..4294967295
.. attribute:: initialized
Initialized
**type**\: int
**range:** 0..4294967295
.. attribute:: invalid
Invalid
**type**\: int
**range:** 0..4294967295
.. attribute:: session_creation_started
Session creation started
**type**\: int
**range:** 0..4294967295
.. attribute:: session_features_applied
Session features applied
**type**\: int
**range:** 0..4294967295
.. attribute:: total_interfaces
Total number of interfaces in all states
**type**\: int
**range:** 0..4294967295
.. attribute:: up
Up
**type**\: int
**range:** 0..4294967295
.. attribute:: vrf_configured
VRF configured
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.adding_adjacency = None
self.adjacency_added = None
self.control_policy_executed = None
self.control_policy_executing = None
self.disconnected = None
self.disconnecting = None
self.down = None
self.error = None
self.initialized = None
self.invalid = None
self.session_creation_started = None
self.session_features_applied = None
self.total_interfaces = None
self.up = None
self.vrf_configured = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:packet-trigger'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.adding_adjacency is not None:
return True
if self.adjacency_added is not None:
return True
if self.control_policy_executed is not None:
return True
if self.control_policy_executing is not None:
return True
if self.disconnected is not None:
return True
if self.disconnecting is not None:
return True
if self.down is not None:
return True
if self.error is not None:
return True
if self.initialized is not None:
return True
if self.invalid is not None:
return True
if self.session_creation_started is not None:
return True
if self.session_features_applied is not None:
return True
if self.total_interfaces is not None:
return True
if self.up is not None:
return True
if self.vrf_configured is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators.PacketTrigger']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:initiators'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.dhcp is not None and self.dhcp._has_data():
return True
if self.packet_trigger is not None and self.packet_trigger._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators']['meta_info']
class Ipv6Initiators(object):
"""
IPv6 Initiators
.. attribute:: dhcp
DHCP
**type**\: :py:class:`Dhcp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators.Dhcp>`
.. attribute:: packet_trigger
Packet trigger
**type**\: :py:class:`PacketTrigger <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators.PacketTrigger>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.dhcp = IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators.Dhcp()
self.dhcp.parent = self
self.packet_trigger = IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators.PacketTrigger()
self.packet_trigger.parent = self
class Dhcp(object):
"""
DHCP
.. attribute:: adding_adjacency
Adding adjacency
**type**\: int
**range:** 0..4294967295
.. attribute:: adjacency_added
Adjacency added
**type**\: int
**range:** 0..4294967295
.. attribute:: control_policy_executed
Control policy executed
**type**\: int
**range:** 0..4294967295
.. attribute:: control_policy_executing
Control policy executing
**type**\: int
**range:** 0..4294967295
.. attribute:: disconnected
Disconnected
**type**\: int
**range:** 0..4294967295
.. attribute:: disconnecting
Disconnecting
**type**\: int
**range:** 0..4294967295
.. attribute:: down
Down
**type**\: int
**range:** 0..4294967295
.. attribute:: error
Error
**type**\: int
**range:** 0..4294967295
.. attribute:: initialized
Initialized
**type**\: int
**range:** 0..4294967295
.. attribute:: invalid
Invalid
**type**\: int
**range:** 0..4294967295
.. attribute:: session_creation_started
Session creation started
**type**\: int
**range:** 0..4294967295
.. attribute:: session_features_applied
Session features applied
**type**\: int
**range:** 0..4294967295
.. attribute:: total_interfaces
Total number of interfaces in all states
**type**\: int
**range:** 0..4294967295
.. attribute:: up
Up
**type**\: int
**range:** 0..4294967295
.. attribute:: vrf_configured
VRF configured
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.adding_adjacency = None
self.adjacency_added = None
self.control_policy_executed = None
self.control_policy_executing = None
self.disconnected = None
self.disconnecting = None
self.down = None
self.error = None
self.initialized = None
self.invalid = None
self.session_creation_started = None
self.session_features_applied = None
self.total_interfaces = None
self.up = None
self.vrf_configured = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:dhcp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.adding_adjacency is not None:
return True
if self.adjacency_added is not None:
return True
if self.control_policy_executed is not None:
return True
if self.control_policy_executing is not None:
return True
if self.disconnected is not None:
return True
if self.disconnecting is not None:
return True
if self.down is not None:
return True
if self.error is not None:
return True
if self.initialized is not None:
return True
if self.invalid is not None:
return True
if self.session_creation_started is not None:
return True
if self.session_features_applied is not None:
return True
if self.total_interfaces is not None:
return True
if self.up is not None:
return True
if self.vrf_configured is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators.Dhcp']['meta_info']
class PacketTrigger(object):
"""
Packet trigger
.. attribute:: adding_adjacency
Adding adjacency
**type**\: int
**range:** 0..4294967295
.. attribute:: adjacency_added
Adjacency added
**type**\: int
**range:** 0..4294967295
.. attribute:: control_policy_executed
Control policy executed
**type**\: int
**range:** 0..4294967295
.. attribute:: control_policy_executing
Control policy executing
**type**\: int
**range:** 0..4294967295
.. attribute:: disconnected
Disconnected
**type**\: int
**range:** 0..4294967295
.. attribute:: disconnecting
Disconnecting
**type**\: int
**range:** 0..4294967295
.. attribute:: down
Down
**type**\: int
**range:** 0..4294967295
.. attribute:: error
Error
**type**\: int
**range:** 0..4294967295
.. attribute:: initialized
Initialized
**type**\: int
**range:** 0..4294967295
.. attribute:: invalid
Invalid
**type**\: int
**range:** 0..4294967295
.. attribute:: session_creation_started
Session creation started
**type**\: int
**range:** 0..4294967295
.. attribute:: session_features_applied
Session features applied
**type**\: int
**range:** 0..4294967295
.. attribute:: total_interfaces
Total number of interfaces in all states
**type**\: int
**range:** 0..4294967295
.. attribute:: up
Up
**type**\: int
**range:** 0..4294967295
.. attribute:: vrf_configured
VRF configured
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.adding_adjacency = None
self.adjacency_added = None
self.control_policy_executed = None
self.control_policy_executing = None
self.disconnected = None
self.disconnecting = None
self.down = None
self.error = None
self.initialized = None
self.invalid = None
self.session_creation_started = None
self.session_features_applied = None
self.total_interfaces = None
self.up = None
self.vrf_configured = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:packet-trigger'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.adding_adjacency is not None:
return True
if self.adjacency_added is not None:
return True
if self.control_policy_executed is not None:
return True
if self.control_policy_executing is not None:
return True
if self.disconnected is not None:
return True
if self.disconnecting is not None:
return True
if self.down is not None:
return True
if self.error is not None:
return True
if self.initialized is not None:
return True
if self.invalid is not None:
return True
if self.session_creation_started is not None:
return True
if self.session_features_applied is not None:
return True
if self.total_interfaces is not None:
return True
if self.up is not None:
return True
if self.vrf_configured is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators.PacketTrigger']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:ipv6-initiators'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.dhcp is not None and self.dhcp._has_data():
return True
if self.packet_trigger is not None and self.packet_trigger._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:interface-counts'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.initiators is not None and self.initiators._has_data():
return True
if self.ipv6_initiators is not None and self.ipv6_initiators._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary.InterfaceCounts']['meta_info']
class Vrf(object):
"""
Array of VRFs with IPSUB interfaces
.. attribute:: interfaces
Number of IP subscriber interfaces in the VRF table
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: ipv6_interfaces
Number of IPv6 subscriber interfaces in the VRF table
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: ipv6vrf_name
IPv6 VRF
**type**\: str
.. attribute:: vrf_name
IPv4 VRF
**type**\: str
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interfaces = None
self.ipv6_interfaces = None
self.ipv6vrf_name = None
self.vrf_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:vrf'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.interfaces is not None:
return True
if self.ipv6_interfaces is not None:
return True
if self.ipv6vrf_name is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary.Vrf']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:summary'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.access_interface_summary is not None and self.access_interface_summary._has_data():
return True
if self.interface_counts is not None and self.interface_counts._has_data():
return True
if self.vrf is not None:
for child_ref in self.vrf:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Summary']['meta_info']
class Interfaces(object):
"""
IP subscriber interface table
.. attribute:: interface
IP subscriber interface entry
**type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Interfaces.Interface>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface = YList()
self.interface.parent = self
self.interface.name = 'interface'
class Interface(object):
"""
IP subscriber interface entry
.. attribute:: interface_name <key>
Interface name
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: access_interface
Access interface through which this subscriber is accessible
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: age
Age in hh\:mm\:ss format
**type**\: str
.. attribute:: current_change_age
Current change age in hh\:mm\:ss format
**type**\: str
.. attribute:: initiator
Protocol trigger for creation of this subscriber session
**type**\: :py:class:`IpsubMaIntfInitiatorDataEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpsubMaIntfInitiatorDataEnum>`
.. attribute:: interface_creation_time
Interface creation time in month day hh\:mm\:ss format
**type**\: str
.. attribute:: ipv6_current_change_age
IPV6 Current change age in hh\:mm\:ss format
**type**\: str
.. attribute:: ipv6_initiator
Protocol trigger for creation of this subscriber's IPv6 session
**type**\: :py:class:`IpsubMaIntfInitiatorDataEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpsubMaIntfInitiatorDataEnum>`
.. attribute:: ipv6_last_state_change_time
Interface's IPV6 last state change time in month day hh\:mm\:ss format
**type**\: str
.. attribute:: ipv6_old_state
Previous state of the subscriber's IPv6 session
**type**\: :py:class:`IpsubMaIntfStateDataEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpsubMaIntfStateDataEnum>`
.. attribute:: ipv6_state
State of the subscriber's IPv6 session
**type**\: :py:class:`IpsubMaIntfStateDataEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpsubMaIntfStateDataEnum>`
.. attribute:: ipv6vrf
IPv6 VRF details
**type**\: :py:class:`Ipv6Vrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Interfaces.Interface.Ipv6Vrf>`
.. attribute:: is_l2_connected
True if L2 connected
**type**\: bool
.. attribute:: last_state_change_time
Interface's last state change time in month day hh\:mm\:ss format
**type**\: str
.. attribute:: old_state
Previous state of the subscriber session
**type**\: :py:class:`IpsubMaIntfStateDataEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpsubMaIntfStateDataEnum>`
.. attribute:: session_type
Session Type
**type**\: str
.. attribute:: state
State of the subscriber session
**type**\: :py:class:`IpsubMaIntfStateDataEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpsubMaIntfStateDataEnum>`
.. attribute:: subscriber_ipv4_address
IPv4 Address of the subscriber
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: subscriber_ipv6_address
IPv6 Address of the subscriber
**type**\: str
.. attribute:: subscriber_label
Subscriber label for this subscriber interface
**type**\: int
**range:** 0..4294967295
.. attribute:: subscriber_mac_addres
MAC address of the subscriber
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
.. attribute:: vrf
IPv4 VRF details
**type**\: :py:class:`Vrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.Interfaces.Interface.Vrf>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface_name = None
self.access_interface = None
self.age = None
self.current_change_age = None
self.initiator = None
self.interface_creation_time = None
self.ipv6_current_change_age = None
self.ipv6_initiator = None
self.ipv6_last_state_change_time = None
self.ipv6_old_state = None
self.ipv6_state = None
self.ipv6vrf = IpSubscriber.Nodes.Node.Interfaces.Interface.Ipv6Vrf()
self.ipv6vrf.parent = self
self.is_l2_connected = None
self.last_state_change_time = None
self.old_state = None
self.session_type = None
self.state = None
self.subscriber_ipv4_address = None
self.subscriber_ipv6_address = None
self.subscriber_label = None
self.subscriber_mac_addres = None
self.vrf = IpSubscriber.Nodes.Node.Interfaces.Interface.Vrf()
self.vrf.parent = self
class Vrf(object):
"""
IPv4 VRF details
.. attribute:: table_name
Table
**type**\: str
.. attribute:: vrf_name
VRF name
**type**\: str
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.table_name = None
self.vrf_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:vrf'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.table_name is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Interfaces.Interface.Vrf']['meta_info']
class Ipv6Vrf(object):
"""
IPv6 VRF details
.. attribute:: table_name
Table
**type**\: str
.. attribute:: vrf_name
VRF name
**type**\: str
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.table_name = None
self.vrf_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:ipv6vrf'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.table_name is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Interfaces.Interface.Ipv6Vrf']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.interface_name is None:
raise YPYModelError('Key property interface_name is None')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:interface[Cisco-IOS-XR-subscriber-ipsub-oper:interface-name = ' + str(self.interface_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.interface_name is not None:
return True
if self.access_interface is not None:
return True
if self.age is not None:
return True
if self.current_change_age is not None:
return True
if self.initiator is not None:
return True
if self.interface_creation_time is not None:
return True
if self.ipv6_current_change_age is not None:
return True
if self.ipv6_initiator is not None:
return True
if self.ipv6_last_state_change_time is not None:
return True
if self.ipv6_old_state is not None:
return True
if self.ipv6_state is not None:
return True
if self.ipv6vrf is not None and self.ipv6vrf._has_data():
return True
if self.is_l2_connected is not None:
return True
if self.last_state_change_time is not None:
return True
if self.old_state is not None:
return True
if self.session_type is not None:
return True
if self.state is not None:
return True
if self.subscriber_ipv4_address is not None:
return True
if self.subscriber_ipv6_address is not None:
return True
if self.subscriber_label is not None:
return True
if self.subscriber_mac_addres is not None:
return True
if self.vrf is not None and self.vrf._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Interfaces.Interface']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:interfaces'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.interface is not None:
for child_ref in self.interface:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.Interfaces']['meta_info']
class AccessInterfaces(object):
"""
IP subscriber access interface table
.. attribute:: access_interface
IP subscriber access interface entry
**type**\: list of :py:class:`AccessInterface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_interface = YList()
self.access_interface.parent = self
self.access_interface.name = 'access_interface'
class AccessInterface(object):
"""
IP subscriber access interface entry
.. attribute:: interface_name <key>
Interface name
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: age
Age in HH\:MM\:SS format
**type**\: str
.. attribute:: initiators
Configurational state\-statistics for each initiating protocol enabled on this parent interface
**type**\: :py:class:`Initiators <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators>`
.. attribute:: interface_creation_time
Interface creation time in Month Date HH\:MM\:SS format
**type**\: str
.. attribute:: interface_type
Interface Type
**type**\: str
.. attribute:: ipv6_initiators
Configurational state\-statistics for each initiating protocol enabled on this parent interface for IPv6 session
**type**\: :py:class:`Ipv6Initiators <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators>`
.. attribute:: ipv6_state
Operational ipv6 state of this interface
**type**\: :py:class:`IpsubMaParentIntfStateDataEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpsubMaParentIntfStateDataEnum>`
.. attribute:: session_limit
Configuration session limits for each session limit source and type
**type**\: :py:class:`SessionLimit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit>`
.. attribute:: state
Operational state of this interface
**type**\: :py:class:`IpsubMaParentIntfStateDataEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpsubMaParentIntfStateDataEnum>`
.. attribute:: vlan_type
The VLAN type on the access interface
**type**\: :py:class:`IpsubMaParentIntfVlanEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpsubMaParentIntfVlanEnum>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface_name = None
self.age = None
self.initiators = IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators()
self.initiators.parent = self
self.interface_creation_time = None
self.interface_type = None
self.ipv6_initiators = IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators()
self.ipv6_initiators.parent = self
self.ipv6_state = None
self.session_limit = IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit()
self.session_limit.parent = self
self.state = None
self.vlan_type = None
class Initiators(object):
"""
Configurational state\-statistics for each
initiating protocol enabled on this parent
interface
.. attribute:: dhcp
DHCP information
**type**\: :py:class:`Dhcp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators.Dhcp>`
.. attribute:: packet_trigger
packet trigger information
**type**\: :py:class:`PacketTrigger <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators.PacketTrigger>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.dhcp = IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators.Dhcp()
self.dhcp.parent = self
self.packet_trigger = IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators.PacketTrigger()
self.packet_trigger.parent = self
class Dhcp(object):
"""
DHCP information
.. attribute:: fsol_bytes
Number of first sign of life bytes received for initiating protocol on this interface
**type**\: int
**range:** 0..4294967295
**units**\: byte
.. attribute:: fsol_dropped_bytes
Number of first sign of life bytes received for initiating protocol on this interface that were dropped
**type**\: int
**range:** 0..4294967295
**units**\: byte
.. attribute:: fsol_dropped_packets
Number of first sign of life packets received for initiating protocol on this interface that were dropped
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_dropped_packets_dup_addr
Number of first sign of life packets received for initiating protocol on this interface that were dropped due to duplicate source address
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_dropped_packets_flow
Number of first sign of life packets received for initiating protocol on this interface that were dropped due to exceeding creation rate
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_dropped_packets_session_limit
Number of first sign of life packets received for initiating protocol on this interface that were dropped due to exceeding one or more of the configured session limits
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_packets
Number of first sign of life packets received for initiating protocol on this interface
**type**\: int
**range:** 0..4294967295
.. attribute:: is_configured
True if the initiator is configured
**type**\: bool
.. attribute:: sessions
Number of sessions currently up for each initiator
**type**\: int
**range:** 0..4294967295
.. attribute:: unique_ip_check
True if the check for subscriber address uniqueness during first sign of life is enabled
**type**\: bool
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.fsol_bytes = None
self.fsol_dropped_bytes = None
self.fsol_dropped_packets = None
self.fsol_dropped_packets_dup_addr = None
self.fsol_dropped_packets_flow = None
self.fsol_dropped_packets_session_limit = None
self.fsol_packets = None
self.is_configured = None
self.sessions = None
self.unique_ip_check = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:dhcp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.fsol_bytes is not None:
return True
if self.fsol_dropped_bytes is not None:
return True
if self.fsol_dropped_packets is not None:
return True
if self.fsol_dropped_packets_dup_addr is not None:
return True
if self.fsol_dropped_packets_flow is not None:
return True
if self.fsol_dropped_packets_session_limit is not None:
return True
if self.fsol_packets is not None:
return True
if self.is_configured is not None:
return True
if self.sessions is not None:
return True
if self.unique_ip_check is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators.Dhcp']['meta_info']
class PacketTrigger(object):
"""
packet trigger information
.. attribute:: fsol_bytes
Number of first sign of life bytes received for initiating protocol on this interface
**type**\: int
**range:** 0..4294967295
**units**\: byte
.. attribute:: fsol_dropped_bytes
Number of first sign of life bytes received for initiating protocol on this interface that were dropped
**type**\: int
**range:** 0..4294967295
**units**\: byte
.. attribute:: fsol_dropped_packets
Number of first sign of life packets received for initiating protocol on this interface that were dropped
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_dropped_packets_dup_addr
Number of first sign of life packets received for initiating protocol on this interface that were dropped due to duplicate source address
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_dropped_packets_flow
Number of first sign of life packets received for initiating protocol on this interface that were dropped due to exceeding creation rate
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_dropped_packets_session_limit
Number of first sign of life packets received for initiating protocol on this interface that were dropped due to exceeding one or more of the configured session limits
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_packets
Number of first sign of life packets received for initiating protocol on this interface
**type**\: int
**range:** 0..4294967295
.. attribute:: is_configured
True if the initiator is configured
**type**\: bool
.. attribute:: sessions
Number of sessions currently up for each initiator
**type**\: int
**range:** 0..4294967295
.. attribute:: unique_ip_check
True if the check for subscriber address uniqueness during first sign of life is enabled
**type**\: bool
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.fsol_bytes = None
self.fsol_dropped_bytes = None
self.fsol_dropped_packets = None
self.fsol_dropped_packets_dup_addr = None
self.fsol_dropped_packets_flow = None
self.fsol_dropped_packets_session_limit = None
self.fsol_packets = None
self.is_configured = None
self.sessions = None
self.unique_ip_check = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:packet-trigger'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.fsol_bytes is not None:
return True
if self.fsol_dropped_bytes is not None:
return True
if self.fsol_dropped_packets is not None:
return True
if self.fsol_dropped_packets_dup_addr is not None:
return True
if self.fsol_dropped_packets_flow is not None:
return True
if self.fsol_dropped_packets_session_limit is not None:
return True
if self.fsol_packets is not None:
return True
if self.is_configured is not None:
return True
if self.sessions is not None:
return True
if self.unique_ip_check is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators.PacketTrigger']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:initiators'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.dhcp is not None and self.dhcp._has_data():
return True
if self.packet_trigger is not None and self.packet_trigger._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators']['meta_info']
class Ipv6Initiators(object):
"""
Configurational state\-statistics for each
initiating protocol enabled on this parent
interface for IPv6 session
.. attribute:: dhcp
DHCP information
**type**\: :py:class:`Dhcp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators.Dhcp>`
.. attribute:: packet_trigger
packet trigger information
**type**\: :py:class:`PacketTrigger <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators.PacketTrigger>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.dhcp = IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators.Dhcp()
self.dhcp.parent = self
self.packet_trigger = IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators.PacketTrigger()
self.packet_trigger.parent = self
class Dhcp(object):
"""
DHCP information
.. attribute:: fsol_bytes
Number of first sign of life bytes received for initiating protocol on this interface
**type**\: int
**range:** 0..4294967295
**units**\: byte
.. attribute:: fsol_dropped_bytes
Number of first sign of life bytes received for initiating protocol on this interface that were dropped
**type**\: int
**range:** 0..4294967295
**units**\: byte
.. attribute:: fsol_dropped_packets
Number of first sign of life packets received for initiating protocol on this interface that were dropped
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_dropped_packets_dup_addr
Number of first sign of life packets received for initiating protocol on this interface that were dropped due to duplicate source address
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_dropped_packets_flow
Number of first sign of life packets received for initiating protocol on this interface that were dropped due to exceeding creation rate
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_dropped_packets_session_limit
Number of first sign of life packets received for initiating protocol on this interface that were dropped due to exceeding one or more of the configured session limits
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_packets
Number of first sign of life packets received for initiating protocol on this interface
**type**\: int
**range:** 0..4294967295
.. attribute:: is_configured
True if the initiator is configured
**type**\: bool
.. attribute:: sessions
Number of sessions currently up for each initiator
**type**\: int
**range:** 0..4294967295
.. attribute:: unique_ip_check
True if the check for subscriber address uniqueness during first sign of life is enabled
**type**\: bool
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.fsol_bytes = None
self.fsol_dropped_bytes = None
self.fsol_dropped_packets = None
self.fsol_dropped_packets_dup_addr = None
self.fsol_dropped_packets_flow = None
self.fsol_dropped_packets_session_limit = None
self.fsol_packets = None
self.is_configured = None
self.sessions = None
self.unique_ip_check = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:dhcp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.fsol_bytes is not None:
return True
if self.fsol_dropped_bytes is not None:
return True
if self.fsol_dropped_packets is not None:
return True
if self.fsol_dropped_packets_dup_addr is not None:
return True
if self.fsol_dropped_packets_flow is not None:
return True
if self.fsol_dropped_packets_session_limit is not None:
return True
if self.fsol_packets is not None:
return True
if self.is_configured is not None:
return True
if self.sessions is not None:
return True
if self.unique_ip_check is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators.Dhcp']['meta_info']
class PacketTrigger(object):
"""
packet trigger information
.. attribute:: fsol_bytes
Number of first sign of life bytes received for initiating protocol on this interface
**type**\: int
**range:** 0..4294967295
**units**\: byte
.. attribute:: fsol_dropped_bytes
Number of first sign of life bytes received for initiating protocol on this interface that were dropped
**type**\: int
**range:** 0..4294967295
**units**\: byte
.. attribute:: fsol_dropped_packets
Number of first sign of life packets received for initiating protocol on this interface that were dropped
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_dropped_packets_dup_addr
Number of first sign of life packets received for initiating protocol on this interface that were dropped due to duplicate source address
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_dropped_packets_flow
Number of first sign of life packets received for initiating protocol on this interface that were dropped due to exceeding creation rate
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_dropped_packets_session_limit
Number of first sign of life packets received for initiating protocol on this interface that were dropped due to exceeding one or more of the configured session limits
**type**\: int
**range:** 0..4294967295
.. attribute:: fsol_packets
Number of first sign of life packets received for initiating protocol on this interface
**type**\: int
**range:** 0..4294967295
.. attribute:: is_configured
True if the initiator is configured
**type**\: bool
.. attribute:: sessions
Number of sessions currently up for each initiator
**type**\: int
**range:** 0..4294967295
.. attribute:: unique_ip_check
True if the check for subscriber address uniqueness during first sign of life is enabled
**type**\: bool
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.fsol_bytes = None
self.fsol_dropped_bytes = None
self.fsol_dropped_packets = None
self.fsol_dropped_packets_dup_addr = None
self.fsol_dropped_packets_flow = None
self.fsol_dropped_packets_session_limit = None
self.fsol_packets = None
self.is_configured = None
self.sessions = None
self.unique_ip_check = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:packet-trigger'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.fsol_bytes is not None:
return True
if self.fsol_dropped_bytes is not None:
return True
if self.fsol_dropped_packets is not None:
return True
if self.fsol_dropped_packets_dup_addr is not None:
return True
if self.fsol_dropped_packets_flow is not None:
return True
if self.fsol_dropped_packets_session_limit is not None:
return True
if self.fsol_packets is not None:
return True
if self.is_configured is not None:
return True
if self.sessions is not None:
return True
if self.unique_ip_check is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators.PacketTrigger']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:ipv6-initiators'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.dhcp is not None and self.dhcp._has_data():
return True
if self.packet_trigger is not None and self.packet_trigger._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators']['meta_info']
class SessionLimit(object):
"""
Configuration session limits for each session
limit source and type
.. attribute:: total
All sources session limits
**type**\: :py:class:`Total <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit.Total>`
.. attribute:: unclassified_source
Unclassified source session limits
**type**\: :py:class:`UnclassifiedSource <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper.IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit.UnclassifiedSource>`
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.total = IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit.Total()
self.total.parent = self
self.unclassified_source = IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit.UnclassifiedSource()
self.unclassified_source.parent = self
class UnclassifiedSource(object):
"""
Unclassified source session limits
.. attribute:: per_vlan
Per\-VLAN limit category
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.per_vlan = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:unclassified-source'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.per_vlan is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit.UnclassifiedSource']['meta_info']
class Total(object):
"""
All sources session limits
.. attribute:: per_vlan
Per\-VLAN limit category
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'subscriber-ipsub-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.per_vlan = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:total'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.per_vlan is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit.Total']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:session-limit'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.total is not None and self.total._has_data():
return True
if self.unclassified_source is not None and self.unclassified_source._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.interface_name is None:
raise YPYModelError('Key property interface_name is None')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:access-interface[Cisco-IOS-XR-subscriber-ipsub-oper:interface-name = ' + str(self.interface_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.interface_name is not None:
return True
if self.age is not None:
return True
if self.initiators is not None and self.initiators._has_data():
return True
if self.interface_creation_time is not None:
return True
if self.interface_type is not None:
return True
if self.ipv6_initiators is not None and self.ipv6_initiators._has_data():
return True
if self.ipv6_state is not None:
return True
if self.session_limit is not None and self.session_limit._has_data():
return True
if self.state is not None:
return True
if self.vlan_type is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-subscriber-ipsub-oper:access-interfaces'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.access_interface is not None:
for child_ref in self.access_interface:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node.AccessInterfaces']['meta_info']
@property
def _common_path(self):
if self.node_name is None:
raise YPYModelError('Key property node_name is None')
return '/Cisco-IOS-XR-subscriber-ipsub-oper:ip-subscriber/Cisco-IOS-XR-subscriber-ipsub-oper:nodes/Cisco-IOS-XR-subscriber-ipsub-oper:node[Cisco-IOS-XR-subscriber-ipsub-oper:node-name = ' + str(self.node_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.node_name is not None:
return True
if self.access_interfaces is not None and self.access_interfaces._has_data():
return True
if self.interfaces is not None and self.interfaces._has_data():
return True
if self.summary is not None and self.summary._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes.Node']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-subscriber-ipsub-oper:ip-subscriber/Cisco-IOS-XR-subscriber-ipsub-oper:nodes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.node is not None:
for child_ref in self.node:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber.Nodes']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-subscriber-ipsub-oper:ip-subscriber'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.nodes is not None and self.nodes._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_ipsub_oper as meta
return meta._meta_table['IpSubscriber']['meta_info']
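# --- Illustrative usage sketch (not part of the generated bindings) ---
# A minimal, hedged example of how the IpSubscriber operational model defined
# above might be read over NETCONF with YDK-py's CRUDService. The device
# address, port and credentials below are placeholders, not values taken from
# this module.
if __name__ == '__main__':
    from ydk.services import CRUDService
    from ydk.providers import NetconfServiceProvider

    provider = NetconfServiceProvider(address='192.0.2.1', port=830,
                                      username='admin', password='admin')
    crud = CRUDService()
    ipsub = IpSubscriber()              # top-level container defined above
    ipsub = crud.read(provider, ipsub)  # populate with operational data
    for node in ipsub.nodes.node:       # one entry per node on the router
        print(node.node_name)
    provider.close()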
| {
"content_hash": "929c5ad2306a9e95de4609dece197d8a",
"timestamp": "",
"source": "github",
"line_count": 3311,
"max_line_length": 301,
"avg_line_length": 42.614919963757174,
"alnum_prop": 0.38632723355398374,
"repo_name": "111pontes/ydk-py",
"id": "efa0f8c8824e3373fa8cbb148f37b80151182f15",
"size": "141098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_subscriber_ipsub_oper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117948"
}
],
"symlink_target": ""
} |
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, t 0.3, s, t 0.5, s, t 1, s, q"
tags = "Ripple3D"
import pyglet
from pyglet.gl import glColor4ub, glPushMatrix, glPopMatrix
import summa
from summa.director import director
from summa.actions import *
class BackgroundLayer(summa.layer.Layer):
def __init__(self):
super(BackgroundLayer, self).__init__()
self.img = pyglet.resource.image('background_image.png')
def draw( self ):
glColor4ub(255, 255, 255, 255)
glPushMatrix()
self.transform()
self.img.blit(0,0)
glPopMatrix()
def main():
director.init( resizable=True )
director.set_depth_test()
main_scene = summa.scene.Scene()
main_scene.add( BackgroundLayer(), z=0 )
# important: maintain the aspect ratio in the grid
e = Ripple3D( radius=240, grid=(32,24), duration=20, waves=20,
amplitude=60 )
main_scene.do( e )
director.run(main_scene)
if __name__ == '__main__':
main()
| {
"content_hash": "c2e581b385747a039f7ec95bd85df4e8",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 66,
"avg_line_length": 24.09090909090909,
"alnum_prop": 0.6254716981132076,
"repo_name": "shackra/thomas-aquinas",
"id": "a1662e5c1fb1e1239924f9c989c807b3df6ac377",
"size": "1133",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable-branch",
"path": "tests/test_ripple3d.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1245155"
}
],
"symlink_target": ""
} |
import json
class BaseProcessor(object):
"""
Base class for processing lists of scraped Items and inserting them into the database
Subclasses should implement a process method
"""
def __init__(self, items_file_path, job=None):
self.items_file_path = items_file_path
self.job = job # The ScrapeJob, if available
self._count_created = 0
self._count_updated = 0
self._count_error = 0
def process(self, *args, **kwargs):
pass
def file_wrapper(fp):
"""
Yields parsed JSON objects from a line-separated JSON file.
"""
if isinstance(fp, basestring):
with open(fp, 'rb') as fp:
for line in fp:
yield json.loads(line)
else:
for line in fp:
yield json.loads(line)
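# --- Illustrative usage sketch (hypothetical, not part of this module) ---
# A hedged example of how a concrete processor might consume the scraped items
# file through file_wrapper(). The PrintingProcessor subclass and the
# 'scraped_items.jsonl' path are assumptions made purely for illustration.
if __name__ == '__main__':
    class PrintingProcessor(BaseProcessor):
        def process(self, *args, **kwargs):
            # Iterate over the line-separated JSON items and "insert" each one.
            for item in file_wrapper(self.items_file_path):
                print(item)
                self._count_created += 1

    processor = PrintingProcessor('scraped_items.jsonl')
    processor.process()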
| {
"content_hash": "3abc96f07676892bb0fc73e69b2b8bd7",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 89,
"avg_line_length": 23.852941176470587,
"alnum_prop": 0.5955610357583231,
"repo_name": "legco-watch/legco-watch",
"id": "12bd6ec3c5b2886de6ada753822cd53ea10f7d22",
"size": "811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/raw/processors/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "842"
},
{
"name": "HTML",
"bytes": "523569"
},
{
"name": "Python",
"bytes": "535583"
},
{
"name": "Ruby",
"bytes": "2633"
},
{
"name": "Shell",
"bytes": "8089"
}
],
"symlink_target": ""
} |
"""
=====================================
Create a minimalistic user interface
=====================================
DIPY allows you to create a minimalistic user interface using widgets.
In this example we will: a) create two parallel streamtubes, b) add some buttons
that change the opacity of these tubes, and c) move the streamtubes using
a slider.
"""
from __future__ import print_function
import numpy as np
from dipy.viz import window, actor, widget
from dipy.data import fetch_viz_icons, read_viz_icons
"""
First, we add a couple of streamtubes to the Renderer
"""
renderer = window.Renderer()
lines = [np.array([[-1, 0, 0.], [1, 0, 0.]]),
np.array([[-1, 1, 0.], [1, 1, 0.]])]
colors = np.array([[1., 0., 0.], [0., .5, 0.]])
stream_actor = actor.streamtube(lines, colors, linewidth=0.3, lod=False)
renderer.add(stream_actor)
"""
The ``ShowManager`` allows us to break the visualization process into steps so that
the widgets can be added and updated properly.
"""
show_manager = window.ShowManager(renderer, size=(800, 800),
order_transparent=True)
"""
Next we add the widgets and their callbacks.
"""
def button_plus_callback(obj, event):
print('+ pressed')
opacity = stream_actor.GetProperty().GetOpacity()
if opacity < 1:
stream_actor.GetProperty().SetOpacity(opacity + 0.1)
def button_minus_callback(obj, event):
print('- pressed')
opacity = stream_actor.GetProperty().GetOpacity()
if opacity > 0:
stream_actor.GetProperty().SetOpacity(opacity - 0.1)
"""
We need to download some icons to create a face for our buttons. We provide
some simple icons in this tutorial, but of course you can use any PNG icon
you want.
"""
fetch_viz_icons()
button_png_plus = read_viz_icons(fname='plus.png')
button_plus = widget.button(show_manager.iren,
show_manager.ren,
button_plus_callback,
button_png_plus, (.98, .9), (120, 50))
button_png_minus = read_viz_icons(fname='minus.png')
button_minus = widget.button(show_manager.iren,
show_manager.ren,
button_minus_callback,
button_png_minus, (.98, .9), (50, 50))
def move_lines(obj, event):
stream_actor.SetPosition((obj.get_value(), 0, 0))
"""
Then we create the slider.
"""
slider = widget.slider(show_manager.iren, show_manager.ren,
callback=move_lines,
min_value=-1,
max_value=1,
value=0.,
label="X",
right_normalized_pos=(.98, 0.7),
size=(120, 0), label_format="%0.2lf",
color=(0.4, 0.4, 0.4),
selected_color=(0.2, 0.2, 0.2))
"""
And we add a simple clickable text overlay at the bottom left corner.
"""
def text_clicked(obj, event):
print("Awesome!")
text = widget.text(show_manager.iren, show_manager.ren,
message="Powered by DIPY",
callback=text_clicked,
color=(1., .5, .0),
left_down_pos=(10, 5),
right_top_pos=(200, 35))
"""
Position the camera.
"""
renderer.zoom(0.7)
renderer.roll(10.)
renderer.reset_clipping_range()
"""
Uncomment the following lines to start the interaction.
"""
# show_manager.initialize()
# show_manager.render()
# show_manager.start()
window.record(renderer, out_path='mini_ui.png', size=(800, 800),
reset_camera=False)
del show_manager
"""
.. figure:: mini_ui.png
:align: center
**A minimalistic user interface**.
"""
| {
"content_hash": "899dd6e453a0a1bbca7bbbf7365e51c1",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 79,
"avg_line_length": 26.920289855072465,
"alnum_prop": 0.5800807537012113,
"repo_name": "StongeEtienne/dipy",
"id": "54459ea441366b3b229f4e3c4d9407a3bd126dd7",
"size": "3715",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/examples/viz_widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2844"
},
{
"name": "Makefile",
"bytes": "3639"
},
{
"name": "Python",
"bytes": "2734362"
}
],
"symlink_target": ""
} |
import logging
import pymongo
from bson import ObjectId
from bson.errors import InvalidId
from flask import flash
from flask.ext.admin._compat import string_types
from flask.ext.admin.babel import gettext, ngettext, lazy_gettext
from flask.ext.admin.model import BaseModelView
from flask.ext.admin.actions import action
from flask.ext.admin.helpers import get_form_data
from .filters import BasePyMongoFilter
from .tools import parse_like_term
# Set up logger
log = logging.getLogger("flask-admin.pymongo")
class ModelView(BaseModelView):
"""
PyMongo model scaffolding.
"""
column_filters = None
"""
Collection of the column filters.
Should contain instances of
:class:`flask.ext.admin.contrib.pymongo.filters.BasePyMongoFilter`
classes.
For example::
class MyModelView(BaseModelView):
column_filters = (BooleanEqualFilter(User.name, 'Name'),)
"""
def __init__(self, coll,
name=None, category=None, endpoint=None, url=None,
menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
"""
Constructor
:param coll:
MongoDB collection object
:param name:
Display name
:param category:
Display category
:param endpoint:
Endpoint
:param url:
Custom URL
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask.ext.admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask.ext.admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask.ext.admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
"""
self._search_fields = []
if name is None:
name = self._prettify_name(coll.name)
if endpoint is None:
endpoint = ('%sview' % coll.name).lower()
super(ModelView, self).__init__(None, name, category, endpoint, url,
menu_class_name=menu_class_name,
menu_icon_type=menu_icon_type,
menu_icon_value=menu_icon_value)
self.coll = coll
def scaffold_pk(self):
return '_id'
def get_pk_value(self, model):
"""
Return primary key value from the model instance
:param model:
Model instance
"""
return model.get('_id')
def scaffold_list_columns(self):
"""
Scaffold list columns
"""
raise NotImplementedError()
def scaffold_sortable_columns(self):
"""
Return sortable columns dictionary (name, field)
"""
return []
def init_search(self):
"""
Init search
"""
if self.column_searchable_list:
for p in self.column_searchable_list:
if not isinstance(p, string_types):
raise ValueError('Expected string')
# TODO: Validation?
self._search_fields.append(p)
return bool(self._search_fields)
def scaffold_filters(self, attr):
"""
Return filter object(s) for the field
:param name:
Either field name or field instance
"""
raise NotImplementedError()
def is_valid_filter(self, filter):
"""
Validate that the given filter is a valid PyMongo filter
:param filter:
Filter object
"""
return isinstance(filter, BasePyMongoFilter)
def scaffold_form(self):
raise NotImplementedError()
def _get_field_value(self, model, name):
"""
Get unformatted field value from the model
"""
return model.get(name)
def get_list(self, page, sort_column, sort_desc, search, filters,
execute=True):
"""
Get list of objects from the MongoDB collection
:param page:
Page number
:param sort_column:
Sort column
:param sort_desc:
Sort descending
:param search:
Search criteria
:param filters:
List of applied filters
:param execute:
Run query immediately or not
"""
query = {}
# Filters
if self._filters:
data = []
for flt, value in filters:
f = self._filters[flt]
data = f.apply(data, value)
if data:
if len(data) == 1:
query = data[0]
else:
query['$and'] = data
# Search
if self._search_supported and search:
values = search.split(' ')
queries = []
# Construct inner queries
for value in values:
if not value:
continue
regex = parse_like_term(value)
stmt = []
for field in self._search_fields:
stmt.append({field: {'$regex': regex}})
if stmt:
if len(stmt) == 1:
queries.append(stmt[0])
else:
queries.append({'$or': stmt})
# Construct final query
if queries:
if len(queries) == 1:
final = queries[0]
else:
final = {'$and': queries}
if query:
query = {'$and': [query, final]}
else:
query = final
# Get count
count = self.coll.find(query).count()
# Sorting
sort_by = None
if sort_column:
sort_by = [(sort_column, pymongo.DESCENDING if sort_desc else pymongo.ASCENDING)]
else:
order = self._get_default_order()
if order:
sort_by = [(order[0], pymongo.DESCENDING if order[1] else pymongo.ASCENDING)]
# Pagination
skip = None
if page is not None:
skip = page * self.page_size
results = self.coll.find(query, sort=sort_by, skip=skip, limit=self.page_size)
if execute:
results = list(results)
return count, results
def _get_valid_id(self, id):
try:
return ObjectId(id)
except InvalidId:
return id
def get_one(self, id):
"""
Return single model instance by ID
:param id:
Model ID
"""
return self.coll.find_one({'_id': self._get_valid_id(id)})
def edit_form(self, obj):
"""
Create edit form from the MongoDB document
"""
return self._edit_form_class(get_form_data(), **obj)
def create_model(self, form):
"""
Create model helper
:param form:
Form instance
"""
try:
model = form.data
self._on_model_change(form, model, True)
self.coll.insert(model)
except Exception as ex:
flash(gettext('Failed to create model. %(error)s', error=str(ex)),
'error')
log.exception('Failed to create model')
return False
else:
self.after_model_change(form, model, True)
return True
def update_model(self, form, model):
"""
Update model helper
:param form:
Form instance
:param model:
Model instance to update
"""
try:
model.update(form.data)
self._on_model_change(form, model, False)
pk = self.get_pk_value(model)
self.coll.update({'_id': pk}, model)
except Exception as ex:
flash(gettext('Failed to update model. %(error)s', error=str(ex)),
'error')
log.exception('Failed to update model')
return False
else:
self.after_model_change(form, model, False)
return True
def delete_model(self, model):
"""
Delete model helper
:param model:
Model instance
"""
try:
pk = self.get_pk_value(model)
if not pk:
raise ValueError('Document does not have _id')
self.on_model_delete(model)
self.coll.remove({'_id': pk})
return True
except Exception as ex:
flash(gettext('Failed to delete model. %(error)s', error=str(ex)),
'error')
log.exception('Failed to delete model')
return False
# Default model actions
def is_action_allowed(self, name):
# Check delete action permission
if name == 'delete' and not self.can_delete:
return False
return super(ModelView, self).is_action_allowed(name)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected models?'))
def action_delete(self, ids):
try:
count = 0
# TODO: Optimize me
for pk in ids:
if self.delete_model(self.get_one(pk)):
count += 1
flash(ngettext('Model was successfully deleted.',
'%(count)s models were successfully deleted.',
count,
count=count))
except Exception as ex:
flash(gettext('Failed to delete models. %(error)s', error=str(ex)), 'error')
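# --- Illustrative usage sketch (not part of this module) ---
# A hedged example of wiring this view into a Flask application. The Flask app,
# the local MongoClient connection, the 'users' collection and the UserForm
# fields are assumptions for illustration; only ModelView comes from this module.
# A `form` class is supplied because scaffold_form() is intentionally not
# implemented above.
if __name__ == '__main__':
    from flask import Flask
    from flask.ext.admin import Admin
    from pymongo import MongoClient
    from wtforms import fields, form

    class UserForm(form.Form):
        name = fields.TextField('Name')
        email = fields.TextField('Email')

    class UserView(ModelView):
        column_list = ('name', 'email')
        column_searchable_list = ('name',)
        form = UserForm

    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'change-me'   # required for flash() messages
    admin = Admin(app, name='Example')
    conn = MongoClient()                     # assumes a MongoDB on localhost
    admin.add_view(UserView(conn.testdb.users, 'User'))
    app.run(debug=True)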
| {
"content_hash": "a78785a0e9b49a9128d0879b06b9d205",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 102,
"avg_line_length": 28.21727019498607,
"alnum_prop": 0.5045409674234945,
"repo_name": "pawl/flask-admin",
"id": "c21e9e8983d3915b58f68ae5284b9bed163c75d9",
"size": "10130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_admin/contrib/pymongo/view.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2285"
},
{
"name": "JavaScript",
"bytes": "18731"
},
{
"name": "Makefile",
"bytes": "5587"
},
{
"name": "Python",
"bytes": "517076"
},
{
"name": "Shell",
"bytes": "250"
}
],
"symlink_target": ""
} |
from setuptools import setup, Command
import sys
sys.path.insert(0, '.')
import injector
class PyTest(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import subprocess
errno = subprocess.call([sys.executable, 'runtest.py'])
raise SystemExit(errno)
version = injector.__version__
version_tag = injector.__version_tag__
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
print('WARNING: Could not locate pandoc, using Markdown long_description.')
long_description = open('README.md').read()
description = long_description.splitlines()[0].strip()
setup(
name='injector',
url='http://github.com/alecthomas/injector',
download_url='http://pypi.python.org/pypi/injector',
version=version,
options=dict(egg_info=dict(tag_build=version_tag)),
description=description,
long_description=long_description,
license='BSD',
platforms=['any'],
py_modules=['injector'],
author='Alec Thomas',
author_email='alec@swapoff.org',
install_requires=[
'setuptools >= 0.6b1',
],
cmdclass={'test': PyTest},
keywords=[
'Dependency Injection', 'DI', 'Dependency Injection framework',
'Inversion of Control', 'IoC', 'Inversion of Control container',
],
)
| {
"content_hash": "e5cb6e02382dc14bc821d034b5e65708",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 79,
"avg_line_length": 25.178571428571427,
"alnum_prop": 0.6567375886524822,
"repo_name": "rutsky/injector",
"id": "08cc59c97a9f517929a242155d9ff9b2df76d58e",
"size": "1410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "255718"
}
],
"symlink_target": ""
} |
"""Tests for classifier services"""
import os
from core.domain import classifier_registry
from core.domain import classifier_services
from core.domain import exp_services
from core.tests import test_utils
import feconf
import utils
class ClassifierServicesTests(test_utils.GenericTestBase):
"""Test reader.classify using the sample explorations.
Since the end-to-end tests cover correct classification and the frontend tests
cover hard rules, ReaderClassifyTests only checks that the string
classifier is actually called.
"""
def setUp(self):
super(ClassifierServicesTests, self).setUp()
self._init_classify_inputs('16')
def _init_classify_inputs(self, exploration_id):
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exploration_id,
assets_list)
self.exp_id = exploration_id
self.exp_state = (
exp_services.get_exploration_by_id(exploration_id).states['Home'])
def _is_string_classifier_called(self, answer):
sc = classifier_registry.ClassifierRegistry.get_classifier_by_id(
feconf.INTERACTION_CLASSIFIER_MAPPING['TextInput'])
string_classifier_predict = (
sc.__class__.predict)
predict_counter = test_utils.CallCounter(
string_classifier_predict)
with self.swap(sc.__class__, 'predict', predict_counter):
response = classifier_services.classify(self.exp_state, answer)
answer_group_index = response['answer_group_index']
rule_spec_index = response['rule_spec_index']
answer_groups = self.exp_state.interaction.answer_groups
if answer_group_index == len(answer_groups):
return 'default'
answer_group = answer_groups[answer_group_index]
return (answer_group.get_classifier_rule_index() == rule_spec_index and
predict_counter.times_called == 1)
def test_string_classifier_classification(self):
"""All these responses trigger the string classifier."""
with self.swap(feconf, 'ENABLE_STRING_CLASSIFIER', True):
self.assertTrue(
self._is_string_classifier_called(
'it\'s a permutation of 3 elements'))
self.assertTrue(
self._is_string_classifier_called(
'There are 3 options for the first ball, and 2 for the '
'remaining two. So 3*2=6.'))
self.assertTrue(
self._is_string_classifier_called('abc acb bac bca cbb cba'))
self.assertTrue(
self._is_string_classifier_called('dunno, just guessed'))
| {
"content_hash": "369a5d5174594e6b477f3cd997141f14",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 40.95774647887324,
"alnum_prop": 0.6468363136176066,
"repo_name": "jestapinski/oppia",
"id": "dbf79d8a4a8998edd65dca990b597b26dcbb5672",
"size": "3513",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/classifier_services_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "567034"
},
{
"name": "HTML",
"bytes": "813759"
},
{
"name": "JavaScript",
"bytes": "2463873"
},
{
"name": "Python",
"bytes": "2892341"
},
{
"name": "Shell",
"bytes": "46684"
}
],
"symlink_target": ""
} |
"""
Tests for django.utils.
"""
from __future__ import absolute_import
from .archive import TestBzip2Tar, TestGzipTar, TestTar, TestZip
from .baseconv import TestBaseConv
from .checksums import TestUtilsChecksums
from .crypto import TestUtilsCryptoMisc, TestUtilsCryptoPBKDF2
from .datastructures import (DictWrapperTests, ImmutableListTests,
MergeDictTests, MultiValueDictTests, SortedDictTests)
from .dateformat import DateFormatTests
from .dateparse import DateParseTests
from .datetime_safe import DatetimeTests
from .decorators import DecoratorFromMiddlewareTests
from .encoding import TestEncodingUtils
from .feedgenerator import FeedgeneratorTest
from .functional import FunctionalTestCase
from .html import TestUtilsHtml
from .http import TestUtilsHttp, ETagProcessingTests, HttpDateProcessingTests
from .ipv6 import TestUtilsIPv6
from .jslex import JsToCForGettextTest, JsTokensTest
from .module_loading import (CustomLoader, DefaultLoader, EggLoader,
ModuleImportTestCase)
from .numberformat import TestNumberFormat
from .os_utils import SafeJoinTests
from .regex_helper import NormalizeTests
from .simplelazyobject import TestUtilsSimpleLazyObject
from .termcolors import TermColorTests
from .text import TestUtilsText
from .timesince import TimesinceTests
from .timezone import TimezoneTests
from .tzinfo import TzinfoTests
| {
"content_hash": "4aced7e9d708ce34e0bca48d6e3c9593",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 77,
"avg_line_length": 40.75757575757576,
"alnum_prop": 0.8535315985130112,
"repo_name": "mammique/django",
"id": "726df3d979c3d28af899e1aa9cdc3780f40ca338",
"size": "1345",
"binary": false,
"copies": "3",
"ref": "refs/heads/tp_alpha",
"path": "tests/regressiontests/utils/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "84061"
},
{
"name": "Python",
"bytes": "8016393"
},
{
"name": "Shell",
"bytes": "12116"
}
],
"symlink_target": ""
} |
class PeptideRecord:
    def __init__(self, peptide, matches=None):
        # Avoid the shared mutable default argument; each record gets its own list.
        self.peptide = peptide
        self.peptide_parameters = None
        self.matches = matches if matches is not None else []
def __str__(self):
if len(self.matches) != 0:
return received_peptide_record_to_string(self)
return missed_peptide_record_to_string(self)
def __repr__(self):
return '\nPeptide record:\n' + PeptideRecord.__str__(self)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.peptide == other.peptide and \
self.peptide_parameters == other.peptide_parameters and \
self.matches == other.matches
def find_peptide_record_with_peptide(records, peptide):
for record in records:
if record.peptide == peptide:
return record
return None
def received_peptide_record_to_string(record):
# received peptide record interpretation = missed peptide record interpretation + matches
result = missed_peptide_record_to_string(record)
result += ' Matches: {0}\n'.format(len(record.matches))
if len(record.matches) != 0:
index = 1
for match in record.matches:
result += ' Match #{0} of {1}:\n'.format(index, len(record.matches)) + \
str(match)
index += 1
return result
def missed_peptide_record_to_string(record):
result = str(record.peptide)
if record.peptide_parameters is not None:
result += str(record.peptide_parameters)
return result
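# Illustrative usage sketch, not part of the original module. The peptide
# objects are plain strings here purely to exercise the API; real callers
# would pass peptide/match domain objects.
if __name__ == '__main__':
    records = [PeptideRecord('PEPTIDE'), PeptideRecord('SAMPLE', matches=['match-1'])]
    hit = find_peptide_record_with_peptide(records, 'SAMPLE')
    print(hit)  # prints the record text including its single match
    print(find_peptide_record_with_peptide(records, 'UNKNOWN'))  # None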
| {
"content_hash": "c62629d48d644f4a25eefb7f1beb9e67",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 93,
"avg_line_length": 31.4,
"alnum_prop": 0.6133757961783439,
"repo_name": "zedrian/shkoma",
"id": "14674b1915c26475b3b0bde037617f05bb1e3049",
"size": "1570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shkoma/peptide_record.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "68657"
}
],
"symlink_target": ""
} |
"""Kodi service that is injected into the omnilauncher client"""
import xbmcgui
import xbmcplugin
from resources.lib.log import getLogger
log = getLogger(__name__)
class KodiService(object):
def __init__(self, handle):
log.info("KodiService handle %d", handle)
self.handle = handle
def getSetting(self, *args, **kwargs): # noqa: N802
return xbmcplugin.getSetting(self.handle, *args, **kwargs)
def listItem(self, *args, **kwargs): # noqa: N802
return xbmcgui.ListItem(*args, **kwargs)
def setInfo(self, li, *args, **kwargs): # noqa: N802
return li.setInfo(*args, **kwargs)
def setArt(self, li, *args, **kwargs): # noqa: N802
return li.setArt(*args, **kwargs)
def addDirectoryItem(self, *args, **kwargs): # noqa: N802
return xbmcplugin.addDirectoryItem(self.handle, *args, **kwargs)
def endOfDirectory(self, *args, **kwargs): # noqa: N802
return xbmcplugin.endOfDirectory(self.handle, *args, **kwargs)
def notification(self, *args, **kwargs):
xbmcgui.Dialog().notification("omnilauncher error", *args, **kwargs)
| {
"content_hash": "5e2fff945726907539435fa127210253",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 76,
"avg_line_length": 32.34285714285714,
"alnum_prop": 0.6528268551236749,
"repo_name": "lpenz/script.omnilauncher",
"id": "efe47fd9ffb1365450623ac49360461ac4af9334",
"size": "1132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/lib/kodiservice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6773"
}
],
"symlink_target": ""
} |
class Buffer(object):
def __init__(self):
self.dict = dict()
self._lastTime = 0
    def add(self, timestamp, message):
        if timestamp not in self.dict:
            self.dict[timestamp] = []
        self.dict[timestamp].append(message)
    def list(self, timestamp):
        x = self.dict[timestamp]
        del self.dict[timestamp]
        return x
    def checkTime(self, timestamp):
        return timestamp in self.dict
def getMsgs(self, timestamp):
x = []
for i in range(self._lastTime, timestamp + 1):
if self.checkTime(i):
y = self.list(i)
for z in y:
x = x + [ (i, z) ]
self._lastTime = timestamp
return x
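# Illustrative usage sketch, not part of the original module: queue messages
# under arbitrary timestamps, then drain everything up to a given time.
if __name__ == '__main__':
    buf = Buffer()
    buf.add(1, 'hello')
    buf.add(1, 'world')
    buf.add(3, 'later')
    # Collects [(1, 'hello'), (1, 'world'), (3, 'later')] and advances _lastTime to 3.
    print(buf.getMsgs(3))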
| {
"content_hash": "0132e12198a20cf793ef8e591b432f7f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 48,
"avg_line_length": 20.25,
"alnum_prop": 0.6388888888888888,
"repo_name": "allanedgard/pdss",
"id": "19a6c59a956ea5d5af193ed60975556380c2fe11",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Buffer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6809"
}
],
"symlink_target": ""
} |
"""
A context manager for handling sys.displayhook.
Authors:
* Robert Kern
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from IPython.config.configurable import Configurable
from IPython.utils.traitlets import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class DisplayTrap(Configurable):
"""Object to manage sys.displayhook.
This came from IPython.core.kernel.display_hook, but is simplified
(no callbacks or formatters) until more of the core is refactored.
"""
hook = Any
def __init__(self, hook=None):
super(DisplayTrap, self).__init__(hook=hook, config=None)
self.old_hook = None
        # We define this to track if a single DisplayTrap is nested.
# Only turn off the trap when the outermost call to __exit__ is made.
self._nested_level = 0
def __enter__(self):
if self._nested_level == 0:
self.set()
self._nested_level += 1
return self
def __exit__(self, type, value, traceback):
if self._nested_level == 1:
self.unset()
self._nested_level -= 1
# Returning False will cause exceptions to propagate
return False
def set(self):
"""Set the hook."""
if sys.displayhook is not self.hook:
self.old_hook = sys.displayhook
sys.displayhook = self.hook
def unset(self):
"""Unset the hook."""
sys.displayhook = self.old_hook
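# Illustrative usage sketch, not part of the original module: install a
# custom displayhook for the duration of a block and restore the previous
# hook afterwards (quiet_hook is a made-up example hook).
if __name__ == '__main__':
    def quiet_hook(value):
        pass  # swallow interactive echoing of expression results
    with DisplayTrap(hook=quiet_hook):
        assert sys.displayhook is quiet_hook
    assert sys.displayhook is not quiet_hook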
| {
"content_hash": "fd46f912327e64e0d61d329ec700fd62",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 78,
"avg_line_length": 30.28985507246377,
"alnum_prop": 0.4904306220095694,
"repo_name": "mattvonrocketstein/smash",
"id": "37fc847103c71cb3c4b7014e93e02bf86a666b90",
"size": "2108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smashlib/ipy3x/core/display_trap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "162188"
},
{
"name": "HTML",
"bytes": "32106"
},
{
"name": "JavaScript",
"bytes": "1615935"
},
{
"name": "Makefile",
"bytes": "550"
},
{
"name": "Python",
"bytes": "4934398"
},
{
"name": "Shell",
"bytes": "2990"
}
],
"symlink_target": ""
} |
from base import base_view
from teacher import teacher_view
from student import student_view
from subject import subject_view
from homework import homework_view
from schedule import schedule_view
| {
"content_hash": "afc61b2fa1e7e28571ff6b20f663fa6f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 34,
"avg_line_length": 32.5,
"alnum_prop": 0.8512820512820513,
"repo_name": "hsw5138/bis",
"id": "55664b5a6a169daac8c42b30a53790de31bba2c8",
"size": "195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/controllers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18561"
},
{
"name": "JavaScript",
"bytes": "752"
},
{
"name": "Python",
"bytes": "55784"
}
],
"symlink_target": ""
} |
class DatahubException(Exception):
"""
    Base exception class for errors that occur while handling your request to the tordatahub server.
"""
def __init__(self, status_code, request_id, error_code, error_msg):
super(DatahubException, self).__init__(error_msg)
self.status_code = status_code
self.request_id = request_id
self.error_code = error_code
self.error_msg = error_msg
def __str__(self):
return "status_code:%d, request_id:%s, error_code:%s, error_msg:%s"\
%(self.status_code, self.request_id, self.error_code, self.error_msg)
# A long list of server defined exceptions
class ObjectAlreadyExistException(DatahubException):
"""
    Raised when the Datahub object you are creating already exists.
"""
def __init__(self, status_code, request_id, error_code, error_msg):
super(ObjectAlreadyExistException, self).__init__(status_code, request_id, error_code, error_msg)
class NoSuchObjectException(DatahubException):
"""
    Raised when the Datahub object you are handling does not exist.
"""
def __init__(self, status_code, request_id, error_code, error_msg):
super(NoSuchObjectException, self).__init__(status_code, request_id, error_code, error_msg)
class InvalidParameterException(DatahubException):
"""
    Raised when a parameter in your request is invalid.
"""
def __init__(self, status_code, request_id, error_code, error_msg):
super(InvalidParameterException, self).__init__(status_code, request_id, error_code, error_msg)
class InvalidShardOperationException(DatahubException):
"""
    Raised when the requested shard operation is not supported yet.
"""
def __init__(self, status_code, request_id, error_code, error_msg):
super(InvalidShardOperationException, self).__init__(status_code, request_id, error_code, error_msg)
class MalformedRecordException(DatahubException):
"""
The record is malformed.
"""
def __init__(self, status_code, request_id, error_code, error_msg):
super(MalformedRecordException, self).__init__(status_code, request_id, error_code, error_msg)
class LimitExceededException(DatahubException):
"""
    Raised when too many requests have been made (rate limit exceeded).
"""
def __init__(self, status_code, request_id, error_code, error_msg):
super(LimitExceededException, self).__init__(status_code, request_id, error_code, error_msg)
class ServerInternalError(DatahubException):
"""
    Raised when the Datahub server encounters an internal error.
"""
def __init__(self, status_code, request_id, error_code, error_msg):
super(ServerInternalError, self).__init__(status_code, request_id, error_code, error_msg)
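# Illustrative usage sketch, not part of the original module: callers catch
# the specific subclass first and fall back to DatahubException for anything
# unexpected (the arguments below are made-up values).
if __name__ == '__main__':
    try:
        raise NoSuchObjectException(404, 'req-001', 'NoSuchTopic', 'topic does not exist')
    except NoSuchObjectException as e:
        print(e)
    except DatahubException as e:
        print('unexpected datahub error: %s' % e)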
| {
"content_hash": "c6d3e4c52b9d2b1396597b0b7156b8e4",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 108,
"avg_line_length": 42.292307692307695,
"alnum_prop": 0.6831575118224809,
"repo_name": "jasonz93/python-tordatahub",
"id": "d37803cddc936cc3cf1b2740b67f2b9d29a7425c",
"size": "3583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tordatahub/errors/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "192351"
}
],
"symlink_target": ""
} |
"""Generate YouTube variants."""
from __future__ import print_function, absolute_import
from collections import namedtuple
import json
import re
from bs4 import BeautifulSoup as BS
from common import (
datapath, log,
mkdata, mkvariant, sanitise_ws,
)
cachepath = datapath('YouTube.html')
SEARCH_URL = ('https://www.youtube.com/results?'
'gl={y.country}&persist_gl=1&search_query={{query}}')
SUGGEST_URL = ('https://suggestqueries.google.com/complete/search?'
'client=firefox&ds=yt&hl={y.lang}&q={{query}}')
# superset of Lang
YT = namedtuple('YT', 'name lang country')
def html():
"""Encoded HTML data from URL or cache (if it exists).
Returns:
str: Raw bytes returned from URL/file
"""
with open(cachepath) as fp:
return fp.read()
def parse(soup):
"""Yield `YT` tuples for BS soup.
Args:
        soup (BeautifulSoup): Soup for the YouTube country/region picker list
Yields:
tuple: Extracted `YT` tuples
"""
for a in soup.find_all('a', class_='yt-picker-item'):
# log('a=%r', a)
m = re.match(r'.+\?gl=([A-Z]+).*', a['href'])
if not m:
log('no region: %r', a['href'])
continue
country = lang = m.group(1).lower()
name = sanitise_ws(a.get_text())
yield YT(name, lang, country)
def yt2search(y):
"""Convert `YT` to search `dict`."""
uid = u'youtube.{}'.format(y.country)
desc = u'YouTube ({})'.format(y.name)
return mkvariant(y.country.lower(),
y.name, desc,
SEARCH_URL.format(y=y),
SUGGEST_URL.format(y=y),
)
def main():
data = mkdata(u'YouTube', u'Video search')
soup = BS(html(), 'html.parser')
for y in parse(soup):
data['variants'].append(yt2search(y))
print(json.dumps(data,
sort_keys=True, indent=2))
if __name__ == '__main__':
main()
| {
"content_hash": "18133f95e2bd77b8e5e93a53c0381ed5",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 67,
"avg_line_length": 24.475,
"alnum_prop": 0.5633299284984679,
"repo_name": "deanishe/alfred-searchio",
"id": "1d71fca10d4e96a3cce13d309054456c8c23b04b",
"size": "2139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/gen_youtube.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "6744"
},
{
"name": "HTML",
"bytes": "1346015"
},
{
"name": "Jupyter Notebook",
"bytes": "2311"
},
{
"name": "Python",
"bytes": "950614"
},
{
"name": "Shell",
"bytes": "6767"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.utils.datetime_safe
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Carro',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('modelo', models.CharField(max_length=50)),
('marca', models.CharField(max_length=50)),
('ano', models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(2000)])),
('valor', models.FloatField()),
('data_cadastro', models.DateTimeField(default=django.utils.datetime_safe.datetime.now)),
],
),
]
| {
"content_hash": "2a2a0b628b9ba0f60a4103395401f12d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 114,
"avg_line_length": 32.25925925925926,
"alnum_prop": 0.6096440872560276,
"repo_name": "jhonatanlteodoro/CrudDjango",
"id": "cb0de1e1e54a30a42c5fa30f3d660c3496023ced",
"size": "944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CrudDjango/app/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2606"
},
{
"name": "Python",
"bytes": "8740"
}
],
"symlink_target": ""
} |
from settings import *
import os
import sys
BASE_PATH = os.path.dirname(__file__)
DEBUG = False
TEMPLATE_DEBUG = False  # in production this should be disabled
SERVER_EMAIL = 'reinder@rustema.nl'
DEFAULT_FROM_EMAIL = 'webmaster@democratiespel.nl'
ADMINS = (
('ReindeR Rustema', 'webmaster@democratiespel.nl'),
('Stephan Preeker', 'stephan@preeker.net'),
)
DEFAULT_FROM = 'webmaster@democratiespel.nl'
MANAGERS = ADMINS
DATABASES = {
'default' : {
'ENGINE' : 'django.db.backends.postgresql_psycopg2',
'NAME' : 'democracy_democratiespel',
'USER' : 'democracy',
'PASSWORD' : 'ADkdg5.Q',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Amsterdam'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
#LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'nl'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = BASE_PATH+'/media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/admin/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '5xpy!!naf)+1e6=&%6oa2!u(@0hja#qoo=8)*(!b^x8i3kmgba'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
BASE_PATH+'/templates/',
)
| {
"content_hash": "245e7f5a6b82e0d60116c1755c2b4b51",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 88,
"avg_line_length": 31.614285714285714,
"alnum_prop": 0.7049254405784003,
"repo_name": "spreeker/democracygame",
"id": "85963c7319f4e6aed92c483a4a4bf743363e7dbd",
"size": "2259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "democracy/settings_ch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Emacs Lisp",
"bytes": "147372"
},
{
"name": "JavaScript",
"bytes": "98320"
},
{
"name": "Python",
"bytes": "4363362"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
} |
"""Format and output columns of data."""
__all__ = [
'Columnar',
'Formats',
]
import csv
import enum
import logging
from g1.bases.assertions import ASSERT
logging.getLogger(__name__).addHandler(logging.NullHandler())
class Formats(enum.Enum):
CSV = enum.auto()
TEXT = enum.auto()
class Columnar:
def __init__(
self,
columns,
*,
format=Formats.TEXT, # pylint: disable=redefined-builtin
header=True,
stringifiers=None,
):
self._format = ASSERT.isinstance(format, Formats)
self._header = header
self._columns = columns
self._stringifiers = stringifiers or {}
self._rows = []
def append(self, row):
self._rows.append(row)
def sort(self, key):
self._rows.sort(key=key)
def output(self, output_file):
columns = [(column, self._stringifiers.get(column, str))
for column in self._columns]
rows = [[stringifier(row[column])
for column, stringifier in columns]
for row in self._rows]
if self._format is Formats.CSV:
self._output_csv(rows, output_file)
else:
ASSERT.is_(self._format, Formats.TEXT)
self._output_text(rows, output_file)
def _output_csv(self, rows, output_file):
writer = csv.writer(output_file)
if self._header:
writer.writerow(self._columns)
for row in rows:
writer.writerow(row)
def _output_text(self, rows, output_file):
if self._header:
column_widths = list(map(len, self._columns))
else:
column_widths = [0 for _ in range(len(self._columns))]
for row in rows:
for i, cell in enumerate(row):
column_widths[i] = max(column_widths[i], len(cell))
row_format = ' '.join(
'{{{}:<{}}}'.format(i, w) for i, w in enumerate(column_widths)
)
if self._header:
output_file.write(row_format.format(*self._columns))
output_file.write('\n')
for row in rows:
output_file.write(row_format.format(*row))
output_file.write('\n')
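# Illustrative usage sketch, not part of the original module: build a small
# two-column table, sort it, and write it to stdout in the plain-text format.
if __name__ == '__main__':
    import sys
    columnar = Columnar(['name', 'size'], format=Formats.TEXT)
    columnar.append({'name': 'alpha', 'size': 10})
    columnar.append({'name': 'beta', 'size': 2})
    columnar.sort(key=lambda row: row['size'])
    columnar.output(sys.stdout)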
| {
"content_hash": "98c7419af27c1c55c49d7351c7caed99",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 74,
"avg_line_length": 28.063291139240505,
"alnum_prop": 0.5570590888588183,
"repo_name": "clchiou/garage",
"id": "a9b86fbf7ee1917e48276d59579ad75aabc0ec23",
"size": "2217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/g1/texts/g1/texts/columns/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cap'n Proto",
"bytes": "6917"
},
{
"name": "HTML",
"bytes": "113"
},
{
"name": "Java",
"bytes": "61027"
},
{
"name": "Python",
"bytes": "1653733"
},
{
"name": "Shell",
"bytes": "6209"
}
],
"symlink_target": ""
} |
import argparse
import logging
import utils
class CtxWithLogger(object):
logger = logging.getLogger('internal-ssl-certs-logger')
utils.ctx = CtxWithLogger()
parser = argparse.ArgumentParser()
parser.add_argument('--metadata', default=utils.CERT_METADATA_FILE_PATH,
help='File containing the cert metadata. It should be a '
'JSON file containing an object with the '
'"internal_rest_host" and "networks" fields.')
parser.add_argument('manager_ip', default=None, nargs='?',
help='The IP of this machine on the default network')
if __name__ == '__main__':
args = parser.parse_args()
cert_metadata = utils.load_cert_metadata(filename=args.metadata)
internal_rest_host = args.manager_ip or cert_metadata['internal_rest_host']
networks = cert_metadata.get('networks', {})
networks['default'] = internal_rest_host
cert_ips = [internal_rest_host] + list(networks.values())
utils.generate_internal_ssl_cert(ips=cert_ips, name=internal_rest_host)
utils.store_cert_metadata(internal_rest_host, networks,
filename=args.metadata)
| {
"content_hash": "ca224752a753b6ffea8c08e42a65070e",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 38.096774193548384,
"alnum_prop": 0.6545300592718035,
"repo_name": "cloudify-cosmo/cloudify-manager-blueprints",
"id": "60b7fa2bd52d1b274ad615a1037ba5caee7f8b18",
"size": "1346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components/manager-ip-setter/scripts/create-internal-ssl-certs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "274"
},
{
"name": "Python",
"bytes": "218680"
},
{
"name": "Shell",
"bytes": "5488"
}
],
"symlink_target": ""
} |
from lxml import html
import requests
import re
def scrape(url):
page = requests.get(url)
tree = html.fromstring(page.text)
div = tree.xpath('/html/body/div[3]/div/div[2]/div[6]/text()')
    lyrics = ''.join([re.sub(r'[,"()\[\]]', '', x).rstrip() for x in div])
lyrics = 'BEGIN NOW ' + lyrics.replace('\n', ' ') + ' END'
return lyrics
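# Illustrative usage sketch, not part of the original module; the URL is a
# placeholder, and only pages with the same DOM layout as the scraped lyrics
# site will yield sensible output.
if __name__ == '__main__':
    lyrics = scrape('http://example.com/lyrics/some-song.html')
    print(lyrics[:80])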
| {
"content_hash": "f40d01987b34df6e3af779ebd4e66cab",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 74,
"avg_line_length": 32.36363636363637,
"alnum_prop": 0.5870786516853933,
"repo_name": "Sieniawsky/based-markov",
"id": "ee6db20a48f6d4a3d89349ec247b19d2685c7816",
"size": "356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "based/scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5089"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.sites.models import Site
from django.utils.encoding import python_2_unicode_compatible
from django.db.models import Model
from cms.models import CMSPlugin
from adminsortable.models import SortableMixin
from jsonfield import JSONField
from cmsplugin_contact_plus import local_settings
def get_default_from_email_address():
email_address = ''
try:
email_address = settings.ADMINS[0][1]
except:
pass
return email_address
def get_current_site():
try:
current_site = Site.objects.get_current()
except:
current_site = 'example.com'
return _('Contact form message from {}').format(current_site)
@python_2_unicode_compatible
class ContactPlus(CMSPlugin):
title = models.CharField(_('Title'),
null=True,
blank=True,
max_length=100,
help_text=_("Title for the Contact Form."))
email_subject = models.CharField(
max_length=256,
verbose_name=_("Email subject"),
default=get_current_site)
recipient_email = models.EmailField(_("Email of recipients"),
default=get_default_from_email_address,
max_length=254)
collect_records = models.BooleanField(_('Collect Records'),
default=True,
help_text=_("If active, all records for this Form will be stored in the Database."))
thanks = models.TextField(_('Message displayed after submitting the contact form.'))
submit_button_text = models.CharField(_('Text for the Submit button.'),
blank=True,
max_length=30)
template = models.CharField(
max_length=255,
choices=local_settings.CMSPLUGIN_CONTACT_PLUS_TEMPLATES,
default='cmsplugin_contact_plus/contact.html',
editable=len(local_settings.CMSPLUGIN_CONTACT_PLUS_TEMPLATES) > 1)
class Meta:
verbose_name = "Contact Plus Form"
verbose_name_plural = "Contact Plus Forms"
def copy_relations(self, oldinstance):
for extrafield in ExtraField.objects.filter(form__pk=oldinstance.pk):
extrafield.pk = None
extrafield.save()
self.extrafield_set.add(
extrafield)
def __str__(self):
if self.title:
return self.title
return _("Contact Plus Form for %s") % self.recipient_email
def recaptcha_installed():
return ('captcha' in settings.INSTALLED_APPS and
all([hasattr(settings, s)
for s in ['RECAPTCHA_PUBLIC_KEY', 'RECAPTCHA_PRIVATE_KEY']]))
FIELD_TYPE = (('CharField', 'CharField'),
('BooleanField', 'BooleanField'),
('EmailField', 'EmailField'),
('DecimalField', 'DecimalField'),
('FloatField', 'FloatField'),
('IntegerField', 'IntegerField'),
('DateField', 'DateField'),
('DateTimeField', 'DateTimeField'),
('FileField', 'FileField'),
('ImageField', 'ImageField'),
('IPAddressField', 'IPAddressField'),
('MathCaptcha', 'Math Captcha'),
('auto_Textarea', _('CharField as Textarea')),
('auto_hidden_input', _('CharField as HiddenInput')),
('auto_referral_page', _('Referral page as HiddenInput')),
('auto_GET_parameter', _('GET parameter as HiddenInput')),
('CharFieldWithValidator', 'CharFieldWithValidator'),)
if recaptcha_installed():
FIELD_TYPE += (('ReCaptcha', 'reCAPTCHA'),)
@python_2_unicode_compatible
class ExtraField(SortableMixin):
"""
"""
form = models.ForeignKey(ContactPlus, verbose_name=_("Contact Form"), null=True, on_delete=models.SET_NULL)
label = models.CharField(_('Label'), max_length=100)
fieldType = models.CharField(max_length=100, choices=FIELD_TYPE)
initial = models.CharField(
        _('Initial Value'), max_length=250, blank=True, null=True)
placeholder = models.CharField(
_('Placeholder Value'), max_length=250, blank=True, null=True)
required = models.BooleanField(
_('Mandatory field'), default=True)
widget = models.CharField(
_('Widget'), max_length=250, blank=True, null=True,
help_text=_("Will be ignored in the current version."))
inline_ordering_position = models.IntegerField(blank=True, null=True, editable=True)
def __str__(self):
return self.label
class Meta:
ordering = ('inline_ordering_position',)
@python_2_unicode_compatible
class ContactRecord(Model):
"""
"""
contact_form = models.ForeignKey(ContactPlus, verbose_name=_("Contact Form"), null=True, on_delete=models.SET_NULL)
date_of_entry = models.DateTimeField(auto_now_add=True)
date_processed = models.DateTimeField(null=True, blank=True, help_text=_("Date the Record was processed."))
data = JSONField(null=True, blank=True, default={})
class Meta():
ordering = ['date_of_entry', 'contact_form', ]
verbose_name = _("Contact Record")
verbose_name_plural = _("Contact Records")
@property
def is_processed(self):
if self.date_processed:
return True
else:
return False
def __str__(self):
return _(u"Record for %(contact)s recorded on %(date)s") % {'contact':self.contact_form,
'date': self.date_of_entry.strftime('%d. %b %Y') }
| {
"content_hash": "09a3ca0fd669c612211d8dc1923eee6a",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 119,
"avg_line_length": 36.828947368421055,
"alnum_prop": 0.6170060735977134,
"repo_name": "arteria/cmsplugin-contact-plus",
"id": "3cc960a4cc249f2f59c390b43188615b8e0fa7d2",
"size": "5598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugin_contact_plus/models.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "37470"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('editor', '0048_themeaccess'),
]
operations = [
migrations.AlterField(
model_name='projectaccess',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projectaccess', to='editor.project'),
),
]
| {
"content_hash": "1bf3d17bde788f8a89267d100d92c550",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 132,
"avg_line_length": 26.58823529411765,
"alnum_prop": 0.6371681415929203,
"repo_name": "numbas/editor",
"id": "3065f696eba7e1bd2ae474e5fafaf822c5434a5e",
"size": "499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "editor/migrations/0049_auto_20201012_1409.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44056"
},
{
"name": "HTML",
"bytes": "548468"
},
{
"name": "JavaScript",
"bytes": "2344000"
},
{
"name": "Less",
"bytes": "205670"
},
{
"name": "Makefile",
"bytes": "10028"
},
{
"name": "Python",
"bytes": "551931"
}
],
"symlink_target": ""
} |
import datetime
import errno
import fnmatch
import functools
import os
import re
import subprocess
import sys
import threading
import time
import hashlib
class cached_property(object):
def __init__(self, func):
functools.update_wrapper(self, func)
self.func = func
def __get__(self, instance, owner_type=None):
if instance is None:
return self
try:
return instance.__dict__[self.__name__]
except KeyError:
value = self.func(instance)
instance.__dict__[self.__name__] = value
return value
def makedirs(*args):
path = os.path.join(*args)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
return path
_FIND_SKIP_DIRS = frozenset(('.git', '.svn'))
def find_in_tree(root, name, type='file'):
pattern = fnmatch.translate(name)
for dir_path, dir_names, file_names in os.walk(root):
# Look for the file/directory.
candidates = dict(file=file_names, dir=dir_names)[type]
found = next((x for x in candidates if re.match(pattern, x)), None)
if found:
return os.path.join(dir_path, found)
# We need to skip .git directories, just in case they are in our
# tarballs.
dir_names[:] = [x for x in dir_names if x not in _FIND_SKIP_DIRS]
# Bail when we hit a fork in the directory tree.
if len(dir_names) > 1 or file_names:
return
def guess_name(path):
path = re.sub(r'[#?].+$', '', path) # Query strings and fragments.
path = re.sub(r'(\.[\w-]+)+$', '', path) # Extensions.
path = re.sub(r'([._-])v?\d+(\.|\d|$).*$', '', path) # Version numbers.
path = re.sub(r'([._-])[._-]+', r'\1', path) # Collapse punctuation.
part_iter = reversed(re.split(r'[@:/+]', path)) # Split!
part_iter = (re.sub(r'(^\W+|\W+$)', '', x) for x in part_iter) # Strip outer punctuation.
part_iter = (x for x in part_iter if x) # Skip empties.
return next(part_iter).lower()
def linktree(src, dst, symlinks=False, ignore=None):
if not symlinks:
raise NotImplementedError('symlinks')
src = os.path.abspath(src)
dst = os.path.abspath(dst)
for src_dir, dir_names, file_names in os.walk(src):
dst_dir = os.path.join(dst, os.path.relpath(src_dir, src))
if ignore is not None:
ignored_names = ignore(src_dir, dir_names + file_names)
else:
ignored_names = set()
for is_dir, names in ((True, dir_names), (False, file_names)):
dont_walk = set()
for name in names:
if name in ignored_names:
if is_dir:
dont_walk.add(name)
continue
src_path = os.path.join(src_dir, name)
dst_path = os.path.join(dst_dir, name)
if os.path.islink(src_path):
                    # Resolve the link target relative to the directory that
                    # contains the symlink, then recreate it at the destination.
                    rel_link = os.readlink(src_path)
                    abs_link = os.path.join(os.path.dirname(src_path), rel_link)
                    os.symlink(abs_link, dst_path)
if is_dir:
dont_walk.add(name)
elif is_dir:
makedirs(dst_path)
else:
try:
os.link(src_path, dst_path)
except:
print 'Error during: os.link(%r, %r)' % (src_path, dst_path)
raise
if dont_walk:
names[:] = [x for x in names if x not in dont_walk]
class HashingWriter(object):
def __init__(self, fh, hasher=None):
self._fh = fh
self._hasher = hasher or hashlib.md5()
def write(self, data):
self._fh.write(data)
self._hasher.update(data)
def hexdigest(self):
return self._hasher.hexdigest()
def _checksum_file(path, hasher=None):
hasher = hasher or hashlib.md5()
with open(path, 'rb') as fh:
while True:
chunk = fh.read(16384)
if not chunk:
break
hasher.update(chunk)
return hasher.name, hasher.hexdigest()
def checksum_file(path, hasher=None):
return '%s:%s' % _checksum_file(path, hasher)
def assert_file_checksum(path, checksum):
m = re.match(r'^(md5|sha1)[:=]([0-9a-fA-F]+)$', checksum)
if not m:
raise ValueError('unknown checksum format %r' % checksum)
name, hash1 = m.groups()
_, hash2 = _checksum_file(path, getattr(hashlib, name)())
if hash1 != hash2:
raise ValueError('%s:%s does not match expected %s' % (name, hash2, checksum))
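# Illustrative usage sketch, not part of the original module: checksum a file
# on disk and verify that the digest round-trips through assert_file_checksum.
if __name__ == '__main__':
    import tempfile
    with tempfile.NamedTemporaryFile(delete=False) as fh:
        fh.write(b'hello world')
        tmp_path = fh.name
    digest = checksum_file(tmp_path)        # e.g. 'md5:5eb63bbbe01eeed0...'
    assert_file_checksum(tmp_path, digest)  # raises ValueError on mismatch
    print(digest)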
| {
"content_hash": "1d8c5d16eed8f43cc3f2852d8cc40212",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 93,
"avg_line_length": 31.14,
"alnum_prop": 0.5427103403982017,
"repo_name": "westernx/vee",
"id": "f264441f046cc5822ebd9103a193e4aa74468509",
"size": "4689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vee/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "590"
},
{
"name": "Makefile",
"bytes": "75"
},
{
"name": "Python",
"bytes": "334426"
},
{
"name": "Ruby",
"bytes": "479"
},
{
"name": "Shell",
"bytes": "1027"
},
{
"name": "Smarty",
"bytes": "810"
}
],
"symlink_target": ""
} |
from metrics_reporter import MetricsReporter
__version__ = '1.0.0'
| {
"content_hash": "c4027f423a6449d40b2bf749ce21cf79",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 44,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.7352941176470589,
"repo_name": "smartcat-labs/smartcat-os-metrics",
"id": "7cfe657722cbeea127c0ecdf379f633a1c03857c",
"size": "68",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reporter/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "13251"
}
],
"symlink_target": ""
} |
'''
COPYRIGHT and LICENSE:
---------------------
The MIT License (MIT)
Copyright (C) 2016 Sundar Nagarajan
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# The following three can be changed
ALWAYS_AVOID = []
ALWAYS_AVOID_REPL = []
AUTOLOAD_ALL = True
# ------------------------------------------------------------------------
# ***************** Known bug ********************************************
# ------------------------------------------------------------------------
# If using AUTOLOAD_ALL=True, AND 'unicode' is not in ALWAYS_AVOID,
# pydoc2 (pydoc on python2) will fail for this module.
# To use pydoc on py23compat with AUTOLOAD_ALL=True, do one of the
# following:
#
# - Use pydoc3 in a python3 virtualenv
# - In a REPL, import py23compat and then do help(py23compat)
# - Call pydoc setting (shell) environment var PY23COMPAT_NO_AUTOLOAD)
# - Alias pydoc to 'PY23COMPAT_NO_AUTOLOAD=yes pydoc'
#
# Note that PY23COMPAT_NO_AUTOLOAD disables effect of AUTOLOAD_ALL in
# ALL conditions- not just for pydoc - there is no way to effect special
# behavior when being imported by pydoc
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# Shouldn't have to change anything below this
# ------------------------------------------------------------------------
import sys
v_info = sys.version_info
(PY2, PY3) = (v_info.major == 2, v_info.major == 3)
PY_MINOR = v_info.minor
PYPY = (PY2 and sys.subversion[0].lower() == 'pypy')
del v_info
import warnings
if PY2 and PY_MINOR < 6:
    warnings.warn('py23compat fixups will not work in Python2 < 2.6')
MARKER = '__inject_compat_done__'
globals()[MARKER] = True # Never need to inject into THIS module
IN_REPL = hasattr(sys, 'ps1')
future_imports = ['absolute_import', 'division',
'print_function', 'unicode_literals']
builtins_imports = [
'ascii', 'bytes', 'chr', 'dict', 'filter', 'hex', 'input',
'int', 'map', 'next', 'oct', 'open', 'pow', 'range', 'round',
'str', 'super', 'zip',
]
obsolete_imports = [
'apply', 'cmp', 'coerce', 'execfile', 'file', 'long', 'raw_input',
'reduce', 'reload', 'unicode', 'xrange', 'StandardError',
]
def is_str(x):
'''
Returns-->boolean
    On Python 2 matches 'unicode' and 'str'. On Python 3, matches ONLY 'str'.
'''
if PY2:
return isinstance(x, unicode)
else:
return isinstance(x, str)
def is_int(x):
'''
Returns-->boolean
    On Python 2 matches 'long' and 'int'. On Python 3, matches ONLY 'int'.
'''
if PY2:
return isinstance(x, (int, long))
else:
return isinstance(x, int)
def inject_compat(avoid=None):
'''
avoid-->LIST of strings: imports to avoid
This is in ADDITION to ALWAYS_AVOID above (non-REPL use) and in
ADDITION to ALWAYS_AVOID_REPL above (for REPL use).
'''
if PY2 and PY_MINOR < 6:
return
if avoid is None:
avoid = []
avoid_keys = dict.fromkeys(avoid)
avoid_keys.update(dict.fromkeys(ALWAYS_AVOID))
avoid_keys_repl = dict.fromkeys(avoid)
avoid_keys_repl.update(dict.fromkeys(ALWAYS_AVOID_REPL))
callerframe = sys._getframe(1)
# When in REPL, inject compatibility into top-most frame (also)
if IN_REPL:
topframe = callerframe
while topframe.f_back is not None:
topframe = topframe.f_back
framelist = [callerframe, topframe]
else:
topframe = None
framelist = [callerframe]
if AUTOLOAD_ALL and AUTOLOAD:
if callerframe.f_back is not None:
framelist += [callerframe.f_back]
d = get_import_dict()
for frame in framelist:
if MARKER in frame.f_globals:
continue
if topframe and IN_REPL:
avoid_dict = avoid_keys_repl
else:
avoid_dict = avoid_keys
for (k, v) in d.items():
if k not in avoid_dict:
frame.f_globals[k] = v
if frame is topframe and IN_REPL:
b = frame.f_globals['__builtins__']
if isinstance(b, dict):
b['is_int'] = is_int
b['is_str'] = is_str
else:
setattr(b, 'is_int', is_int)
setattr(b, 'is_str', is_str)
frame.f_globals[MARKER] = True
def import_to_dict(d, mod_name, name_list):
'''
d-->dict
mod_name-->str
name_list-->LIST of str
Returns-->Nothing: modifies d
Only intended to be called from get_import_dict
'''
import importlib
try:
mod = importlib.import_module(mod_name)
except:
return
for name in name_list:
if hasattr(mod, name):
d[name] = getattr(mod, name)
def get_import_dict():
'''
Returns-->dict
Only intended to be called by inject_compat
'''
import future # noqa: F401
import builtins # noqa: F401
ret = {}
import_to_dict(ret, '__future__', future_imports)
import_to_dict(ret, 'future.builtins', builtins_imports)
import_to_dict(ret, 'future.builtins.disabled', obsolete_imports)
return ret
# Inject compatibility by JUST importing this module
# but ONLY when running in interactive mode
AUTOLOAD = False
if IN_REPL:
inject_compat()
# Allow pydoc when using AUTOLOAD_ALL by setting env var
import os
if AUTOLOAD_ALL and 'PY23COMPAT_NO_AUTOLOAD' not in os.environ:
AUTOLOAD = True
inject_compat()
__doc__ = '''
Dependencies:
------------
Uses and requires installation of the excellent 'future' package.
Install with pip install future
This module has no other external dependencies. You can place it
anywhere on your PYTHONPATH
Does not work with Python2 < 2.6 - importing this module or calling
inject_compat() should have no effect in such cases
Four capabilities, three variables to set and one method:
--------------------------------------------------------
Capability Variable / Method
---------- --------
Customize features for source files ALWAYS_AVOID variable
Customize features for REPL ALWAYS_AVOID_REPL variable
Select features by just importing AUTOLOAD_ALL variable
Select features on per-file basis inject_compat method
AUTOLOAD_ALL versus calling inject_compat:
If you want to choose different features for different source
files, you NEED TO:
Add following TWO lines to the top of each source file
from py23compat import inject_compat
inject_compat()
You can avoid SOME features across ALL source files by
adding those features to ALWAYS_AVOID, and add ADDITIONAL
features to avoid on a per-file basis by adding avoid=[...]
to the inject_compat() call in each source file
In this model, you NEED to add TWO lines to each source file,
though you may not need the avoid=[..] in the call to
inject_compat in all source files.
If all your source files are in the SAME STAGE of Python-2-3
compatibility / migration, you can:
Disable some features across ALL source files using
ALWAYS_AVOID
Set AUTOLOAD_ALL = True
Load remaining features in each source file by JUST importing
py23compat using a line like:
import py23compat # noqa: F401
'# noqa: F401' asks PEP8 not to complain about an unused import
In this model, you CANNOT customize features on a per-file
basis.
The features avoided in the REPL are separate (ALWAYS_AVOID_REPL).
Features are ALWAYS injected into the REPL by JUST importing py23compat.
is_int and is_str methods:
-------------------------
Some modules (such as simplejson, but I am sure there are more), can
RETURN objects of type 'unicode' or 'long' in Python2. If you have
disabled 'unicode' and 'long' in Python2 (e.g. by having an empty
ALWAYS_AVOID list), then you have no way to check if the returned
    value is an instance of 'unicode' or 'long' respectively. In addition,
    in such cases, the returned variable will NOT be an instance of
    str | int respectively (although its BEHAVIOR will be similar to those
respective types).
In such cases, you can use the is_str and is_int methods by importing
them from this module. On Python2 is_int will match int and long and
is_str will match str and unicode, while on Python3 is_int will match
only int and is_str will match only str.
You only need these methods if you would have otherwise used isinstance
for this purpose.
Python version variables:
------------------------
PY2-->boolean: Whether running in python2 (any minor version)
PY3-->boolean: Whether running in python3 (any minor version)
PYPY-->boolean: Whether running in pypy (any minor version)
PY_MINOR-->int: Python minor version
Variables, imports and what is injected:
---------------------------------------
REPL Variable import Effect
-----------------------------------------------------------------------
YES ALWAYS_AVOID_REPL plain import Except ALWAYS_AVOID_REPL
is_int and is_str automatic
YES ALWAYS_AVOID_REPL import + Except ALWAYS_AVOID_REPL
avoid param inject_compat avoid param has no effect
is_int, is_str need import
Source ALWAYS_AVOID plain import Except ALWAYS_AVOID
is_int, is_str need import
Source ALWAYS_AVOID import + Except ALWAYS_AVOID AND
avoid param inject_compat except avoid param
is_int, is_str need import
-----------------------------------------------------------------------
inject_compat ONLY injects compatibility names into:
Caller's stack frame
Top-most stack frame ONLY if running in REPL
Importing this module ONLY injects compatibility names into:
Top-most stack frame ONLY if running in REPL
Into caller's (importer's) stack frame if AUTOLOAD_ALL is set
When using the REPL, if ANY module IMPORTS py23compat, the
compatibility names will be injected into the TOP-MOST stack
frame (repeat: ONLY when using the REPL)
Note the difference between the CALLER (importer) stack frame and
the TOP-MOST stack frame.
Usage for NEW python packages starting from scratch:
---------------------------------------------------
A. Make a COPY of this module (file) for EACH package - it has
variables at the top that you CAN (and SHOULD) change to reflect
the stage the package is in (in terms of Python2-3 compatibility)
B. For a NEW package started from scratch, I recommend:
1. Keep ALWAYS_AVOID and ALWAYS_AVOID_REPL EMPTY
2. Set AUTOLOAD_ALL = True
Allows injecting compatibility code by JUST importing py23compat
Still obeys ALWAYS_AVOID and ALWAYS_AVOID_REPL
if you need them
3. Write your package using (only) Python3 idioms and constructs.
It should run unchanged in Python2 (need to pip install future)
Usage for making existing Python2 packages compatible with Python2-3:
--------------------------------------------------------------------
A. Make a COPY of this module (file) for EACH package - it has
variables at the top that you CAN (and SHOULD) change to reflect
the stage the package is in (in terms of Python2-3 compatibility)
B. Make a list of all the changed features and obsoleted features
being used by your package.
See future_imports for NEW behavior in Python3
See builtins_imports for CHANGED behavior
See obsolete_imports for OBSOLETED classes, methods
E. Use one of the following strategies. Note there are MANY possible
strategies, and the python-future website has a much more robust
and complete discussion of migration strategies.
Feature-by-feature
1. Start by adding ALL the features your package is using that has
been changed or obsoleted in Python3 to ALWAYS_AVOID.
2. If you regularly explore your package interactively using a REPL,
add JUST the changed features (not the obsoleted features) to
ALWAYS_AVOID_REPL also.
3. Go through your package feature-by-feature and once a
feature has been upgraded across your package, remove it
from ALWAYS_AVOID and ALWAYS_AVOID_REPL.
4. During this time, you can keep AUTOLOAD_ALL = True and
JUST import py23compat at the top of each source file
5. Once you have emptied ALWAYS_AVOID and ALWAYS_AVOID_REPL,
your package should run unchanged in Python 2 and 3
File-by-file
1. Add two lines at the top of each source file:
from py23compat import inject_compat
inject_compat(avoid=xyz)
where xyz is a set of features to disable for that file
2. In each file, upgrade each disabled feature and then
remove it from the avoid list
3. Once the avoid list is empty, you can change the two lines
at the top to be just:
import py23compat
4. Once all files have been upgraded and have an empty avoid list,
your package should run unchanged in Python 2 and 3
Known bug: interaction with pydoc on Python2:
--------------------------------------------
If using AUTOLOAD_ALL=True, AND 'unicode' is not in ALWAYS_AVOID,
pydoc2 (pydoc on python2) will fail for this module.
To use pydoc on py23compat with AUTOLOAD_ALL=True, do one of the
following:
- Use pydoc3 in a python3 virtualenv
- In a REPL, import py23compat and then do help(py23compat)
- Call pydoc setting (shell) environment var PY23COMPAT_NO_AUTOLOAD)
- Alias pydoc to 'PY23COMPAT_NO_AUTOLOAD=yes pydoc'
Note that PY23COMPAT_NO_AUTOLOAD disables effect of AUTOLOAD_ALL in
ALL conditions- not just for pydoc - there is no way to effect special
behavior when being imported by pydoc
More information on python-future and Python2-3 compatibility:
-------------------------------------------------------------
See the python-future site for more information on writing python
programs that can run in Python 2.x or 3.x, Python 2-->3 migration
and the python-future package.
Python-future Quick Start Guide: http://python-future.org/quickstart.html
Idioms for writing Python 2-3 compatible code:
http://python-future.org/compatible_idioms.html
Importing explicitly:
http://python-future.org/imports.html#explicit-imports
The contents of future_imports, builtins_imports and obsolete_imports
are below. These are the imports that can be customized with the 'avoid'
keyword to inject_compat:
'''
__doc__ += '''
future_imports = %s
builtins_imports = %s
obsolete_imports = %s
''' % (
str(future_imports), str(builtins_imports), str(obsolete_imports)
)
| {
"content_hash": "be5e582b579115f122c942c35433e26f",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 80,
"avg_line_length": 38.125290023201856,
"alnum_prop": 0.6189143135345667,
"repo_name": "sundarnagarajan/py23compat",
"id": "d73cf359914b7c24f2b6a3ec9827bb76b4160e0b",
"size": "16432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py23compat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16432"
}
],
"symlink_target": ""
} |
"""Support for Velbus devices."""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import EVENT_HOMEASSISTANT_STOP, CONF_PORT
from homeassistant.helpers.discovery import load_platform
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['python-velbus==2.0.21']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'velbus'
VELBUS_MESSAGE = 'velbus.message'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_PORT): cv.string,
})
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Set up the Velbus platform."""
import velbus
port = config[DOMAIN].get(CONF_PORT)
controller = velbus.Controller(port)
hass.data[DOMAIN] = controller
def stop_velbus(event):
"""Disconnect from serial port."""
_LOGGER.debug("Shutting down ")
controller.stop()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_velbus)
def callback():
modules = controller.get_modules()
discovery_info = {
'switch': [],
'binary_sensor': [],
'climate': [],
'sensor': []
}
for module in modules:
for channel in range(1, module.number_of_channels() + 1):
for category in discovery_info:
if category in module.get_categories(channel):
discovery_info[category].append((
module.get_module_address(),
channel
))
load_platform(hass, 'switch', DOMAIN,
discovery_info['switch'], config)
load_platform(hass, 'climate', DOMAIN,
discovery_info['climate'], config)
load_platform(hass, 'binary_sensor', DOMAIN,
discovery_info['binary_sensor'], config)
load_platform(hass, 'sensor', DOMAIN,
discovery_info['sensor'], config)
controller.scan(callback)
return True
class VelbusEntity(Entity):
"""Representation of a Velbus entity."""
def __init__(self, module, channel):
"""Initialize a Velbus entity."""
self._module = module
self._channel = channel
@property
def unique_id(self):
"""Get unique ID."""
serial = 0
if self._module.serial == 0:
serial = self._module.get_module_address()
else:
serial = self._module.serial
return "{}-{}".format(serial, self._channel)
@property
def name(self):
"""Return the display name of this entity."""
return self._module.get_name(self._channel)
@property
def should_poll(self):
"""Disable polling."""
return False
async def async_added_to_hass(self):
"""Add listener for state changes."""
self._module.on_status_update(self._channel, self._on_update)
def _on_update(self, state):
self.schedule_update_ha_state()
| {
"content_hash": "f8c54d483e6d311262206db4d4ae7bc1",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 69,
"avg_line_length": 29.563106796116504,
"alnum_prop": 0.5855500821018063,
"repo_name": "HydrelioxGitHub/home-assistant",
"id": "38d8b6c3f1cc52d1bf4e68c19cab193f8019adab",
"size": "3045",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/velbus/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "14330009"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
} |
import re
import sys
from .base import BaseMatcher
from .path import PathMatcher
from .query import QueryMatcher
from ..regex import isregex
if sys.version_info < (3,): # Python 2
from urlparse import urlparse
else: # Python 3
from urllib.parse import urlparse
# URI protocol test regular expression
protoregex = re.compile('^http[s]?://', re.IGNORECASE)
class URLMatcher(BaseMatcher):
"""
URLMatcher implements an URL schema matcher.
"""
# Matches URL as regular expression
regex = False
def __init__(self, url):
if not url:
raise ValueError('url argument cannot be empty')
# Store original URL value
self.url = url
# Process as regex value
if isregex(url):
self.regex = True
self.expectation = url
else:
# Add protocol prefix in the URL
if not protoregex.match(url):
self.url = 'http://{}'.format(url)
self.expectation = urlparse(self.url)
def match_path(self, req):
path = self.expectation.path
if not path:
return True
return PathMatcher(path).match(req)
def match_query(self, req):
query = self.expectation.query
if not query:
return True
return QueryMatcher(query).match(req)
@BaseMatcher.matcher
def match(self, req):
url = self.expectation
# Match as regex
if self.regex:
return self.compare(url, req.url.geturl(), regex_expr=True)
# Match URL
return all([
self.compare(url.scheme, req.url.scheme),
self.compare(url.hostname, req.url.hostname),
self.compare(url.port or req.url.port, req.url.port),
self.match_path(req),
self.match_query(req)
])
def __str__(self):
return self.url
def __repr__(self):
return '{}({})'.format(self.name, self.url)
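# Illustrative usage sketch, not part of the original module: plain URLs are
# normalised with an http:// prefix, while compiled regular expressions are
# kept for regex-based comparison.
if __name__ == '__main__':
    matcher = URLMatcher('example.com/api')
    print(matcher)                        # http://example.com/api
    print(matcher.expectation.hostname)   # example.com
    print(matcher.expectation.path)       # /api
    regex_matcher = URLMatcher(re.compile(r'.*/users/\d+$'))
    print(regex_matcher.regex)            # True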
| {
"content_hash": "86ce588b859dacb6e23f058f9f1a48c0",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 71,
"avg_line_length": 26.64,
"alnum_prop": 0.5790790790790791,
"repo_name": "h2non/pook",
"id": "8939a55eb176bd844d734cbf4f5fe90b648363f2",
"size": "1998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pook/matchers/url.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1528"
},
{
"name": "Python",
"bytes": "132129"
}
],
"symlink_target": ""
} |
"""Interfaces for launching and remotely controlling Web browsers."""
# Maintained by Georg Brandl.
import os
import shlex
import sys
import stat
import subprocess
import time
__all__ = ["Error", "open", "open_new", "open_new_tab", "get", "register"]
class Error(Exception):
pass
_browsers = {} # Dictionary of available browser controllers
_tryorder = [] # Preference order of available browsers
def register(name, klass, instance=None, update_tryorder=1):
"""Register a browser connector and, optionally, connection."""
_browsers[name.lower()] = [klass, instance]
if update_tryorder > 0:
_tryorder.append(name)
elif update_tryorder < 0:
_tryorder.insert(0, name)
def get(using=None):
"""Return a browser launcher instance appropriate for the environment."""
if using is not None:
alternatives = [using]
else:
alternatives = _tryorder
for browser in alternatives:
if '%s' in browser:
# User gave us a command line, split it into name and args
browser = shlex.split(browser)
if browser[-1] == '&':
return BackgroundBrowser(browser[:-1])
else:
return GenericBrowser(browser)
else:
# User gave us a browser name or path.
try:
command = _browsers[browser.lower()]
except KeyError:
command = _synthesize(browser)
if command[1] is not None:
return command[1]
elif command[0] is not None:
return command[0]()
raise Error("could not locate runnable browser")
# Please note: the following definition hides a builtin function.
# It is recommended one does "import webbrowser" and uses webbrowser.open(url)
# instead of "from webbrowser import *".
def open(url, new=0, autoraise=True):
for name in _tryorder:
browser = get(name)
if browser.open(url, new, autoraise):
return True
return False
def open_new(url):
return open(url, 1)
def open_new_tab(url):
return open(url, 2)
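# Editor's sketch (not part of the original module): typical use of the three
# module-level helpers above. The URL is a placeholder and nothing is executed
# at import time.
def _example_open_usage():
    # new=0 reuses an existing window, new=1 asks for a new window,
    # new=2 asks for a new tab (browsers that cannot comply fall back).
    if not open("https://example.org/", new=2, autoraise=True):
        # No registered browser accepted the URL; ask for a controller
        # explicitly (get() raises Error if none is runnable).
        get().open_new_tab("https://example.org/")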
def _synthesize(browser, update_tryorder=1):
"""Attempt to synthesize a controller base on existing controllers.
This is useful to create a controller when a user specifies a path to
an entry in the BROWSER environment variable -- we can copy a general
controller to operate using a specific installation of the desired
browser in this way.
If we can't create a controller in this way, or if there is no
executable for the requested browser, return [None, None].
"""
cmd = browser.split()[0]
if not _iscommand(cmd):
return [None, None]
name = os.path.basename(cmd)
try:
command = _browsers[name.lower()]
except KeyError:
return [None, None]
# now attempt to clone to fit the new name:
controller = command[1]
if controller and name.lower() == controller.basename:
import copy
controller = copy.copy(controller)
controller.name = browser
controller.basename = os.path.basename(browser)
register(browser, None, controller, update_tryorder)
return [None, controller]
return [None, None]
if sys.platform[:3] == "win":
def _isexecutable(cmd):
cmd = cmd.lower()
if os.path.isfile(cmd) and cmd.endswith((".exe", ".bat")):
return True
for ext in ".exe", ".bat":
if os.path.isfile(cmd + ext):
return True
return False
else:
def _isexecutable(cmd):
if os.path.isfile(cmd):
mode = os.stat(cmd)[stat.ST_MODE]
if mode & stat.S_IXUSR or mode & stat.S_IXGRP or mode & stat.S_IXOTH:
return True
return False
def _iscommand(cmd):
"""Return True if cmd is executable or can be found on the executable
search path."""
if _isexecutable(cmd):
return True
path = os.environ.get("PATH")
if not path:
return False
for d in path.split(os.pathsep):
exe = os.path.join(d, cmd)
if _isexecutable(exe):
return True
return False
# General parent classes
class BaseBrowser(object):
"""Parent class for all browsers. Do not use directly."""
args = ['%s']
def __init__(self, name=""):
self.name = name
self.basename = name
def open(self, url, new=0, autoraise=True):
raise NotImplementedError
def open_new(self, url):
return self.open(url, 1)
def open_new_tab(self, url):
return self.open(url, 2)
class GenericBrowser(BaseBrowser):
"""Class for all browsers started with a command
and without remote functionality."""
def __init__(self, name):
if isinstance(name, basestring):
self.name = name
self.args = ["%s"]
else:
# name should be a list with arguments
self.name = name[0]
self.args = name[1:]
self.basename = os.path.basename(self.name)
def open(self, url, new=0, autoraise=True):
cmdline = [self.name] + [arg.replace("%s", url)
for arg in self.args]
try:
if sys.platform[:3] == 'win':
p = subprocess.Popen(cmdline)
else:
p = subprocess.Popen(cmdline, close_fds=True)
return not p.wait()
except OSError:
return False
class BackgroundBrowser(GenericBrowser):
"""Class for all browsers which are to be started in the
background."""
def open(self, url, new=0, autoraise=True):
cmdline = [self.name] + [arg.replace("%s", url)
for arg in self.args]
try:
devnull = file(os.devnull, "r+")
if sys.platform[:3] == 'win':
p = subprocess.Popen(cmdline, stdout=devnull, stderr=devnull)
else:
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
p = subprocess.Popen(cmdline, close_fds=True, preexec_fn=setsid,
stdout=devnull, stderr=devnull)
return (p.poll() is None)
except OSError:
return False
class UnixBrowser(BaseBrowser):
"""Parent class for all Unix browsers with remote functionality."""
raise_opts = None
remote_args = ['%action', '%s']
remote_action = None
remote_action_newwin = None
remote_action_newtab = None
background = False
redirect_stdout = True
def _invoke(self, args, remote, autoraise):
raise_opt = []
if remote and self.raise_opts:
# use autoraise argument only for remote invocation
autoraise = int(autoraise)
opt = self.raise_opts[autoraise]
if opt: raise_opt = [opt]
cmdline = [self.name] + raise_opt + args
if remote or self.background:
inout = file(os.devnull, "r+")
else:
# for TTY browsers, we need stdin/out
inout = None
# if possible, put browser in separate process group, so
# keyboard interrupts don't affect browser as well as Python
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
p = subprocess.Popen(cmdline, close_fds=True, stdin=inout,
stdout=(self.redirect_stdout and inout or None),
stderr=inout, preexec_fn=setsid)
if remote:
# wait five seconds. If the subprocess is not finished, the
# remote invocation has (hopefully) started a new instance.
time.sleep(1)
rc = p.poll()
if rc is None:
time.sleep(4)
rc = p.poll()
if rc is None:
return True
# if remote call failed, open() will try direct invocation
return not rc
elif self.background:
if p.poll() is None:
return True
else:
return False
else:
return not p.wait()
def open(self, url, new=0, autoraise=True):
if new == 0:
action = self.remote_action
elif new == 1:
action = self.remote_action_newwin
elif new == 2:
if self.remote_action_newtab is None:
action = self.remote_action_newwin
else:
action = self.remote_action_newtab
else:
raise Error("Bad 'new' parameter to open(); " +
"expected 0, 1, or 2, got %s" % new)
args = [arg.replace("%s", url).replace("%action", action)
for arg in self.remote_args]
success = self._invoke(args, True, autoraise)
if not success:
# remote invocation failed, try straight way
args = [arg.replace("%s", url) for arg in self.args]
return self._invoke(args, False, False)
else:
return True
class Mozilla(UnixBrowser):
"""Launcher class for Mozilla/Netscape browsers."""
raise_opts = ["-noraise", "-raise"]
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-tab"
background = True
Netscape = Mozilla
class Galeon(UnixBrowser):
"""Launcher class for Galeon/Epiphany browsers."""
raise_opts = ["-noraise", ""]
remote_args = ['%action', '%s']
remote_action = "-n"
remote_action_newwin = "-w"
background = True
class Chrome(UnixBrowser):
"Launcher class for Google Chrome browser."
remote_args = ['%action', '%s']
remote_action = ""
remote_action_newwin = "--new-window"
remote_action_newtab = ""
background = True
Chromium = Chrome
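# Editor's sketch (not part of the original module): how a new remote-capable
# browser could be described with the UnixBrowser attributes used above.
# "acmebrowser" and its command-line flags are invented for illustration and
# this class is never registered automatically.
class _ExampleAcmeBrowser(UnixBrowser):
    "Illustrative launcher class for a hypothetical 'acmebrowser' binary."
    remote_args = ['%action', '%s']
    remote_action = ""
    remote_action_newwin = "--new-window"
    remote_action_newtab = "--new-tab"
    background = True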
class Opera(UnixBrowser):
"Launcher class for Opera browser."
raise_opts = ["-noraise", ""]
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-page"
background = True
class Elinks(UnixBrowser):
"Launcher class for Elinks browsers."
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-tab"
background = False
# elinks doesn't like its stdout to be redirected -
# it uses redirected stdout as a signal to do -dump
redirect_stdout = False
class Konqueror(BaseBrowser):
"""Controller for the KDE File Manager (kfm, or Konqueror).
See the output of ``kfmclient --commands``
for more information on the Konqueror remote-control interface.
"""
def open(self, url, new=0, autoraise=True):
# XXX Currently I know no way to prevent KFM from opening a new win.
if new == 2:
action = "newTab"
else:
action = "openURL"
devnull = file(os.devnull, "r+")
# if possible, put browser in separate process group, so
# keyboard interrupts don't affect browser as well as Python
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
try:
p = subprocess.Popen(["kfmclient", action, url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull)
except OSError:
# fall through to next variant
pass
else:
p.wait()
# kfmclient's return code unfortunately has no meaning as it seems
return True
try:
p = subprocess.Popen(["konqueror", "--silent", url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull,
preexec_fn=setsid)
except OSError:
# fall through to next variant
pass
else:
if p.poll() is None:
# Should be running now.
return True
try:
p = subprocess.Popen(["kfm", "-d", url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull,
preexec_fn=setsid)
except OSError:
return False
else:
return (p.poll() is None)
class Grail(BaseBrowser):
# There should be a way to maintain a connection to Grail, but the
# Grail remote control protocol doesn't really allow that at this
# point. It probably never will!
def _find_grail_rc(self):
import glob
import pwd
import socket
import tempfile
tempdir = os.path.join(tempfile.gettempdir(),
".grail-unix")
user = pwd.getpwuid(os.getuid())[0]
filename = os.path.join(tempdir, user + "-*")
maybes = glob.glob(filename)
if not maybes:
return None
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
for fn in maybes:
# need to PING each one until we find one that's live
try:
s.connect(fn)
except socket.error:
# no good; attempt to clean it out, but don't fail:
try:
os.unlink(fn)
except IOError:
pass
else:
return s
def _remote(self, action):
s = self._find_grail_rc()
if not s:
return 0
s.send(action)
s.close()
return 1
def open(self, url, new=0, autoraise=True):
if new:
ok = self._remote("LOADNEW " + url)
else:
ok = self._remote("LOAD " + url)
return ok
#
# Platform support for Unix
#
# These are the right tests because all these Unix browsers require either
# a console terminal or an X display to run.
def register_X_browsers():
# use xdg-open if around
if _iscommand("xdg-open"):
register("xdg-open", None, BackgroundBrowser("xdg-open"))
# The default GNOME3 browser
if "GNOME_DESKTOP_SESSION_ID" in os.environ and _iscommand("gvfs-open"):
register("gvfs-open", None, BackgroundBrowser("gvfs-open"))
# The default GNOME browser
if "GNOME_DESKTOP_SESSION_ID" in os.environ and _iscommand("gnome-open"):
register("gnome-open", None, BackgroundBrowser("gnome-open"))
# The default KDE browser
if "KDE_FULL_SESSION" in os.environ and _iscommand("kfmclient"):
register("kfmclient", Konqueror, Konqueror("kfmclient"))
if _iscommand("x-www-browser"):
register("x-www-browser", None, BackgroundBrowser("x-www-browser"))
# The Mozilla/Netscape browsers
for browser in ("mozilla-firefox", "firefox",
"mozilla-firebird", "firebird",
"iceweasel", "iceape",
"seamonkey", "mozilla", "netscape"):
if _iscommand(browser):
register(browser, None, Mozilla(browser))
# Konqueror/kfm, the KDE browser.
if _iscommand("kfm"):
register("kfm", Konqueror, Konqueror("kfm"))
elif _iscommand("konqueror"):
register("konqueror", Konqueror, Konqueror("konqueror"))
# Gnome's Galeon and Epiphany
for browser in ("galeon", "epiphany"):
if _iscommand(browser):
register(browser, None, Galeon(browser))
# Skipstone, another Gtk/Mozilla based browser
if _iscommand("skipstone"):
register("skipstone", None, BackgroundBrowser("skipstone"))
# Google Chrome/Chromium browsers
for browser in ("google-chrome", "chrome", "chromium", "chromium-browser"):
if _iscommand(browser):
register(browser, None, Chrome(browser))
# Opera, quite popular
if _iscommand("opera"):
register("opera", None, Opera("opera"))
# Next, Mosaic -- old but still in use.
if _iscommand("mosaic"):
register("mosaic", None, BackgroundBrowser("mosaic"))
# Grail, the Python browser. Does anybody still use it?
if _iscommand("grail"):
register("grail", Grail, None)
# Prefer X browsers if present
if os.environ.get("DISPLAY"):
register_X_browsers()
# Also try console browsers
if os.environ.get("TERM"):
if _iscommand("www-browser"):
register("www-browser", None, GenericBrowser("www-browser"))
# The Links/elinks browsers <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
if _iscommand("links"):
register("links", None, GenericBrowser("links"))
if _iscommand("elinks"):
register("elinks", None, Elinks("elinks"))
# The Lynx browser <http://lynx.isc.org/>, <http://lynx.browser.org/>
if _iscommand("lynx"):
register("lynx", None, GenericBrowser("lynx"))
# The w3m browser <http://w3m.sourceforge.net/>
if _iscommand("w3m"):
register("w3m", None, GenericBrowser("w3m"))
#
# Platform support for Windows
#
if sys.platform[:3] == "win":
class WindowsDefault(BaseBrowser):
def open(self, url, new=0, autoraise=True):
try:
os.startfile(url)
except WindowsError:
# [Error 22] No application is associated with the specified
# file for this operation: '<URL>'
return False
else:
return True
_tryorder = []
_browsers = {}
# First try to use the default Windows browser
register("windows-default", WindowsDefault)
# Detect some common Windows browsers, fallback to IE
iexplore = os.path.join(os.environ.get("PROGRAMFILES", "C:\\Program Files"),
"Internet Explorer\\IEXPLORE.EXE")
for browser in ("firefox", "firebird", "seamonkey", "mozilla",
"netscape", "opera", iexplore):
if _iscommand(browser):
register(browser, None, BackgroundBrowser(browser))
#
# Platform support for MacOS
#
if sys.platform == 'darwin':
# Adapted from patch submitted to SourceForge by Steven J. Burr
class MacOSX(BaseBrowser):
"""Launcher class for Aqua browsers on Mac OS X
Optionally specify a browser name on instantiation. Note that this
will not work for Aqua browsers if the user has moved the application
package after installation.
If no browser is specified, the default browser, as specified in the
Internet System Preferences panel, will be used.
"""
def __init__(self, name):
self.name = name
def open(self, url, new=0, autoraise=True):
assert "'" not in url
# hack for local urls
if not ':' in url:
url = 'file:'+url
# new must be 0 or 1
new = int(bool(new))
if self.name == "default":
# User called open, open_new or get without a browser parameter
script = 'open location "%s"' % url.replace('"', '%22') # opens in default browser
else:
# User called get and chose a browser
if self.name == "OmniWeb":
toWindow = ""
else:
# Include toWindow parameter of OpenURL command for browsers
# that support it. 0 == new window; -1 == existing
toWindow = "toWindow %d" % (new - 1)
cmd = 'OpenURL "%s"' % url.replace('"', '%22')
script = '''tell application "%s"
activate
%s %s
end tell''' % (self.name, cmd, toWindow)
# Open pipe to AppleScript through osascript command
osapipe = os.popen("osascript", "w")
if osapipe is None:
return False
# Write script to osascript's stdin
osapipe.write(script)
rc = osapipe.close()
return not rc
class MacOSXOSAScript(BaseBrowser):
def __init__(self, name):
self._name = name
def open(self, url, new=0, autoraise=True):
if self._name == 'default':
script = 'open location "%s"' % url.replace('"', '%22') # opens in default browser
else:
script = '''
tell application "%s"
activate
open location "%s"
end
'''%(self._name, url.replace('"', '%22'))
osapipe = os.popen("osascript", "w")
if osapipe is None:
return False
osapipe.write(script)
rc = osapipe.close()
return not rc
# Don't clear _tryorder or _browsers since OS X can use above Unix support
# (but we prefer using the OS X specific stuff)
register("safari", None, MacOSXOSAScript('safari'), -1)
register("firefox", None, MacOSXOSAScript('firefox'), -1)
register("MacOSX", None, MacOSXOSAScript('default'), -1)
#
# Platform support for OS/2
#
if sys.platform[:3] == "os2" and _iscommand("netscape"):
_tryorder = []
_browsers = {}
register("os2netscape", None,
GenericBrowser(["start", "netscape", "%s"]), -1)
# OK, now that we know what the default preference orders for each
# platform are, allow user to override them with the BROWSER variable.
if "BROWSER" in os.environ:
_userchoices = os.environ["BROWSER"].split(os.pathsep)
_userchoices.reverse()
# Treat choices in same way as if passed into get() but do register
# and prepend to _tryorder
for cmdline in _userchoices:
if cmdline != '':
cmd = _synthesize(cmdline, -1)
if cmd[1] is None:
register(cmdline, None, GenericBrowser(cmdline), -1)
cmdline = None # to make del work if _userchoices was empty
del cmdline
del _userchoices
# what to do if _tryorder is now empty?
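# Editor's sketch (not part of the original module): how the BROWSER handling
# above maps a value onto a controller. The command strings are examples and
# the lookups only happen if this sketch is called.
def _example_browser_env_lookup():
    # A template containing '%s' is split into a command; a trailing '&'
    # selects BackgroundBrowser instead of GenericBrowser.
    templated = get("firefox %s &")
    # A bare name reuses an already registered controller, or raises Error
    # if no runnable browser by that name can be found.
    named = get("opera")
    return templated, named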
def main():
import getopt
usage = """Usage: %s [-n | -t] url
-n: open new window
-t: open new tab""" % sys.argv[0]
try:
opts, args = getopt.getopt(sys.argv[1:], 'ntd')
except getopt.error, msg:
print >>sys.stderr, msg
print >>sys.stderr, usage
sys.exit(1)
new_win = 0
for o, a in opts:
if o == '-n': new_win = 1
elif o == '-t': new_win = 2
if len(args) != 1:
print >>sys.stderr, usage
sys.exit(1)
url = args[0]
open(url, new_win)
print "\a"
if __name__ == "__main__":
main()
| {
"content_hash": "6ed3357d8417feee07f20a68e750d97c",
"timestamp": "",
"source": "github",
"line_count": 705,
"max_line_length": 98,
"avg_line_length": 32.40709219858156,
"alnum_prop": 0.5643191666301921,
"repo_name": "cthlo/exoduscli",
"id": "1be332e5a87fd7d5ef0b9b43f3d1104c38084b74",
"size": "22870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exoduscli/lib/webbrowser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "89851"
}
],
"symlink_target": ""
} |
"""Manage evidence codes as reported by the Gene Ontology Consortium."""
__copyright__ = "Copyright (C) 2016-2019, DV Klopfenstein, H Tang. All rights reserved."
__author__ = "DV Klopfenstein"
import sys
import collections as cx
# pylint: disable=line-too-long
class EvidenceCodes(object):
"""From http://geneontology.org/page/guide-go-evidence-codes"""
# gocwiki.geneontology.org/index.php/Evidence_Code_Ontology_%28ECO%29
ntobj = cx.namedtuple("NtCode", "eco group name")
code2nt = cx.OrderedDict([
# Experimental Evidence codes:
("EXP", ntobj._make(["ECO:0000269", "Experimental", "Inferred from Experiment"])),
("IDA", ntobj._make(["ECO:0000314", "Experimental", "Inferred from Direct Assay"])),
("IPI", ntobj._make(["ECO:0000353", "Experimental", "Inferred from Physical Interaction"])),
("IMP", ntobj._make(["ECO:0000315", "Experimental", "Inferred from Mutant Phenotype"])),
("IGI", ntobj._make(["ECO:0000316", "Experimental", "Inferred from Genetic Interaction"])),
("IEP", ntobj._make(["ECO:0000270", "Experimental", "Inferred from Expression Pattern"])),
# Similarity evidence codes
("ISS", ntobj._make(["ECO:0000250", "Similarity", "Inferred from Sequence or structural Similarity"])),
("ISO", ntobj._make(["ECO:0000266", "Similarity", "Inferred from Sequence Orthology"])),
("ISA", ntobj._make(["ECO:0000247", "Similarity", "Inferred from Sequence Alignment"])),
("ISM", ntobj._make(["ECO:0000255", "Similarity", "Inferred from Sequence Model used in manual assertion"])),
("IGC", ntobj._make(["ECO:0000317", "Similarity", "Inferred from Genomic Context"])),
("IBA", ntobj._make(["ECO:0000318", "Similarity", "Inferred from Biological aspect of Ancestor"])),
("IBD", ntobj._make(["ECO:0000319", "Similarity", "Inferred from Biological aspect of Descendant"])),
("IKR", ntobj._make(["ECO:0000320", "Similarity", "Inferred from phylogenetic determination of loss of key residues (manual assertion)"])),
("IRD", ntobj._make(["ECO:0000321", "Similarity", "Inferred from Rapid Divergence from ancestral sequence (manual assertion)"])),
("IMR", ntobj._make(["ECO:0000320", "Similarity", "Phylogenetic determination of loss of key residues in manual assertion"])),
# Combinatorial evidence codes
("RCA", ntobj._make(["ECO:0000245", "Combinatorial", "Inferred from Reviewed Computational Analysis"])),
# High Throughput Experimental evidence codes
("HTP", ntobj._make(["ECO:0006056", "High_Throughput", "Inferred from High Throughput Experimental"])),
("HDA", ntobj._make(["ECO:0007005", "High_Throughput", "Inferred from High Throughput Direct Assay"])),
("HMP", ntobj._make(["ECO:0007001", "High_Throughput", "Inferred from High Throughput Mutant Phenotype"])),
("HGI", ntobj._make(["ECO:0007003", "High_Throughput", "Inferred from High Throughput Genetic Interaction"])),
("HEP", ntobj._make(["ECO:0007007", "High_Throughput", "Inferred from High Throughput Expression Pattern"])),
# Author Statement evidence codes
("TAS", ntobj._make(["ECO:0000304", "Author", "Traceable Author Statement used in manual assertion"])),
("NAS", ntobj._make(["ECO:0000303", "Author", "Non-traceable Author Statement used in manual assertion"])),
# Curator Inference
("IC", ntobj._make(["ECO:0000305", "Curatorial", "Inferred by Curator"])),
# No Biological Data
("ND", ntobj._make(["ECO:0000307", "No biological data", "No biological Data available"])),
# Automatic Assertion
("IEA", ntobj._make(["ECO:0000501", "Automatic", "Inferred from Electronic Annotation"]))])
ev2idx = {ev:i for i, ev in enumerate(code2nt.keys())}
def __init__(self):
_ini = _Init(self.code2nt)
self.grp2codes = _ini.get_grp2codes()
self.grp2code2nt = _ini.get_grp2code2nt()
def prt_summary_code(self, prt=sys.stdout):
"""Print summary of codes and groups that can be inputs to get_evcodes."""
prt.write('{N} EVIDENCE GROUPS AND {M} CODES:\n'.format(N=len(self.grp2code2nt), M=len(self.code2nt)))
for grp, c2nt in self.grp2code2nt.items():
prt.write(' {GRP:19}: {CODES}\n'.format(GRP=grp, CODES=' '.join(c2nt.keys())))
def prt_details(self, prt=sys.stdout):
"""Print summary of codes and groups that can be inputs to get_evcodes."""
prt.write('EVIDENCE CODES:\n')
for grp, code2nt in self.grp2code2nt.items():
prt.write(' {GROUP}:\n'.format(GROUP=grp))
for code, ntd in code2nt.items():
prt.write(' {CODE:>3} {NAME}\n'.format(CODE=code, NAME=ntd.name))
def get_min_inc_exc(self, inc_set=None, exc_set=None):
"""Get the user-specified Evidence codes. Return smaller set: include/exclude"""
if inc_set is None and exc_set is None:
return {}
inc = self.get_evcodes(inc_set, exc_set)
exc = set(self.code2nt.keys()).difference(inc)
return {'inc':inc} if len(inc) <= len(exc) else {'exc': exc}
def get_evcodes(self, inc_set=None, exc_set=None):
"""Get evidence code for all but NOT 'No biological data'"""
codes = self.get_evcodes_all(inc_set, exc_set)
codes.discard('ND')
return codes
def get_evcodes_all(self, inc_set=None, exc_set=None):
"""Get set of evidence codes given include set and exclude set"""
codes = self._get_grps_n_codes(inc_set) if inc_set else set(self.code2nt)
if exc_set:
codes.difference_update(self._get_grps_n_codes(exc_set))
return codes
def _get_grps_n_codes(self, usr_set):
"""Get codes, given codes or groups."""
codes = usr_set.intersection(self.code2nt)
for grp in usr_set.intersection(self.grp2codes):
codes.update(self.grp2codes[grp])
return codes
def sort_nts(self, nt_list, codekey):
"""Sort list of namedtuples such so evidence codes in same order as code2nt."""
        # Problem: some members of nt_list carry no Evidence_Code value known
        # to ev2idx, so .get() would return None, which breaks sorting on
        # py34 and py35. The fix: default the sort key to -1 for those members
        # (whether -1 is the right default is an open question).
sortby = lambda nt: self.ev2idx.get(getattr(nt, codekey), -1)
return sorted(nt_list, key=sortby)
def get_grp_name(self, code):
"""Return group and name for an evidence code."""
nt_code = self.code2nt.get(code.strip(), None)
if nt_code is not None:
return nt_code.group, nt_code.name
return "", ""
def prt_ev_cnts(self, ctr, prt=sys.stdout):
"""Prints evidence code counts stored in a collections Counter."""
for key, cnt in ctr.most_common():
grp, name = self.get_grp_name(key.replace("NOT ", ""))
prt.write("{CNT:7,} {EV:>7} {GROUP:<15} {NAME}\n".format(
CNT=cnt, EV=key, GROUP=grp, NAME=name))
def get_order(self, codes):
"""Return evidence codes in order shown in code2name."""
return sorted(codes, key=lambda e: [self.ev2idx.get(e)])
def prt_summary_anno2ev(self, associations, prt=sys.stdout):
"""Print annotation/evidence code summary."""
ctr = cx.Counter()
for ntanno in associations:
evidence_code = ntanno.Evidence_Code
if 'NOT' not in ntanno.Qualifier:
ctr[evidence_code] += 1
elif 'NOT' in ntanno.Qualifier:
ctr["NOT {EV:3}".format(EV=ntanno.Evidence_Code)] += 1
else:
raise Exception("UNEXPECTED INFO")
self.prt_ev_cnts(ctr, prt)
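# Editor's sketch (not part of the original module): how the class above is
# typically driven; the include/exclude sets below are illustrative only.
def _example_evidence_usage():
    obj = EvidenceCodes()
    # Expand a group name plus an individual code into concrete codes;
    # get_evcodes() silently drops 'ND' (No biological data).
    codes = obj.get_evcodes({'Experimental', 'TAS'})
    # Ask whether an include-list or an exclude-list is the smaller filter.
    inc_exc = obj.get_min_inc_exc(inc_set={'Experimental'})
    return codes, inc_exc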
class _Init(object):
"""Initialize various formats of evidence codes."""
def __init__(self, code2nt):
self.code2nt = code2nt
self.grps = self._init_grps(code2nt)
def get_grp2code2nt(self):
"""Return ordered dict for group to namedtuple"""
grp2code2nt = cx.OrderedDict([(g, []) for g in self.grps])
for code, ntd in self.code2nt.items():
grp2code2nt[ntd.group].append((code, ntd))
for grp, nts in grp2code2nt.items():
grp2code2nt[grp] = cx.OrderedDict(nts)
return grp2code2nt
@staticmethod
def _init_grps(code2nt):
"""Return list of groups in same order as in code2nt"""
seen = set()
seen_add = seen.add
groups = [nt.group for nt in code2nt.values()]
return [g for g in groups if not (g in seen or seen_add(g))]
def get_grp2codes(self):
"""Get dict of group name to namedtuples."""
grp2codes = cx.defaultdict(set)
for code, ntd in self.code2nt.items():
grp2codes[ntd.group].add(code)
return dict(grp2codes)
# Copyright (C) 2016-2019, DV Klopfenstein, H Tang. All rights reserved."
| {
"content_hash": "fff76345d237ac0453ae911ad5752276",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 147,
"avg_line_length": 49.447513812154696,
"alnum_prop": 0.6244692737430168,
"repo_name": "tanghaibao/goatools",
"id": "cc49d50318d93f518b767023d1347bcc412310a5",
"size": "8950",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "goatools/evidence_codes.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "316670"
},
{
"name": "Makefile",
"bytes": "25213"
},
{
"name": "Python",
"bytes": "146769147"
},
{
"name": "Shell",
"bytes": "1107"
}
],
"symlink_target": ""
} |
import subprocess
import socket
import ssl
import sys
import time
if sys.version < '2.7':
print("WARNING: SSL not supported on Python 2.6")
exit(0)
if ssl.OPENSSL_VERSION_NUMBER < 0x10000000:
print("WARNING: TLS-PSK not supported on OpenSSL < 1.0")
exit(0)
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
env = dict(os.environ)
env['LD_LIBRARY_PATH'] = '../../lib:../../lib/cpp'
try:
pp = env['PYTHONPATH']
except KeyError:
pp = ''
env['PYTHONPATH'] = '../../lib/python:'+pp
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("no-psk-test-client", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
mid = 1
subscribe_packet = mosq_test.gen_subscribe(mid, "psk/test", 0)
suback_packet = mosq_test.gen_suback(mid, 0)
publish_packet = mosq_test.gen_publish(topic="psk/test", payload="message", qos=0)
broker = subprocess.Popen(['../../src/mosquitto', '-c', '08-tls-psk-bridge.conf'], stderr=subprocess.PIPE)
bridge = subprocess.Popen(['../../src/mosquitto', '-c', '08-tls-psk-bridge.conf2'], stderr=subprocess.PIPE)
pub = None
try:
time.sleep(0.5)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(30)
sock.connect(("localhost", 1888))
sock.send(connect_packet)
if mosq_test.expect_packet(sock, "connack", connack_packet):
sock.send(subscribe_packet)
if mosq_test.expect_packet(sock, "suback", suback_packet):
pub = subprocess.Popen(['./c/08-tls-psk-bridge.test'], env=env, stdout=subprocess.PIPE)
if pub.wait():
raise ValueError
exit(1)
if mosq_test.expect_packet(sock, "publish", publish_packet):
rc = 0
finally:
broker.terminate()
broker.wait()
bridge.terminate()
bridge.wait()
if rc:
(stdo, stde) = broker.communicate()
print(stde)
(stdo, stde) = bridge.communicate()
print(stde)
if pub:
(stdo, stde) = pub.communicate()
print(stdo)
exit(rc)
| {
"content_hash": "0e8beb415af56c293bb8888b29cab986",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 129,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.641566265060241,
"repo_name": "zhkzyth/better-mosquitto",
"id": "8d839e2f95ccd411582bc873ff5d928ce6704703",
"size": "2347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/broker/08-tls-psk-bridge.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "723187"
},
{
"name": "C++",
"bytes": "34223"
},
{
"name": "JavaScript",
"bytes": "8597"
},
{
"name": "Perl",
"bytes": "3271"
},
{
"name": "Python",
"bytes": "265033"
},
{
"name": "Shell",
"bytes": "3991"
},
{
"name": "XSLT",
"bytes": "1151"
}
],
"symlink_target": ""
} |
import re
from random import randrange
from model.contact import Contact
def test_contact_data_on_home_page(app):
check_contact_list_is_not_empty(app)
contacts_list = app.contact.get_contact_list()
index = randrange(len(contacts_list))
contact_from_home_page = contacts_list[index]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(index)
assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
assert contact_from_home_page.lastname == remove_spaces_at_the_beginning(contact_from_edit_page.lastname)
assert contact_from_home_page.firstname == remove_spaces_at_the_beginning(contact_from_edit_page.firstname)
assert contact_from_home_page.address == remove_spaces_at_the_beginning(contact_from_edit_page.address)
assert contact_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(contact_from_edit_page)
def test_phones_on_view_page(app):
contact_from_view_page = app.contact.get_contact_info_from_view_page(0)
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_view_page.homephone == contact_from_edit_page.homephone
assert contact_from_view_page.workphone == contact_from_edit_page.workphone
assert contact_from_view_page.mobilephone == contact_from_edit_page.mobilephone
assert contact_from_view_page.secondaryphone == contact_from_edit_page.secondaryphone
def clear(s):
return re.sub("[() -]","",s)
def remove_spaces_at_the_beginning(s):
return re.sub("^\s*","",s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter (lambda x: x != "",
map (lambda x: clear(x),
filter(lambda x: x is not None,
[contact.homephone, contact.mobilephone, contact.workphone, contact.secondaryphone]))))
def merge_emails_like_on_home_page(contact):
return "\n".join(filter (lambda x: x != "",
map (lambda x: remove_spaces_at_the_beginning(x),
filter(lambda x: x is not None,
[contact.email, contact.email2, contact.email3]))))
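# Editor's sketch (not part of the original tests): what the two merge helpers
# above produce for a hand-built Contact, assuming the Contact model accepts
# these keyword arguments (all field values below are made up).
def _example_merge_output():
    c = Contact(firstname="Ann", lastname="Lee",
                homephone="12 34-56", mobilephone="(777) 888",
                workphone="", secondaryphone=None,
                email="a@b.c", email2="  d@e.f", email3=None)
    # merge_phones_like_on_home_page(c) -> "123456\n777888"
    # merge_emails_like_on_home_page(c) -> "a@b.c\nd@e.f"
    return merge_phones_like_on_home_page(c), merge_emails_like_on_home_page(c)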
def check_contact_list_is_not_empty(app):
if app.contact.count() == 0:
app.contact.create(Contact(firstname="test_firstname", middlename="test_middlename", lastname="test_lastname",
homephone="1234560", mobilephone="+367846", workphone="234234234",
secondaryphone="2(343)4532", address = "test_address", email="email@test.tst",
email2=" email2@test.tst", email3="email3@test.tst")) | {
"content_hash": "221589c900a5bb5eda698b79f3da69e2",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 128,
"avg_line_length": 57.5,
"alnum_prop": 0.6514492753623189,
"repo_name": "tzvezda/python_training",
"id": "ec6a295429abfcbeec99bf69b28aad466654fb8f",
"size": "2760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_data_contact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30452"
}
],
"symlink_target": ""
} |
"""
CLI interface for nova management.
"""
from __future__ import print_function
import argparse
import functools
import re
import sys
import traceback
from dateutil import parser as dateutil_parser
import decorator
from keystoneauth1 import exceptions as ks_exc
import netaddr
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import encodeutils
from oslo_utils import importutils
from oslo_utils import uuidutils
import prettytable
import six
import six.moves.urllib.parse as urlparse
from sqlalchemy.engine import url as sqla_url
from nova.api.openstack.placement.objects import consumer as consumer_obj
from nova.cmd import common as cmd_common
from nova.compute import api as compute_api
import nova.conf
from nova import config
from nova import context
from nova.db import api as db
from nova.db import migration
from nova.db.sqlalchemy import api as sa_db
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.objects import build_request as build_request_obj
from nova.objects import host_mapping as host_mapping_obj
from nova.objects import instance as instance_obj
from nova.objects import instance_mapping as instance_mapping_obj
from nova.objects import keypair as keypair_obj
from nova.objects import quotas as quotas_obj
from nova.objects import request_spec
from nova import quota
from nova import rpc
from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import utils
from nova import version
from nova.virt import ironic
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
# Keep this list sorted and one entry per line for readability.
_EXTRA_DEFAULT_LOG_LEVELS = ['oslo_concurrency=INFO',
'oslo_db=INFO',
'oslo_policy=INFO']
# Decorators for actions
args = cmd_common.args
action_description = cmd_common.action_description
def mask_passwd_in_url(url):
parsed = urlparse.urlparse(url)
safe_netloc = re.sub(':.*@', ':****@', parsed.netloc)
new_parsed = urlparse.ParseResult(
parsed.scheme, safe_netloc,
parsed.path, parsed.params,
parsed.query, parsed.fragment)
return urlparse.urlunparse(new_parsed)
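# Editor's sketch (not part of the original module): what mask_passwd_in_url()
# produces for a typical SQLAlchemy-style URL; the credentials and host below
# are placeholders.
def _example_mask_passwd():
    # 'secret' is replaced by '****'; scheme, host and path are preserved:
    # 'mysql+pymysql://nova:****@db.example/nova'
    return mask_passwd_in_url('mysql+pymysql://nova:secret@db.example/nova')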
def _db_error(caught_exception):
print(caught_exception)
print(_("The above error may show that the database has not "
"been created.\nPlease create a database using "
"'nova-manage db sync' before running this command."))
sys.exit(1)
class FloatingIpCommands(object):
"""Class for managing floating IP."""
# TODO(stephenfin): Remove these when we remove cells v1
description = ('DEPRECATED: Floating IP commands are deprecated since '
'nova-network is deprecated in favor of Neutron. The '
'floating IP commands will be removed in an upcoming '
'release.')
@staticmethod
def address_to_hosts(addresses):
"""Iterate over hosts within an address range.
If an explicit range specifier is missing, the parameter is
interpreted as a specific individual address.
"""
try:
return [netaddr.IPAddress(addresses)]
except ValueError:
net = netaddr.IPNetwork(addresses)
if net.size < 4:
reason = _("/%s should be specified as single address(es) "
"not in cidr format") % net.prefixlen
raise exception.InvalidInput(reason=reason)
elif net.size >= 1000000:
# NOTE(dripton): If we generate a million IPs and put them in
# the database, the system will slow to a crawl and/or run
# out of memory and crash. This is clearly a misconfiguration.
reason = _("Too many IP addresses will be generated. Please "
"increase /%s to reduce the number generated."
) % net.prefixlen
raise exception.InvalidInput(reason=reason)
else:
return net.iter_hosts()
@args('--ip_range', metavar='<range>', help='IP range')
@args('--pool', metavar='<pool>', help='Optional pool')
@args('--interface', metavar='<interface>', help='Optional interface')
def create(self, ip_range, pool=None, interface=None):
"""Creates floating IPs for zone by range."""
admin_context = context.get_admin_context()
if not pool:
pool = CONF.default_floating_pool
if not interface:
interface = CONF.public_interface
ips = [{'address': str(address), 'pool': pool, 'interface': interface}
for address in self.address_to_hosts(ip_range)]
try:
db.floating_ip_bulk_create(admin_context, ips, want_result=False)
except exception.FloatingIpExists as exc:
# NOTE(simplylizz): Maybe logging would be better here
# instead of printing, but logging isn't used here and I
# don't know why.
print('error: %s' % exc)
return 1
@args('--ip_range', metavar='<range>', help='IP range')
def delete(self, ip_range):
"""Deletes floating IPs by range."""
admin_context = context.get_admin_context()
ips = ({'address': str(address)}
for address in self.address_to_hosts(ip_range))
db.floating_ip_bulk_destroy(admin_context, ips)
@args('--host', metavar='<host>', help='Host')
def list(self, host=None):
"""Lists all floating IPs (optionally by host).
Note: if host is given, only active floating IPs are returned
"""
ctxt = context.get_admin_context()
try:
if host is None:
floating_ips = db.floating_ip_get_all(ctxt)
else:
floating_ips = db.floating_ip_get_all_by_host(ctxt, host)
except exception.NoFloatingIpsDefined:
print(_("No floating IP addresses have been defined."))
return
for floating_ip in floating_ips:
instance_uuid = None
if floating_ip['fixed_ip_id']:
fixed_ip = db.fixed_ip_get(ctxt, floating_ip['fixed_ip_id'])
instance_uuid = fixed_ip['instance_uuid']
print("%s\t%s\t%s\t%s\t%s" % (floating_ip['project_id'],
floating_ip['address'],
instance_uuid,
floating_ip['pool'],
floating_ip['interface']))
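# Editor's sketch (not part of the original module): how address_to_hosts()
# above expands its argument; the addresses are placeholders and nothing runs
# at import time.
def _example_address_to_hosts():
    # A single address comes back as a one-element list.
    single = FloatingIpCommands.address_to_hosts('192.168.0.5')
    # A CIDR is expanded with netaddr; iter_hosts() skips the network and
    # broadcast addresses, so a /30 yields exactly two usable IPs.
    hosts = list(FloatingIpCommands.address_to_hosts('10.0.0.0/30'))
    return single, hosts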
@decorator.decorator
def validate_network_plugin(f, *args, **kwargs):
"""Decorator to validate the network plugin."""
if utils.is_neutron():
print(_("ERROR: Network commands are not supported when using the "
"Neutron API. Use python-neutronclient instead."))
return 2
return f(*args, **kwargs)
class NetworkCommands(object):
"""Class for managing networks."""
# TODO(stephenfin): Remove these when we remove cells v1
description = ('DEPRECATED: Network commands are deprecated since '
'nova-network is deprecated in favor of Neutron. The '
'network commands will be removed in an upcoming release.')
@validate_network_plugin
@args('--label', metavar='<label>', help='Label for network (ex: public)')
@args('--fixed_range_v4', dest='cidr', metavar='<x.x.x.x/yy>',
help='IPv4 subnet (ex: 10.0.0.0/8)')
@args('--num_networks', metavar='<number>',
help='Number of networks to create')
@args('--network_size', metavar='<number>',
help='Number of IPs per network')
@args('--vlan', metavar='<vlan id>', help='vlan id')
@args('--vlan_start', dest='vlan_start', metavar='<vlan start id>',
help='vlan start id')
@args('--vpn', dest='vpn_start', help='vpn start')
@args('--fixed_range_v6', dest='cidr_v6',
          help='IPv6 subnet (ex: fe80::/64)')
@args('--gateway', help='gateway')
@args('--gateway_v6', help='ipv6 gateway')
@args('--bridge', metavar='<bridge>',
help='VIFs on this network are connected to this bridge')
@args('--bridge_interface', metavar='<bridge interface>',
help='the bridge is connected to this interface')
@args('--multi_host', metavar="<'T'|'F'>",
help='Multi host')
@args('--dns1', metavar="<DNS Address>", help='First DNS')
@args('--dns2', metavar="<DNS Address>", help='Second DNS')
@args('--uuid', metavar="<network uuid>", help='Network UUID')
@args('--fixed_cidr', metavar='<x.x.x.x/yy>',
help='IPv4 subnet for fixed IPs (ex: 10.20.0.0/16)')
@args('--project_id', metavar="<project id>",
help='Project id')
@args('--priority', metavar="<number>", help='Network interface priority')
def create(self, label=None, cidr=None, num_networks=None,
network_size=None, multi_host=None, vlan=None,
vlan_start=None, vpn_start=None, cidr_v6=None, gateway=None,
gateway_v6=None, bridge=None, bridge_interface=None,
dns1=None, dns2=None, project_id=None, priority=None,
uuid=None, fixed_cidr=None):
"""Creates fixed IPs for host by range."""
        # NOTE(gmann): These checks are moved here because the API layer does
        # all of this validation through JSON schema.
if not label:
raise exception.NetworkNotCreated(req="label")
if len(label) > 255:
raise exception.LabelTooLong()
if not (cidr or cidr_v6):
raise exception.NetworkNotCreated(req="cidr or cidr_v6")
kwargs = {k: v for k, v in locals().items()
if v and k != "self"}
if multi_host is not None:
kwargs['multi_host'] = multi_host == 'T'
net_manager = importutils.import_object(CONF.network_manager)
net_manager.create_networks(context.get_admin_context(), **kwargs)
@validate_network_plugin
def list(self):
"""List all created networks."""
_fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s"
print(_fmt % (_('id'),
_('IPv4'),
_('IPv6'),
_('start address'),
_('DNS1'),
_('DNS2'),
_('VlanID'),
_('project'),
_("uuid")))
try:
# Since network_get_all can throw exception.NoNetworksFound
# for this command to show a nice result, this exception
# should be caught and handled as such.
networks = db.network_get_all(context.get_admin_context())
except exception.NoNetworksFound:
print(_('No networks found'))
else:
for network in networks:
print(_fmt % (network.id,
network.cidr,
network.cidr_v6,
network.dhcp_start,
network.dns1,
network.dns2,
network.vlan,
network.project_id,
network.uuid))
@validate_network_plugin
@args('--fixed_range', metavar='<x.x.x.x/yy>', help='Network to delete')
@args('--uuid', metavar='<uuid>', help='UUID of network to delete')
def delete(self, fixed_range=None, uuid=None):
"""Deletes a network."""
if fixed_range is None and uuid is None:
raise Exception(_("Please specify either fixed_range or uuid"))
net_manager = importutils.import_object(CONF.network_manager)
# delete the network
net_manager.delete_network(context.get_admin_context(),
fixed_range, uuid)
@validate_network_plugin
@args('--fixed_range', metavar='<x.x.x.x/yy>', help='Network to modify')
@args('--project', metavar='<project name>',
help='Project name to associate')
@args('--host', metavar='<host>', help='Host to associate')
@args('--disassociate-project', action="store_true", dest='dis_project',
default=False, help='Disassociate Network from Project')
@args('--disassociate-host', action="store_true", dest='dis_host',
default=False, help='Disassociate Host from Project')
def modify(self, fixed_range, project=None, host=None,
dis_project=None, dis_host=None):
"""Associate/Disassociate Network with Project and/or Host
arguments: network project host
leave any field blank to ignore it
"""
admin_context = context.get_admin_context()
network = db.network_get_by_cidr(admin_context, fixed_range)
net = {}
# User can choose the following actions each for project and host.
# 1) Associate (set not None value given by project/host parameter)
# 2) Disassociate (set None by disassociate parameter)
# 3) Keep unchanged (project/host key is not added to 'net')
if dis_project:
net['project_id'] = None
if dis_host:
net['host'] = None
        # The --disassociate-X are boolean options, but if the user
# mistakenly provides a value, it will be used as a positional argument
# and be erroneously interpreted as some other parameter (e.g.
# a project instead of host value). The safest thing to do is error-out
# with a message indicating that there is probably a problem with
# how the disassociate modifications are being used.
if dis_project or dis_host:
if project or host:
error_msg = "ERROR: Unexpected arguments provided. Please " \
"use separate commands."
print(error_msg)
return 1
db.network_update(admin_context, network['id'], net)
return
if project:
net['project_id'] = project
if host:
net['host'] = host
db.network_update(admin_context, network['id'], net)
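# Editor's sketch (not part of the original module): the three modes of
# modify() described in the comments above. The CIDR, project and host names
# are placeholders and nothing is executed at import time.
def _example_network_modify_modes():
    cmd = NetworkCommands()
    # 1) Associate: set a project and/or host on the network.
    cmd.modify('10.0.0.0/24', project='demo', host='compute1')
    # 2) Disassociate: clear the project and/or host.
    cmd.modify('10.0.0.0/24', dis_project=True, dis_host=True)
    # 3) Keep unchanged: omitting all optional arguments leaves both fields
    #    as they are.
    cmd.modify('10.0.0.0/24')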
class DbCommands(object):
"""Class for managing the main database."""
# NOTE(danms): These functions are called with a DB context and a
# count, which is the maximum batch size requested by the
# user. They must be idempotent. At most $count records should be
# migrated. The function must return a tuple of (found, done). The
# found value indicates how many unmigrated records existed in the
# database prior to the migration (either total, or up to the
# $count limit provided), and a nonzero found value tells the user
# that there is still work to do. The done value indicates whether
# or not any records were actually migrated by the function. Thus
# if both (found, done) are nonzero, work was done and some work
# remains. If found is nonzero and done is zero, some records are
# not migratable, but all migrations that can complete have
# finished.
online_migrations = (
# Added in Newton
# TODO(mriedem): Remove this in Stein along with the compatibility
# code in the api and conductor services; the nova-status upgrade check
# added in Rocky is the tool operators can use to make sure they have
# completed this migration.
request_spec.migrate_instances_add_request_spec,
# Added in Newton
keypair_obj.migrate_keypairs_to_api_db,
# Added in Ocata
# NOTE(mriedem): This online migration is going to be backported to
# Newton also since it's an upgrade issue when upgrading from Mitaka.
build_request_obj.delete_build_requests_with_no_instance_uuid,
# Added in Pike
db.service_uuids_online_data_migration,
# Added in Pike
quotas_obj.migrate_quota_limits_to_api_db,
# Added in Pike
quotas_obj.migrate_quota_classes_to_api_db,
# Added in Queens
sa_db.migration_migrate_to_uuid,
# Added in Queens
block_device_obj.BlockDeviceMapping.populate_uuids,
# Added in Rocky
# NOTE(tssurya): This online migration is going to be backported to
# Queens and Pike since instance.avz of instances before Pike
# need to be populated if it was not specified during boot time.
instance_obj.populate_missing_availability_zones,
# Added in Rocky
consumer_obj.create_incomplete_consumers,
# Added in Rocky
instance_mapping_obj.populate_queued_for_delete,
)
def __init__(self):
pass
@staticmethod
def _print_dict(dct, dict_property="Property", dict_value='Value'):
"""Print a `dict` as a table of two columns.
:param dct: `dict` to print
:param dict_property: name of the first column
:param dict_value: header label for the value (second) column
"""
pt = prettytable.PrettyTable([dict_property, dict_value])
pt.align = 'l'
for k, v in sorted(dct.items()):
# convert dict to str to check length
if isinstance(v, dict):
v = six.text_type(v)
# if value has a newline, add in multiple rows
# e.g. fault with stacktrace
if v and isinstance(v, six.string_types) and r'\n' in v:
lines = v.strip().split(r'\n')
col1 = k
for line in lines:
pt.add_row([col1, line])
col1 = ''
else:
pt.add_row([k, v])
if six.PY2:
print(encodeutils.safe_encode(pt.get_string()))
else:
print(encodeutils.safe_encode(pt.get_string()).decode())
@args('--version', metavar='<version>', help=argparse.SUPPRESS)
@args('--local_cell', action='store_true',
          help='Only sync db in the local cell: do not attempt to fan-out '
'to all cells')
@args('version2', metavar='VERSION', nargs='?', help='Database version')
def sync(self, version=None, local_cell=False, version2=None):
"""Sync the database up to the most recent version."""
if version and not version2:
print(_("DEPRECATED: The '--version' parameter was deprecated in "
"the Pike cycle and will not be supported in future "
"versions of nova. Use the 'VERSION' positional argument "
"instead"))
version2 = version
if not local_cell:
ctxt = context.RequestContext()
# NOTE(mdoff): Multiple cells not yet implemented. Currently
# fanout only looks for cell0.
try:
cell_mapping = objects.CellMapping.get_by_uuid(ctxt,
objects.CellMapping.CELL0_UUID)
with context.target_cell(ctxt, cell_mapping) as cctxt:
migration.db_sync(version2, context=cctxt)
except exception.CellMappingNotFound:
print(_('WARNING: cell0 mapping not found - not'
' syncing cell0.'))
except Exception as e:
print(_("""ERROR: Could not access cell0.
Has the nova_api database been created?
Has the nova_cell0 database been created?
Has "nova-manage api_db sync" been run?
Has "nova-manage cell_v2 map_cell0" been run?
Is [api_database]/connection set in nova.conf?
Is the cell0 database connection URL correct?
Error: %s""") % six.text_type(e))
return migration.db_sync(version)
def version(self):
"""Print the current database version."""
print(migration.db_version())
@args('--max_rows', type=int, metavar='<number>', dest='max_rows',
help='Maximum number of deleted rows to archive. Defaults to 1000.')
@args('--verbose', action='store_true', dest='verbose', default=False,
help='Print how many rows were archived per table.')
@args('--until-complete', action='store_true', dest='until_complete',
default=False,
help=('Run continuously until all deleted rows are archived. Use '
'max_rows as a batch size for each iteration.'))
@args('--purge', action='store_true', dest='purge', default=False,
help='Purge all data from shadow tables after archive completes')
def archive_deleted_rows(self, max_rows=1000, verbose=False,
until_complete=False, purge=False):
"""Move deleted rows from production tables to shadow tables.
Returns 0 if nothing was archived, 1 if some number of rows were
archived, 2 if max_rows is invalid, 3 if no connection could be
established to the API DB. If automating, this should be
run continuously while the result is 1, stopping at 0.
"""
max_rows = int(max_rows)
if max_rows < 0:
print(_("Must supply a positive value for max_rows"))
return 2
if max_rows > db.MAX_INT:
print(_('max rows must be <= %(max_value)d') %
{'max_value': db.MAX_INT})
return 2
ctxt = context.get_admin_context()
try:
# NOTE(tssurya): This check has been added to validate if the API
# DB is reachable or not as this is essential for purging the
# instance_mappings and request_specs of the deleted instances.
objects.CellMappingList.get_all(ctxt)
except db_exc.CantStartEngineError:
print(_('Failed to connect to API DB so aborting this archival '
'attempt. Please check your config file to make sure that '
'CONF.api_database.connection is set and run this '
'command again.'))
return 3
table_to_rows_archived = {}
deleted_instance_uuids = []
if until_complete and verbose:
sys.stdout.write(_('Archiving') + '..') # noqa
while True:
try:
run, deleted_instance_uuids = db.archive_deleted_rows(max_rows)
except KeyboardInterrupt:
run = {}
if until_complete and verbose:
print('.' + _('stopped')) # noqa
break
for k, v in run.items():
table_to_rows_archived.setdefault(k, 0)
table_to_rows_archived[k] += v
if deleted_instance_uuids:
table_to_rows_archived.setdefault('instance_mappings', 0)
table_to_rows_archived.setdefault('request_specs', 0)
table_to_rows_archived.setdefault('instance_group_member', 0)
deleted_mappings = objects.InstanceMappingList.destroy_bulk(
ctxt, deleted_instance_uuids)
table_to_rows_archived['instance_mappings'] += deleted_mappings
deleted_specs = objects.RequestSpec.destroy_bulk(
ctxt, deleted_instance_uuids)
table_to_rows_archived['request_specs'] += deleted_specs
deleted_group_members = (
objects.InstanceGroup.destroy_members_bulk(
ctxt, deleted_instance_uuids))
table_to_rows_archived['instance_group_member'] += (
deleted_group_members)
if not until_complete:
break
elif not run:
if verbose:
print('.' + _('complete')) # noqa
break
if verbose:
sys.stdout.write('.')
if verbose:
if table_to_rows_archived:
self._print_dict(table_to_rows_archived, _('Table'),
dict_value=_('Number of Rows Archived'))
else:
print(_('Nothing was archived.'))
if table_to_rows_archived and purge:
if verbose:
print(_('Rows were archived, running purge...'))
self.purge(purge_all=True, verbose=verbose)
# NOTE(danms): Return nonzero if we archived something
return int(bool(table_to_rows_archived))
@args('--before', dest='before',
help='If specified, purge rows from shadow tables that are older '
'than this. Fuzzy time specs are allowed')
@args('--all', dest='purge_all', action='store_true',
help='Purge all rows in the shadow tables')
@args('--verbose', dest='verbose', action='store_true', default=False,
help='Print information about purged records')
@args('--all-cells', dest='all_cells', action='store_true', default=False,
help='Run against all cell databases')
def purge(self, before=None, purge_all=False, verbose=False,
all_cells=False):
if before is None and purge_all is False:
print(_('Either --before or --all is required'))
return 1
if before:
try:
before_date = dateutil_parser.parse(before, fuzzy=True)
except ValueError as e:
print(_('Invalid value for --before: %s') % e)
return 2
else:
before_date = None
def status(msg):
if verbose:
print('%s: %s' % (identity, msg))
deleted = 0
admin_ctxt = context.get_admin_context()
if all_cells:
try:
cells = objects.CellMappingList.get_all(admin_ctxt)
except db_exc.DBError:
print(_('Unable to get cell list from API DB. '
'Is it configured?'))
return 4
for cell in cells:
identity = _('Cell %s') % cell.identity
with context.target_cell(admin_ctxt, cell) as cctxt:
deleted += sa_db.purge_shadow_tables(cctxt,
before_date,
status_fn=status)
else:
identity = _('DB')
deleted = sa_db.purge_shadow_tables(admin_ctxt,
before_date, status_fn=status)
if deleted:
return 0
else:
return 3
@args('--delete', action='store_true', dest='delete',
help='If specified, automatically delete any records found where '
'instance_uuid is NULL.')
def null_instance_uuid_scan(self, delete=False):
"""Lists and optionally deletes database records where
instance_uuid is NULL.
"""
hits = migration.db_null_instance_uuid_scan(delete)
records_found = False
for table_name, records in hits.items():
# Don't print anything for 0 hits
if records:
records_found = True
if delete:
print(_("Deleted %(records)d records "
"from table '%(table_name)s'.") %
{'records': records, 'table_name': table_name})
else:
print(_("There are %(records)d records in the "
"'%(table_name)s' table where the uuid or "
"instance_uuid column is NULL. Run this "
"command again with the --delete option after you "
"have backed up any necessary data.") %
{'records': records, 'table_name': table_name})
# check to see if we didn't find anything
if not records_found:
print(_('There were no records found where '
'instance_uuid was NULL.'))
def _run_migration(self, ctxt, max_count):
ran = 0
exceptions = False
migrations = {}
for migration_meth in self.online_migrations:
count = max_count - ran
try:
found, done = migration_meth(ctxt, count)
except Exception:
msg = (_("Error attempting to run %(method)s") % dict(
method=migration_meth))
print(msg)
LOG.exception(msg)
exceptions = True
found = done = 0
name = migration_meth.__name__
if found:
print(_('%(total)i rows matched query %(meth)s, %(done)i '
'migrated') % {'total': found,
'meth': name,
'done': done})
migrations[name] = found, done
if max_count is not None:
ran += done
if ran >= max_count:
break
return migrations, exceptions
@args('--max-count', metavar='<number>', dest='max_count',
help='Maximum number of objects to consider')
def online_data_migrations(self, max_count=None):
ctxt = context.get_admin_context()
if max_count is not None:
try:
max_count = int(max_count)
except ValueError:
max_count = -1
unlimited = False
if max_count < 1:
                print(_('Must supply a positive value for max_count'))
return 127
else:
unlimited = True
max_count = 50
print(_('Running batches of %i until complete') % max_count)
ran = None
migration_info = {}
exceptions = False
while ran is None or ran != 0:
migrations, exceptions = self._run_migration(ctxt, max_count)
ran = 0
for name in migrations:
migration_info.setdefault(name, (0, 0))
migration_info[name] = (
migration_info[name][0] + migrations[name][0],
migration_info[name][1] + migrations[name][1],
)
ran += migrations[name][1]
if not unlimited:
break
t = prettytable.PrettyTable([_('Migration'),
_('Total Needed'),
_('Completed')])
for name in sorted(migration_info.keys()):
info = migration_info[name]
t.add_row([name, info[0], info[1]])
print(t)
# NOTE(imacdonn): In the "unlimited" case, the loop above will only
# terminate when all possible migrations have been effected. If we're
# still getting exceptions, there's a problem that requires
# intervention. In the max-count case, exceptions are only considered
# fatal if no work was done by any other migrations ("not ran"),
# because otherwise work may still remain to be done, and that work
# may resolve dependencies for the failing migrations.
if exceptions and (unlimited or not ran):
print(_("Some migrations failed unexpectedly. Check log for "
"details."))
return 2
# TODO(mriedem): Potentially add another return code for
# "there are more migrations, but not completable right now"
return ran and 1 or 0
@args('--resource_class', metavar='<class>', required=True,
help='Ironic node class to set on instances')
@args('--host', metavar='<host>', required=False,
help='Compute service name to migrate nodes on')
@args('--node', metavar='<node>', required=False,
help='Ironic node UUID to migrate (all on the host if omitted)')
@args('--all', action='store_true', default=False, dest='all_hosts',
help='Run migrations for all ironic hosts and nodes')
@args('--verbose', action='store_true', default=False,
help='Print information about migrations being performed')
def ironic_flavor_migration(self, resource_class, host=None, node=None,
all_hosts=False, verbose=False):
"""Migrate flavor information for ironic instances.
This will manually push the instance flavor migration required
for ironic-hosted instances in Pike. The best way to accomplish
this migration is to run your ironic computes normally in Pike.
However, if you need to push the migration manually, then use
this.
This is idempotent, but not trivial to start/stop/resume. It is
recommended that you do this with care and not from a script
assuming it is trivial.
Running with --all may generate a large amount of DB traffic
all at once. Running at least one host at a time is recommended
for batching.
Return values:
0: All work is completed (or none is needed)
1: Specified host and/or node is not found, or no ironic nodes present
2: Internal accounting error shows more than one instance per node
3: Invalid combination of required arguments
"""
if not resource_class:
# Note that if --resource_class is not specified on the command
# line it will actually result in a return code of 2, but we
# leave 3 here for testing purposes.
print(_('A resource_class is required for all modes of operation'))
return 3
ctx = context.get_admin_context()
if all_hosts:
if host or node:
print(_('--all with --host and/or --node does not make sense'))
return 3
cns = objects.ComputeNodeList.get_by_hypervisor_type(ctx, 'ironic')
elif host and node:
try:
cn = objects.ComputeNode.get_by_host_and_nodename(ctx, host,
node)
cns = [cn]
except exception.ComputeHostNotFound:
cns = []
elif host:
try:
cns = objects.ComputeNodeList.get_all_by_host(ctx, host)
except exception.ComputeHostNotFound:
cns = []
else:
print(_('Either --all, --host, or --host and --node are required'))
return 3
if len(cns) == 0:
print(_('No ironic compute nodes found that match criteria'))
return 1
# Check that we at least got one ironic compute and we can pretty
# safely assume the rest are
if cns[0].hypervisor_type != 'ironic':
print(_('Compute node(s) specified is not of type ironic'))
return 1
for cn in cns:
# NOTE(danms): The instance.node is the
# ComputeNode.hypervisor_hostname, which in the case of ironic is
# the node uuid. Since only one instance can be on a node in
# ironic, do another sanity check here to make sure we look legit.
inst = objects.InstanceList.get_by_filters(
ctx, {'node': cn.hypervisor_hostname,
'deleted': False})
if len(inst) > 1:
print(_('Ironic node %s has multiple instances? '
'Something is wrong.') % cn.hypervisor_hostname)
return 2
elif len(inst) == 1:
result = ironic.IronicDriver._pike_flavor_migration_for_node(
ctx, resource_class, inst[0].uuid)
if result and verbose:
print(_('Migrated instance %(uuid)s on node %(node)s') % {
'uuid': inst[0].uuid,
'node': cn.hypervisor_hostname})
return 0
class ApiDbCommands(object):
"""Class for managing the api database."""
def __init__(self):
pass
@args('--version', metavar='<version>', help=argparse.SUPPRESS)
@args('version2', metavar='VERSION', nargs='?', help='Database version')
def sync(self, version=None, version2=None):
"""Sync the database up to the most recent version.
If placement_database.connection is not None, sync that
database using the API database migrations.
"""
if version and not version2:
print(_("DEPRECATED: The '--version' parameter was deprecated in "
"the Pike cycle and will not be supported in future "
"versions of nova. Use the 'VERSION' positional argument "
"instead"))
version2 = version
# NOTE(cdent): At the moment, the migration code deep in the belly
# of the migration package doesn't actually return anything, so
# returning the result of db_sync is not particularly meaningful
# here. But, in case that changes, we store the result from the
# placement sync and combine it with the result of the api sync.
result = True
if CONF.placement_database.connection is not None:
result = migration.db_sync(version2, database='placement')
return migration.db_sync(version2, database='api') and result
def version(self):
"""Print the current database version."""
print(migration.db_version(database='api'))
class CellCommands(object):
"""Commands for managing cells v1 functionality."""
# TODO(stephenfin): Remove this when cells v1 is removed
description = ('DEPRECATED: The cell commands, which configure cells v1 '
'functionality, are deprecated as Cells v1 itself has '
'been deprecated. They will be removed in an upcoming '
'release.')
@staticmethod
def _parse_server_string(server_str):
"""Parses the given server_string and returns a tuple of host and port.
If it's not a combination of host part and port, the port element is an
empty string. If the input is an invalid expression, return a tuple of two
empty strings.
"""
try:
# First of all, exclude pure IPv6 address (w/o port).
if netaddr.valid_ipv6(server_str):
return (server_str, '')
# Next, check if this is IPv6 address with a port number
# combination.
if server_str.find("]:") != -1:
(address, port) = server_str.replace('[', '', 1).split(']:')
return (address, port)
# Third, check if this is a combination of an address and a port
if server_str.find(':') == -1:
return (server_str, '')
# This must be a combination of an address and a port
(address, port) = server_str.split(':')
return (address, port)
except (ValueError, netaddr.AddrFormatError):
print('Invalid server_string: %s' % server_str)
return ('', '')
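# Illustrative results, derived from the branches above (the host names and
# addresses are hypothetical):
#   _parse_server_string('192.168.1.10:5672')  -> ('192.168.1.10', '5672')
#   _parse_server_string('[::1]:5672')         -> ('::1', '5672')
#   _parse_server_string('::1')                -> ('::1', '')
#   _parse_server_string('broker.example.org') -> ('broker.example.org', '')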
def _create_transport_hosts(self, username, password,
broker_hosts=None, hostname=None, port=None):
"""Returns a list of oslo.messaging.TransportHost objects."""
transport_hosts = []
# Either broker-hosts or hostname should be set
if broker_hosts:
hosts = broker_hosts.split(',')
for host in hosts:
host = host.strip()
broker_hostname, broker_port = self._parse_server_string(host)
if not broker_port:
msg = _('Invalid broker_hosts value: %s. It should be'
' in hostname:port format') % host
raise ValueError(msg)
try:
broker_port = int(broker_port)
except ValueError:
msg = _('Invalid port value: %s. It should be '
'an integer') % broker_port
raise ValueError(msg)
transport_hosts.append(
messaging.TransportHost(
hostname=broker_hostname,
port=broker_port,
username=username,
password=password))
else:
try:
port = int(port)
except ValueError:
msg = _("Invalid port value: %s. Should be an integer") % port
raise ValueError(msg)
transport_hosts.append(
messaging.TransportHost(
hostname=hostname,
port=port,
username=username,
password=password))
return transport_hosts
@args('--name', metavar='<name>', help='Name for the new cell')
@args('--cell_type', metavar='<parent|api|child|compute>',
help='Whether the cell is parent/api or child/compute')
@args('--username', metavar='<username>',
help='Username for the message broker in this cell')
@args('--password', metavar='<password>',
help='Password for the message broker in this cell')
@args('--broker_hosts', metavar='<broker_hosts>',
help='Comma separated list of message brokers in this cell. '
'Each Broker is specified as hostname:port with both '
'mandatory. This option overrides the --hostname '
'and --port options (if provided). ')
@args('--hostname', metavar='<hostname>',
help='Address of the message broker in this cell')
@args('--port', metavar='<number>',
help='Port number of the message broker in this cell')
@args('--virtual_host', metavar='<virtual_host>',
help='The virtual host of the message broker in this cell')
@args('--woffset', metavar='<float>')
@args('--wscale', metavar='<float>')
def create(self, name, cell_type='child', username=None, broker_hosts=None,
password=None, hostname=None, port=None, virtual_host=None,
woffset=None, wscale=None):
if cell_type not in ['parent', 'child', 'api', 'compute']:
print("Error: cell type must be 'parent'/'api' or "
"'child'/'compute'")
return 2
# Set up the transport URL
transport_hosts = self._create_transport_hosts(
username, password,
broker_hosts, hostname,
port)
transport_url = rpc.get_transport_url()
transport_url.hosts.extend(transport_hosts)
transport_url.virtual_host = virtual_host
is_parent = False
if cell_type in ['api', 'parent']:
is_parent = True
values = {'name': name,
'is_parent': is_parent,
'transport_url': urlparse.unquote(str(transport_url)),
'weight_offset': float(woffset),
'weight_scale': float(wscale)}
ctxt = context.get_admin_context()
db.cell_create(ctxt, values)
@args('--cell_name', metavar='<cell_name>',
help='Name of the cell to delete')
def delete(self, cell_name):
ctxt = context.get_admin_context()
db.cell_delete(ctxt, cell_name)
def list(self):
ctxt = context.get_admin_context()
cells = db.cell_get_all(ctxt)
fmt = "%3s %-10s %-6s %-10s %-15s %-5s %-10s"
print(fmt % ('Id', 'Name', 'Type', 'Username', 'Hostname',
'Port', 'VHost'))
print(fmt % ('-' * 3, '-' * 10, '-' * 6, '-' * 10, '-' * 15,
'-' * 5, '-' * 10))
for cell in cells:
url = rpc.get_transport_url(cell.transport_url)
host = url.hosts[0] if url.hosts else messaging.TransportHost()
print(fmt % (cell.id, cell.name,
'parent' if cell.is_parent else 'child',
host.username, host.hostname,
host.port, url.virtual_host))
print(fmt % ('-' * 3, '-' * 10, '-' * 6, '-' * 10, '-' * 15,
'-' * 5, '-' * 10))
class CellV2Commands(object):
"""Commands for managing cells v2."""
def _validate_transport_url(self, transport_url):
transport_url = transport_url or CONF.transport_url
if not transport_url:
print('Must specify --transport-url if [DEFAULT]/transport_url '
'is not set in the configuration file.')
return None
try:
messaging.TransportURL.parse(conf=CONF, url=transport_url)
except (messaging.InvalidTransportURL, ValueError) as e:
print(_('Invalid transport URL: %s') % six.text_type(e))
return None
return transport_url
def _non_unique_transport_url_database_connection_checker(self, ctxt,
cell_mapping, transport_url, database_connection):
for cell in objects.CellMappingList.get_all(ctxt):
if cell_mapping and cell.uuid == cell_mapping.uuid:
# If we're looking for a specific cell, then don't check
# that one for same-ness to allow idempotent updates
continue
if (cell.database_connection == database_connection or
cell.transport_url == transport_url):
print(_('The specified transport_url and/or '
'database_connection combination already exists '
'for another cell with uuid %s.') % cell.uuid)
return True
return False
@args('--transport-url', metavar='<transport_url>', dest='transport_url',
help='The transport url for the cell message queue')
def simple_cell_setup(self, transport_url=None):
"""Simple cellsv2 setup.
This simplified command is for use by existing non-cells users to
configure the default environment. If you are using CellsV1, this
will not work for you. Returns 0 if setup is completed (or has
already been done), 1 if no hosts are reporting (and this cannot
be mapped) and 2 if run in a CellsV1 environment.
"""
if CONF.cells.enable:
print('CellsV1 users cannot use this simplified setup command')
return 2
transport_url = self._validate_transport_url(transport_url)
if not transport_url:
return 1
ctxt = context.RequestContext()
try:
cell0_mapping = self._map_cell0()
except db_exc.DBDuplicateEntry:
print(_('Cell0 is already setup'))
cell0_mapping = objects.CellMapping.get_by_uuid(
ctxt, objects.CellMapping.CELL0_UUID)
# Run migrations so cell0 is usable
with context.target_cell(ctxt, cell0_mapping) as cctxt:
try:
migration.db_sync(None, context=cctxt)
except db_exc.DBError as ex:
print(_('Unable to sync cell0 schema: %s') % ex)
cell_uuid = self._map_cell_and_hosts(transport_url)
if cell_uuid is None:
# There are no compute hosts which means no cell_mapping was
# created. This should also mean that there are no instances.
return 1
self.map_instances(cell_uuid)
return 0
@args('--database_connection',
metavar='<database_connection>',
help='The database connection url for cell0. '
'This is optional. If not provided, a standard database '
'connection will be used based on the main database connection '
'from the Nova configuration.'
)
def map_cell0(self, database_connection=None):
"""Create a cell mapping for cell0.
cell0 is used for instances that have not been scheduled to any cell.
This generally applies to instances that have encountered an error
before they have been scheduled.
This command creates a cell mapping for this special cell which
requires a database to store the instance data.
Returns 0 if cell0 created successfully or already setup.
"""
try:
self._map_cell0(database_connection=database_connection)
except db_exc.DBDuplicateEntry:
print(_('Cell0 is already setup'))
return 0
def _map_cell0(self, database_connection=None):
"""Faciliate creation of a cell mapping for cell0.
See map_cell0 for more.
"""
def cell0_default_connection():
# If no database connection is provided one is generated
# based on the database connection url.
# The cell0 database will use the same database scheme and
# netloc as the main database, with a related path.
# NOTE(sbauza): The URL has to be RFC1738 compliant in order to
# be usable by sqlalchemy.
connection = CONF.database.connection
# sqlalchemy has a nice utility for parsing database connection
# URLs so we use that here to get the db name so we don't have to
# worry about parsing and splitting a URL which could have special
# characters in the password, which makes parsing a nightmare.
url = sqla_url.make_url(connection)
url.database = url.database + '_cell0'
return urlparse.unquote(str(url))
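# For example (hypothetical URL), a main connection of
#   mysql+pymysql://nova:pass@db-host/nova
# would yield a cell0 connection of
#   mysql+pymysql://nova:pass@db-host/nova_cell0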
dbc = database_connection or cell0_default_connection()
ctxt = context.RequestContext()
# A transport url of 'none://' is provided for cell0. RPC should not
# be used to access cell0 objects. Cells transport switching will
# ignore any 'none' transport type.
cell_mapping = objects.CellMapping(
ctxt, uuid=objects.CellMapping.CELL0_UUID, name="cell0",
transport_url="none:///",
database_connection=dbc)
cell_mapping.create()
return cell_mapping
def _get_and_map_instances(self, ctxt, cell_mapping, limit, marker):
filters = {}
with context.target_cell(ctxt, cell_mapping) as cctxt:
instances = objects.InstanceList.get_by_filters(
cctxt.elevated(read_deleted='yes'), filters,
sort_key='created_at', sort_dir='asc', limit=limit,
marker=marker)
for instance in instances:
try:
mapping = objects.InstanceMapping(ctxt)
mapping.instance_uuid = instance.uuid
mapping.cell_mapping = cell_mapping
mapping.project_id = instance.project_id
mapping.create()
except db_exc.DBDuplicateEntry:
continue
if len(instances) == 0 or len(instances) < limit:
# We've hit the end of the instances table
marker = None
else:
marker = instances[-1].uuid
return marker
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
required=True,
help='Unmigrated instances will be mapped to the cell with the '
'uuid provided.')
@args('--max-count', metavar='<max_count>', dest='max_count',
help='Maximum number of instances to map. If not set, all instances '
'in the cell will be mapped in batches of 50. If you have a '
'large number of instances, consider specifying a custom value '
'and run the command until it exits with 0.')
@args('--reset', action='store_true', dest='reset_marker',
help='The command will start from the beginning as opposed to the '
'default behavior of starting from where the last run '
'finished')
def map_instances(self, cell_uuid, max_count=None, reset_marker=None):
"""Map instances into the provided cell.
Instances in the nova database of the provided cell (nova database
info is obtained from the nova-api database) will be queried from
oldest to newest and if unmapped, will be mapped to the provided cell.
A max-count can be set on the number of instances to map in a single
run. Repeated runs of the command will start from where the last run
finished so it is not necessary to increase max-count to finish. A
reset option can be passed which will reset the marker, thus making the
command start from the beginning as opposed to the default behavior of
starting from where the last run finished. An exit code of 0 indicates
that all instances have been mapped.
"""
# NOTE(stephenfin): The support for batching in this command relies on
# a bit of a hack. We initially process N instance-cell mappings, where
# N is the value of '--max-count' if provided else 50. To ensure we
# can continue from N on the next iteration, we store an instance-cell
# mapping object with a special name and the UUID of the last
# instance-cell mapping processed (N - 1) in munged form. On the next
# iteration, we search for the special name and unmunge the UUID to
# pick up where we left off. This is done until all mappings are
# processed. The munging is necessary as there's a unique constraint on
# the UUID field and we need something reversible. For more
# information, see commit 9038738d0.
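# For example (hypothetical UUID), a marker of
#   '58e86c00-8bd0-4d38-a07e-4a4b4e8746ca'
# is stored in the special marker mapping as
#   '58e86c00 8bd0 4d38 a07e 4a4b4e8746ca'
# and converted back to the hyphenated form on the next run.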
if max_count is not None:
try:
max_count = int(max_count)
except ValueError:
max_count = -1
map_all = False
if max_count < 1:
print(_('Must supply a positive value for max-count'))
return 127
else:
map_all = True
max_count = 50
ctxt = context.RequestContext()
marker_project_id = 'INSTANCE_MIGRATION_MARKER'
# Validate the cell exists, this will raise if not
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
# Check for a marker from a previous run
marker_mapping = objects.InstanceMappingList.get_by_project_id(ctxt,
marker_project_id)
if len(marker_mapping) == 0:
marker = None
else:
# There should be only one here
marker = marker_mapping[0].instance_uuid.replace(' ', '-')
if reset_marker:
marker = None
marker_mapping[0].destroy()
next_marker = True
while next_marker is not None:
next_marker = self._get_and_map_instances(ctxt, cell_mapping,
max_count, marker)
marker = next_marker
if not map_all:
break
if next_marker:
# Don't judge me. There's already an InstanceMapping with this UUID
# so the marker needs to be non destructively modified.
next_marker = next_marker.replace('-', ' ')
objects.InstanceMapping(ctxt, instance_uuid=next_marker,
project_id=marker_project_id).create()
return 1
return 0
def _map_cell_and_hosts(self, transport_url, name=None, verbose=False):
ctxt = context.RequestContext()
cell_mapping_uuid = cell_mapping = None
# First, try to detect if a CellMapping has already been created
compute_nodes = objects.ComputeNodeList.get_all(ctxt)
if not compute_nodes:
print(_('No hosts found to map to cell, exiting.'))
return None
missing_nodes = set()
for compute_node in compute_nodes:
try:
host_mapping = objects.HostMapping.get_by_host(
ctxt, compute_node.host)
except exception.HostMappingNotFound:
missing_nodes.add(compute_node.host)
else:
if verbose:
print(_(
'Host %(host)s is already mapped to cell %(uuid)s'
) % {'host': host_mapping.host,
'uuid': host_mapping.cell_mapping.uuid})
# Re-using the existing UUID in case there is already a mapping
# NOTE(sbauza): There could be possibly multiple CellMappings
# if the operator provides another configuration file and moves
# the hosts to another cell v2, but that's not really something
# we should support.
cell_mapping_uuid = host_mapping.cell_mapping.uuid
if not missing_nodes:
print(_('All hosts are already mapped to cell(s), exiting.'))
return cell_mapping_uuid
# Create the cell mapping in the API database
if cell_mapping_uuid is not None:
cell_mapping = objects.CellMapping.get_by_uuid(
ctxt, cell_mapping_uuid)
if cell_mapping is None:
cell_mapping_uuid = uuidutils.generate_uuid()
cell_mapping = objects.CellMapping(
ctxt, uuid=cell_mapping_uuid, name=name,
transport_url=transport_url,
database_connection=CONF.database.connection)
cell_mapping.create()
# Pull the hosts from the cell database and create the host mappings
for compute_host in missing_nodes:
host_mapping = objects.HostMapping(
ctxt, host=compute_host, cell_mapping=cell_mapping)
host_mapping.create()
if verbose:
print(cell_mapping_uuid)
return cell_mapping_uuid
@args('--transport-url', metavar='<transport_url>', dest='transport_url',
help='The transport url for the cell message queue')
@args('--name', metavar='<cell_name>', help='The name of the cell')
@args('--verbose', action='store_true',
help='Output the cell mapping uuid for any newly mapped hosts.')
def map_cell_and_hosts(self, transport_url=None, name=None, verbose=False):
"""EXPERIMENTAL. Create a cell mapping and host mappings for a cell.
Users not dividing their cloud into multiple cells will be a single
cell v2 deployment and should specify:
nova-manage cell_v2 map_cell_and_hosts --config-file <nova.conf>
Users running multiple cells can add a cell v2 by specifying:
nova-manage cell_v2 map_cell_and_hosts --config-file <cell nova.conf>
"""
transport_url = self._validate_transport_url(transport_url)
if not transport_url:
return 1
self._map_cell_and_hosts(transport_url, name, verbose)
# online_data_migrations established a pattern of 0 meaning everything
# is done, 1 means run again to do more work. This command doesn't do
# partial work so 0 is appropriate.
return 0
@args('--uuid', metavar='<instance_uuid>', dest='uuid', required=True,
help=_('The instance UUID to verify'))
@args('--quiet', action='store_true', dest='quiet',
help=_('Do not print anything'))
def verify_instance(self, uuid, quiet=False):
"""Verify instance mapping to a cell.
This command is useful to determine if the cellsv2 environment is
properly setup, specifically in terms of the cell, host, and instance
mapping records required.
This prints one of three strings (and exits with a code) indicating
whether the instance is successfully mapped to a cell (0), is unmapped
due to an incomplete upgrade (1), unmapped due to normally transient
state (2), it is a deleted instance which has instance mapping (3),
or it is an archived instance which still has an instance mapping (4).
"""
def say(string):
if not quiet:
print(string)
ctxt = context.get_admin_context()
try:
mapping = objects.InstanceMapping.get_by_instance_uuid(
ctxt, uuid)
except exception.InstanceMappingNotFound:
say('Instance %s is not mapped to a cell '
'(upgrade is incomplete) or instance '
'does not exist' % uuid)
return 1
if mapping.cell_mapping is None:
say('Instance %s is not mapped to a cell' % uuid)
return 2
else:
with context.target_cell(ctxt, mapping.cell_mapping) as cctxt:
try:
instance = objects.Instance.get_by_uuid(cctxt, uuid)
except exception.InstanceNotFound:
try:
el_ctx = cctxt.elevated(read_deleted='yes')
instance = objects.Instance.get_by_uuid(el_ctx, uuid)
# instance is deleted
if instance:
say('The instance with uuid %s has been deleted.'
% uuid)
say('Execute `nova-manage db archive_deleted_rows` '
    'command to archive this deleted instance and '
    'remove its instance_mapping.')
return 3
except exception.InstanceNotFound:
# instance is archived
say('The instance with uuid %s has been archived.'
% uuid)
say('However its instance_mapping remains.')
return 4
# instance is alive and mapped to a cell
say('Instance %s is in cell: %s (%s)' % (
uuid,
mapping.cell_mapping.name,
mapping.cell_mapping.uuid))
return 0
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
help='If provided only this cell will be searched for new hosts to '
'map.')
@args('--verbose', action='store_true',
help=_('Provide detailed output when discovering hosts.'))
@args('--strict', action='store_true',
help=_('Considered successful (exit code 0) only when an unmapped '
'host is discovered. Any other outcome will be considered a '
'failure (exit code 1).'))
@args('--by-service', action='store_true', default=False,
dest='by_service',
help=_('Discover hosts by service instead of compute node'))
def discover_hosts(self, cell_uuid=None, verbose=False, strict=False,
by_service=False):
"""Searches cells, or a single cell, and maps found hosts.
When a new host is added to a deployment it will add a service entry
to the db it's configured to use. This command will check the db for
each cell, or a single one if passed in, and map any hosts which are
not currently mapped. If a host is already mapped nothing will be done.
"""
def status_fn(msg):
if verbose:
print(msg)
ctxt = context.RequestContext()
hosts = host_mapping_obj.discover_hosts(ctxt, cell_uuid, status_fn,
by_service)
# discover_hosts will return an empty list if no hosts are discovered
if strict:
return int(not hosts)
@action_description(
_("Add a new cell to nova API database. "
"DB and MQ urls can be provided directly "
"or can be taken from config. The result is cell uuid."))
@args('--name', metavar='<cell_name>', help=_('The name of the cell'))
@args('--database_connection', metavar='<database_connection>',
dest='database_connection',
help=_('The database url for the cell database'))
@args('--transport-url', metavar='<transport_url>', dest='transport_url',
help=_('The transport url for the cell message queue'))
@args('--verbose', action='store_true',
help=_('Output the uuid of the created cell'))
@args('--disabled', action='store_true',
help=_('To create a pre-disabled cell.'))
def create_cell(self, name=None, database_connection=None,
transport_url=None, verbose=False, disabled=False):
ctxt = context.get_context()
transport_url = self._validate_transport_url(transport_url)
if not transport_url:
return 1
database_connection = database_connection or CONF.database.connection
if not database_connection:
print(_('Must specify --database_connection '
'if [database]/connection is not set '
'in the configuration file.'))
return 1
if (self._non_unique_transport_url_database_connection_checker(ctxt,
None, transport_url, database_connection)):
return 2
cell_mapping_uuid = uuidutils.generate_uuid()
cell_mapping = objects.CellMapping(
ctxt,
uuid=cell_mapping_uuid, name=name,
transport_url=transport_url,
database_connection=database_connection,
disabled=disabled)
cell_mapping.create()
if verbose:
print(cell_mapping_uuid)
return 0
@args('--verbose', action='store_true',
help=_('Show sensitive details, such as passwords'))
def list_cells(self, verbose=False):
"""Lists the v2 cells in the deployment.
By default the cell name, uuid, disabled state, masked transport
URL and database connection details are shown. Use the --verbose
option to see transport URL and database connection with their
sensitive details.
"""
cell_mappings = objects.CellMappingList.get_all(
context.get_admin_context())
field_names = [_('Name'), _('UUID'), _('Transport URL'),
_('Database Connection'), _('Disabled')]
t = prettytable.PrettyTable(field_names)
for cell in sorted(cell_mappings,
# CellMapping.name is optional
key=lambda _cell: _cell.name or ''):
fields = [cell.name or '', cell.uuid]
if verbose:
fields.extend([cell.transport_url, cell.database_connection])
else:
fields.extend([
mask_passwd_in_url(cell.transport_url),
mask_passwd_in_url(cell.database_connection)])
fields.extend([cell.disabled])
t.add_row(fields)
print(t)
return 0
@args('--force', action='store_true', default=False,
help=_('Delete hosts and instance_mappings that belong '
'to the cell as well.'))
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
required=True, help=_('The uuid of the cell to delete.'))
def delete_cell(self, cell_uuid, force=False):
"""Delete an empty cell by the given uuid.
This command will return a non-zero exit code in the following cases.
* The cell is not found by uuid.
* It has hosts and force is False.
* It has instance mappings and force is False.
If force is True and the cell has hosts and/or instance_mappings, they
are deleted as well (as long as there are no living instances).
Returns 0 in the following cases.
* The empty cell is found and deleted successfully.
* The cell has hosts and force is True then the cell, hosts and
instance_mappings are deleted successfully; if there are no
living instances.
"""
ctxt = context.get_admin_context()
# Find the CellMapping given the uuid.
try:
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
except exception.CellMappingNotFound:
print(_('Cell with uuid %s was not found.') % cell_uuid)
return 1
# Check to see if there are any HostMappings for this cell.
host_mappings = objects.HostMappingList.get_by_cell_id(
ctxt, cell_mapping.id)
nodes = []
if host_mappings:
if not force:
print(_('There are existing hosts mapped to cell with uuid '
'%s.') % cell_uuid)
return 2
# We query for the compute nodes in the cell,
# so that they can be unmapped.
with context.target_cell(ctxt, cell_mapping) as cctxt:
nodes = objects.ComputeNodeList.get_all(cctxt)
# Check to see if there are any InstanceMappings for this cell.
instance_mappings = objects.InstanceMappingList.get_by_cell_id(
ctxt, cell_mapping.id)
if instance_mappings:
with context.target_cell(ctxt, cell_mapping) as cctxt:
instances = objects.InstanceList.get_all(cctxt)
if instances:
# There are instances in the cell.
print(_('There are existing instances mapped to cell with '
'uuid %s.') % cell_uuid)
return 3
else:
if not force:
# There are no instances in the cell but the records remain
# in the 'instance_mappings' table.
print(_("There are instance mappings to cell with uuid "
"%s, but all instances have been deleted "
"in the cell.") % cell_uuid)
print(_("So execute 'nova-manage db archive_deleted_rows' "
"to delete the instance mappings."))
return 4
# Delete instance_mappings of the deleted instances
for instance_mapping in instance_mappings:
instance_mapping.destroy()
# Unmap the compute nodes so that they can be discovered
# again in future, if needed.
for node in nodes:
node.mapped = 0
node.save()
# Delete hosts mapped to the cell.
for host_mapping in host_mappings:
host_mapping.destroy()
# There are no hosts or instances mapped to the cell so delete it.
cell_mapping.destroy()
return 0
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
required=True, help=_('The uuid of the cell to update.'))
@args('--name', metavar='<cell_name>', dest='name',
help=_('Set the cell name.'))
@args('--transport-url', metavar='<transport_url>', dest='transport_url',
help=_('Set the cell transport_url. NOTE that running nodes '
'will not see the change until restart!'))
@args('--database_connection', metavar='<database_connection>',
dest='db_connection',
help=_('Set the cell database_connection. NOTE that running nodes '
'will not see the change until restart!'))
@args('--disable', action='store_true', dest='disable',
help=_('Disables the cell. Note that scheduling to this cell will be '
       'blocked until it is enabled and the nova-scheduler service '
       'receives a SIGHUP.'))
@args('--enable', action='store_true', dest='enable',
help=_('Enables the cell. Note that this makes a disabled cell '
'available for scheduling after a SIGHUP of the '
'nova-scheduler service'))
def update_cell(self, cell_uuid, name=None, transport_url=None,
db_connection=None, disable=False, enable=False):
"""Updates the properties of a cell by the given uuid.
If the cell is not found by uuid, this command will return an exit
code of 1. If the provided transport_url or/and database_connection
is/are same as another cell, this command will return an exit code
of 3. If the properties cannot be set, this will return 2. If an
attempt is made to disable and enable a cell at the same time, this
command will exit with a return code of 4. If an attempt is made to
disable or enable cell0 this command will exit with a return code of 5.
Otherwise, the exit code will be 0.
NOTE: Updating the transport_url or database_connection fields on
a running system will NOT result in all nodes immediately using the
new values. Use caution when changing these values.
NOTE (tssurya): The scheduler will not notice that a cell has been
enabled/disabled until it is restarted or sent the SIGHUP signal.
"""
ctxt = context.get_admin_context()
try:
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
except exception.CellMappingNotFound:
print(_('Cell with uuid %s was not found.') % cell_uuid)
return 1
if name:
cell_mapping.name = name
transport_url = transport_url or CONF.transport_url
db_connection = db_connection or CONF.database.connection
if (self._non_unique_transport_url_database_connection_checker(ctxt,
cell_mapping, transport_url, db_connection)):
# We use the return code 3 before 2 to avoid changing the
# semantic meanings of return codes.
return 3
if transport_url:
cell_mapping.transport_url = transport_url
if db_connection:
cell_mapping.database_connection = db_connection
if disable and enable:
print(_('Cell cannot be disabled and enabled at the same time.'))
return 4
if disable or enable:
if cell_mapping.is_cell0():
print(_('Cell0 cannot be disabled or enabled.'))
return 5
elif disable and not cell_mapping.disabled:
cell_mapping.disabled = True
elif enable and cell_mapping.disabled:
cell_mapping.disabled = False
elif disable and cell_mapping.disabled:
print(_('Cell %s is already disabled') % cell_uuid)
elif enable and not cell_mapping.disabled:
print(_('Cell %s is already enabled') % cell_uuid)
try:
cell_mapping.save()
except Exception as e:
print(_('Unable to update CellMapping: %s') % e)
return 2
return 0
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
help=_('The uuid of the cell.'))
def list_hosts(self, cell_uuid=None):
"""Lists the hosts in one or all v2 cells."""
ctxt = context.get_admin_context()
if cell_uuid:
# Find the CellMapping given the uuid.
try:
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
except exception.CellMappingNotFound:
print(_('Cell with uuid %s was not found.') % cell_uuid)
return 1
host_mappings = objects.HostMappingList.get_by_cell_id(
ctxt, cell_mapping.id)
else:
host_mappings = objects.HostMappingList.get_all(ctxt)
field_names = [_('Cell Name'), _('Cell UUID'), _('Hostname')]
t = prettytable.PrettyTable(field_names)
for host in sorted(host_mappings, key=lambda _host: _host.host):
fields = [host.cell_mapping.name, host.cell_mapping.uuid,
host.host]
t.add_row(fields)
print(t)
return 0
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
required=True, help=_('The uuid of the cell.'))
@args('--host', metavar='<host>', dest='host',
required=True, help=_('The host to delete.'))
def delete_host(self, cell_uuid, host):
"""Delete a host in a cell (host mappings) by the given host name
This command will return a non-zero exit code in the following cases.
* The cell is not found by uuid.
* The host is not found by host name.
* The host is not in the cell.
* The host has instances.
Returns 0 if the host is deleted successfully.
"""
ctxt = context.get_admin_context()
# Find the CellMapping given the uuid.
try:
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
except exception.CellMappingNotFound:
print(_('Cell with uuid %s was not found.') % cell_uuid)
return 1
try:
host_mapping = objects.HostMapping.get_by_host(ctxt, host)
except exception.HostMappingNotFound:
print(_('The host %s was not found.') % host)
return 2
if host_mapping.cell_mapping.uuid != cell_mapping.uuid:
print(_('The host %(host)s was not found '
'in the cell %(cell_uuid)s.') % {'host': host,
'cell_uuid': cell_uuid})
return 3
with context.target_cell(ctxt, cell_mapping) as cctxt:
instances = objects.InstanceList.get_by_host(cctxt, host)
try:
nodes = objects.ComputeNodeList.get_all_by_host(cctxt, host)
except exception.ComputeHostNotFound:
nodes = []
if instances:
print(_('There are instances on the host %s.') % host)
return 4
for node in nodes:
node.mapped = 0
node.save()
host_mapping.destroy()
return 0
class PlacementCommands(object):
"""Commands for managing placement resources."""
@staticmethod
def _get_compute_node_uuid(ctxt, instance, node_cache):
"""Find the ComputeNode.uuid for the given Instance
:param ctxt: cell-targeted nova.context.RequestContext
:param instance: the instance to lookup a compute node
:param node_cache: dict of Instance.node keys to ComputeNode.uuid
values; this cache is updated if a new node is processed.
:returns: ComputeNode.uuid for the given instance
:raises: nova.exception.ComputeHostNotFound
"""
if instance.node in node_cache:
return node_cache[instance.node]
compute_node = objects.ComputeNode.get_by_host_and_nodename(
ctxt, instance.host, instance.node)
node_uuid = compute_node.uuid
node_cache[instance.node] = node_uuid
return node_uuid
def _heal_allocations_for_instance(self, ctxt, instance, node_cache,
output, placement):
"""Checks the given instance to see if it needs allocation healing
:param ctxt: cell-targeted nova.context.RequestContext
:param instance: the instance to check for allocation healing
:param node_cache: dict of Instance.node keys to ComputeNode.uuid
values; this cache is updated if a new node is processed.
:param output: function that takes a single message for verbose output
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:return: True if allocations were created or updated for the instance,
None if nothing needed to be done
:raises: nova.exception.ComputeHostNotFound if a compute node for a
given instance cannot be found
:raises: AllocationCreateFailed if unable to create allocations for
a given instance against a given compute node resource provider
:raises: AllocationUpdateFailed if unable to update allocations for
a given instance with consumer project/user information
"""
if instance.task_state is not None:
output(_('Instance %(instance)s is undergoing a task '
'state transition: %(task_state)s') %
{'instance': instance.uuid,
'task_state': instance.task_state})
return
if instance.node is None:
output(_('Instance %s is not on a host.') % instance.uuid)
return
try:
allocations = placement.get_allocs_for_consumer(
ctxt, instance.uuid)
except ks_exc.ClientException as e:
raise exception.AllocationUpdateFailed(
consumer_uuid=instance.uuid,
error=_("Allocation retrieval failed: %s") % e)
except exception.ConsumerAllocationRetrievalFailed as e:
output(_("Allocation retrieval failed: %s") % e)
allocations = None
# get_allocs_for_consumer uses safe_connect which will
# return None if we can't communicate with Placement, and the
# response can have an empty {'allocations': {}} response if
# there are no allocations for the instance so handle both
if allocations and allocations.get('allocations'):
# Check to see if the allocation project_id
# and user_id matches the instance project and user and
# fix the allocation project/user if they don't match.
# Allocations created before Placement API version 1.8
# did not have a project_id/user_id, and migrated records
# could have sentinel values from config.
if (allocations.get('project_id') ==
instance.project_id and
allocations.get('user_id') == instance.user_id):
output(_('Instance %s already has allocations with '
'matching consumer project/user.') %
instance.uuid)
return
# We have an instance with allocations but not the correct
# project_id/user_id, so we want to update the allocations
# and re-put them. We don't use put_allocations here
# because we don't want to mess up shared or nested
# provider allocations.
allocations['project_id'] = instance.project_id
allocations['user_id'] = instance.user_id
# We use CONSUMER_GENERATION_VERSION for PUT
# /allocations/{consumer_id} to mirror the body structure from
# get_allocs_for_consumer.
resp = placement.put(
'/allocations/%s' % instance.uuid,
allocations, version=report.CONSUMER_GENERATION_VERSION)
if resp:
output(_('Successfully updated allocations for '
'instance %s.') % instance.uuid)
return True
else:
raise exception.AllocationUpdateFailed(
consumer_uuid=instance.uuid, error=resp.text)
# This instance doesn't have allocations so we need to find
# its compute node resource provider.
node_uuid = self._get_compute_node_uuid(
ctxt, instance, node_cache)
# Now get the resource allocations for the instance based
# on its embedded flavor.
resources = scheduler_utils.resources_from_flavor(
instance, instance.flavor)
if placement.put_allocations(
ctxt, node_uuid, instance.uuid, resources,
instance.project_id, instance.user_id,
consumer_generation=None):
output(_('Successfully created allocations for '
'instance %(instance)s against resource '
'provider %(provider)s.') %
{'instance': instance.uuid, 'provider': node_uuid})
return True
else:
raise exception.AllocationCreateFailed(
instance=instance.uuid, provider=node_uuid)
def _heal_instances_in_cell(self, ctxt, max_count, unlimited, output,
placement):
"""Checks for instances to heal in a given cell.
:param ctxt: cell-targeted nova.context.RequestContext
:param max_count: batch size (limit per instance query)
:param unlimited: True if all instances in the cell should be
processed, else False to just process $max_count instances
:param output: function that takes a single message for verbose output
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:return: Number of instances that had allocations created.
:raises: nova.exception.ComputeHostNotFound if a compute node for a
given instance cannot be found
:raises: AllocationCreateFailed if unable to create allocations for
a given instance against a given compute node resource provider
:raises: AllocationUpdateFailed if unable to update allocations for
a given instance with consumer project/user information
"""
# Keep a cache of instance.node to compute node resource provider UUID.
# This will save some queries for non-ironic instances to the
# compute_nodes table.
node_cache = {}
# Track the total number of instances that have allocations created
# for them in this cell. We return when num_processed equals max_count
# and unlimited=True or we exhaust the number of instances to process
# in this cell.
num_processed = 0
# Get all instances from this cell which have a host and are not
# undergoing a task state transition. Go from oldest to newest.
# NOTE(mriedem): Unfortunately we don't have a marker to use
# between runs where the user is specifying --max-count.
# TODO(mriedem): Store a marker in system_metadata so we can
# automatically pick up where we left off without the user having
# to pass it in (if unlimited is False).
filters = {'deleted': False}
instances = objects.InstanceList.get_by_filters(
ctxt, filters=filters, sort_key='created_at', sort_dir='asc',
limit=max_count, expected_attrs=['flavor'])
while instances:
output(_('Found %s candidate instances.') % len(instances))
# For each instance in this list, we need to see if it has
# allocations in placement and if so, assume it's correct and
# continue.
for instance in instances:
if self._heal_allocations_for_instance(
ctxt, instance, node_cache, output, placement):
num_processed += 1
# Make sure we don't go over the max count. Note that we
# don't include instances that already have allocations in the
# max_count number, only the number of instances that have
# successfully created allocations.
if not unlimited and num_processed == max_count:
return num_processed
# Use a marker to get the next page of instances in this cell.
# Note that InstanceList doesn't support slice notation.
marker = instances[len(instances) - 1].uuid
instances = objects.InstanceList.get_by_filters(
ctxt, filters=filters, sort_key='created_at', sort_dir='asc',
limit=max_count, marker=marker, expected_attrs=['flavor'])
return num_processed
@action_description(
_("Iterates over non-cell0 cells looking for instances which do "
"not have allocations in the Placement service, or have incomplete "
"consumer project_id/user_id values in existing allocations, and "
"which are not undergoing a task state transition. For each "
"instance found, allocations are created (or updated) against the "
"compute node resource provider for that instance based on the "
"flavor associated with the instance. This command requires that "
"the [api_database]/connection and [placement] configuration "
"options are set."))
@args('--max-count', metavar='<max_count>', dest='max_count',
help='Maximum number of instances to process. If not specified, all '
'instances in each cell will be mapped in batches of 50. '
'If you have a large number of instances, consider specifying '
'a custom value and run the command until it exits with '
'0 or 4.')
@args('--verbose', action='store_true', dest='verbose', default=False,
help='Provide verbose output during execution.')
def heal_allocations(self, max_count=None, verbose=False):
"""Heals instance allocations in the Placement service
Return codes:
* 0: Command completed successfully and allocations were created.
* 1: --max-count was reached and there are more instances to process.
* 2: Unable to find a compute node record for a given instance.
* 3: Unable to create (or update) allocations for an instance against
its compute node resource provider.
* 4: Command completed successfully but no allocations were created.
* 127: Invalid input.
"""
# NOTE(mriedem): Thoughts on ways to expand this:
# - add a --dry-run option to just print which instances would have
# allocations created for them
# - allow passing a specific cell to heal
# - allow filtering on enabled/disabled cells
# - allow passing a specific instance to heal
# - add a force option to force allocations for instances which have
# task_state is not None (would get complicated during a migration);
# for example, this could clean up ironic instances that have
# allocations on VCPU/MEMORY_MB/DISK_GB but are now using a custom
# resource class
# - add an option to overwrite allocations for instances which already
# have allocations (but the operator thinks might be wrong?); this
# would probably only be safe with a specific instance.
# - deal with nested resource providers?
output = lambda msg: None
if verbose:
output = lambda msg: print(msg)
# TODO(mriedem): Rather than --max-count being both a total and batch
# count, should we have separate options to be specific, i.e. --total
# and --batch-size? Then --batch-size defaults to 50 and --total
# defaults to None to mean unlimited.
if max_count is not None:
try:
max_count = int(max_count)
except ValueError:
max_count = -1
unlimited = False
if max_count < 1:
print(_('Must supply a positive integer for --max-count.'))
return 127
else:
max_count = 50
unlimited = True
output(_('Running batches of %i until complete') % max_count)
ctxt = context.get_admin_context()
cells = objects.CellMappingList.get_all(ctxt)
if not cells:
output(_('No cells to process.'))
return 4
placement = report.SchedulerReportClient()
num_processed = 0
# TODO(mriedem): Use context.scatter_gather_skip_cell0.
for cell in cells:
# Skip cell0 since that is where instances go that do not get
# scheduled and hence would not have allocations against a host.
if cell.uuid == objects.CellMapping.CELL0_UUID:
continue
output(_('Looking for instances in cell: %s') % cell.identity)
limit_per_cell = max_count
if not unlimited:
# Adjust the limit for the next cell. For example, if the user
# only wants to process a total of 100 instances and we did
# 75 in cell1, then we only need 25 more from cell2 and so on.
limit_per_cell = max_count - num_processed
with context.target_cell(ctxt, cell) as cctxt:
try:
num_processed += self._heal_instances_in_cell(
cctxt, limit_per_cell, unlimited, output, placement)
except exception.ComputeHostNotFound as e:
print(e.format_message())
return 2
except (exception.AllocationCreateFailed,
exception.AllocationUpdateFailed) as e:
print(e.format_message())
return 3
# Make sure we don't go over the max count. Note that we
# don't include instances that already have allocations in the
# max_count number, only the number of instances that have
# successfully created allocations.
if num_processed == max_count:
output(_('Max count reached. Processed %s instances.')
% num_processed)
return 1
output(_('Processed %s instances.') % num_processed)
if not num_processed:
return 4
return 0
@staticmethod
def _get_rp_uuid_for_host(ctxt, host):
"""Finds the resource provider (compute node) UUID for the given host.
:param ctxt: cell-targeted nova RequestContext
:param host: name of the compute host
:returns: The UUID of the resource provider (compute node) for the host
:raises: nova.exception.HostMappingNotFound if no host_mappings record
is found for the host; indicates
"nova-manage cell_v2 discover_hosts" needs to be run on the cell.
:raises: nova.exception.ComputeHostNotFound if no compute_nodes record
is found in the cell database for the host; indicates the
nova-compute service on that host might need to be restarted.
:raises: nova.exception.TooManyComputesForHost if there is more than
one compute_nodes record in the cell database for the host. Under
normal circumstances this is only possible for ironic hosts, but
ironic hosts are not currently supported with host aggregates, so
finding more than one compute node for the host is treated as an
error that the operator will need to resolve manually.
"""
# Get the host mapping to determine which cell it's in.
hm = objects.HostMapping.get_by_host(ctxt, host)
# Now get the compute node record for the host from the cell.
with context.target_cell(ctxt, hm.cell_mapping) as cctxt:
# There should really only be one, since only ironic
# hosts can have multiple nodes, and you can't have
# ironic hosts in aggregates for that reason. If we
# find more than one, it's an error.
nodes = objects.ComputeNodeList.get_all_by_host(
cctxt, host)
if len(nodes) > 1:
# This shouldn't happen, so we need to bail since we
# won't know which node to use.
raise exception.TooManyComputesForHost(
num_computes=len(nodes), host=host)
return nodes[0].uuid
@action_description(
_("Mirrors compute host aggregates to resource provider aggregates "
"in the Placement service. Requires the [api_database] and "
"[placement] sections of the nova configuration file to be "
"populated."))
@args('--verbose', action='store_true', dest='verbose', default=False,
help='Provide verbose output during execution.')
# TODO(mriedem): Add an option for the 'remove aggregate' behavior.
# We know that we want to mirror hosts aggregate membership to
# placement, but regarding removal, what if the operator or some external
# tool added the resource provider to an aggregate but there is no matching
# host aggregate, e.g. ironic nodes or shared storage provider
# relationships?
# TODO(mriedem): Probably want an option to pass a specific host instead of
# doing all of them.
def sync_aggregates(self, verbose=False):
"""Synchronizes nova host aggregates with resource provider aggregates
Adds nodes to missing provider aggregates in Placement.
NOTE: Depending on the size of your deployment and the number of
compute hosts in aggregates, this command could cause a non-negligible
amount of traffic to the placement service and therefore is
recommended to be run during maintenance windows.
Return codes:
* 0: Successful run
* 1: A host was found with more than one matching compute node record
* 2: An unexpected error occurred while working with the placement API
* 3: Failed updating provider aggregates in placement
* 4: Host mappings not found for one or more host aggregate members
* 5: Compute node records not found for one or more hosts
* 6: Resource provider not found by uuid for a given host
"""
# Start by getting all host aggregates.
ctxt = context.get_admin_context()
aggregate_api = compute_api.AggregateAPI()
placement = aggregate_api.placement_client
aggregates = aggregate_api.get_aggregate_list(ctxt)
# Now we're going to loop over the existing compute hosts in aggregates
# and check to see if their corresponding resource provider, found via
# the host's compute node uuid, are in the same aggregate. If not, we
# add the resource provider to the aggregate in Placement.
output = lambda msg: None
if verbose:
output = lambda msg: print(msg)
output(_('Filling in missing placement aggregates'))
# Since hosts can be in more than one aggregate, keep track of the host
# to its corresponding resource provider uuid to avoid redundant
# lookups.
host_to_rp_uuid = {}
unmapped_hosts = set() # keep track of any missing host mappings
computes_not_found = set() # keep track of missing nodes
providers_not_found = {} # map of hostname to missing provider uuid
for aggregate in aggregates:
output(_('Processing aggregate: %s') % aggregate.name)
for host in aggregate.hosts:
output(_('Processing host: %s') % host)
rp_uuid = host_to_rp_uuid.get(host)
if not rp_uuid:
try:
rp_uuid = self._get_rp_uuid_for_host(ctxt, host)
host_to_rp_uuid[host] = rp_uuid
except exception.HostMappingNotFound:
# Don't fail on this now, we can dump it at the end.
unmapped_hosts.add(host)
continue
except exception.ComputeHostNotFound:
# Don't fail on this now, we can dump it at the end.
computes_not_found.add(host)
continue
except exception.TooManyComputesForHost as e:
# TODO(mriedem): Should we treat this like the other
# errors and not fail immediately but dump at the end?
print(e.format_message())
return 1
# We've got our compute node record, so now we can look to
# see if the matching resource provider, found via compute
# node uuid, is in the same aggregate in placement, found via
# aggregate uuid.
# NOTE(mriedem): We could re-use placement.aggregate_add_host
# here although that has to do the provider lookup by host as
# well, but it does handle generation conflicts.
resp = placement.get( # use 1.19 to get the generation
'/resource_providers/%s/aggregates' % rp_uuid,
version='1.19')
if resp:
body = resp.json()
provider_aggregate_uuids = body['aggregates']
# The moment of truth: is the provider in the same host
# aggregate relationship?
aggregate_uuid = aggregate.uuid
if aggregate_uuid not in provider_aggregate_uuids:
# Add the resource provider to this aggregate.
provider_aggregate_uuids.append(aggregate_uuid)
# Now update the provider aggregates using the
# generation to ensure we're conflict-free.
aggregate_update_body = {
'aggregates': provider_aggregate_uuids,
'resource_provider_generation':
body['resource_provider_generation']
}
put_resp = placement.put(
'/resource_providers/%s/aggregates' % rp_uuid,
aggregate_update_body, version='1.19')
if put_resp:
output(_('Successfully added host (%(host)s) and '
'provider (%(provider)s) to aggregate '
'(%(aggregate)s).') %
{'host': host, 'provider': rp_uuid,
'aggregate': aggregate_uuid})
elif put_resp.status_code == 404:
# We must have raced with a delete on the resource
# provider.
providers_not_found[host] = rp_uuid
else:
# TODO(mriedem): Handle 409 conflicts by retrying
# the operation.
print(_('Failed updating provider aggregates for '
'host (%(host)s), provider (%(provider)s) '
'and aggregate (%(aggregate)s). Error: '
'%(error)s') %
{'host': host, 'provider': rp_uuid,
'aggregate': aggregate_uuid,
'error': put_resp.text})
return 3
elif resp.status_code == 404:
# The resource provider wasn't found. Store this for later.
providers_not_found[host] = rp_uuid
else:
print(_('An error occurred getting resource provider '
'aggregates from placement for provider '
'%(provider)s. Error: %(error)s') %
{'provider': rp_uuid, 'error': resp.text})
return 2
# Now do our error handling. Note that there is no real priority on
# the error code we return. We want to dump all of the issues we hit
# so the operator can fix them before re-running the command, but
# whether we return 4 or 5 or 6 doesn't matter.
return_code = 0
if unmapped_hosts:
print(_('The following hosts were found in nova host aggregates '
'but no host mappings were found in the nova API DB. Run '
'"nova-manage cell_v2 discover_hosts" and then retry. '
'Missing: %s') % ','.join(unmapped_hosts))
return_code = 4
if computes_not_found:
print(_('Unable to find matching compute_nodes record entries in '
'the cell database for the following hosts; does the '
'nova-compute service on each host need to be restarted? '
'Missing: %s') % ','.join(computes_not_found))
return_code = 5
if providers_not_found:
print(_('Unable to find matching resource provider record in '
'placement with uuid for the following hosts: %s. Try '
'restarting the nova-compute service on each host and '
'then retry.') %
','.join('(%s=%s)' % (host, providers_not_found[host])
for host in sorted(providers_not_found.keys())))
return_code = 6
return return_code
CATEGORIES = {
'api_db': ApiDbCommands,
'cell': CellCommands,
'cell_v2': CellV2Commands,
'db': DbCommands,
'floating': FloatingIpCommands,
'network': NetworkCommands,
'placement': PlacementCommands
}
add_command_parsers = functools.partial(cmd_common.add_command_parsers,
categories=CATEGORIES)
category_opt = cfg.SubCommandOpt('category',
title='Command categories',
help='Available categories',
handler=add_command_parsers)
post_mortem_opt = cfg.BoolOpt('post-mortem',
default=False,
help='Allow post-mortem debugging')
def main():
"""Parse options and call the appropriate class/method."""
CONF.register_cli_opts([category_opt, post_mortem_opt])
config.parse_args(sys.argv)
logging.set_defaults(
default_log_levels=logging.get_default_log_levels() +
_EXTRA_DEFAULT_LOG_LEVELS)
logging.setup(CONF, "nova")
objects.register_all()
if CONF.category.name == "version":
print(version.version_string_with_package())
return 0
if CONF.category.name == "bash-completion":
cmd_common.print_bash_completion(CATEGORIES)
return 0
try:
fn, fn_args, fn_kwargs = cmd_common.get_action_fn()
ret = fn(*fn_args, **fn_kwargs)
rpc.cleanup()
return ret
except Exception:
if CONF.post_mortem:
import pdb
pdb.post_mortem()
else:
print(_("An error has occurred:\n%s") % traceback.format_exc())
return 1
| {
"content_hash": "66b01c1aff5e481757307731c66e8041",
"timestamp": "",
"source": "github",
"line_count": 2341,
"max_line_length": 79,
"avg_line_length": 45.58393848782571,
"alnum_prop": 0.5767298897968364,
"repo_name": "gooddata/openstack-nova",
"id": "dc87e3f7ee115d02f7869c9294a2a8b466091f2d",
"size": "107538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/cmd/manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3858"
},
{
"name": "HTML",
"bytes": "1386"
},
{
"name": "PHP",
"bytes": "43584"
},
{
"name": "Python",
"bytes": "23012372"
},
{
"name": "Shell",
"bytes": "32567"
},
{
"name": "Smarty",
"bytes": "429290"
}
],
"symlink_target": ""
} |
import os
from django.conf import settings
from django.core.files.images import ImageFile
from django.core.files.storage import default_storage
from django.template import Context, Template
from django.test import TestCase
from .models import OriginalImage, ResizedImage
from .utils import get_sized_image, get_sized_images
def _create_original(name):
src = os.path.join(
os.path.dirname(__file__),
'test_media',
name
)
with open(src, 'rb') as src_file:
orig_path = default_storage.save(
os.path.join('test_images', name),
ImageFile(src_file)
)
return OriginalImage.objects.create(image_file=orig_path)
def _clean_up_directory(path):
try:
os.rmdir(path)
except OSError:
pass
class SrcsetTests(TestCase):
def setUp(self):
self.orig1 = _create_original('image1.jpg') # 2688x1520
self.orig2 = _create_original('image2.jpg') # 300x170
def test_resize_one(self):
r1 = get_sized_image(self.orig1.image_file, (500, 500))
self.assertEqual(OriginalImage.objects.count(), 2)
self.assertEqual(ResizedImage.objects.count(), 1)
self.assertEqual(r1.image_file.name, os.path.join(
'responsive_images',
self.orig1.image_file.name,
'500x500_50-50.jpg'
))
def test_resize_one_nocrop(self):
# constrained by width
r1 = get_sized_image(self.orig1.image_file, (400, 600), crop=False)
self.assertEqual(r1.size, (400, 226))
# constrained by height
r2 = get_sized_image(self.orig1.image_file, (400, 200), crop=False)
self.assertEqual(r2.size, (354, 200))
def test_resize_larger(self):
r1 = get_sized_image(self.orig2.image_file, (500, 500))
self.assertFalse(ResizedImage.objects.exists())
self.assertEqual(r1.size, (300, 170))
self.assertEqual(r1.image_file.name, self.orig2.image_file.name)
def test_resize_cases(self):
r1 = get_sized_image(self.orig2.image_file, (200, 200))
self.assertEqual(r1.size, (200, 170))
self.assertTrue(r1.image_file.name.endswith('200x170_50-50.jpg'))
r2 = get_sized_image(self.orig2.image_file, (300, 150))
self.assertEqual(r2.size, (300, 150))
self.assertTrue(r2.image_file.name.endswith('300x150_50-50.jpg'))
r3 = get_sized_image(self.orig2.image_file, (200, 200), crop=None)
self.assertEqual(r3.size, (200, 113))
self.assertTrue(r3.image_file.name.endswith('200x113_nocrop.jpg'))
r4 = get_sized_image(self.orig2.image_file, (300, 150), crop=None)
self.assertEqual(r4.size, (265, 150))
self.assertTrue(r4.image_file.name.endswith('265x150_nocrop.jpg'))
def test_resize_same(self):
r1 = get_sized_image(self.orig1.image_file, (500, 500))
r2 = get_sized_image(self.orig1.image_file, (500, 500))
self.assertEqual(ResizedImage.objects.count(), 1)
self.assertEqual(r1, r2)
def test_resize_same_nocrop(self):
r1 = get_sized_image(self.orig1.image_file, (400, 600), crop=False)
r2 = get_sized_image(self.orig1.image_file, (400, 600), crop=False)
self.assertEqual(ResizedImage.objects.count(), 1)
self.assertEqual(r1, r2)
def test_resize_multiple(self):
(r1, r2, r3) = get_sized_images(self.orig1.image_file, [
(1000, 1000),
(2000, 2000),
(3000, 3000),
(4000, 4000),
])
self.assertEqual(OriginalImage.objects.count(), 2)
self.assertEqual(ResizedImage.objects.count(), 2)
self.assertEqual(r1.size, (1000, 1000))
self.assertTrue(r1.image_file.name.endswith('1000x1000_50-50.jpg'))
self.assertEqual(r2.size, (2000, 1520))
self.assertTrue(r2.image_file.name.endswith('2000x1520_50-50.jpg'))
self.assertEqual(r3.size, (2688, 1520))
self.assertEqual(r3.image_file.name, self.orig1.image_file.name)
def test_resize_multiple_nocrop(self):
(r1, r2, r3) = get_sized_images(self.orig1.image_file, [
(1000, 1000),
(2000, 2000),
(3000, 3000),
(4000, 4000),
], crop=None)
self.assertEqual(r1.size, (1000, 565))
self.assertTrue(r1.image_file.name.endswith('1000x565_nocrop.jpg'))
self.assertEqual(r2.size, (2000, 1131))
self.assertTrue(r2.image_file.name.endswith('2000x1131_nocrop.jpg'))
self.assertEqual(r3.size, (2688, 1520))
self.assertEqual(r3.image_file.name, self.orig1.image_file.name)
def test_src_tag(self):
template = Template('{% load responsive_images %}{% src image 500x500 %}')
context = Context({'image': self.orig1.image_file})
rendered = template.render(context)
self.assertEqual(
rendered,
os.path.join(
settings.MEDIA_URL,
'responsive_images',
self.orig1.image_file.name,
'500x500_50-50.jpg'
)
)
self.assertEqual(ResizedImage.objects.count(), 1)
r1 = ResizedImage.objects.get()
self.assertEqual(r1.size, (500, 500))
def test_src_tag_nocrop(self):
template = Template('{% load responsive_images %}{% src image 500x500 nocrop %}')
context = Context({'image': self.orig1.image_file})
rendered = template.render(context)
self.assertEqual(
rendered,
os.path.join(
settings.MEDIA_URL,
'responsive_images',
self.orig1.image_file.name,
'500x283_nocrop.jpg'
)
)
r1 = ResizedImage.objects.get()
self.assertEqual(r1.size, (500, 283))
def test_src_tag_crop(self):
template1 = Template('{% load responsive_images %}{% src image 500x500 %}')
template2 = Template('{% load responsive_images %}{% src image 500x500 crop %}')
template3 = Template('{% load responsive_images %}{% src image 500x500 center %}')
template4 = Template('{% load responsive_images %}{% src image 500x500 40,10 %}')
context = Context({'image': self.orig1.image_file})
rendered1 = template1.render(context)
rendered2 = template2.render(context)
rendered3 = template3.render(context)
rendered4 = template4.render(context)
self.assertEqual(ResizedImage.objects.count(), 2)
center_crop_url = os.path.join(
settings.MEDIA_URL,
'responsive_images',
self.orig1.image_file.name,
'500x500_50-50.jpg'
)
self.assertEqual(rendered1, center_crop_url)
self.assertEqual(rendered2, center_crop_url)
self.assertEqual(rendered3, center_crop_url)
self.assertEqual(rendered4, os.path.join(
settings.MEDIA_URL,
'responsive_images',
self.orig1.image_file.name,
'500x500_40-10.jpg'
))
for resized in ResizedImage.objects.all():
self.assertEqual(resized.size, (500, 500))
def test_src_tag_same(self):
template = Template('{% load responsive_images %}{% src image 500x500 %}')
context = Context({'image': self.orig1.image_file})
rendered1 = template.render(context)
rendered2 = template.render(context)
for rendered in [rendered1, rendered2]:
self.assertEqual(
rendered,
os.path.join(
settings.MEDIA_URL,
'responsive_images',
self.orig1.image_file.name,
'500x500_50-50.jpg'
)
)
self.assertEqual(ResizedImage.objects.count(), 1)
def test_srcset_tag(self):
template = Template('{% load responsive_images %}{% srcset image 1000x1000 2000x2000 3000x3000 4000x4000 %}')
context = Context({'image': self.orig1.image_file})
rendered = template.render(context)
self.assertEqual(
rendered,
os.path.join(
settings.MEDIA_URL,
'responsive_images',
self.orig1.image_file.name,
'1000x1000_50-50.jpg'
) + ' 1000w, '
+ os.path.join(
settings.MEDIA_URL,
'responsive_images',
self.orig1.image_file.name,
'2000x1520_50-50.jpg'
) + ' 2000w, '
+ os.path.join(
settings.MEDIA_URL,
self.orig1.image_file.name,
) + ' 2688w'
)
self.assertEqual(ResizedImage.objects.count(), 2)
def test_srcset_tag_nocrop(self):
template = Template('{% load responsive_images %}{% srcset image 1000x1000 2000x2000 3000x3000 4000x4000 nocrop %}')
context = Context({'image': self.orig1.image_file})
rendered = template.render(context)
self.assertEqual(
rendered,
os.path.join(
settings.MEDIA_URL,
'responsive_images',
self.orig1.image_file.name,
'1000x565_nocrop.jpg'
) + ' 1000w, '
+ os.path.join(
settings.MEDIA_URL,
'responsive_images',
self.orig1.image_file.name,
'2000x1131_nocrop.jpg'
) + ' 2000w, '
+ os.path.join(
settings.MEDIA_URL,
self.orig1.image_file.name,
) + ' 2688w'
)
self.assertEqual(ResizedImage.objects.count(), 2)
def test_srcset_tag_same_width(self):
template = Template('{% load responsive_images %}{% srcset image 450x150 600x200 %}')
context = Context({'image': self.orig2.image_file})
rendered = template.render(context)
self.assertEqual(
rendered,
os.path.join(
settings.MEDIA_URL,
'responsive_images',
self.orig2.image_file.name,
'300x150_50-50.jpg'
) + ' 300w'
)
self.assertEqual(ResizedImage.objects.count(), 1)
def tearDown(self):
for image in OriginalImage.objects.all():
image.image_file.delete(save=False)
for image in ResizedImage.objects.all():
image.image_file.delete(save=False)
_clean_up_directory(os.path.join(settings.MEDIA_ROOT, 'test_images'))
_clean_up_directory(os.path.join(
settings.MEDIA_ROOT,
'responsive_images',
self.orig1.image_file.name,
))
_clean_up_directory(os.path.join(
settings.MEDIA_ROOT,
'responsive_images',
self.orig2.image_file.name,
))
_clean_up_directory(os.path.join(
settings.MEDIA_ROOT,
'responsive_images',
'test_images'
))
| {
"content_hash": "d646d3f5255b641579a840598b45feb3",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 124,
"avg_line_length": 38.95406360424028,
"alnum_prop": 0.5737481857764877,
"repo_name": "ConvectiveSoftware/django-responsive-images",
"id": "77aba5f1b8ca4b375056c0bc6822adb59f02deae",
"size": "11024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "responsive_images/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18318"
}
],
"symlink_target": ""
} |
from functools import lru_cache, wraps
from os.path import isfile, join as join_paths
from pickle import dumps as pickle_dumps, loads as pickle_loads
from ines import DEFAULT_RETRY_ERRNO, lazy_import_module, MARKER, NEW_LINE_AS_BYTES, NOW_TIME
from ines.cleaner import clean_string
from ines.convert import make_sha256, maybe_integer, maybe_list, maybe_set, to_bytes
from ines.locks import LockMe, LockMeMemcached
from ines.utils import (
file_modified_time, get_file_binary, make_dir, make_uuid_hash, move_file, put_binary_on_file, remove_file_quietly)
class _SaveMe(object):
def get_binary(self, name, expire=MARKER):
pass
def put_binary(self, name, binary, mode='put', expire=MARKER):
pass
def __delitem__(self, name):
pass
def __contains__(self, name):
pass
def get_values(self, name, expire=MARKER):
try:
binary = self.get_binary(name, expire)
except KeyError:
return []
else:
return binary.splitlines()
def extend_values(self, name, values, expire=MARKER):
if not values:
raise ValueError('Define some values')
binary = NEW_LINE_AS_BYTES.join(map(to_bytes, values)) + NEW_LINE_AS_BYTES
self.put_binary(name, binary, mode='append', expire=expire)
def append_value(self, name, value, expire=MARKER):
self.extend_values(name, [value], expire=expire)
def replace_values(self, name, values, expire=MARKER):
values = maybe_list(values)
if not values:
self.remove(name)
else:
self.put_binary(
name,
                binary=NEW_LINE_AS_BYTES.join(map(to_bytes, values)) + NEW_LINE_AS_BYTES,
expire=expire)
def __getitem__(self, name):
value = self.get(name, default=MARKER)
if value is MARKER:
raise KeyError('Missing cache key "%s"' % name)
else:
return value
def __setitem__(self, name, info):
self.put(name, info)
def get(self, name, default=None, expire=MARKER):
try:
binary = self.get_binary(name, expire=expire)
except KeyError:
return default
else:
try:
value = pickle_loads(binary)
except EOFError:
                # Something went wrong! Delete the file to prevent further errors
self.remove(name)
raise
else:
return value
def put(self, name, info, expire=MARKER):
info = pickle_dumps(info)
self.put_binary(name, info, expire=expire)
def remove(self, name):
del self[name]
class SaveMe(_SaveMe):
def __init__(
self,
path,
expire=None,
retry_errno=None,
retries=3,
**lock_settings):
self.expire = maybe_integer(expire)
self.path = make_dir(path)
self.retries = maybe_integer(retries) or 3
self.retry_errno = maybe_set(retry_errno)
self.retry_errno.update(DEFAULT_RETRY_ERRNO)
# Lock settings
settings = {}
for key, value in list(lock_settings.items()):
if key.startswith('lock_'):
settings[key.split('lock_', 1)[1]] = value
lock_path = settings.pop('path', None) or join_paths(self.path, 'locks')
self.lockme = LockMe(lock_path, **settings)
def lock(self, *args, **kwargs):
return self.lockme.lock(*args, **kwargs)
def unlock(self, *args, **kwargs):
return self.lockme.unlock(*args, **kwargs)
@lru_cache(1000)
def get_file_path(self, name):
name_256 = make_sha256(name)
return join_paths(self.path, name_256[0], name_256)
def _contains(self, path, expire=MARKER):
if expire is MARKER:
expire = self.expire
if expire:
modified_time = file_modified_time(path)
if not modified_time:
return False
            if (modified_time + expire) < NOW_TIME():
self._delete_path(path)
return False
return isfile(path)
def __contains__(self, name):
return self._contains(self.get_file_path(name))
def _get_binary(self, path, expire=MARKER):
if expire is MARKER:
expire = self.expire
if expire:
modified_time = file_modified_time(path)
if not modified_time:
return None
elif (modified_time + expire) < NOW_TIME():
self._delete_path(path)
return None
return get_file_binary(path, mode='rb', retries=self.retries, retry_errno=self.retry_errno)
def get_binary(self, name, expire=MARKER):
binary = self._get_binary(self.get_file_path(name), expire)
if binary is None:
raise KeyError('Missing cache key "%s"' % name)
else:
return binary
def put_binary(self, name, binary, mode='put', expire=MARKER):
mode = 'ab' if mode == 'append' else 'wb'
put_binary_on_file(self.get_file_path(name), binary, mode, retries=self.retries, retry_errno=self.retry_errno)
def _delete_path(self, path):
remove_file_quietly(path, retries=self.retries, retry_errno=self.retry_errno)
def __delitem__(self, name):
file_path = self.get_file_path(name)
self._delete_path(file_path)
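# --- Illustrative usage sketch (not part of the original module) -----------
# SaveMe is a pickle-backed, file-based cache with dict-like access and an
# optional expiry (in seconds). The helper below is a hypothetical example
# only; it assumes a writable directory for the cache files.
def _example_saveme_usage(cache_dir):
    cache = SaveMe(cache_dir, expire=3600)
    cache['answer'] = {'value': 42}           # pickled to disk
    cached = cache.get('answer', default=None)
    exists = 'answer' in cache                # honours the expiry window
    cache.remove('answer')
    return cached, exists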
class SaveMeWithReference(SaveMe):
def __init__(self, *args, **kwargs):
super(SaveMeWithReference, self).__init__(*args, **kwargs)
self.reference_path = make_dir(join_paths(self.path, 'references'))
def put_binary(self, name, *args, **kwargs):
super(SaveMeWithReference, self).put_binary(name, *args, **kwargs)
self.put_reference(name)
def __delitem__(self, name):
super(SaveMeWithReference, self).__delitem__(name)
self.remove_reference(name)
def get_reference_path(self, name):
first_name = name.split(' ', 1)[0]
first_name_256 = make_sha256(first_name)
return join_paths(self.reference_path, first_name_256[0], first_name_256)
def _get_references(self, path, name):
references = set()
binary = self._get_binary(path, expire=None)
if binary is not None:
for saved_name in binary.splitlines():
if saved_name and saved_name.startswith(name):
references.add(saved_name)
return references
def get_references(self, name):
file_path = self.get_reference_path(name)
return self._get_references(file_path, name)
def put_reference(self, name):
if name not in self.get_references(name):
put_binary_on_file(
self.get_reference_path(name),
NEW_LINE_AS_BYTES.join([to_bytes(name), b'']),
mode='ab',
retries=self.retries,
retry_errno=self.retry_errno)
def remove_reference(self, name, expire=MARKER):
file_path = self.get_reference_path(name)
temporary_file_path = file_path + '.' + make_uuid_hash()
move_file(file_path, temporary_file_path, retries=self.retries, retry_errno=self.retry_errno)
references = self._get_references(temporary_file_path, name='')
if name in references:
references.remove(name)
        # Validate if references still exist
        if references:
            if expire is MARKER:
                # Don't use expire; we only need to know if the file exists
                expire = None
            # Iterate over a copy: removing entries from the set while
            # iterating over it directly would raise a RuntimeError
            for name in list(references):
                path = self.get_file_path(name)
                if not self._contains(path, expire=expire):
                    references.remove(name)
if references:
references = maybe_list(references)
references.append(NEW_LINE_AS_BYTES)
put_binary_on_file(
file_path,
binary=NEW_LINE_AS_BYTES.join(map(to_bytes, references)),
mode='ab',
retries=self.retries,
retry_errno=self.retry_errno)
self._delete_path(temporary_file_path)
def get_children(self, name, expire=MARKER):
result = {}
missing_reference = False
for reference in self.get_references(name):
value = self.get(reference, MARKER, expire=expire)
if value is not MARKER:
result[reference] = value
else:
missing_reference = True
if missing_reference:
self.remove_reference(name, expire)
return result
def remove_children(self, name):
for reference in self.get_references(name):
self.remove(reference)
def __contains__(self, name):
file_path = self.get_reference_path(name)
binary = self._get_binary(file_path, expire=None)
if binary is not None:
return name in binary.splitlines()
else:
return False
class SaveMeMemcached(_SaveMe):
def __init__(
self,
url,
expire=None,
**settings):
# Lock settings
lock_settings = {}
for key in list(settings.keys()):
if key.startswith('lock_'):
lock_settings[key.split('lock_', 1)[1]] = settings.pop(key)
lock_settings.update(settings)
self.memcache_module = lazy_import_module('memcache')
self.memcache = self.memcache_module.Client(url.split(';'), **settings)
self.expire = maybe_integer(expire)
self.lockme = LockMeMemcached(url, **lock_settings)
def lock(self, *args, **kwargs):
return self.lockme.lock(*args, **kwargs)
def unlock(self, *args, **kwargs):
return self.lockme.unlock(*args, **kwargs)
def format_name(self, name):
return to_bytes(make_sha256(name))
def __contains__(self, name):
return self.memcache.get(self.format_name(name)) is not None
def get_binary(self, name, expire=MARKER):
binary = self.memcache.get(self.format_name(name))
if binary is None:
raise KeyError('Missing cache key "%s"' % name)
else:
return binary
def put_binary(self, name, binary, mode='wb', expire=MARKER):
name_256 = self.format_name(name)
if expire is MARKER:
expire = self.expire
# Append to existing file
if mode == 'append' and name in self:
self.memcache.append(name_256, binary, time=expire or 0)
else:
self.memcache.set(name_256, binary, time=expire or 0)
def __delitem__(self, name):
self.memcache.delete(self.format_name(name))
class api_cache_decorator(object):
def __init__(self, expire_seconds=900):
self.cache_name = None
self.wrapper = None
self.expire_seconds = expire_seconds
self.father = None
self.children = []
def __call__(self, wrapped):
@wraps(wrapped)
def wrapper(cls, expire_cache=False, no_cache=False):
if expire_cache:
return self.expire(cls)
elif not no_cache:
cached = cls.config.cache.get(self.cache_name, default=MARKER, expire=self.expire_seconds)
if cached is not MARKER:
return cached
cached = wrapped(cls)
cls.config.cache.put(self.cache_name, cached, expire=self.expire_seconds)
return cached
self.cache_name = 'ines.api_cache_decorator %s %s' % (wrapped.__module__, wrapped.__qualname__)
self.wrapper = wrapper
return wrapper
def child(self, expire_seconds=MARKER):
if expire_seconds is MARKER:
expire_seconds = self.expire_seconds
new = api_cache_decorator(expire_seconds=expire_seconds)
new.father = self
self.children.append(new)
return new
def expire(self, api_session, expire_children=False, ignore_father=False):
if self.wrapper and self.cache_name:
if expire_children and self.children:
for child in self.children:
child.expire(api_session, ignore_father=True)
clear_paths = []
for app_session in api_session.applications.asdict().values():
cache_path = app_session.cache.path
if cache_path not in clear_paths:
clear_paths.append(cache_path)
app_session.cache.remove(self.cache_name)
if not ignore_father and self.father:
self.father.expire(api_session)
return True
return False
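# --- Illustrative usage sketch (not part of the original module) -----------
# api_cache_decorator is intended for API-session methods whose owner
# exposes ``cls.config.cache`` (a cache object like the ones above). The
# wrapped method gains ``expire_cache`` and ``no_cache`` keyword switches.
# Hypothetical example only:
#
#     class ExampleSession(object):
#         @api_cache_decorator(expire_seconds=300)
#         def list_countries(self):
#             return self.run_expensive_query()
#
#     session.list_countries()               # result cached for 5 minutes
#     session.list_countries(no_cache=True)  # bypass the cached value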
def clear_lock_key(key):
return clean_string(key).lower().strip().replace(' ', '')
def api_lock_decorator(prefix=None, args_indexes=None, kwargs_names=None, clear_keys_method=None):
def decorator(wrapped):
pre_name = prefix and prefix or 'ines.api_lock_decorator %s %s' % (wrapped.__module__, wrapped.__qualname__)
clear_method = clear_keys_method or clear_lock_key
@wraps(wrapped)
def wrapper(cls, *args, **kwargs):
names = []
if args_indexes:
for index in args_indexes:
names.append(clear_method(args[index]))
if kwargs_names:
for name in kwargs_names:
names.append(clear_method(kwargs[name]))
lock_name = names and ('%s %s' % (pre_name, ' '.join(names))) or pre_name
try:
cls.config.cache.lock(lock_name)
return wrapped(cls, *args, **kwargs)
finally:
cls.config.cache.unlock(lock_name)
return wrapper
return decorator
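# --- Illustrative usage sketch (not part of the original module) -----------
# api_lock_decorator serialises calls that resolve to the same lock key,
# which is built from the selected positional/keyword arguments.
# Hypothetical example only:
#
#     @api_lock_decorator(prefix='billing charge', kwargs_names=['account_id'])
#     def charge(cls, amount, account_id=None):
#         ...  # only one concurrent charge runs per account_id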
| {
"content_hash": "2135d15acb90c2efa7dfc236054cde2c",
"timestamp": "",
"source": "github",
"line_count": 413,
"max_line_length": 118,
"avg_line_length": 33.648910411622275,
"alnum_prop": 0.57832625746564,
"repo_name": "hugobranquinho/ines",
"id": "130c4a2701d373d802834da4494664c8cb65c2c7",
"size": "13922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ines/cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "435125"
}
],
"symlink_target": ""
} |
import sys
import requests
import json
import re
from requests.auth import HTTPBasicAuth
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Suppress the certificate warnings triggered by the verify=False requests below
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def get_wapiurl(niosip, niosuser, niospw):
# Determine the WAPI version and get the WAPI URL
docurl = 'https://%s/wapidoc/' % niosip
r = requests.get(docurl, verify=False)
version = re.search(r'^\s*VERSION:\s*\'(.*)\',', r.text, re.MULTILINE)
wapiversion = version.group(1)
wapiurl = 'https://%s/wapi/v%s/' % (niosip, wapiversion)
return wapiurl
def get_nextips(subnet, num, wapiurl, niosuser, niospw):
# Get the network object reference
joburl = wapiurl + 'network'
payload = {'network': subnet}
resp = requests.get(joburl, auth=HTTPBasicAuth(niosuser, niospw),verify=False,params=payload)
j = resp.json()
k = j[0]
netref = k['_ref']
# Get the requested number of IPs and return a list
joburl = wapiurl + netref + '?_function=next_available_ip'
payload = {'num': num}
resp = requests.post(joburl, auth=HTTPBasicAuth(niosuser, niospw),verify=False,params=payload)
j = resp.json()
k = j['ips']
return k
def get_nextnets(subnet, cidr, wapiurl, niosuser, niospw):
# Get the network object reference
joburl = wapiurl + 'networkcontainer'
payload = {'network': subnet}
resp = requests.get(joburl, auth=HTTPBasicAuth(niosuser, niospw),verify=False,params=payload)
j = resp.json()
k = j[0]
netref = k['_ref']
    # Get the next available networks of the requested size and return a list
joburl = wapiurl + netref + '?_function=next_available_network'
payload = {'cidr': cidr}
resp = requests.post(joburl, auth=HTTPBasicAuth(niosuser, niospw),verify=False,params=payload)
j = resp.json()
k = j['networks']
return k
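# --- Illustrative usage sketch (not part of the original module) -----------
# Typical call sequence: resolve the WAPI base URL once, then ask for the
# next free addresses in a subnet. The grid master IP, credentials and
# subnet below are made-up placeholder values.
def _example_next_ips():
    wapiurl = get_wapiurl('192.0.2.10', 'admin', 'infoblox')
    return get_nextips('10.0.0.0/24', 2, wapiurl, 'admin', 'infoblox')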
#def create_host(host, ip, wapiurl, niosuser, niospw):
# joburl = wapiurl + 'record:host'
# print joburl
# payload = '{"ipv4addrs": [{"ipv4addr": "10.138.0.2"}], "name": "test1.test.com"}'
# print payload
# resp = requests.post(joburl, auth=HTTPBasicAuth(niosuser, niospw),verify=False,params=payload)
# j = resp.json()
# return j
| {
"content_hash": "9378ab60989e207834aef1827199e50d",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 96,
"avg_line_length": 36.05357142857143,
"alnum_prop": 0.7102526002971769,
"repo_name": "brampling/infoblox-gcp-poc",
"id": "9c76ce2877687516845f7cd180ea9279d09d345a",
"size": "2019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "niosutils.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13399"
}
],
"symlink_target": ""
} |
"""Contains very basic functions
"""
import os
import shutil
import numpy as np
from pyproj import CRS, Transformer
def get_result_paths(folder_path):
"""Joins results subfolders to ``folder_path`` and returns a dict
Arguments
---------
folder_path : str
Path of scenario run results
Returns
-------
path_dict : dict
Dict with all sub_folder paths
"""
path_dict = {
'data_results': folder_path,
'data_results_model_run_pop': os.path.join(folder_path, 'model_run_pop'),
'data_results_validation': os.path.join(folder_path, 'PDF_validation'),
'data_results_model_run_results_txt': os.path.join(folder_path, 'model_run_results_txt'),
'data_results_PDF': os.path.join(folder_path, 'PDF_results')}
return path_dict
def dict_depth(dictionary):
"""Get depth of nested dict
"""
if isinstance(dictionary, dict):
return 1 + (max(map(dict_depth, dictionary.values())) if dictionary else 0)
return 0
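# Illustrative examples: dict_depth({'a': 1}) == 1,
# dict_depth({'sector': {0: 23}}) == 2 and dict_depth(5) == 0.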
def test_if_sector(dict_to_test, fuel_as_array=False):
"""Test if a dictionary contains also sector information
Arguments
---------
dict_to_test : dict
Dict with info to test
Returns
-------
crit_sector_data : bool
        Criterion indicating whether the dict is nested (i.e. contains sector information)
Example
-------
{0: 23, 1: 3434}} --> False
{'sector': {0: 23, 1: 3434}} --> True
"""
get_dict_depth = dict_depth(dict_to_test)
if fuel_as_array: # if given as array, one level less
if get_dict_depth == 1:
crit_sector_data = False
elif get_dict_depth == 2:
crit_sector_data = True
else:
if get_dict_depth == 2:
crit_sector_data = False
elif get_dict_depth == 3:
crit_sector_data = True
return crit_sector_data
def round_down(num, divisor):
"""Round down
"""
return num - (num%divisor)
def get_all_folders_files(path):
"""Return all folders and file names in a list
Input
-----
path : str
Path to folder
Returns
--------
all_folders : list
All folders in a folder
filenames : list
All file names in a list
"""
folders_walk = os.walk(path)
for root, dirnames, filenames in folders_walk:
all_folders = list(dirnames)
#all_files = list(filenames)
break
return all_folders #, all_files
def assign_array_to_dict(array_in, regions):
"""Convert array to dict with same order as region list
Input
-----
regions : list
List with specific order of regions
array_in : array
Data array with data like the order of the region list
Returns
-------
dict_out : dict
Dictionary of array_in
"""
dict_out = {}
for r_idx, region in enumerate(regions):
dict_out[region] = array_in[r_idx, 0]
return dict_out
def get_long_lat_decimal_degrees(reg_centroids):
"""Project coordinates from shapefile to get
decimal degrees (from OSGB_1936_British_National_Grid to
WGS 84 projection).
Arguments
---------
reg_centroids : dict
Centroid information read in from shapefile via smif
Return
-------
reg_coord : dict
        Contains longitude and latitude for every region in decimal degrees
Info
----
http://spatialreference.org/ref/epsg/wgs-84/
"""
reg_coord = {}
crs_4326 = CRS.from_epsg(4326) # WGS 84 projection
crs_27700 = CRS.from_epsg(27700) # OSGB_1936_British_National_Grid
t = Transformer.from_crs(crs_27700, crs_4326)
for centroid in reg_centroids:
# Convert to decimal degrees
long_dd, lat_dd = t.transform(
centroid['geometry']['coordinates'][0],
centroid['geometry']['coordinates'][1]
)
reg_coord[centroid['properties']['name']] = {}
reg_coord[centroid['properties']['name']]['latitude'] = lat_dd
reg_coord[centroid['properties']['name']]['longitude'] = long_dd
return reg_coord
def rmse(predictions, actual_values):
"""Root-mean-square deviation or
Root-mean-square-erro (RMSE) calculations
Arguments
----------
predictions : array
Model prediction (real value)
actual_values : array
Moodelled value
Returns
-------
rmse : array
root-mean-square deviation
Info
-----
Alternative way
from sklearn.metrics import mean_squared_error
from math import sqrt
rms = sqrt(mean_squared_error(y_actual, y_predicted))
"""
return np.sqrt(((predictions - actual_values) ** 2).mean())
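# Illustrative example: for predictions [2.0, 3.0] against actual values
# [1.0, 5.0] the squared errors are 1 and 4, so
# rmse(np.array([2.0, 3.0]), np.array([1.0, 5.0])) == np.sqrt(2.5)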
def array_to_dict(result_array, regions):
"""Convert an array with regions to dict
with region as key
Arguments
---------
result_array : array
Results in region_array
regions : list
List with all regions (order is the same)
Returns
--------
result_dict : dict
reg, value
"""
result_dict = {}
for reg_array_nr, region in enumerate(regions):
result_dict[region] = result_array[reg_array_nr]
return result_dict
def create_folder(path_folder, name_subfolder=None):
"""Creates folder or subfolder
Arguments
----------
    path_folder : str
        Path to folder
    name_subfolder : str, default=None
        Name of subfolder to create
"""
if not name_subfolder:
if not os.path.exists(path_folder):
os.makedirs(path_folder)
else:
        path_result_subfolder = os.path.join(path_folder, name_subfolder)
        if not os.path.exists(path_result_subfolder):
            os.makedirs(path_result_subfolder)
def delete_folder(path_folder):
"""Delete folder or subfolder
Arguments
----------
    path_folder : str
        Path to folder to delete
"""
if os.path.exists(path_folder):
shutil.rmtree(path_folder)
def del_previous_results(path_folder, path_subfolder_keep):
"""Delete all model results from previous model run. Do not
delete post installation setup files
Arguments
---------
path_folder : str
Path to results of model run
path_subfolder_keep : str
Path of subfolder which must not be deleted
"""
if os.path.exists(path_folder):
all_files_and_folders = os.listdir(path_folder)
# Iterate folders in data folders
for entry in all_files_and_folders:
path_subfolder = os.path.join(path_folder, entry)
            # Do not delete post-installation files
if path_subfolder != path_subfolder_keep:
shutil.rmtree(path_subfolder)
else:
pass
def del_previous_setup(path_folder):
"""Delete all model results from previous model run
Arguments
---------
path_folder : str
Path to results of model run
"""
if os.path.exists(path_folder):
shutil.rmtree(path_folder, ignore_errors=True)
else:
pass
def del_file(path_file):
"""Delete all model results from previous model run
Arguments
---------
path_folder : str
Path to results of model run
"""
if os.path.isfile(path_file):
os.remove(path_file)
else:
pass
def get_month_from_string(month_string):
"""Convert string month to int month with Jan == 1
Arguments
----------
month_string : str
Month given as a string
Returns
--------
month : int
        Month as an integer (Jan = 1, Dec = 12)
"""
if month_string == 'Jan':
month = 1
elif month_string == 'Feb':
month = 2
elif month_string == 'Mar':
month = 3
elif month_string == 'Apr':
month = 4
elif month_string == 'May':
month = 5
elif month_string == 'Jun':
month = 6
elif month_string == 'Jul':
month = 7
elif month_string == 'Aug':
month = 8
elif month_string == 'Sep':
month = 9
elif month_string == 'Oct':
month = 10
elif month_string == 'Nov':
month = 11
elif month_string == 'Dec':
month = 12
return int(month)
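# A more compact, behaviour-equivalent alternative (illustrative sketch;
# it raises ValueError on an unknown abbreviation instead of failing with
# an undefined variable):
#
#     MONTHS = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
#               'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
#     month = MONTHS.index(month_string) + 1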
def get_month_from_int(month_int):
"""Convert inger month to string month with Jan == 1
Arguments
---------
month_int : str
Month given as a integer
Returns
--------
month : int
Month as an integer (jan = 1, dez = 12)
"""
if month_int == 1:
month_str = 'Jan'
elif month_int == 2:
month_str = 'Feb'
elif month_int == 3:
month_str = 'Mar'
elif month_int == 4:
month_str = 'Apr'
elif month_int == 5:
month_str = 'May'
elif month_int == 6:
month_str = 'Jun'
elif month_int == 7:
month_str = 'Jul'
elif month_int == 8:
month_str = 'Aug'
elif month_int == 9:
month_str = 'Sep'
elif month_int == 10:
month_str = 'Oct'
elif month_int == 11:
month_str = 'Nov'
elif month_int == 12:
month_str = 'Dec'
return str(month_str)
def remove_element_from_list(input_list, element):
"""Remove element in list
Arguments
---------
input_list : list
List with elements
element : any
Element to remove
Returns
-------
list_new : list
List where element is removed
"""
list_new = []
for i in input_list:
        if i != element:
            list_new.append(i)
return list_new
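# Equivalent one-liner (illustrative): the loop above behaves the same as
#     list_new = [i for i in input_list if i != element]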
| {
"content_hash": "624912362d2a2d3b7a9fd3ce1b9111b8",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 97,
"avg_line_length": 24.484615384615385,
"alnum_prop": 0.578594617237407,
"repo_name": "nismod/energy_demand",
"id": "fafe13cdd0a1b4f5ab0b5ac73e21cb537ebf58c2",
"size": "9549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "energy_demand/basic/basic_functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1432899"
},
{
"name": "Shell",
"bytes": "2063"
}
],
"symlink_target": ""
} |
import unittest
from os.path import join
from unittest import TestCase
from devilry.utils.importutils import get_staticdir_from_appname
@unittest.skip('Is this in use? If so TestImportUtils should be fixed.')
class TestImportUtils(TestCase):
def test_get_staticdir_from_appname(self):
staticdir = get_staticdir_from_appname('test',
[(join('path', 'to'), None, 'something'),
(join('another', 'dir'), None, 'test')])
self.assertEqual(staticdir, join('another', 'dir', 'static', 'test'))
self.assertRaises(ValueError, get_staticdir_from_appname, 'test', [])
| {
"content_hash": "9bc9d2a6a2f8d408247b74b88910f120",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 88,
"avg_line_length": 45.4,
"alnum_prop": 0.6093979441997063,
"repo_name": "devilry/devilry-django",
"id": "2fad5f083970176a95054d43b9f53598ab6aa0df",
"size": "681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devilry/utils/tests/importutils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "513510"
},
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "421969"
},
{
"name": "JavaScript",
"bytes": "756713"
},
{
"name": "Less",
"bytes": "166670"
},
{
"name": "PLpgSQL",
"bytes": "397986"
},
{
"name": "Python",
"bytes": "6507968"
},
{
"name": "Shell",
"bytes": "10328"
}
],
"symlink_target": ""
} |
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.paged_cloud_integration import PagedCloudIntegration # noqa: E501
from wavefront_api_client.rest import ApiException
class TestPagedCloudIntegration(unittest.TestCase):
"""PagedCloudIntegration unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPagedCloudIntegration(self):
"""Test PagedCloudIntegration"""
# FIXME: construct object with mandatory attributes with example values
# model = wavefront_api_client.models.paged_cloud_integration.PagedCloudIntegration() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "154bd2b77ff2b95c9c0432a34599f942",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 409,
"avg_line_length": 34.81578947368421,
"alnum_prop": 0.7301587301587301,
"repo_name": "wavefrontHQ/python-client",
"id": "3c00d3aecccddce5db69db0546f17d8e7e819cb6",
"size": "1340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_paged_cloud_integration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4642252"
},
{
"name": "Shell",
"bytes": "3458"
}
],
"symlink_target": ""
} |
from pennyblack.models import Newsletter
from pennyblack.content.richtext import TextOnlyNewsletterContent, \
TextWithImageNewsletterContent
Newsletter.register_templates({
'key': 'base',
'title': 'Generic Newsletter',
'path': 'pennyblack/base_newsletter.html',
'regions': (
('main', 'Main Region'),
),
})
Newsletter.create_content_type(TextOnlyNewsletterContent)
Newsletter.create_content_type(TextWithImageNewsletterContent)
| {
"content_hash": "1677837e09a1c3ed435f319cfad645b3",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 68,
"avg_line_length": 31.266666666666666,
"alnum_prop": 0.7334754797441365,
"repo_name": "nickburlett/pennyblack",
"id": "da062e0f04ac48c080d560ae1dd693cd088efe20",
"size": "469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "20283"
},
{
"name": "Python",
"bytes": "90654"
}
],
"symlink_target": ""
} |
""" Dataframe optimizations """
from __future__ import absolute_import, division, print_function
from .io import dataframe_from_ctable
from ..optimize import cull, fuse_getitem, fuse_selections
from .. import core
def fuse_castra_index(dsk):
from castra import Castra
def merge(a, b):
return (Castra.load_index, b[1], b[2]) if a[2] == 'index' else a
return fuse_selections(dsk, getattr, Castra.load_partition, merge)
def optimize(dsk, keys, **kwargs):
if isinstance(keys, list):
dsk2, dependencies = cull(dsk, list(core.flatten(keys)))
else:
dsk2, dependencies = cull(dsk, [keys])
try:
from castra import Castra
dsk3 = fuse_getitem(dsk2, Castra.load_partition, 3)
dsk4 = fuse_castra_index(dsk3)
except ImportError:
dsk4 = dsk2
dsk5 = fuse_getitem(dsk4, dataframe_from_ctable, 3)
dsk6, _ = cull(dsk5, keys)
return dsk6
| {
"content_hash": "9481b4189968c5599f09ab86efcce40f",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 72,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.657608695652174,
"repo_name": "jeffery-do/Vizdoombot",
"id": "24980def973ddd0f6d493e34e3e708342210a0e9",
"size": "920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doom/lib/python3.5/site-packages/dask/dataframe/optimize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "465717"
},
{
"name": "C++",
"bytes": "219269"
},
{
"name": "CSS",
"bytes": "7132"
},
{
"name": "Cuda",
"bytes": "232079"
},
{
"name": "FORTRAN",
"bytes": "9868"
},
{
"name": "HTML",
"bytes": "7089"
},
{
"name": "JavaScript",
"bytes": "23881"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "37513702"
},
{
"name": "Shell",
"bytes": "3838"
}
],
"symlink_target": ""
} |
import os
import glob
import sys
import platform
import subprocess
import difflib
import filecmp
from optparse import OptionParser
#
# Get standard testsuite test arguments: srcdir exepath
#
srcdir = "."
tmpdir = "."
path = "../.."
# Options for the command line
parser = OptionParser()
parser.add_option("-p", "--path", help="add to executable path",
action="store", type="string", dest="path", default="")
parser.add_option("--devenv-config", help="use a MS Visual Studio configuration",
action="store", type="string", dest="devenv_config", default="")
parser.add_option("--solution-path", help="MS Visual Studio solution path",
action="store", type="string", dest="solution_path", default="")
(options, args) = parser.parse_args()
if args and len(args) > 0 :
srcdir = args[0]
srcdir = os.path.abspath (srcdir) + "/"
os.chdir (srcdir)
if args and len(args) > 1 :
path = args[1]
path = os.path.normpath (path)
tmpdir = "."
tmpdir = os.path.abspath (tmpdir)
refdir = "ref/"
parent = "../../../../../"
command = ""
outputs = [ "out.txt" ] # default
failureok = 0
failthresh = 0.004
hardfail = 0.01
failpercent = 0.02
#print ("srcdir = " + srcdir)
#print ("tmpdir = " + tmpdir)
#print ("path = " + path)
#print ("refdir = " + refdir)
###########################################################################
# Handy functions...
# Compare two text files. Returns 0 if they are equal otherwise returns
# a non-zero value and writes the differences to "diff_file".
# Based on the command-line interface to difflib example from the Python
# documentation
def text_diff (fromfile, tofile, diff_file=None):
import time
try:
fromdate = time.ctime (os.stat (fromfile).st_mtime)
todate = time.ctime (os.stat (tofile).st_mtime)
fromlines = open (fromfile, 'rU').readlines()
tolines = open (tofile, 'rU').readlines()
except:
print ("Unexpected error:", sys.exc_info()[0])
return -1
diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile,
fromdate, todate)
# Diff is a generator, but since we need a way to tell if it is
# empty we just store all the text in advance
diff_lines = [l for l in diff]
if not diff_lines:
return 0
if diff_file:
try:
open (diff_file, 'w').writelines (diff_lines)
except:
print ("Unexpected error:", sys.exc_info()[0])
return 1
def oiio_relpath (path, start=os.curdir):
"Wrapper around os.path.relpath which always uses '/' as the separator."
p = os.path.relpath (path, start)
return p if sys.platform != "win32" else p.replace ('\\', '/')
def oiio_app (app):
# When we use Visual Studio, built applications are stored
# in the app/$(OutDir)/ directory, e.g., Release or Debug.
if (platform.system () != 'Windows' or options.devenv_config == ""):
return os.path.join (path, "src", app, app) + " "
else:
return os.path.join (path, "src", app, options.devenv_config, app) + " "
# Construct a command that will print info for an image, appending output to
# the file "out.txt". If 'safematch' is nonzero, it will exclude printing
# of fields that tend to change from run to run or release to release.
def info_command (file, extraargs="", safematch=0, hash=True) :
if safematch :
extraargs += " --no-metamatch \"DateTime|Software|OriginatingProgram|ImageHistory\""
if hash :
extraargs += " --hash"
return (oiio_app("oiiotool") + "--info -v -a " + extraargs
+ " " + oiio_relpath(file,tmpdir) + " >> out.txt ;\n")
# Construct a command that will compare two images, appending output to
# the file "out.txt". We allow a small number of pixels to have up to
# 1 LSB (8 bit) error, it's very hard to make different platforms and
# compilers always match to every last floating point bit.
def diff_command (fileA, fileB, extraargs="", silent=False, concat=True) :
command = (oiio_app("idiff") + "-a"
+ " -fail " + str(failthresh)
+ " -failpercent " + str(failpercent)
+ " -hardfail " + str(hardfail)
+ " -warn " + str(2*failthresh)
+ " " + extraargs + " " + oiio_relpath(fileA,tmpdir)
+ " " + oiio_relpath(fileB,tmpdir))
if not silent :
command += " >> out.txt"
if concat:
command += " ;\n"
return command
# Construct a command that will create a texture, appending console
# output to the file "out.txt".
def maketx_command (infile, outfile, extraargs="",
showinfo=False, showinfo_extra="",
silent=False, concat=True) :
command = (oiio_app("maketx")
+ " " + oiio_relpath(infile,tmpdir)
+ " " + extraargs
+ " -o " + oiio_relpath(outfile,tmpdir) )
if not silent :
command += " >> out.txt"
if concat:
command += " ;\n"
if showinfo:
command += info_command (outfile, extraargs=showinfo_extra, safematch=1)
return command
# Construct a command that will test the basic ability to read and write
# an image, appending output to the file "out.txt". First, iinfo the
# file, including a hash (VERY unlikely not to match if we've read
# correctly). If testwrite is nonzero, also iconvert the file to make a
# copy (tests writing that format), and then idiff to make sure it
# matches the original.
def rw_command (dir, filename, testwrite=1, use_oiiotool=0, extraargs="",
preargs="", idiffextraargs="") :
fn = oiio_relpath (dir + "/" + filename, tmpdir)
cmd = (oiio_app("oiiotool") + " --info -v -a --hash " + fn
+ " >> out.txt ;\n")
if testwrite :
if use_oiiotool :
cmd = (cmd + oiio_app("oiiotool") + preargs + " " + fn
+ " " + extraargs + " -o " + filename + " >> out.txt ;\n")
else :
cmd = (cmd + oiio_app("iconvert") + preargs + " " + fn
+ " " + extraargs + " " + filename + " >> out.txt ;\n")
cmd = (cmd + oiio_app("idiff") + " -a " + fn
+ " -fail " + str(failthresh)
+ " -failpercent " + str(failpercent)
+ " -hardfail " + str(hardfail)
+ " -warn " + str(2*failthresh)
+ " " + idiffextraargs + " " + filename + " >> out.txt ;\n")
return cmd
# Construct a command that will testtex
def testtex_command (file, extraargs="") :
cmd = (oiio_app("testtex") + " " + file + " " + extraargs + " " +
" >> out.txt ;\n")
return cmd
# Construct a command that will run oiiotool and append its output to out.txt
def oiiotool (args, silent=False, concat=True) :
cmd = (oiio_app("oiiotool") + " " + args)
if not silent :
cmd += " >> out.txt"
if concat:
cmd += " ;\n"
return cmd
# Run 'command'. For each file in 'outputs', compare it to the copy
# in 'ref/'. If all outputs match their reference copies, return 0
# to pass. If any outputs do not match their references return 1 to
# fail.
def runtest (command, outputs, failureok=0) :
# print ("working dir = " + tmpdir)
os.chdir (srcdir)
open ("out.txt", "w").close() # truncate out.txt
if options.path != "" :
sys.path = [options.path] + sys.path
print ("command = " + command)
test_environ = None
if (platform.system () == 'Windows') and (options.solution_path != "") and \
(os.path.isdir (options.solution_path)):
test_environ = os.environ
libOIIO_args = [options.solution_path, "libOpenImageIO"]
if options.devenv_config != "":
libOIIO_args.append (options.devenv_config)
libOIIO_path = os.path.normpath (os.path.join (*libOIIO_args))
test_environ["PATH"] = libOIIO_path + ';' + test_environ["PATH"]
for sub_command in [c.strip() for c in command.split(';') if c.strip()]:
cmdret = subprocess.call (sub_command, shell=True, env=test_environ)
if cmdret != 0 and failureok == 0 :
print ("#### Error: this command failed: ", sub_command)
print ("FAIL")
return (1)
err = 0
for out in outputs :
extension = os.path.splitext(out)[1]
ok = 0
# We will first compare out to ref/out, and if that fails, we
# will compare it to everything else with the same extension in
# the ref directory. That allows us to have multiple matching
# variants for different platforms, etc.
for testfile in (["ref/"+out] + glob.glob (os.path.join ("ref", "*"+extension))) :
# print ("comparing " + out + " to " + testfile)
if extension == ".tif" or extension == ".exr" or extension == ".jpg" or extension == ".png":
# images -- use idiff
cmpcommand = diff_command (out, testfile, concat=False)
# print ("cmpcommand = " + cmpcommand)
cmpresult = os.system (cmpcommand)
elif extension == ".txt" :
cmpresult = text_diff (out, testfile, out + ".diff")
else :
# anything else
cmpresult = 0 if filecmp.cmp (out, testfile) else 1
if cmpresult == 0 :
print ("PASS: " + out + " matches " + testfile)
ok = 1
break # we're done
if ok == 0:
err = 1
print ("NO MATCH for " + out)
print ("FAIL " + out)
return (err)
##########################################################################
#
# Read the individual run.py file for this test, which will define
# command and outputs.
#
with open("run.py") as f:
code = compile(f.read(), "run.py", 'exec')
exec (code)
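# For reference, a minimal run.py only needs to define 'command' (and may
# override 'outputs', 'failureok', etc.). Hypothetical example:
#
#     command = oiiotool ("--create 64x64 3 -d uint8 -o black.tif")
#     command += info_command ("black.tif", safematch=1)
#     outputs = [ "out.txt" ]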
# Run the test and check the outputs
ret = runtest (command, outputs, failureok=failureok)
sys.exit (ret)
| {
"content_hash": "c4ddab543f52d2212fa6428ccce9b4b4",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 104,
"avg_line_length": 35.910071942446045,
"alnum_prop": 0.5721726935790844,
"repo_name": "scott-wilson/oiio",
"id": "501ac76d3d222b6b1d55247d22023d96693d0e3f",
"size": "10006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testsuite/runtest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "22617"
},
{
"name": "C++",
"bytes": "5086549"
},
{
"name": "CMake",
"bytes": "130011"
},
{
"name": "Makefile",
"bytes": "16116"
},
{
"name": "Objective-C",
"bytes": "70215"
},
{
"name": "Python",
"bytes": "149408"
},
{
"name": "Shell",
"bytes": "3367"
},
{
"name": "TeX",
"bytes": "743150"
}
],
"symlink_target": ""
} |
from myhvac_web.myhvac_service import factory
import logging
LOG = logging.getLogger(__name__)
def get_system_state():
service = factory.get_service_module()
return service.get_system_state() | {
"content_hash": "4231bfc8c1488e9a235a01483df88ba4",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 45,
"avg_line_length": 18.545454545454547,
"alnum_prop": 0.7303921568627451,
"repo_name": "alanquillin/myhvac_web",
"id": "288593f0aba24ae9c8c87fd652c84f0ef2820b60",
"size": "206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myhvac_web/myhvac_service/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7251"
},
{
"name": "JavaScript",
"bytes": "37472"
},
{
"name": "Python",
"bytes": "2043"
}
],
"symlink_target": ""
} |
"""Test parsing a reset action."""
import io
import textwrap
from launch.actions import ResetLaunchConfigurations, SetLaunchConfiguration
from launch.frontend import Parser
from launch.launch_context import LaunchContext
def test_reset():
yaml_file = \
"""\
launch:
- let:
name: 'foo'
value: 'FOO'
- let:
name: 'bar'
value: 'BAR'
- reset:
keep:
- name: 'bar'
value: $(var bar)
- name: 'baz'
value: 'BAZ'
""" # noqa: E501
print('Load YAML')
yaml_file = textwrap.dedent(yaml_file)
print('Load Parser')
root_entity, parser = Parser.load(io.StringIO(yaml_file))
print('Parse Description')
ld = parser.parse_description(root_entity)
assert isinstance(ld.entities[0], SetLaunchConfiguration)
assert isinstance(ld.entities[1], SetLaunchConfiguration)
assert isinstance(ld.entities[2], ResetLaunchConfigurations)
lc = LaunchContext()
assert len(lc.launch_configurations) == 0
ld.entities[0].visit(lc)
ld.entities[1].visit(lc)
assert len(lc.launch_configurations) == 2
assert 'foo' in lc.launch_configurations.keys()
assert lc.launch_configurations['foo'] == 'FOO'
assert 'bar' in lc.launch_configurations.keys()
assert lc.launch_configurations['bar'] == 'BAR'
ld.entities[2].visit(lc)
assert 'foo' not in lc.launch_configurations.keys()
assert 'bar' in lc.launch_configurations.keys()
assert lc.launch_configurations['bar'] == 'BAR'
assert 'baz' in lc.launch_configurations.keys()
assert lc.launch_configurations['baz'] == 'BAZ'
if __name__ == '__main__':
test_reset()
| {
"content_hash": "73816e6bf0794e57e4a1e86357b31d87",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 76,
"avg_line_length": 31.68421052631579,
"alnum_prop": 0.5991140642303433,
"repo_name": "ros2/launch",
"id": "27eb260da5bcc53d6793d6daf39a78e40ff274ad",
"size": "2408",
"binary": false,
"copies": "1",
"ref": "refs/heads/rolling",
"path": "launch_yaml/test/launch_yaml/test_reset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "857"
},
{
"name": "C++",
"bytes": "1468"
},
{
"name": "CMake",
"bytes": "8807"
},
{
"name": "Makefile",
"bytes": "607"
},
{
"name": "Python",
"bytes": "1063971"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
} |
import sys
import os
import numpy as np
from scipy import interpolate
import matplotlib.pyplot as plt
from cage.landscape import LandscapeAnalyzer
from cage.core import Facet
"""
Plot the landscape of connected edges. This is some damn illegible code, but
improving it would require thinking, and ain't nobody got time for that.
"""
# PARAMETERS  TODO: hard-coded parameters -> find a better way to set these
CATION='Na'
START_FACET = 0 # 0 or 1 -> determines facet to start chain from
ANGLE_MESH = 0.003
RADIUS_MESH = 0.01
CONTOUR_LEVELS = np.mgrid[0.1:2:0.1]
E_LIMITS = (0, 1.4)
# Input
# TODO Make a proper parser
if len(sys.argv) == 3:
CATION = sys.argv[1]
else:
CATION = 'Li'
dirname = sys.argv[-1]
dir_list = [os.path.abspath(dir) for dir in os.listdir(dirname)
if os.path.isdir(dir)]
# TODO Make this part ignore directories that do not have the right files
edges = [[LandscapeAnalyzer.from_file(os.path.join(dir, 'landscape.json')),
[Facet.from_file(os.path.join(dir, 'init_facet.json')),
Facet.from_file(os.path.join(dir, 'final_facet.json'))]]
for dir in dir_list]
for edge in edges:
edge[0].analyze_cation_energies(facet=edge[1][0],cation=CATION)
# TODO Make landscapes and paths connected via dictionary
# Find all the facets in the paths
facets = []
for edge in edges:
for facet in edge[1]:
if facet not in facets:
facets.append(facet)
# Find end_facets
end_facets = []
for edge in edges:
for facet in edge[1]:
if facet not in end_facets:
end_facets.append(facet)
else:
end_facets.remove(facet)
# Check if the chain is connected
if len(end_facets) != 2:
raise ValueError('Edges are not connected in a chain. Aborting...')
# TODO Handle case of circular connection of paths
# Find the starting path
facet_chain = [end_facets[START_FACET], ]
path_chain = []
landscape_chain = []
while len(facet_chain) < len(facets):
last_facet = facet_chain[-1]
print('Last Facet:')
print(str(last_facet))
# Find the path that connects to the last facet and is not already in the
# path chain
for edge in edges:
if edge[1] not in path_chain and last_facet in edge[1]:
other_facet = edge[1].copy()
other_facet.remove(last_facet)
other_facet = other_facet[0]
print('Other Facet:')
print(str(other_facet))
facet_chain.append(other_facet)
if edge[1][0] != last_facet:
print('Flipped path (before):')
print(str(edge[1][0]))
print(str(edge[1][1]))
edge[1].reverse()
print('Flipped path (after):')
print(str(edge[1][0]))
print(str(edge[1][1]))
edge[0].flip_coordinates('Angle')
landscape_chain.append(edge[0])
path_chain.append(edge[1])
print('')
print('----------------')
print('Facet Chain:')
for facet in facet_chain:
print(str(facet))
print('----------------')
print('Path Chain:')
for edge in path_chain:
print('From')
print(str(edge[0]))
print('To')
print(str(edge[1]))
# Interpolate the landscapes to a uniform mesh
# Find the proper radii
min_max_radius = 1e6
max_min_radius = 0
for landscape in landscape_chain:
rmax = landscape.datapoints['Distance'].max()
if rmax < min_max_radius:
min_max_radius = rmax
rmin = landscape.datapoints['Distance'].min()
if rmin > max_min_radius:
max_min_radius = rmin
print('-----------')
print('Largest minimal radius = ' + str(max_min_radius))
print('Smallest maximal radius = ' + str(min_max_radius))
# Adjust the angles to make one angle coordinate for all edges
facet_angles = [0, landscape_chain[0].datapoints['Angle'].max()]
for landscape in landscape_chain[1:]:
print('Maximum angle = ' + str(facet_angles[-1]))
landscape.datapoints['Angle'] += facet_angles[-1]
facet_angles.append(landscape.datapoints['Angle'].max())
all_radii = []
all_angles = []
all_energy = []
# Interpolate the landscapes
for landscape in landscape_chain:
data = landscape.datapoints
data['Distance'] = np.round(data['Distance'], 5)
data = np.sort(data, order=['Distance', 'Angle'])
# Find the number of radii and angles
r_init = data['Distance'][0]
nangles = 1
while abs(data['Distance'][nangles] - r_init) < 1e-5:
nangles += 1
nradii = int(len(data) / nangles)
print('')
print('-----------')
print('Number of Angles = ' + str(nangles))
print('Number of Radii = ' + str(nradii))
# Get the right format for the data
radii = data['Distance'].reshape(nradii, nangles) # [::nradii]
angles = data['Angle'].reshape(nradii, nangles) # [:nangles]
energy = data['Energy'].reshape(nradii, nangles)
print('Shape angles: ' + str(angles.shape))
print('Shape radii: ' + str(radii.shape))
print('Shape energy: ' + str(energy.shape))
new_angles, new_radii = np.mgrid[ angles.min():angles.max():ANGLE_MESH,
max_min_radius:min_max_radius:RADIUS_MESH ]
print('-------------')
print('Shape new_angles: ' + str(new_angles.shape))
print('Shape new_radii: ' + str(new_radii.shape))
tck = interpolate.bisplrep(angles, radii, energy, s=0.01)
new_energy = interpolate.bisplev(new_angles[:,0], new_radii[0,:], tck)
all_radii.append(new_radii)
all_angles.append(new_angles)
all_energy.append(new_energy)
total_radii = np.concatenate(tuple(all_radii))
total_angles = np.concatenate(tuple(all_angles))
total_energy = np.concatenate(tuple(all_energy))
total_energy -= total_energy.min()
plt.figure()
plt.pcolor(total_angles, total_radii, total_energy, vmin=E_LIMITS[0],
vmax=E_LIMITS[1], cmap='viridis')
cbar = plt.colorbar()
cbar.set_label('Energy (eV)', size='x-large')
CS = plt.contour(total_angles, total_radii, total_energy, colors='black',
levels=CONTOUR_LEVELS, linewidths=0.6)
for angle in facet_angles:
plt.plot([angle, angle], [total_radii.min(), total_radii.max()], color='k',
linestyle='-', linewidth=1)
xlabel = []
for i in range(len(facet_angles)):
xlabel.append('$\Omega_' + str(i+1) + '$')
#plt.xlabel('Angle', size='large')
plt.ylabel('$r$ ($\mathrm{\AA}$)', size='x-large', fontname='Georgia')
plt.xticks(facet_angles, xlabel, size='x-large')
plt.clabel(CS, fontsize=10, inline_spacing=15, fmt='%1.1f', manual=True)
plt.show()
| {
"content_hash": "195c6a2d53f2ce9f1f873014f64e3732",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 79,
"avg_line_length": 30.530516431924884,
"alnum_prop": 0.6295555897278179,
"repo_name": "mbercx/cage",
"id": "7d04810df7709cd210358cc135b571a8614174f9",
"size": "6522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cage/scripts/edgelandscape.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "242454"
}
],
"symlink_target": ""
} |
"""
@brief test log(time=1s)
"""
import unittest
import inspect
import ast
from pyquickhelper.loghelper import fLOG
from pysqllike.translation.node_visitor_translator import CodeNodeVisitor
from pysqllike.translation.translation_class import TranslateClass
from pysqllike.translation.translation_to_python import Translate2Python
from pysqllike.translation.code_exception import CodeException
def myjob(input):
iter = input.select(input.age, input.nom, age2=input.age * input.age)
wher = iter.where((iter.age > 60).Or(iter.age < 25))
return wher
class TestCode (unittest.TestCase):
def test_tree_job(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
code = inspect.getsource(myjob)
node = ast.parse(code)
stack = [(0, node)]
while len(stack) > 0:
ind, n = stack[-1]
del stack[-1]
att = {name: ch
for name, ch in ast.iter_fields(n)} # pylint: disable=R1721
fLOG(" " * ind, type(n), att.get("name", "--"), att)
for ch in ast.iter_child_nodes(n):
stack.append((ind + 1, ch))
def test_translation(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
code = inspect.getsource(myjob)
node = ast.parse(code)
v = CodeNodeVisitor()
v.visit(node)
assert len(v.Rows) >= 27
def test_translate_class(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
trans = TranslateClass(myjob)
# fLOG(trans)
s = str(trans)
trad = trans.to_str()
assert "60" in trad
assert "Gt" in trad
assert len(s) > 0
assert "input.age2" not in s
assert "input.age" in s
def test_translate_class_code(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
trans = TranslateClass(myjob)
try:
trans.Code()
assert False
except CodeException as e:
assert "not implemented" in str(e)
def test_translate_2_python(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
trans = Translate2Python(myjob)
code = trans.Code()
assert "def myjob(input)" in code
assert "60" in code
if __name__ == "__main__":
unittest.main()
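def _ast_walk_sketch(source):
    """Illustrative sketch, not part of pysqllike: the manual stack walk in
    test_tree_job expressed with the standard ast.NodeVisitor API. It only
    mirrors the idea behind CodeNodeVisitor, not its implementation."""
    class _Printer(ast.NodeVisitor):
        def __init__(self):
            self.depth = 0
        def generic_visit(self, node):
            print("  " * self.depth + type(node).__name__)
            self.depth += 1
            super().generic_visit(node)
            self.depth -= 1
    _Printer().visit(ast.parse(source))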
| {
"content_hash": "2f6d8a909fdfad9cd08205da60bf2b4a",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 29.07777777777778,
"alnum_prop": 0.5475735575085976,
"repo_name": "sdpython/pysqllike",
"id": "3ca60c7d3eaf08ccb7f0af718503864dfb1498e1",
"size": "2617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_unittests/ut_code/test_code.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "382"
},
{
"name": "Jupyter Notebook",
"bytes": "11155"
},
{
"name": "Python",
"bytes": "120742"
}
],
"symlink_target": ""
} |
class ButtonElement:
def __init__(self, type, title, url=None, payload=None):
# TODO: raise an exception if 'url' is provided with type='postback' and vice versa
self.json = {k: v for k, v in locals().items() if v is not None}
del self.json['self']
for k, v in self.json.items():
if hasattr(v, 'json'):
self.json[k] = v.json
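def _button_element_sketch():
    """Illustrative sketch, not part of the original module: the dict that
    ButtonElement assembles from its constructor arguments via locals(),
    dropping None values and 'self'. The field values are made up."""
    btn = ButtonElement('web_url', 'Open site', url='https://example.com')
    assert btn.json == {'type': 'web_url', 'title': 'Open site',
                        'url': 'https://example.com'}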
| {
"content_hash": "62e3887fd0802f018828be252812f5ee",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 91,
"avg_line_length": 48.625,
"alnum_prop": 0.570694087403599,
"repo_name": "Elishanto/HarryBotter",
"id": "2c1b7340f14d0bdb76a1241f42affd47922003ac",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdk/button/element.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24440"
}
],
"symlink_target": ""
} |
import copy
import datetime
import mock
import pytest
from elastalert.ruletypes import AnyRule
from elastalert.ruletypes import BaseAggregationRule
from elastalert.ruletypes import BlacklistRule
from elastalert.ruletypes import CardinalityRule
from elastalert.ruletypes import ChangeRule
from elastalert.ruletypes import EventWindow
from elastalert.ruletypes import FlatlineRule
from elastalert.ruletypes import FrequencyRule
from elastalert.ruletypes import MetricAggregationRule
from elastalert.ruletypes import NewTermsRule
from elastalert.ruletypes import PercentageMatchRule
from elastalert.ruletypes import SpikeRule
from elastalert.ruletypes import WhitelistRule
from elastalert.util import EAException
from elastalert.util import ts_now
from elastalert.util import ts_to_dt
def hits(size, **kwargs):
ret = []
for n in range(size):
        ts = ts_to_dt('2014-09-26T12:%s:%sZ' % (n // 60, n % 60))
n += 1
event = create_event(ts, **kwargs)
ret.append(event)
return ret
def create_event(timestamp, timestamp_field='@timestamp', **kwargs):
event = {timestamp_field: timestamp}
event.update(**kwargs)
return event
def create_bucket_aggregation(agg_name, buckets):
agg = {agg_name: {'buckets': buckets}}
return agg
def create_percentage_match_agg(match_count, other_count):
agg = create_bucket_aggregation('percentage_match_aggs', {'match_bucket': {'doc_count': match_count}, '_other_': {'doc_count': other_count}})
return agg
def assert_matches_have(matches, terms):
assert len(matches) == len(terms)
for match, term in zip(matches, terms):
assert term[0] in match
assert match[term[0]] == term[1]
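def _helpers_sketch():
    """Illustrative sketch, not part of the original suite: how the helpers
    above compose. The username value is made up."""
    ev = create_event(ts_to_dt('2014-09-26T12:00:00Z'), username='qlo')
    assert ev['@timestamp'] == ts_to_dt('2014-09-26T12:00:00Z')
    assert ev['username'] == 'qlo'
    assert_matches_have([ev], [('username', 'qlo')])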
def test_any():
event = hits(1)
rule = AnyRule({})
rule.add_data([event])
assert rule.matches == [event]
def test_freq():
events = hits(60, timestamp_field='blah', username='qlo')
rules = {'num_events': 59,
'timeframe': datetime.timedelta(hours=1),
'timestamp_field': 'blah'}
rule = FrequencyRule(rules)
rule.add_data(events)
assert len(rule.matches) == 1
    # Test with query_key
events = hits(60, timestamp_field='blah', username='qlo')
rules['query_key'] = 'username'
rule = FrequencyRule(rules)
rule.add_data(events)
assert len(rule.matches) == 1
# Doesn't match
events = hits(60, timestamp_field='blah', username='qlo')
rules['num_events'] = 61
rule = FrequencyRule(rules)
rule.add_data(events)
assert len(rule.matches) == 0
# garbage collection
assert 'qlo' in rule.occurrences
rule.garbage_collect(ts_to_dt('2014-09-28T12:0:0'))
assert rule.occurrences == {}
def test_freq_count():
rules = {'num_events': 100,
'timeframe': datetime.timedelta(hours=1),
'use_count_query': True}
# Normal match
rule = FrequencyRule(rules)
rule.add_count_data({ts_to_dt('2014-10-10T00:00:00'): 75})
assert len(rule.matches) == 0
rule.add_count_data({ts_to_dt('2014-10-10T00:15:00'): 10})
assert len(rule.matches) == 0
rule.add_count_data({ts_to_dt('2014-10-10T00:25:00'): 10})
assert len(rule.matches) == 0
rule.add_count_data({ts_to_dt('2014-10-10T00:45:00'): 6})
assert len(rule.matches) == 1
# First data goes out of timeframe first
rule = FrequencyRule(rules)
rule.add_count_data({ts_to_dt('2014-10-10T00:00:00'): 75})
assert len(rule.matches) == 0
rule.add_count_data({ts_to_dt('2014-10-10T00:45:00'): 10})
assert len(rule.matches) == 0
rule.add_count_data({ts_to_dt('2014-10-10T00:55:00'): 10})
assert len(rule.matches) == 0
rule.add_count_data({ts_to_dt('2014-10-10T01:05:00'): 6})
assert len(rule.matches) == 0
rule.add_count_data({ts_to_dt('2014-10-10T01:00:00'): 75})
assert len(rule.matches) == 1
def test_freq_out_of_order():
events = hits(60, timestamp_field='blah', username='qlo')
rules = {'num_events': 59,
'timeframe': datetime.timedelta(hours=1),
'timestamp_field': 'blah'}
rule = FrequencyRule(rules)
rule.add_data(events[:10])
assert len(rule.matches) == 0
# Try to add events from before the first occurrence
rule.add_data([{'blah': ts_to_dt('2014-09-26T11:00:00'), 'username': 'qlo'}] * 50)
assert len(rule.matches) == 0
rule.add_data(events[15:20])
assert len(rule.matches) == 0
rule.add_data(events[10:15])
assert len(rule.matches) == 0
rule.add_data(events[20:55])
rule.add_data(events[57:])
assert len(rule.matches) == 0
rule.add_data(events[55:57])
assert len(rule.matches) == 1
def test_freq_terms():
rules = {'num_events': 10,
'timeframe': datetime.timedelta(hours=1),
'query_key': 'username'}
rule = FrequencyRule(rules)
terms1 = {ts_to_dt('2014-01-01T00:01:00Z'): [{'key': 'userA', 'doc_count': 1},
{'key': 'userB', 'doc_count': 5}]}
terms2 = {ts_to_dt('2014-01-01T00:10:00Z'): [{'key': 'userA', 'doc_count': 8},
{'key': 'userB', 'doc_count': 5}]}
terms3 = {ts_to_dt('2014-01-01T00:25:00Z'): [{'key': 'userA', 'doc_count': 3},
{'key': 'userB', 'doc_count': 0}]}
# Initial data
rule.add_terms_data(terms1)
assert len(rule.matches) == 0
# Match for user B
rule.add_terms_data(terms2)
assert len(rule.matches) == 1
assert rule.matches[0].get('username') == 'userB'
# Match for user A
rule.add_terms_data(terms3)
assert len(rule.matches) == 2
assert rule.matches[1].get('username') == 'userA'
def test_eventwindow():
timeframe = datetime.timedelta(minutes=10)
window = EventWindow(timeframe)
timestamps = [ts_to_dt(x) for x in ['2014-01-01T10:00:00',
'2014-01-01T10:05:00',
'2014-01-01T10:03:00',
'2014-01-01T09:55:00',
'2014-01-01T10:09:00']]
for ts in timestamps:
window.append([{'@timestamp': ts}, 1])
timestamps.sort()
for exp, actual in zip(timestamps[1:], window.data):
assert actual[0]['@timestamp'] == exp
window.append([{'@timestamp': ts_to_dt('2014-01-01T10:14:00')}, 1])
timestamps.append(ts_to_dt('2014-01-01T10:14:00'))
for exp, actual in zip(timestamps[3:], window.data):
assert actual[0]['@timestamp'] == exp
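def _sliding_window_sketch():
    """Illustrative sketch, not the elastalert implementation: a minimal
    time-based sliding window analogous to what the EventWindow test above
    exercises. Only events within `timeframe` of the newest one are kept."""
    from collections import deque
    timeframe = datetime.timedelta(minutes=10)
    window = deque()
    for ts in sorted(ts_to_dt(x) for x in ['2014-01-01T10:00:00',
                                           '2014-01-01T10:05:00',
                                           '2014-01-01T10:14:00']):
        window.append(ts)
        while window and window[-1] - window[0] > timeframe:
            window.popleft()
    return list(window)  # the 10:05 and 10:14 events remain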
def test_spike_count():
rules = {'threshold_ref': 10,
'spike_height': 2,
'timeframe': datetime.timedelta(seconds=10),
'spike_type': 'both',
'timestamp_field': '@timestamp'}
rule = SpikeRule(rules)
# Double rate of events at 20 seconds
rule.add_count_data({ts_to_dt('2014-09-26T00:00:00'): 10})
assert len(rule.matches) == 0
rule.add_count_data({ts_to_dt('2014-09-26T00:00:10'): 10})
assert len(rule.matches) == 0
rule.add_count_data({ts_to_dt('2014-09-26T00:00:20'): 20})
assert len(rule.matches) == 1
# Downward spike
rule = SpikeRule(rules)
rule.add_count_data({ts_to_dt('2014-09-26T00:00:00'): 10})
assert len(rule.matches) == 0
rule.add_count_data({ts_to_dt('2014-09-26T00:00:10'): 10})
assert len(rule.matches) == 0
rule.add_count_data({ts_to_dt('2014-09-26T00:00:20'): 0})
assert len(rule.matches) == 1
def test_spike_deep_key():
rules = {'threshold_ref': 10,
'spike_height': 2,
'timeframe': datetime.timedelta(seconds=10),
'spike_type': 'both',
'timestamp_field': '@timestamp',
'query_key': 'foo.bar.baz'}
rule = SpikeRule(rules)
rule.add_data([{'@timestamp': ts_to_dt('2015'), 'foo': {'bar': {'baz': 'LOL'}}}])
assert 'LOL' in rule.cur_windows
def test_spike():
# Events are 1 per second
events = hits(100, timestamp_field='ts')
# Constant rate, doesn't match
rules = {'threshold_ref': 10,
'spike_height': 2,
'timeframe': datetime.timedelta(seconds=10),
'spike_type': 'both',
'use_count_query': False,
'timestamp_field': 'ts'}
rule = SpikeRule(rules)
rule.add_data(events)
assert len(rule.matches) == 0
# Double the rate of events after [50:]
events2 = events[:50]
for event in events[50:]:
events2.append(event)
events2.append({'ts': event['ts'] + datetime.timedelta(milliseconds=1)})
rules['spike_type'] = 'up'
rule = SpikeRule(rules)
rule.add_data(events2)
assert len(rule.matches) == 1
# Doesn't match
rules['spike_height'] = 3
rule = SpikeRule(rules)
rule.add_data(events2)
assert len(rule.matches) == 0
# Downward spike
events = events[:50] + events[75:]
rules['spike_type'] = 'down'
rule = SpikeRule(rules)
rule.add_data(events)
assert len(rule.matches) == 1
# Doesn't meet threshold_ref
# When ref hits 11, cur is only 20
rules['spike_height'] = 2
rules['threshold_ref'] = 11
rules['spike_type'] = 'up'
rule = SpikeRule(rules)
rule.add_data(events2)
assert len(rule.matches) == 0
# Doesn't meet threshold_cur
# Maximum rate of events is 20 per 10 seconds
rules['threshold_ref'] = 10
rules['threshold_cur'] = 30
rule = SpikeRule(rules)
rule.add_data(events2)
assert len(rule.matches) == 0
# Alert on new data
# (At least 25 events occur before 30 seconds has elapsed)
rules.pop('threshold_ref')
rules['timeframe'] = datetime.timedelta(seconds=30)
rules['threshold_cur'] = 25
rules['spike_height'] = 2
rules['alert_on_new_data'] = True
rule = SpikeRule(rules)
rule.add_data(events2)
assert len(rule.matches) == 1
def test_spike_query_key():
events = hits(100, timestamp_field='ts', username='qlo')
# Constant rate, doesn't match
rules = {'threshold_ref': 10,
'spike_height': 2,
'timeframe': datetime.timedelta(seconds=10),
'spike_type': 'both',
'use_count_query': False,
'timestamp_field': 'ts',
'query_key': 'username'}
rule = SpikeRule(rules)
rule.add_data(events)
assert len(rule.matches) == 0
    # Double the rate of events, but with a different username
events_bob = hits(100, timestamp_field='ts', username='bob')
events2 = events[:50]
for num in range(50, 99):
events2.append(events_bob[num])
events2.append(events[num])
rule = SpikeRule(rules)
rule.add_data(events2)
assert len(rule.matches) == 0
# Double the rate of events, with the same username
events2 = events[:50]
for num in range(50, 99):
events2.append(events_bob[num])
events2.append(events[num])
events2.append(events[num])
rule = SpikeRule(rules)
rule.add_data(events2)
assert len(rule.matches) == 1
def test_spike_terms():
rules = {'threshold_ref': 5,
'spike_height': 2,
'timeframe': datetime.timedelta(minutes=10),
'spike_type': 'both',
'use_count_query': False,
'timestamp_field': 'ts',
'query_key': 'username',
'use_term_query': True}
terms1 = {ts_to_dt('2014-01-01T00:01:00Z'): [{'key': 'userA', 'doc_count': 10},
{'key': 'userB', 'doc_count': 5}]}
terms2 = {ts_to_dt('2014-01-01T00:10:00Z'): [{'key': 'userA', 'doc_count': 22},
{'key': 'userB', 'doc_count': 5}]}
terms3 = {ts_to_dt('2014-01-01T00:25:00Z'): [{'key': 'userA', 'doc_count': 25},
{'key': 'userB', 'doc_count': 27}]}
terms4 = {ts_to_dt('2014-01-01T00:27:00Z'): [{'key': 'userA', 'doc_count': 10},
{'key': 'userB', 'doc_count': 12},
{'key': 'userC', 'doc_count': 100}]}
terms5 = {ts_to_dt('2014-01-01T00:30:00Z'): [{'key': 'userD', 'doc_count': 100},
{'key': 'userC', 'doc_count': 100}]}
rule = SpikeRule(rules)
# Initial input
rule.add_terms_data(terms1)
assert len(rule.matches) == 0
# No spike for UserA because windows not filled
rule.add_terms_data(terms2)
assert len(rule.matches) == 0
# Spike for userB only
rule.add_terms_data(terms3)
assert len(rule.matches) == 1
assert rule.matches[0].get('username') == 'userB'
# Test no alert for new user over threshold
rules.pop('threshold_ref')
rules['threshold_cur'] = 50
rule = SpikeRule(rules)
rule.add_terms_data(terms1)
rule.add_terms_data(terms2)
rule.add_terms_data(terms3)
rule.add_terms_data(terms4)
assert len(rule.matches) == 0
# Test alert_on_new_data
rules['alert_on_new_data'] = True
rule = SpikeRule(rules)
rule.add_terms_data(terms1)
rule.add_terms_data(terms2)
rule.add_terms_data(terms3)
rule.add_terms_data(terms4)
assert len(rule.matches) == 1
# Test that another alert doesn't fire immediately for userC but it does for userD
rule.matches = []
rule.add_terms_data(terms5)
assert len(rule.matches) == 1
assert rule.matches[0]['username'] == 'userD'
def test_blacklist():
events = [{'@timestamp': ts_to_dt('2014-09-26T12:34:56Z'), 'term': 'good'},
{'@timestamp': ts_to_dt('2014-09-26T12:34:57Z'), 'term': 'bad'},
{'@timestamp': ts_to_dt('2014-09-26T12:34:58Z'), 'term': 'also good'},
{'@timestamp': ts_to_dt('2014-09-26T12:34:59Z'), 'term': 'really bad'},
{'@timestamp': ts_to_dt('2014-09-26T12:35:00Z'), 'no_term': 'bad'}]
rules = {'blacklist': ['bad', 'really bad'],
'compare_key': 'term',
'timestamp_field': '@timestamp'}
rule = BlacklistRule(rules)
rule.add_data(events)
assert_matches_have(rule.matches, [('term', 'bad'), ('term', 'really bad')])
def test_whitelist():
events = [{'@timestamp': ts_to_dt('2014-09-26T12:34:56Z'), 'term': 'good'},
{'@timestamp': ts_to_dt('2014-09-26T12:34:57Z'), 'term': 'bad'},
{'@timestamp': ts_to_dt('2014-09-26T12:34:58Z'), 'term': 'also good'},
{'@timestamp': ts_to_dt('2014-09-26T12:34:59Z'), 'term': 'really bad'},
{'@timestamp': ts_to_dt('2014-09-26T12:35:00Z'), 'no_term': 'bad'}]
rules = {'whitelist': ['good', 'also good'],
'compare_key': 'term',
'ignore_null': True,
'timestamp_field': '@timestamp'}
rule = WhitelistRule(rules)
rule.add_data(events)
assert_matches_have(rule.matches, [('term', 'bad'), ('term', 'really bad')])
def test_whitelist_dont_ignore_nulls():
events = [{'@timestamp': ts_to_dt('2014-09-26T12:34:56Z'), 'term': 'good'},
{'@timestamp': ts_to_dt('2014-09-26T12:34:57Z'), 'term': 'bad'},
{'@timestamp': ts_to_dt('2014-09-26T12:34:58Z'), 'term': 'also good'},
{'@timestamp': ts_to_dt('2014-09-26T12:34:59Z'), 'term': 'really bad'},
{'@timestamp': ts_to_dt('2014-09-26T12:35:00Z'), 'no_term': 'bad'}]
rules = {'whitelist': ['good', 'also good'],
'compare_key': 'term',
'ignore_null': True,
'timestamp_field': '@timestamp'}
rules['ignore_null'] = False
rule = WhitelistRule(rules)
rule.add_data(events)
assert_matches_have(rule.matches, [('term', 'bad'), ('term', 'really bad'), ('no_term', 'bad')])
def test_change():
events = hits(10, username='qlo', term='good')
events[8].pop('term')
events[9]['term'] = 'bad'
rules = {'compare_key': 'term',
'query_key': 'username',
'ignore_null': True,
'timestamp_field': '@timestamp'}
rule = ChangeRule(rules)
rule.add_data(events)
assert_matches_have(rule.matches, [('term', 'bad')])
# Unhashable QK
events2 = hits(10, username=['qlo'], term='good')
events2[9]['term'] = 'bad'
rule = ChangeRule(rules)
rule.add_data(events2)
assert_matches_have(rule.matches, [('term', 'bad')])
# Don't ignore nulls
rules['ignore_null'] = False
rule = ChangeRule(rules)
rule.add_data(events)
assert_matches_have(rule.matches, [('username', 'qlo'), ('term', 'bad')])
# With timeframe
rules['timeframe'] = datetime.timedelta(seconds=2)
rules['ignore_null'] = True
rule = ChangeRule(rules)
rule.add_data(events)
assert_matches_have(rule.matches, [('term', 'bad')])
# With timeframe, doesn't match
events = events[:8] + events[9:]
rules['timeframe'] = datetime.timedelta(seconds=1)
rule = ChangeRule(rules)
rule.add_data(events)
assert rule.matches == []
def test_new_term():
rules = {'fields': ['a', 'b'],
'timestamp_field': '@timestamp',
'es_host': 'example.com', 'es_port': 10, 'index': 'logstash'}
mock_res = {'aggregations': {'filtered': {'values': {'buckets': [{'key': 'key1', 'doc_count': 1},
{'key': 'key2', 'doc_count': 5}]}}}}
with mock.patch('elastalert.ruletypes.elasticsearch_client') as mock_es:
mock_es.return_value = mock.Mock()
mock_es.return_value.search.return_value = mock_res
mock_es.return_value.info.return_value = {'version': {'number': '2.x.x'}}
call_args = []
        # search is called with a mutable dict containing timestamps; record deep copies so the time ranges can be checked below
def record_args(*args, **kwargs):
call_args.append((copy.deepcopy(args), copy.deepcopy(kwargs)))
return mock_res
mock_es.return_value.search.side_effect = record_args
rule = NewTermsRule(rules)
# 30 day default range, 1 day default step, times 2 fields
assert rule.es.search.call_count == 60
# Assert that all calls have the proper ordering of time ranges
old_ts = '2010-01-01T00:00:00Z'
old_field = ''
for call in call_args:
field = call[1]['body']['aggs']['filtered']['aggs']['values']['terms']['field']
if old_field != field:
old_field = field
old_ts = '2010-01-01T00:00:00Z'
gte = call[1]['body']['aggs']['filtered']['filter']['bool']['must'][0]['range']['@timestamp']['gte']
assert gte > old_ts
lt = call[1]['body']['aggs']['filtered']['filter']['bool']['must'][0]['range']['@timestamp']['lt']
assert lt > gte
old_ts = gte
# Key1 and key2 shouldn't cause a match
rule.add_data([{'@timestamp': ts_now(), 'a': 'key1', 'b': 'key2'}])
assert rule.matches == []
# Neither will missing values
rule.add_data([{'@timestamp': ts_now(), 'a': 'key2'}])
assert rule.matches == []
# Key3 causes an alert for field b
rule.add_data([{'@timestamp': ts_now(), 'a': 'key2', 'b': 'key3'}])
assert len(rule.matches) == 1
assert rule.matches[0]['new_field'] == 'b'
assert rule.matches[0]['b'] == 'key3'
rule.matches = []
# Key3 doesn't cause another alert for field b
rule.add_data([{'@timestamp': ts_now(), 'a': 'key2', 'b': 'key3'}])
assert rule.matches == []
# Missing_field
rules['alert_on_missing_field'] = True
with mock.patch('elastalert.ruletypes.elasticsearch_client') as mock_es:
mock_es.return_value = mock.Mock()
mock_es.return_value.search.return_value = mock_res
mock_es.return_value.info.return_value = {'version': {'number': '2.x.x'}}
rule = NewTermsRule(rules)
rule.add_data([{'@timestamp': ts_now(), 'a': 'key2'}])
assert len(rule.matches) == 1
assert rule.matches[0]['missing_field'] == 'b'
def test_new_term_nested_field():
rules = {'fields': ['a', 'b.c'],
'timestamp_field': '@timestamp',
'es_host': 'example.com', 'es_port': 10, 'index': 'logstash'}
mock_res = {'aggregations': {'filtered': {'values': {'buckets': [{'key': 'key1', 'doc_count': 1},
{'key': 'key2', 'doc_count': 5}]}}}}
with mock.patch('elastalert.ruletypes.elasticsearch_client') as mock_es:
mock_es.return_value = mock.Mock()
mock_es.return_value.search.return_value = mock_res
mock_es.return_value.info.return_value = {'version': {'number': '2.x.x'}}
rule = NewTermsRule(rules)
assert rule.es.search.call_count == 60
# Key3 causes an alert for nested field b.c
rule.add_data([{'@timestamp': ts_now(), 'b': {'c': 'key3'}}])
assert len(rule.matches) == 1
assert rule.matches[0]['new_field'] == 'b.c'
assert rule.matches[0]['b']['c'] == 'key3'
rule.matches = []
def test_new_term_with_terms():
rules = {'fields': ['a'],
'timestamp_field': '@timestamp',
'es_host': 'example.com', 'es_port': 10, 'index': 'logstash', 'query_key': 'a',
'window_step_size': {'days': 2}}
mock_res = {'aggregations': {'filtered': {'values': {'buckets': [{'key': 'key1', 'doc_count': 1},
{'key': 'key2', 'doc_count': 5}]}}}}
with mock.patch('elastalert.ruletypes.elasticsearch_client') as mock_es:
mock_es.return_value = mock.Mock()
mock_es.return_value.search.return_value = mock_res
mock_es.return_value.info.return_value = {'version': {'number': '2.x.x'}}
rule = NewTermsRule(rules)
# Only 15 queries because of custom step size
assert rule.es.search.call_count == 15
# Key1 and key2 shouldn't cause a match
terms = {ts_now(): [{'key': 'key1', 'doc_count': 1},
{'key': 'key2', 'doc_count': 1}]}
rule.add_terms_data(terms)
assert rule.matches == []
# Key3 causes an alert for field a
terms = {ts_now(): [{'key': 'key3', 'doc_count': 1}]}
rule.add_terms_data(terms)
assert len(rule.matches) == 1
assert rule.matches[0]['new_field'] == 'a'
assert rule.matches[0]['a'] == 'key3'
rule.matches = []
# Key3 doesn't cause another alert
terms = {ts_now(): [{'key': 'key3', 'doc_count': 1}]}
rule.add_terms_data(terms)
assert rule.matches == []
def test_new_term_with_composite_fields():
rules = {'fields': [['a', 'b', 'c'], ['d', 'e.f']],
'timestamp_field': '@timestamp',
'es_host': 'example.com', 'es_port': 10, 'index': 'logstash'}
mock_res = {
'aggregations': {
'filtered': {
'values': {
'buckets': [
{
'key': 'key1',
'doc_count': 5,
'values': {
'buckets': [
{
'key': 'key2',
'doc_count': 5,
'values': {
'buckets': [
{
'key': 'key3',
'doc_count': 3,
},
{
'key': 'key4',
'doc_count': 2,
},
]
}
}
]
}
}
]
}
}
}
}
with mock.patch('elastalert.ruletypes.elasticsearch_client') as mock_es:
mock_es.return_value = mock.Mock()
mock_es.return_value.search.return_value = mock_res
mock_es.return_value.info.return_value = {'version': {'number': '2.x.x'}}
rule = NewTermsRule(rules)
assert rule.es.search.call_count == 60
# key3 already exists, and thus shouldn't cause a match
rule.add_data([{'@timestamp': ts_now(), 'a': 'key1', 'b': 'key2', 'c': 'key3'}])
assert rule.matches == []
# key5 causes an alert for composite field [a, b, c]
rule.add_data([{'@timestamp': ts_now(), 'a': 'key1', 'b': 'key2', 'c': 'key5'}])
assert len(rule.matches) == 1
assert rule.matches[0]['new_field'] == ('a', 'b', 'c')
assert rule.matches[0]['a'] == 'key1'
assert rule.matches[0]['b'] == 'key2'
assert rule.matches[0]['c'] == 'key5'
rule.matches = []
# New values in other fields that are not part of the composite key should not cause an alert
rule.add_data([{'@timestamp': ts_now(), 'a': 'key1', 'b': 'key2', 'c': 'key4', 'd': 'unrelated_value'}])
assert len(rule.matches) == 0
rule.matches = []
# Verify nested fields work properly
# Key6 causes an alert for nested field e.f
rule.add_data([{'@timestamp': ts_now(), 'd': 'key4', 'e': {'f': 'key6'}}])
assert len(rule.matches) == 1
assert rule.matches[0]['new_field'] == ('d', 'e.f')
assert rule.matches[0]['d'] == 'key4'
assert rule.matches[0]['e']['f'] == 'key6'
rule.matches = []
# Missing_fields
rules['alert_on_missing_field'] = True
with mock.patch('elastalert.ruletypes.elasticsearch_client') as mock_es:
mock_es.return_value = mock.Mock()
mock_es.return_value.search.return_value = mock_res
mock_es.return_value.info.return_value = {'version': {'number': '2.x.x'}}
rule = NewTermsRule(rules)
rule.add_data([{'@timestamp': ts_now(), 'a': 'key2'}])
assert len(rule.matches) == 2
        # The event is missing at least one field from each composite key, so both report missing_field
assert rule.matches[0]['missing_field'] == ('a', 'b', 'c')
assert rule.matches[1]['missing_field'] == ('d', 'e.f')
def test_flatline():
events = hits(40)
rules = {
'timeframe': datetime.timedelta(seconds=30),
'threshold': 2,
'timestamp_field': '@timestamp',
}
rule = FlatlineRule(rules)
    # A single hit is below the threshold, so an alert should only fire once at least 30 seconds have passed
rule.add_data(hits(1))
assert rule.matches == []
# Add hits with timestamps 2014-09-26T12:00:00 --> 2014-09-26T12:00:09
rule.add_data(events[0:10])
# This will be run at the end of the hits
rule.garbage_collect(ts_to_dt('2014-09-26T12:00:11Z'))
assert rule.matches == []
# This would be run if the query returned nothing for a future timestamp
rule.garbage_collect(ts_to_dt('2014-09-26T12:00:45Z'))
assert len(rule.matches) == 1
# After another garbage collection, since there are still no events, a new match is added
rule.garbage_collect(ts_to_dt('2014-09-26T12:00:50Z'))
assert len(rule.matches) == 2
# Add hits with timestamps 2014-09-26T12:00:30 --> 2014-09-26T12:00:39
rule.add_data(events[30:])
    # Now that there is data in the last 30 seconds, no more matches should be added
rule.garbage_collect(ts_to_dt('2014-09-26T12:00:55Z'))
assert len(rule.matches) == 2
# After that window passes with no more data, a new match is added
rule.garbage_collect(ts_to_dt('2014-09-26T12:01:11Z'))
assert len(rule.matches) == 3
def test_flatline_no_data():
rules = {
'timeframe': datetime.timedelta(seconds=30),
'threshold': 2,
'timestamp_field': '@timestamp',
}
rule = FlatlineRule(rules)
# Initial lack of data
rule.garbage_collect(ts_to_dt('2014-09-26T12:00:00Z'))
assert len(rule.matches) == 0
# Passed the timeframe, still no events
rule.garbage_collect(ts_to_dt('2014-09-26T12:35:00Z'))
assert len(rule.matches) == 1
def test_flatline_count():
rules = {'timeframe': datetime.timedelta(seconds=30),
'threshold': 1,
'timestamp_field': '@timestamp'}
rule = FlatlineRule(rules)
rule.add_count_data({ts_to_dt('2014-10-11T00:00:00'): 1})
rule.garbage_collect(ts_to_dt('2014-10-11T00:00:10'))
assert len(rule.matches) == 0
rule.add_count_data({ts_to_dt('2014-10-11T00:00:15'): 0})
rule.garbage_collect(ts_to_dt('2014-10-11T00:00:20'))
assert len(rule.matches) == 0
rule.add_count_data({ts_to_dt('2014-10-11T00:00:35'): 0})
assert len(rule.matches) == 1
def test_flatline_query_key():
rules = {'timeframe': datetime.timedelta(seconds=30),
'threshold': 1,
'use_query_key': True,
'query_key': 'qk',
'timestamp_field': '@timestamp'}
rule = FlatlineRule(rules)
# Adding two separate query keys, the flatline rule should trigger for both
rule.add_data(hits(1, qk='key1'))
rule.add_data(hits(1, qk='key2'))
rule.add_data(hits(1, qk='key3'))
assert rule.matches == []
# This will be run at the end of the hits
rule.garbage_collect(ts_to_dt('2014-09-26T12:00:11Z'))
assert rule.matches == []
# Add new data from key3. It will not immediately cause an alert
rule.add_data([create_event(ts_to_dt('2014-09-26T12:00:20Z'), qk='key3')])
# key1 and key2 have not had any new data, so they will trigger the flatline alert
timestamp = '2014-09-26T12:00:45Z'
rule.garbage_collect(ts_to_dt(timestamp))
assert len(rule.matches) == 2
assert set(['key1', 'key2']) == set([m['key'] for m in rule.matches if m['@timestamp'] == timestamp])
# Next time the rule runs, all 3 keys still have no data, so all three will cause an alert
timestamp = '2014-09-26T12:01:20Z'
rule.garbage_collect(ts_to_dt(timestamp))
assert len(rule.matches) == 5
assert set(['key1', 'key2', 'key3']) == set([m['key'] for m in rule.matches if m['@timestamp'] == timestamp])
def test_cardinality_max():
rules = {'max_cardinality': 4,
'timeframe': datetime.timedelta(minutes=10),
'cardinality_field': 'user',
'timestamp_field': '@timestamp'}
rule = CardinalityRule(rules)
# Add 4 different usernames
users = ['bill', 'coach', 'zoey', 'louis']
for user in users:
event = {'@timestamp': datetime.datetime.now(), 'user': user}
rule.add_data([event])
assert len(rule.matches) == 0
rule.garbage_collect(datetime.datetime.now())
# Add a duplicate, stay at 4 cardinality
event = {'@timestamp': datetime.datetime.now(), 'user': 'coach'}
rule.add_data([event])
rule.garbage_collect(datetime.datetime.now())
assert len(rule.matches) == 0
# Next unique will trigger
event = {'@timestamp': datetime.datetime.now(), 'user': 'francis'}
rule.add_data([event])
rule.garbage_collect(datetime.datetime.now())
assert len(rule.matches) == 1
rule.matches = []
# 15 minutes later, adding more will not trigger an alert
users = ['nick', 'rochelle', 'ellis']
for user in users:
event = {'@timestamp': datetime.datetime.now() + datetime.timedelta(minutes=15), 'user': user}
rule.add_data([event])
assert len(rule.matches) == 0
def test_cardinality_min():
rules = {'min_cardinality': 4,
'timeframe': datetime.timedelta(minutes=10),
'cardinality_field': 'user',
'timestamp_field': '@timestamp'}
rule = CardinalityRule(rules)
# Add 2 different usernames, no alert because time hasn't elapsed
users = ['foo', 'bar']
for user in users:
event = {'@timestamp': datetime.datetime.now(), 'user': user}
rule.add_data([event])
assert len(rule.matches) == 0
rule.garbage_collect(datetime.datetime.now())
    # Add 3 more unique at t+5 mins
users = ['faz', 'fuz', 'fiz']
for user in users:
event = {'@timestamp': datetime.datetime.now() + datetime.timedelta(minutes=5), 'user': user}
rule.add_data([event])
rule.garbage_collect(datetime.datetime.now() + datetime.timedelta(minutes=5))
assert len(rule.matches) == 0
# Adding the same one again at T+15 causes an alert
user = 'faz'
event = {'@timestamp': datetime.datetime.now() + datetime.timedelta(minutes=15), 'user': user}
rule.add_data([event])
rule.garbage_collect(datetime.datetime.now() + datetime.timedelta(minutes=15))
assert len(rule.matches) == 1
def test_cardinality_qk():
rules = {'max_cardinality': 2,
'timeframe': datetime.timedelta(minutes=10),
'cardinality_field': 'foo',
'timestamp_field': '@timestamp',
'query_key': 'user'}
rule = CardinalityRule(rules)
# Add 3 different usernames, one value each
users = ['foo', 'bar', 'baz']
for user in users:
event = {'@timestamp': datetime.datetime.now(), 'user': user, 'foo': 'foo' + user}
rule.add_data([event])
assert len(rule.matches) == 0
rule.garbage_collect(datetime.datetime.now())
# Add 2 more unique for "baz", one alert per value
values = ['faz', 'fuz', 'fiz']
for value in values:
event = {'@timestamp': datetime.datetime.now() + datetime.timedelta(minutes=5), 'user': 'baz', 'foo': value}
rule.add_data([event])
rule.garbage_collect(datetime.datetime.now() + datetime.timedelta(minutes=5))
assert len(rule.matches) == 2
assert rule.matches[0]['user'] == 'baz'
assert rule.matches[1]['user'] == 'baz'
assert rule.matches[0]['foo'] == 'fuz'
assert rule.matches[1]['foo'] == 'fiz'
def test_cardinality_nested_cardinality_field():
rules = {'max_cardinality': 4,
'timeframe': datetime.timedelta(minutes=10),
'cardinality_field': 'd.ip',
'timestamp_field': '@timestamp'}
rule = CardinalityRule(rules)
# Add 4 different IPs
ips = ['10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.4']
for ip in ips:
event = {'@timestamp': datetime.datetime.now(), 'd': {'ip': ip}}
rule.add_data([event])
assert len(rule.matches) == 0
rule.garbage_collect(datetime.datetime.now())
# Add a duplicate, stay at 4 cardinality
event = {'@timestamp': datetime.datetime.now(), 'd': {'ip': '10.0.0.4'}}
rule.add_data([event])
rule.garbage_collect(datetime.datetime.now())
assert len(rule.matches) == 0
# Add an event with no IP, stay at 4 cardinality
event = {'@timestamp': datetime.datetime.now()}
rule.add_data([event])
rule.garbage_collect(datetime.datetime.now())
assert len(rule.matches) == 0
# Next unique will trigger
event = {'@timestamp': datetime.datetime.now(), 'd': {'ip': '10.0.0.5'}}
rule.add_data([event])
rule.garbage_collect(datetime.datetime.now())
assert len(rule.matches) == 1
rule.matches = []
# 15 minutes later, adding more will not trigger an alert
ips = ['10.0.0.6', '10.0.0.7', '10.0.0.8']
for ip in ips:
event = {'@timestamp': datetime.datetime.now() + datetime.timedelta(minutes=15), 'd': {'ip': ip}}
rule.add_data([event])
assert len(rule.matches) == 0
def test_base_aggregation_constructor():
rules = {'bucket_interval_timedelta': datetime.timedelta(seconds=10),
'buffer_time': datetime.timedelta(minutes=1),
'timestamp_field': '@timestamp'}
# Test time period constructor logic
rules['bucket_interval'] = {'seconds': 10}
rule = BaseAggregationRule(rules)
assert rule.rules['bucket_interval_period'] == '10s'
rules['bucket_interval'] = {'minutes': 5}
rule = BaseAggregationRule(rules)
assert rule.rules['bucket_interval_period'] == '5m'
rules['bucket_interval'] = {'hours': 4}
rule = BaseAggregationRule(rules)
assert rule.rules['bucket_interval_period'] == '4h'
rules['bucket_interval'] = {'days': 2}
rule = BaseAggregationRule(rules)
assert rule.rules['bucket_interval_period'] == '2d'
rules['bucket_interval'] = {'weeks': 1}
rule = BaseAggregationRule(rules)
assert rule.rules['bucket_interval_period'] == '1w'
# buffer_time evenly divisible by bucket_interval
with pytest.raises(EAException):
rules['bucket_interval_timedelta'] = datetime.timedelta(seconds=13)
rule = BaseAggregationRule(rules)
# run_every evenly divisible by bucket_interval
rules['use_run_every_query_size'] = True
rules['run_every'] = datetime.timedelta(minutes=2)
rules['bucket_interval_timedelta'] = datetime.timedelta(seconds=10)
rule = BaseAggregationRule(rules)
with pytest.raises(EAException):
rules['bucket_interval_timedelta'] = datetime.timedelta(seconds=13)
rule = BaseAggregationRule(rules)
def test_base_aggregation_payloads():
with mock.patch.object(BaseAggregationRule, 'check_matches', return_value=None) as mock_check_matches:
rules = {'bucket_interval': {'seconds': 10},
'bucket_interval_timedelta': datetime.timedelta(seconds=10),
'buffer_time': datetime.timedelta(minutes=5),
'timestamp_field': '@timestamp'}
timestamp = datetime.datetime.now()
interval_agg = create_bucket_aggregation('interval_aggs', [{'key_as_string': '2014-01-01T00:00:00Z'}])
rule = BaseAggregationRule(rules)
# Payload not wrapped
rule.add_aggregation_data({timestamp: {}})
mock_check_matches.assert_called_once_with(timestamp, None, {})
mock_check_matches.reset_mock()
# Payload wrapped by date_histogram
interval_agg_data = {timestamp: interval_agg}
rule.add_aggregation_data(interval_agg_data)
mock_check_matches.assert_called_once_with(ts_to_dt('2014-01-01T00:00:00Z'), None, {'key_as_string': '2014-01-01T00:00:00Z'})
mock_check_matches.reset_mock()
# Payload wrapped by terms
bucket_agg_data = {timestamp: create_bucket_aggregation('bucket_aggs', [{'key': 'qk'}])}
rule.add_aggregation_data(bucket_agg_data)
mock_check_matches.assert_called_once_with(timestamp, 'qk', {'key': 'qk'})
mock_check_matches.reset_mock()
# Payload wrapped by terms and date_histogram
bucket_interval_agg_data = {timestamp: create_bucket_aggregation('bucket_aggs', [{'key': 'qk', 'interval_aggs': interval_agg['interval_aggs']}])}
rule.add_aggregation_data(bucket_interval_agg_data)
mock_check_matches.assert_called_once_with(ts_to_dt('2014-01-01T00:00:00Z'), 'qk', {'key_as_string': '2014-01-01T00:00:00Z'})
mock_check_matches.reset_mock()
def test_metric_aggregation():
rules = {'buffer_time': datetime.timedelta(minutes=5),
'timestamp_field': '@timestamp',
'metric_agg_type': 'avg',
'metric_agg_key': 'cpu_pct'}
# Check threshold logic
with pytest.raises(EAException):
rule = MetricAggregationRule(rules)
rules['min_threshold'] = 0.1
rules['max_threshold'] = 0.8
rule = MetricAggregationRule(rules)
assert rule.rules['aggregation_query_element'] == {'cpu_pct_avg': {'avg': {'field': 'cpu_pct'}}}
assert rule.crossed_thresholds(None) is False
assert rule.crossed_thresholds(0.09) is True
assert rule.crossed_thresholds(0.10) is False
assert rule.crossed_thresholds(0.79) is False
assert rule.crossed_thresholds(0.81) is True
rule.check_matches(datetime.datetime.now(), None, {'cpu_pct_avg': {'value': None}})
rule.check_matches(datetime.datetime.now(), None, {'cpu_pct_avg': {'value': 0.5}})
assert len(rule.matches) == 0
rule.check_matches(datetime.datetime.now(), None, {'cpu_pct_avg': {'value': 0.05}})
rule.check_matches(datetime.datetime.now(), None, {'cpu_pct_avg': {'value': 0.95}})
assert len(rule.matches) == 2
rules['query_key'] = 'qk'
rule = MetricAggregationRule(rules)
rule.check_matches(datetime.datetime.now(), 'qk_val', {'cpu_pct_avg': {'value': 0.95}})
assert rule.matches[0]['qk'] == 'qk_val'
def test_percentage_match():
rules = {'match_bucket_filter': {'term': 'term_val'},
'buffer_time': datetime.timedelta(minutes=5),
'timestamp_field': '@timestamp'}
# Check threshold logic
with pytest.raises(EAException):
rule = PercentageMatchRule(rules)
rules['min_percentage'] = 25
rules['max_percentage'] = 75
rule = PercentageMatchRule(rules)
assert rule.rules['aggregation_query_element'] == {'percentage_match_aggs': {'filters': {'other_bucket': True, 'filters': {'match_bucket': {'bool': {'must': {'term': 'term_val'}}}}}}}
assert rule.percentage_violation(25) is False
assert rule.percentage_violation(50) is False
assert rule.percentage_violation(75) is False
assert rule.percentage_violation(24.9) is True
assert rule.percentage_violation(75.1) is True
rule.check_matches(datetime.datetime.now(), None, create_percentage_match_agg(0, 0))
rule.check_matches(datetime.datetime.now(), None, create_percentage_match_agg(None, 100))
rule.check_matches(datetime.datetime.now(), None, create_percentage_match_agg(26, 74))
rule.check_matches(datetime.datetime.now(), None, create_percentage_match_agg(74, 26))
assert len(rule.matches) == 0
rule.check_matches(datetime.datetime.now(), None, create_percentage_match_agg(24, 76))
rule.check_matches(datetime.datetime.now(), None, create_percentage_match_agg(76, 24))
assert len(rule.matches) == 2
rules['query_key'] = 'qk'
rule = PercentageMatchRule(rules)
rule.check_matches(datetime.datetime.now(), 'qk_val', create_percentage_match_agg(76, 24))
assert rule.matches[0]['qk'] == 'qk_val'
| {
"content_hash": "12fae68bc3e41a7cc77aed02f8c30aeb",
"timestamp": "",
"source": "github",
"line_count": 1096,
"max_line_length": 187,
"avg_line_length": 38.16332116788321,
"alnum_prop": 0.583976857054056,
"repo_name": "jetyang2005/elastalert",
"id": "39a637ec675b83baf4c41d8f51557e1da33ef0b2",
"size": "41851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/rules_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8948"
},
{
"name": "HTML",
"bytes": "80454"
},
{
"name": "Makefile",
"bytes": "480"
},
{
"name": "Python",
"bytes": "680744"
},
{
"name": "Shell",
"bytes": "2779"
}
],
"symlink_target": ""
} |
import sys, os
if os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..') not in sys.path:
sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..'))
import pyeq2
import numpy
numpy.seterr(over = 'raise', divide = 'raise', invalid = 'raise', under = 'ignore') # numpy raises warnings, convert to exceptions to trap them
import pyeq2.Model_2D_BaseClass
class NIST_Bennett5(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST Bennett5"
_HTML = 'y = a * (b+x)<sup>-1/c</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/bennett5.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
            temp = a * numpy.power(b + x_in, -1.0 / c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp += a * pow(b * x_in, 1.0 / c);\n"
return s
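def _bennett5_sketch():
    """Illustrative sketch, not part of pyeq2: a direct NumPy evaluation of the
    Bennett5 form y = a * (b + x)**(-1/c) declared in _HTML above, without the
    data-cache machinery. The coefficient and x values are made up."""
    x = numpy.linspace(5.0, 50.0, 10)
    a, b, c = 2.0, 10.0, 1.5
    return a * numpy.power(b + x, -1.0 / c)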
class NIST_BoxBOD(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST BoxBOD"
_HTML = 'y = a * (1.0-exp(-b*x))'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/boxbod.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.NegX(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['NegX'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * (1.0 - numpy.exp(b*x_in))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp += a * (1.0 - exp(-b*x_in));\n"
return s
class NIST_Chwirut(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST Chwirut"
_HTML = 'y = exp(-a*x) / (b + c*x)'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/chwirut1.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.NegX(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
x_NegX = inDataCacheDictionary['NegX'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = numpy.exp(a * x_NegX) / (b + c * x_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp += exp(-a * x_in) / (b + c * x_in);\n"
return s
class NIST_DanWood(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST DanWood"
_HTML = 'y = a*x<sup>b</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/daniel_wood.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * numpy.power(x_in, b)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp += a * pow(x_in, b);\n"
return s
class NIST_ENSO(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST ENSO"
_HTML = 'y = a + b*cos(2*pi*x/12) + c*sin(2*pi*x/12) + f*cos(2*pi*x/d) + g*sin(2*pi*x/d) + i*cos(2*pi*x/h) + j*sin(2*pi*x/h)'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g', 'h', 'i', 'j']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/enso.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = False
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.NistEnsoCosX(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.NistEnsoSinX(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.TwoPiX(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_NistEnsoCosX = inDataCacheDictionary['NistEnsoCosX'] # only need to perform this dictionary look-up once
x_NistEnsoSinX = inDataCacheDictionary['NistEnsoSinX'] # only need to perform this dictionary look-up once
x_TwoPiX = inDataCacheDictionary['TwoPiX'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
h = inCoeffs[6]
i = inCoeffs[7]
j = inCoeffs[8]
try:
od = x_TwoPiX / d
og = x_TwoPiX / h
temp = a + b * x_NistEnsoCosX + c * x_NistEnsoSinX
            temp += f * numpy.cos(od) + g * numpy.sin(od)
temp += i * numpy.cos(og) + j * numpy.sin(og)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = ""
s += "\tdouble two_pi_x = 2.0 * 3.14159265358979323846 * x_in;\n"
s += "\tdouble o12 = two_pi_x / 12.0;\n"
s += "\tdouble od = two_pi_x / d;\n"
s += "\tdouble og = two_pi_x / h;\n"
s += "\ttemp += a + b * cos(o12) + c * sin(o12);\n"
s += "\ttemp += f * cos(od) + g * sin(od);\n"
s += "\ttemp += i * cos(og) + j * sin(og);\n"
return s
class NIST_Eckerle4(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST Eckerle4"
_HTML = 'y = (a/b) * exp(-0.5*((x-c)/b)<sup>2</sup>)'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/eckerle4.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp1 = (x_in - c) / b
temp = (a / b) * numpy.exp(-0.5 * temp1 * temp1)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp += (a / b) * exp(-0.5 * ((x_in - c) / b) * ((x_in - c) / b));\n"
return s
class NIST_Gauss(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST Gauss"
_HTML = 'y = a*exp(-b*x) + c*exp(-(x-d)<sup>2</sup> / f<sup>2</sup>) + g*exp(-(x-h)<sup>2</sup> / i<sup>2</sup>)'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g', 'h', 'i']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/gauss1.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
h = inCoeffs[6]
i = inCoeffs[7]
try:
xminusd = x_in-d
xminush = x_in-h
temp = a * numpy.exp(-b * x_in)
temp += c * numpy.exp(-1.0 * xminusd * xminusd / (f * f))
temp += g * numpy.exp(-1.0 * xminush * xminush / (i * i))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * exp(-b * x_in) + c * exp(-1.0 * (x_in-d) * (x_in-d) / (f * f)) + g * exp(-1.0 * (x_in-h) * (x_in-h) / (i * i));\n"
return s
class NIST_Hahn(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST Hahn"
_HTML = 'y = (a + b*x + c*x<sup>2</sup> + d*x<sup>3</sup>) / (1.0 + f*x + g*x<sup>2</sup> + h*x<sup>3</sup>)'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g', 'h']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/hahn1.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[3.0]), [3.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
x_PowX2 = inDataCacheDictionary['PowX_2.0'] # only need to perform this dictionary look-up once
x_PowX3 = inDataCacheDictionary['PowX_3.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
h = inCoeffs[6]
try:
temp = (a + b * x_in + c * x_PowX2 + d * x_PowX3) / (1.0 + f * x_in + g * x_PowX2 + h * x_PowX3)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp += (a + b * x_in + c * x_in * x_in + d * x_in * x_in * x_in) / (1.0 + f * x_in + g * x_in * x_in + h * x_in * x_in * x_in);\n"
return s
class NIST_Kirby(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST Kirby"
_HTML = 'y = (a + b*x + c*x<sup>2</sup>) / (1.0 + d*x + f*x<sup>2</sup>)'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/kirby2.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[2.0]), [2.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
x_PowX2 = inDataCacheDictionary['PowX_2.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
try:
temp = (a + b * x_in + c * x_PowX2) / (1.0 + d * x_in + f * x_PowX2)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp += (a + b * x_in + c * x_in * x_in) / (1.0 + d * x_in + f * x_in * x_in);\n"
return s
class NIST_Lanczos(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST Lanczos"
_HTML = 'y = a*exp(-b*x) + c*exp(-d*x) + f*exp(-g*x)'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/lanczos1.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.NegX(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_NegX = inDataCacheDictionary['NegX'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
try:
temp = a * numpy.exp(b * x_NegX) + c * numpy.exp(d * x_NegX) + f * numpy.exp(g * x_NegX)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp += a * exp(-b * x_in) + c * exp(-d * x_in) + f * exp(-g * x_in);\n"
return s
class NIST_MGH09(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST MGH09"
_HTML = 'y = a * (x<sup>2</sup> + b*x) / (x<sup>2</sup> + c*x + d)'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c', 'd']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/mgh09.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[2.0]), [2.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
x_PowX2 = inDataCacheDictionary['PowX_2.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
try:
temp = a * (x_PowX2 + b * x_in) / (x_PowX2 + c * x_in + d)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp += a * (x_in*x_in + b * x_in) / (x_in*x_in + c * x_in + d);\n"
return s
class NIST_MGH10(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST MGH10"
_HTML = 'y = a * exp(b/(x+c))'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/mgh10.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a * numpy.exp(b / (x_in + c))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp += a * exp(b / (x_in + c));\n"
return s
class NIST_MGH17(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST MGH17"
_HTML = 'y = a + b*exp(-x*d) + c*exp(-x*f)'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/mgh17.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = False
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.NegX(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_Neg = inDataCacheDictionary['NegX'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
try:
temp = a + b * numpy.exp(x_Neg * c) + d * numpy.exp(x_Neg * f)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp += a + b * exp(-1.0 * x_in * c) + d * exp(-1.0 * x_in * f);\n"
return s
class NIST_Misra1a(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST Misra1a"
_HTML = 'y = a * (1.0 - exp(-b*x))'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/misra1a.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.NegX(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_NegX = inDataCacheDictionary['NegX'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * (1.0 - numpy.exp(b * x_NegX))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp += a * (1.0 - exp(-1.0 * b * x_in));\n"
return s
class NIST_Misra1b(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST Misra1b"
_HTML = 'y = a * (1.0 - (1.0+b*x/2.0)<sup>-2.0</sup>)'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/misra1b.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * (1.0 - numpy.power(1.0 + b * x_in / 2.0, -2.0))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp += a * (1.0 - pow(1.0 + b * x_in / 2.0, -2.0));\n"
return s
class NIST_Misra1c(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST Misra1c"
_HTML = 'y = a * (1.0 - 2.0*b*x)<sup>-0.5</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/misra1c.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * numpy.power(1.0 - 2.0 * b * x_in, -0.5)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp += a * pow(1.0 - 2.0 * b * x_in, -0.5);\n"
return s
class NIST_Misra1d(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST Misra1d"
_HTML = 'y = a * b * x * (1.0 + b*x)<sup>-1.0</sup>'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/misra1d.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * b * x_in / (1.0 + b * x_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s += "\ttemp += a * b * x_in / (1.0 + b * x_in);\n"
return s
class NIST_Rat42(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST Rat42"
_HTML = 'y = a / (1.0 + exp(b - c*x))'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/ratkowsky2.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a / (1.0 + numpy.exp(b - c * x_in))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s += "\ttemp += a / (1.0 + exp(b - c * x_in));\n"
return s
class NIST_Rat43(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST Rat43"
_HTML = 'y = a / ((1.0 + exp(b - c*x))<sup>(1.0/d)</sup>)'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c', 'd']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/ratkowsky3.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
try:
temp = a / numpy.power(1.0 + numpy.exp(b - c * x_in), 1.0 / d)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s += "\ttemp += a / pow(1.0 + exp(b - c * x_in), 1.0 / d);\n"
return s
class NIST_Roszman(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST Roszman"
_HTML = 'y = a - bx - (arctan(c/(x-d)) / pi)'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c', 'd']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/roszman1.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = False
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
try:
temp = a - b * x_in + numpy.arctan(c/(x_in-d)) / numpy.pi
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp += a - b * x_in + atan(c/(x_in-d)) / 3.14159265358979323846;\n"
return s
class NIST_Thurber(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
_baseName = "NIST Thurber"
_HTML = 'y = (a + bx + cx<sup>2</sup> + dx<sup>3</sup>) / (1.0 + fx + gx<sup>2</sup> + hx<sup>3</sup>)'
_leftSideHTML = 'y'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g', 'h']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = 'http://www.itl.nist.gov/div898/strd/nls/data/thurber.shtml'
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[3.0]), [3.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
x_PowX2 = inDataCacheDictionary['PowX_2.0'] # only need to perform this dictionary look-up once
x_PowX3 = inDataCacheDictionary['PowX_3.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
h = inCoeffs[6]
try:
temp = (a + b * x_in + c * x_PowX2 + d * x_PowX3) / (1.0 + f * x_in + g * x_PowX2 + h * x_PowX3) # numerator parenthesized so the full cubic ratio matches _HTML
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a + b * x_in + c * pow(x_in, 2.0) + d * pow(x_in, 3.0) / (1.0 + f * x_in + g * pow(x_in, 2.0) + h * pow(x_in, 3.0));\n"
return s
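# --- Hedged usage sketch (editor addition, not part of the upstream pyeq2 NIST.py) ---
# The classes above only describe model forms; fitting is driven by pyeq2's generic
# machinery. The call pattern below mirrors pyeq2's bundled examples, but the exact
# names (dataConvertorService, ConvertAndSortColumnarASCII, Solve, solvedCoefficients)
# and the sample data are assumptions and should be checked against the installed version.
#
# import pyeq2
#
# # pick one of the NIST models defined above; 'SSQABS' selects a sum-of-squared-error target
# equation = pyeq2.Models_2D.NIST.NIST_Misra1a('SSQABS')
#
# # hypothetical columnar "x y" text; the certified NIST data lives at webReferenceURL
# rawData = '''
# 77.6  10.07
# 114.9 14.73
# 141.1 17.94
# '''
#
# pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(rawData, equation, False)
# equation.Solve()                    # estimates the coefficients in _coefficientDesignators
# print(equation.solvedCoefficients)  # e.g. values for a and b in Misra1a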
| {
"content_hash": "fe117c98a63a13a44861953ecde04d45",
"timestamp": "",
"source": "github",
"line_count": 1120,
"max_line_length": 146,
"avg_line_length": 38.049107142857146,
"alnum_prop": 0.68060542062654,
"repo_name": "JMoravec/unkRadnet",
"id": "3fdb96b8f068100499adb41a9bf9c2c7d39722b8",
"size": "42991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fitToCurve/pyeq2/Models_2D/NIST.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6107"
},
{
"name": "Logos",
"bytes": "132148"
},
{
"name": "M",
"bytes": "832584"
},
{
"name": "Matlab",
"bytes": "401"
},
{
"name": "Python",
"bytes": "2747757"
},
{
"name": "Shell",
"bytes": "418"
}
],
"symlink_target": ""
} |
raise DeprecationWarning(
"Ray SGD has been deprecated as of Ray 1.13. For distributed "
"deep learning on Ray please use Ray AI Runtime (Ray AIR) "
"instead ("
"https://docs.ray.io/en/master/ray-air/getting-started.html)."
)
| {
"content_hash": "f719970eca67525c7b0947871b0a6aac",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 66,
"avg_line_length": 40.333333333333336,
"alnum_prop": 0.6900826446280992,
"repo_name": "ray-project/ray",
"id": "969f57a7cf749695b1efd1db15bb4002c1ff74fe",
"size": "242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/util/sgd/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('symposion_sponsorship', '0003_auto_20170813_1730'),
]
operations = [
migrations.AddField(
model_name='sponsorlevel',
name='available',
field=models.BooleanField(default=True, verbose_name='Available?'),
),
]
| {
"content_hash": "5f6a745169d072dcd270957616976ef6",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 79,
"avg_line_length": 23.77777777777778,
"alnum_prop": 0.6214953271028038,
"repo_name": "pyohio/symposion",
"id": "b93dc093c392cd445a69544e137f9c7df029c782",
"size": "501",
"binary": false,
"copies": "1",
"ref": "refs/heads/pyohio-2019",
"path": "symposion/sponsorship/migrations/0004_sponsorlevel_available.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "79954"
},
{
"name": "Python",
"bytes": "287626"
}
],
"symlink_target": ""
} |
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:Devil123@127.0.0.1/FurnitoData?charset=utf8'
json_store = '/home/boyang/Documents/furnitures'
| {
"content_hash": "e2a25b3cd6c0b3a382bd0cacc7b346f6",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 92,
"avg_line_length": 51.333333333333336,
"alnum_prop": 0.7792207792207793,
"repo_name": "Informationretrieval2016/Furnito_web",
"id": "1557530a9c53127b877d15aac864b96ca40456ec",
"size": "154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2793"
},
{
"name": "HTML",
"bytes": "7013"
},
{
"name": "Python",
"bytes": "7838"
}
],
"symlink_target": ""
} |