| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
shinpeimuraoka/ryu
|
refs/heads/master
|
ryu/lib/ofctl_v1_5.py
|
5
|
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
from ryu.ofproto import ether
from ryu.ofproto import ofproto_v1_5
from ryu.ofproto import ofproto_v1_5_parser
from ryu.lib import ofctl_utils
LOG = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 1.0
UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_5)
str_to_int = ofctl_utils.str_to_int
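# The helpers below translate between the JSON/string values accepted by the
# ofctl REST interface and the corresponding OpenFlow 1.5 parser objects
# (actions, instructions, matches, stats and the various *_mod messages).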
def to_action(dp, dic):
ofp = dp.ofproto
parser = dp.ofproto_parser
action_type = dic.get('type')
return ofctl_utils.to_action(dic, ofp, parser, action_type, UTIL)
def _get_actions(dp, dics):
actions = []
for d in dics:
action = to_action(dp, d)
if action is not None:
actions.append(action)
else:
LOG.error('Unknown action type: %s', d)
return actions
def to_instructions(dp, insts):
instructions = []
ofp = dp.ofproto
parser = dp.ofproto_parser
for i in insts:
inst_type = i.get('type')
if inst_type in ['APPLY_ACTIONS', 'WRITE_ACTIONS']:
dics = i.get('actions', [])
actions = _get_actions(dp, dics)
if actions:
if inst_type == 'APPLY_ACTIONS':
instructions.append(
parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
actions))
else:
instructions.append(
parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS,
actions))
elif inst_type == 'CLEAR_ACTIONS':
instructions.append(
parser.OFPInstructionActions(ofp.OFPIT_CLEAR_ACTIONS, []))
elif inst_type == 'GOTO_TABLE':
table_id = str_to_int(i.get('table_id'))
instructions.append(parser.OFPInstructionGotoTable(table_id))
elif inst_type == 'WRITE_METADATA':
metadata = str_to_int(i.get('metadata'))
metadata_mask = (str_to_int(i['metadata_mask'])
if 'metadata_mask' in i
else parser.UINT64_MAX)
instructions.append(
parser.OFPInstructionWriteMetadata(
metadata, metadata_mask))
else:
LOG.error('Unknown instruction type: %s', inst_type)
return instructions
def action_to_str(act):
s = act.to_jsondict()[act.__class__.__name__]
t = UTIL.ofp_action_type_to_user(s['type'])
s['type'] = t if t != s['type'] else 'UNKNOWN'
if t == 'SET_FIELD':
field = s.pop('field')
s['field'] = field['OXMTlv']['field']
s['mask'] = field['OXMTlv']['mask']
s['value'] = field['OXMTlv']['value']
elif t == 'COPY_FIELD':
oxm_ids = s.pop('oxm_ids')
s['src_oxm_id'] = oxm_ids[0]['OFPOxmId']['type']
s['dst_oxm_id'] = oxm_ids[1]['OFPOxmId']['type']
return s
def instructions_to_str(instructions):
s = []
for i in instructions:
v = i.to_jsondict()[i.__class__.__name__]
t = UTIL.ofp_instruction_type_to_user(v['type'])
inst_type = t if t != v['type'] else 'UNKNOWN'
# apply/write/clear-action instruction
if isinstance(i, ofproto_v1_5_parser.OFPInstructionActions):
acts = []
for a in i.actions:
acts.append(action_to_str(a))
v['type'] = inst_type
v['actions'] = acts
s.append(v)
# others
else:
v['type'] = inst_type
s.append(v)
return s
def to_match(dp, attrs):
convert = {'in_port': UTIL.ofp_port_from_user,
'in_phy_port': str_to_int,
'metadata': ofctl_utils.to_match_masked_int,
'eth_dst': ofctl_utils.to_match_eth,
'eth_src': ofctl_utils.to_match_eth,
'eth_type': str_to_int,
'vlan_vid': to_match_vid,
'vlan_pcp': str_to_int,
'ip_dscp': str_to_int,
'ip_ecn': str_to_int,
'ip_proto': str_to_int,
'ipv4_src': ofctl_utils.to_match_ip,
'ipv4_dst': ofctl_utils.to_match_ip,
'tcp_src': str_to_int,
'tcp_dst': str_to_int,
'udp_src': str_to_int,
'udp_dst': str_to_int,
'sctp_src': str_to_int,
'sctp_dst': str_to_int,
'icmpv4_type': str_to_int,
'icmpv4_code': str_to_int,
'arp_op': str_to_int,
'arp_spa': ofctl_utils.to_match_ip,
'arp_tpa': ofctl_utils.to_match_ip,
'arp_sha': ofctl_utils.to_match_eth,
'arp_tha': ofctl_utils.to_match_eth,
'ipv6_src': ofctl_utils.to_match_ip,
'ipv6_dst': ofctl_utils.to_match_ip,
'ipv6_flabel': str_to_int,
'icmpv6_type': str_to_int,
'icmpv6_code': str_to_int,
'ipv6_nd_target': ofctl_utils.to_match_ip,
'ipv6_nd_sll': ofctl_utils.to_match_eth,
'ipv6_nd_tll': ofctl_utils.to_match_eth,
'mpls_label': str_to_int,
'mpls_tc': str_to_int,
'mpls_bos': str_to_int,
'pbb_isid': ofctl_utils.to_match_masked_int,
'tunnel_id': ofctl_utils.to_match_masked_int,
'ipv6_exthdr': ofctl_utils.to_match_masked_int,
'pbb_uca': str_to_int,
'tcp_flags': str_to_int,
'actset_output': str_to_int,
'packet_type': ofctl_utils.to_match_packet_type}
keys = {'dl_dst': 'eth_dst',
'dl_src': 'eth_src',
'dl_type': 'eth_type',
'dl_vlan': 'vlan_vid',
'nw_src': 'ipv4_src',
'nw_dst': 'ipv4_dst',
'nw_proto': 'ip_proto'}
if attrs.get('eth_type') == ether.ETH_TYPE_ARP:
if 'ipv4_src' in attrs and 'arp_spa' not in attrs:
attrs['arp_spa'] = attrs['ipv4_src']
del attrs['ipv4_src']
if 'ipv4_dst' in attrs and 'arp_tpa' not in attrs:
attrs['arp_tpa'] = attrs['ipv4_dst']
del attrs['ipv4_dst']
kwargs = {}
for key, value in attrs.items():
if key in keys:
# For old field name
key = keys[key]
if key in convert:
value = convert[key](value)
kwargs[key] = value
else:
LOG.error('Unknown match field: %s', key)
return dp.ofproto_parser.OFPMatch(**kwargs)
def to_match_vid(value):
return ofctl_utils.to_match_vid(value, ofproto_v1_5.OFPVID_PRESENT)
def match_to_str(ofmatch):
match = {}
ofmatch = ofmatch.to_jsondict()['OFPMatch']
ofmatch = ofmatch['oxm_fields']
for match_field in ofmatch:
key = match_field['OXMTlv']['field']
mask = match_field['OXMTlv']['mask']
value = match_field['OXMTlv']['value']
if key == 'vlan_vid':
value = match_vid_to_str(value, mask)
elif key == 'in_port':
value = UTIL.ofp_port_to_user(value)
elif key == 'packet_type':
value = [value >> 16, value & 0xffff]
else:
if mask is not None:
value = str(value) + '/' + str(mask)
match.setdefault(key, value)
return match
def match_vid_to_str(value, mask):
return ofctl_utils.match_vid_to_str(
value, mask, ofproto_v1_5.OFPVID_PRESENT)
def wrap_dpid_dict(dp, value, to_user=True):
if to_user:
return {str(dp.id): value}
return {dp.id: value}
def stats_to_str(ofstats):
stats = {}
ofstats = ofstats.to_jsondict()['OFPStats']
ofstats = ofstats['oxs_fields']
for s in ofstats:
key = s['OXSTlv']['field']
if key == 'duration':
value = {
'duration_sec': s['OXSTlv']['value'][0],
'duration_nsec': s['OXSTlv']['value'][1],
}
elif key == 'idle_time':
value = {
'idle_time_sec': s['OXSTlv']['value'][0],
'idle_time_nsec': s['OXSTlv']['value'][1],
}
else:
value = s['OXSTlv']['value']
stats.setdefault(key, value)
return stats
def get_desc_stats(dp, waiters, to_user=True):
stats = dp.ofproto_parser.OFPDescStatsRequest(dp, 0)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
s = {}
for msg in msgs:
stats = msg.body
s = stats.to_jsondict()[stats.__class__.__name__]
return wrap_dpid_dict(dp, s, to_user)
def get_queue_stats(dp, waiters, port_no=None, queue_id=None, to_user=True):
if port_no is None:
port_no = dp.ofproto.OFPP_ANY
else:
port_no = UTIL.ofp_port_from_user(port_no)
if queue_id is None:
queue_id = dp.ofproto.OFPQ_ALL
else:
queue_id = UTIL.ofp_queue_from_user(queue_id)
stats = dp.ofproto_parser.OFPQueueStatsRequest(
dp, 0, port_no, queue_id)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
desc = []
for msg in msgs:
stats = msg.body
for stat in stats:
s = stat.to_jsondict()[stat.__class__.__name__]
properties = []
for prop in stat.properties:
p = prop.to_jsondict()[prop.__class__.__name__]
if to_user:
t = UTIL.ofp_queue_stats_prop_type_to_user(prop.type)
p['type'] = t if t != p['type'] else 'UNKNOWN'
properties.append(p)
s['properties'] = properties
desc.append(s)
return wrap_dpid_dict(dp, desc, to_user)
def get_queue_desc(dp, waiters, port_no=None, queue_id=None, to_user=True):
if port_no is None:
port_no = dp.ofproto.OFPP_ANY
else:
port_no = UTIL.ofp_port_from_user(port_no)
if queue_id is None:
queue_id = dp.ofproto.OFPQ_ALL
else:
queue_id = UTIL.ofp_queue_from_user(queue_id)
stats = dp.ofproto_parser.OFPQueueDescStatsRequest(
dp, 0, port_no, queue_id)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
configs = []
for msg in msgs:
for queue in msg.body:
q = queue.to_jsondict()[queue.__class__.__name__]
prop_list = []
for prop in queue.properties:
p = prop.to_jsondict()[prop.__class__.__name__]
if to_user:
t = UTIL.ofp_queue_desc_prop_type_to_user(prop.type)
p['type'] = t if t != prop.type else 'UNKNOWN'
prop_list.append(p)
q['properties'] = prop_list
configs.append(q)
return wrap_dpid_dict(dp, configs, to_user)
def get_flow_desc_stats(dp, waiters, flow=None, to_user=True):
flow = flow if flow else {}
table_id = UTIL.ofp_table_from_user(
flow.get('table_id', dp.ofproto.OFPTT_ALL))
flags = str_to_int(flow.get('flags', 0))
out_port = UTIL.ofp_port_from_user(
flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = UTIL.ofp_group_from_user(
flow.get('out_group', dp.ofproto.OFPG_ANY))
cookie = str_to_int(flow.get('cookie', 0))
cookie_mask = str_to_int(flow.get('cookie_mask', 0))
match = to_match(dp, flow.get('match', {}))
# Note: OpenFlow does not allow filtering flow entries by priority,
# but for efficiency, ofctl provides a way to do it.
priority = str_to_int(flow.get('priority', -1))
stats = dp.ofproto_parser.OFPFlowDescStatsRequest(
dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
match)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
flows = []
for msg in msgs:
for stats in msg.body:
if 0 <= priority != stats.priority:
continue
s = stats.to_jsondict()[stats.__class__.__name__]
s['instructions'] = instructions_to_str(stats.instructions)
s['stats'] = stats_to_str(stats.stats)
s['match'] = match_to_str(stats.match)
flows.append(s)
return wrap_dpid_dict(dp, flows, to_user)
def get_flow_stats(dp, waiters, flow=None, to_user=True):
flow = flow if flow else {}
table_id = UTIL.ofp_table_from_user(
flow.get('table_id', dp.ofproto.OFPTT_ALL))
flags = str_to_int(flow.get('flags', 0))
out_port = UTIL.ofp_port_from_user(
flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = UTIL.ofp_group_from_user(
flow.get('out_group', dp.ofproto.OFPG_ANY))
cookie = str_to_int(flow.get('cookie', 0))
cookie_mask = str_to_int(flow.get('cookie_mask', 0))
match = to_match(dp, flow.get('match', {}))
# Note: OpenFlow does not allow filtering flow entries by priority,
# but for efficiency, ofctl provides a way to do it.
priority = str_to_int(flow.get('priority', -1))
stats = dp.ofproto_parser.OFPFlowStatsRequest(
dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
match)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
flows = []
for msg in msgs:
for stats in msg.body:
if 0 <= priority != stats.priority:
continue
s = stats.to_jsondict()[stats.__class__.__name__]
s['stats'] = stats_to_str(stats.stats)
s['match'] = match_to_str(stats.match)
flows.append(s)
return wrap_dpid_dict(dp, flows, to_user)
def get_aggregate_flow_stats(dp, waiters, flow=None, to_user=True):
flow = flow if flow else {}
table_id = UTIL.ofp_table_from_user(
flow.get('table_id', dp.ofproto.OFPTT_ALL))
flags = str_to_int(flow.get('flags', 0))
out_port = UTIL.ofp_port_from_user(
flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = UTIL.ofp_group_from_user(
flow.get('out_group', dp.ofproto.OFPG_ANY))
cookie = str_to_int(flow.get('cookie', 0))
cookie_mask = str_to_int(flow.get('cookie_mask', 0))
match = to_match(dp, flow.get('match', {}))
stats = dp.ofproto_parser.OFPAggregateStatsRequest(
dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
match)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
flows = []
for msg in msgs:
stats = msg.body
s = stats.to_jsondict()[stats.__class__.__name__]
s['stats'] = stats_to_str(stats.stats)
flows.append(s)
return wrap_dpid_dict(dp, flows, to_user)
def get_table_stats(dp, waiters, to_user=True):
stats = dp.ofproto_parser.OFPTableStatsRequest(dp, 0)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
tables = []
for msg in msgs:
stats = msg.body
for stat in stats:
s = stat.to_jsondict()[stat.__class__.__name__]
if to_user:
s['table_id'] = UTIL.ofp_table_to_user(stat.table_id)
tables.append(s)
return wrap_dpid_dict(dp, tables, to_user)
def get_table_features(dp, waiters, to_user=True):
stats = dp.ofproto_parser.OFPTableFeaturesStatsRequest(dp, 0, [])
msgs = []
ofproto = dp.ofproto
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
p_type_instructions = [ofproto.OFPTFPT_INSTRUCTIONS,
ofproto.OFPTFPT_INSTRUCTIONS_MISS]
p_type_next_tables = [ofproto.OFPTFPT_NEXT_TABLES,
ofproto.OFPTFPT_NEXT_TABLES_MISS,
ofproto.OFPTFPT_TABLE_SYNC_FROM]
p_type_actions = [ofproto.OFPTFPT_WRITE_ACTIONS,
ofproto.OFPTFPT_WRITE_ACTIONS_MISS,
ofproto.OFPTFPT_APPLY_ACTIONS,
ofproto.OFPTFPT_APPLY_ACTIONS_MISS]
p_type_packet = ofproto.OFPTFPT_PACKET_TYPES
p_type_oxms = [ofproto.OFPTFPT_MATCH,
ofproto.OFPTFPT_WILDCARDS,
ofproto.OFPTFPT_WRITE_SETFIELD,
ofproto.OFPTFPT_WRITE_SETFIELD_MISS,
ofproto.OFPTFPT_APPLY_SETFIELD,
ofproto.OFPTFPT_APPLY_SETFIELD_MISS,
ofproto.OFPTFPT_WRITE_COPYFIELD,
ofproto.OFPTFPT_WRITE_COPYFIELD_MISS,
ofproto.OFPTFPT_APPLY_COPYFIELD,
ofproto.OFPTFPT_APPLY_COPYFIELD_MISS]
p_type_experimenter = [ofproto.OFPTFPT_EXPERIMENTER,
ofproto.OFPTFPT_EXPERIMENTER_MISS]
tables = []
for msg in msgs:
stats = msg.body
for stat in stats:
s = stat.to_jsondict()[stat.__class__.__name__]
properties = []
for prop in stat.properties:
p = {}
t = UTIL.ofp_table_feature_prop_type_to_user(prop.type)
p['type'] = t if t != prop.type else 'UNKNOWN'
if prop.type in p_type_instructions:
instruction_ids = []
for i in prop.instruction_ids:
inst = {'len': i.len,
'type': i.type}
instruction_ids.append(inst)
p['instruction_ids'] = instruction_ids
elif prop.type in p_type_next_tables:
table_ids = []
for i in prop.table_ids:
table_ids.append(i)
p['table_ids'] = table_ids
elif prop.type in p_type_actions:
action_ids = []
for i in prop.action_ids:
act = i.to_jsondict()[i.__class__.__name__]
action_ids.append(act)
p['action_ids'] = action_ids
elif prop.type in p_type_oxms:
oxm_ids = []
for i in prop.oxm_ids:
oxm = i.to_jsondict()[i.__class__.__name__]
oxm_ids.append(oxm)
p['oxm_ids'] = oxm_ids
elif prop.type == p_type_packet:
oxm_values = []
for val in prop.oxm_values:
i = {val[0]: val[1]}
oxm_values.append(i)
p['oxm_values'] = oxm_values
elif prop.type in p_type_experimenter:
pass
properties.append(p)
s['name'] = stat.name.decode('utf-8')
s['properties'] = properties
if to_user:
s['table_id'] = UTIL.ofp_table_to_user(stat.table_id)
tables.append(s)
return wrap_dpid_dict(dp, tables, to_user)
def get_port_stats(dp, waiters, port_no=None, to_user=True):
if port_no is None:
port_no = dp.ofproto.OFPP_ANY
else:
port_no = UTIL.ofp_port_from_user(port_no)
stats = dp.ofproto_parser.OFPPortStatsRequest(dp, 0, port_no)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
ports = []
for msg in msgs:
for stats in msg.body:
s = stats.to_jsondict()[stats.__class__.__name__]
properties = []
for prop in stats.properties:
p = prop.to_jsondict()[prop.__class__.__name__]
t = UTIL.ofp_port_stats_prop_type_to_user(prop.type)
p['type'] = t if t != prop.type else 'UNKNOWN'
properties.append(p)
s['properties'] = properties
if to_user:
s['port_no'] = UTIL.ofp_port_to_user(stats.port_no)
ports.append(s)
return wrap_dpid_dict(dp, ports, to_user)
def get_meter_stats(dp, waiters, meter_id=None, to_user=True):
if meter_id is None:
meter_id = dp.ofproto.OFPM_ALL
else:
meter_id = UTIL.ofp_meter_from_user(meter_id)
stats = dp.ofproto_parser.OFPMeterStatsRequest(
dp, 0, meter_id)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
meters = []
for msg in msgs:
for stats in msg.body:
s = stats.to_jsondict()[stats.__class__.__name__]
bands = []
for band in stats.band_stats:
b = band.to_jsondict()[band.__class__.__name__]
bands.append(b)
s['band_stats'] = bands
if to_user:
s['meter_id'] = UTIL.ofp_meter_to_user(stats.meter_id)
meters.append(s)
return wrap_dpid_dict(dp, meters, to_user)
def get_meter_features(dp, waiters, to_user=True):
ofp = dp.ofproto
type_convert = {ofp.OFPMBT_DROP: 'DROP',
ofp.OFPMBT_DSCP_REMARK: 'DSCP_REMARK'}
capa_convert = {ofp.OFPMF_KBPS: 'KBPS',
ofp.OFPMF_PKTPS: 'PKTPS',
ofp.OFPMF_BURST: 'BURST',
ofp.OFPMF_STATS: 'STATS'}
stats = dp.ofproto_parser.OFPMeterFeaturesStatsRequest(dp, 0)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
features = []
for msg in msgs:
for feature in msg.body:
band_types = []
for k, v in type_convert.items():
if (1 << k) & feature.band_types:
if to_user:
band_types.append(v)
else:
band_types.append(k)
capabilities = []
for k, v in sorted(capa_convert.items()):
if k & feature.capabilities:
if to_user:
capabilities.append(v)
else:
capabilities.append(k)
f = {'max_meter': feature.max_meter,
'band_types': band_types,
'capabilities': capabilities,
'max_bands': feature.max_bands,
'max_color': feature.max_color}
features.append(f)
return wrap_dpid_dict(dp, features, to_user)
def get_meter_desc(dp, waiters, meter_id=None, to_user=True):
flags = {dp.ofproto.OFPMF_KBPS: 'KBPS',
dp.ofproto.OFPMF_PKTPS: 'PKTPS',
dp.ofproto.OFPMF_BURST: 'BURST',
dp.ofproto.OFPMF_STATS: 'STATS'}
if meter_id is None:
meter_id = dp.ofproto.OFPM_ALL
else:
meter_id = UTIL.ofp_meter_from_user(meter_id)
stats = dp.ofproto_parser.OFPMeterDescStatsRequest(
dp, 0, meter_id)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
configs = []
for msg in msgs:
for config in msg.body:
c = config.to_jsondict()[config.__class__.__name__]
bands = []
for band in config.bands:
b = band.to_jsondict()[band.__class__.__name__]
if to_user:
t = UTIL.ofp_meter_band_type_to_user(band.type)
b['type'] = t if t != band.type else 'UNKNOWN'
bands.append(b)
c_flags = []
for k, v in sorted(flags.items()):
if k & config.flags:
if to_user:
c_flags.append(v)
else:
c_flags.append(k)
c['flags'] = c_flags
c['bands'] = bands
if to_user:
c['meter_id'] = UTIL.ofp_meter_to_user(config.meter_id)
configs.append(c)
return wrap_dpid_dict(dp, configs, to_user)
def get_group_stats(dp, waiters, group_id=None, to_user=True):
if group_id is None:
group_id = dp.ofproto.OFPG_ALL
else:
group_id = UTIL.ofp_group_from_user(group_id)
stats = dp.ofproto_parser.OFPGroupStatsRequest(
dp, 0, group_id)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
groups = []
for msg in msgs:
for stats in msg.body:
g = stats.to_jsondict()[stats.__class__.__name__]
bucket_stats = []
for bucket_stat in stats.bucket_stats:
c = bucket_stat.to_jsondict()[bucket_stat.__class__.__name__]
bucket_stats.append(c)
g['bucket_stats'] = bucket_stats
if to_user:
g['group_id'] = UTIL.ofp_group_to_user(stats.group_id)
groups.append(g)
return wrap_dpid_dict(dp, groups, to_user)
def get_group_features(dp, waiters, to_user=True):
ofp = dp.ofproto
type_convert = {ofp.OFPGT_ALL: 'ALL',
ofp.OFPGT_SELECT: 'SELECT',
ofp.OFPGT_INDIRECT: 'INDIRECT',
ofp.OFPGT_FF: 'FF'}
cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT',
ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS',
ofp.OFPGFC_CHAINING: 'CHAINING',
ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'}
act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT',
ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
ofp.OFPAT_POP_VLAN: 'POP_VLAN',
ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
ofp.OFPAT_POP_MPLS: 'POP_MPLS',
ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
ofp.OFPAT_GROUP: 'GROUP',
ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
ofp.OFPAT_SET_FIELD: 'SET_FIELD',
ofp.OFPAT_PUSH_PBB: 'PUSH_PBB',
ofp.OFPAT_POP_PBB: 'POP_PBB',
ofp.OFPAT_COPY_FIELD: 'COPY_FIELD',
ofp.OFPAT_METER: 'METER',
ofp.OFPAT_EXPERIMENTER: 'EXPERIMENTER'}
stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
features = []
for msg in msgs:
feature = msg.body
types = []
for k, v in type_convert.items():
if (1 << k) & feature.types:
if to_user:
types.append(v)
else:
types.append(k)
capabilities = []
for k, v in cap_convert.items():
if k & feature.capabilities:
if to_user:
capabilities.append(v)
else:
capabilities.append(k)
if to_user:
max_groups = []
for k, v in type_convert.items():
max_groups.append({v: feature.max_groups[k]})
else:
max_groups = feature.max_groups
actions = []
for k1, v1 in type_convert.items():
acts = []
for k2, v2 in act_convert.items():
if (1 << k2) & feature.actions[k1]:
if to_user:
acts.append(v2)
else:
acts.append(k2)
if to_user:
actions.append({v1: acts})
else:
actions.append({k1: acts})
f = {'types': types,
'capabilities': capabilities,
'max_groups': max_groups,
'actions': actions}
features.append(f)
return wrap_dpid_dict(dp, features, to_user)
def get_group_desc(dp, waiters, group_id=None, to_user=True):
if group_id is None:
group_id = dp.ofproto.OFPG_ALL
else:
group_id = UTIL.ofp_group_from_user(group_id)
stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0, group_id)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
descs = []
for msg in msgs:
for stats in msg.body:
d = stats.to_jsondict()[stats.__class__.__name__]
buckets = []
for bucket in stats.buckets:
b = bucket.to_jsondict()[bucket.__class__.__name__]
actions = []
for action in bucket.actions:
if to_user:
actions.append(action_to_str(action))
else:
actions.append(action)
properties = []
for prop in bucket.properties:
p = prop.to_jsondict()[prop.__class__.__name__]
t = UTIL.ofp_group_bucket_prop_type_to_user(prop.type)
p['type'] = t if t != prop.type else 'UNKNOWN'
properties.append(p)
b['actions'] = actions
b['properties'] = properties
buckets.append(b)
d['buckets'] = buckets
if to_user:
d['group_id'] = UTIL.ofp_group_to_user(stats.group_id)
t = UTIL.ofp_group_type_to_user(stats.type)
d['type'] = t if t != stats.type else 'UNKNOWN'
descs.append(d)
return wrap_dpid_dict(dp, descs, to_user)
def get_port_desc(dp, waiters, port_no=None, to_user=True):
if port_no is None:
port_no = dp.ofproto.OFPP_ANY
else:
port_no = UTIL.ofp_port_from_user(port_no)
stats = dp.ofproto_parser.OFPPortDescStatsRequest(dp, 0, port_no)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
descs = []
for msg in msgs:
stats = msg.body
for stat in stats:
d = stat.to_jsondict()[stat.__class__.__name__]
properties = []
for prop in stat.properties:
p = prop.to_jsondict()[prop.__class__.__name__]
if to_user:
t = UTIL.ofp_port_desc_prop_type_to_user(prop.type)
p['type'] = t if t != prop.type else 'UNKNOWN'
properties.append(p)
d['name'] = stat.name.decode('utf-8')
d['properties'] = properties
if to_user:
d['port_no'] = UTIL.ofp_port_to_user(stat.port_no)
descs.append(d)
return wrap_dpid_dict(dp, descs, to_user)
def get_role(dp, waiters, to_user=True):
return ofctl_utils.get_role(dp, waiters, to_user)
def mod_flow_entry(dp, flow, cmd):
cookie = str_to_int(flow.get('cookie', 0))
cookie_mask = str_to_int(flow.get('cookie_mask', 0))
table_id = UTIL.ofp_table_from_user(flow.get('table_id', 0))
idle_timeout = str_to_int(flow.get('idle_timeout', 0))
hard_timeout = str_to_int(flow.get('hard_timeout', 0))
priority = str_to_int(flow.get('priority', 0))
buffer_id = UTIL.ofp_buffer_from_user(
flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER))
out_port = UTIL.ofp_port_from_user(
flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = UTIL.ofp_group_from_user(
flow.get('out_group', dp.ofproto.OFPG_ANY))
importance = str_to_int(flow.get('importance', 0))
flags = str_to_int(flow.get('flags', 0))
match = to_match(dp, flow.get('match', {}))
inst = to_instructions(dp, flow.get('instructions', []))
flow_mod = dp.ofproto_parser.OFPFlowMod(
dp, cookie, cookie_mask, table_id, cmd, idle_timeout,
hard_timeout, priority, buffer_id, out_port, out_group,
importance, flags, match, inst)
ofctl_utils.send_msg(dp, flow_mod, LOG)
def mod_meter_entry(dp, meter, cmd):
flags = 0
if 'flags' in meter:
meter_flags = meter['flags']
if not isinstance(meter_flags, list):
meter_flags = [meter_flags]
for flag in meter_flags:
t = UTIL.ofp_meter_flags_from_user(flag)
f = t if t != flag else None
if f is None:
LOG.error('Unknown meter flag: %s', flag)
continue
flags |= f
meter_id = UTIL.ofp_meter_from_user(meter.get('meter_id', 0))
bands = []
for band in meter.get('bands', []):
band_type = band.get('type')
rate = str_to_int(band.get('rate', 0))
burst_size = str_to_int(band.get('burst_size', 0))
if band_type == 'DROP':
bands.append(
dp.ofproto_parser.OFPMeterBandDrop(rate, burst_size))
elif band_type == 'DSCP_REMARK':
prec_level = str_to_int(band.get('prec_level', 0))
bands.append(
dp.ofproto_parser.OFPMeterBandDscpRemark(
rate, burst_size, prec_level))
elif band_type == 'EXPERIMENTER':
experimenter = str_to_int(band.get('experimenter', 0))
bands.append(
dp.ofproto_parser.OFPMeterBandExperimenter(
rate, burst_size, experimenter))
else:
LOG.error('Unknown band type: %s', band_type)
meter_mod = dp.ofproto_parser.OFPMeterMod(
dp, cmd, flags, meter_id, bands)
ofctl_utils.send_msg(dp, meter_mod, LOG)
def mod_group_entry(dp, group, cmd):
ofp = dp.ofproto
parser = dp.ofproto_parser
group_type = str(group.get('type', 'ALL'))
t = UTIL.ofp_group_type_from_user(group_type)
group_type = t if t != group_type else None
if group_type is None:
LOG.error('Unknown group type: %s', group.get('type'))
group_id = UTIL.ofp_group_from_user(group.get('group_id', 0))
command_bucket_id = str_to_int(group.get('command_bucket_id', 0))
# Note:
# The list of group property types that are currently defined
# are only OFPGPT_EXPERIMENTER(Experimenter defined).
properties = []
buckets = []
for bucket in group.get('buckets', []):
# get bucket_id in buckets
bucket_id = str_to_int(bucket.get('bucket_id', 0))
# get actions in buckets
bucket_actions = []
for dic in bucket.get('actions', []):
action = to_action(dp, dic)
if action is not None:
bucket_actions.append(action)
# get properties in buckets
bucket_properties = []
for p in bucket.get('properties', []):
group_bp_type = str(p.get('type', 'WEIGHT'))
t = UTIL.ofp_group_bucket_prop_type_from_user(group_bp_type)
group_bp_type = t if t != group_bp_type else ofp.OFPGBPT_WEIGHT
if group_bp_type == ofp.OFPGBPT_WEIGHT:
weight = str_to_int(p.get('weight', 0))
bucket_properties.append(
parser.OFPGroupBucketPropWeight(
type_=group_bp_type, weight=weight))
elif group_bp_type == ofp.OFPGBPT_WATCH_PORT:
watch_port = str_to_int(p.get('watch', dp.ofproto.OFPP_ANY))
bucket_properties.append(
parser.OFPGroupBucketPropWatch(
type_=group_bp_type, watch=watch_port))
elif group_bp_type == ofp.OFPGBPT_WATCH_GROUP:
watch_group = str_to_int(p.get('watch', dp.ofproto.OFPG_ANY))
bucket_properties.append(
parser.OFPGroupBucketPropWatch(
type_=group_bp_type, watch=watch_group))
elif group_bp_type == ofp.OFPGBPT_EXPERIMENTER:
experimenter = p.get('experimenter', 0)
exp_type = p.get('exp_type', 0)
data_type = p.get('data_type', 'ascii')
if data_type not in ['ascii', 'base64']:
LOG.error('Unknown data type: %s', data_type)
data = p.get('data', '')
if data_type == 'base64':
data = base64.b64decode(data)
bucket_properties.append(
parser.OFPGroupBucketPropExperimenter(
type_=group_bp_type, experimenter=experimenter,
exp_type=exp_type, data=data))
else:
LOG.error('Unknown group bucket prop type: %s', p['type'])
# create bucket
bucket = parser.OFPBucket(bucket_id=bucket_id,
actions=bucket_actions,
properties=bucket_properties)
buckets.append(bucket)
group_mod = parser.OFPGroupMod(dp, cmd, group_type, group_id,
command_bucket_id, buckets,
properties)
ofctl_utils.send_msg(dp, group_mod, LOG)
def mod_port_behavior(dp, port_config):
ofp = dp.ofproto
parser = dp.ofproto_parser
port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0))
hw_addr = str(port_config.get('hw_addr'))
config = str_to_int(port_config.get('config', 0))
mask = str_to_int(port_config.get('mask', 0))
properties = port_config.get('properties')
prop = []
for p in properties:
type_ = UTIL.ofp_port_mod_prop_type_from_user(p['type'])
length = None
if type_ == ofp.OFPPDPT_ETHERNET:
advertise = UTIL.ofp_port_features_from_user(p['advertise'])
prop.append(
parser.OFPPortModPropEthernet(type_, length, advertise))
elif type_ == ofp.OFPPDPT_OPTICAL:
prop.append(
parser.OFPPortModPropOptical(
type_, length, p['configure'], p['freq_lmda'],
p['fl_offset'], p['grid_span'], p['tx_pwr']))
elif type_ == ofp.OFPPDPT_EXPERIMENTER:
prop.append(
parser.OFPPortModPropExperimenter(
type_, length, p['experimenter'], p['exp_type'],
p['data']))
else:
LOG.error('Unknown port desc prop type: %s', type_)
port_mod = dp.ofproto_parser.OFPPortMod(
dp, port_no, hw_addr, config, mask, prop)
ofctl_utils.send_msg(dp, port_mod, LOG)
def set_role(dp, role):
r = UTIL.ofp_role_from_user(role.get('role', dp.ofproto.OFPCR_ROLE_EQUAL))
role_request = dp.ofproto_parser.OFPRoleRequest(dp, r, None, 0)
ofctl_utils.send_msg(dp, role_request, LOG)
# NOTE(jkoelker) Alias common functions
send_experimenter = ofctl_utils.send_experimenter
|
kosgroup/odoo
|
refs/heads/10.0
|
addons/website_hr_recruitment/__manifest__.py
|
22
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Online Jobs',
'category': 'Website',
'version': '1.0',
'summary': 'Job Descriptions And Application Forms',
'description': """
Odoo Contact Form
====================
""",
'depends': ['website_partner', 'hr_recruitment', 'website_mail', 'website_form'],
'data': [
'security/ir.model.access.csv',
'security/website_hr_recruitment_security.xml',
'data/config_data.xml',
'views/website_hr_recruitment_templates.xml',
'views/hr_recruitment_views.xml',
],
'demo': [
'data/hr_job_demo.xml',
],
'installable': True,
}
|
waseem18/oh-mainline
|
refs/heads/master
|
vendor/packages/south/south/tests/non_managed/models.py
|
148
|
# -*- coding: UTF-8 -*-
"""
An app with a model that is not managed, used to test that South does not
try to manage it in any way
"""
from django.db import models
class Legacy(models.Model):
name = models.CharField(max_length=10)
size = models.IntegerField()
class Meta:
db_table = "legacy_table"
managed = False
|
community-ssu/telepathy-gabble
|
refs/heads/master
|
tests/twisted/muc/send-error.py
|
1
|
"""
Test incoming error messages in MUC channels.
"""
import dbus
from gabbletest import exec_test
from servicetest import EventPattern
import constants as cs
import ns
from mucutil import join_muc_and_check
def test(q, bus, conn, stream):
conn.Connect()
q.expect('dbus-signal', signal='StatusChanged',
args=[cs.CONN_STATUS_CONNECTED, cs.CSR_REQUESTED])
muc = 'chat@conf.localhost'
_, text_chan, test_handle, bob_handle = \
join_muc_and_check(q, bus, conn, stream, muc)
# Suppose we don't have permission to speak in this MUC. Send a message to
# the channel, and have the MUC reject it as unauthorized.
content = u"hi r ther ne warez n this chanel?"
greeting = [
dbus.Dictionary({ }, signature='sv'),
{ 'content-type': 'text/plain',
'content': content,
}
]
sent_token = dbus.Interface(text_chan, cs.CHANNEL_IFACE_MESSAGES) \
.SendMessage(greeting, dbus.UInt32(0))
stream_message, _, _ = q.expect_many(
EventPattern('stream-message'),
EventPattern('dbus-signal', signal='Sent'),
EventPattern('dbus-signal', signal='MessageSent'),
)
# computer says no
elem = stream_message.stanza
elem['from'] = 'chat@conf.localhost'
elem['to'] = 'chat@conf.localhost/test'
elem['type'] = 'error'
error = elem.addElement('error')
error['code'] = '401'
error['type'] = 'auth'
error.addElement((ns.STANZA, 'not-authorized'))
stream.send(elem)
# check that we got a failed delivery report and a SendError
send_error, received, message_received = q.expect_many(
EventPattern('dbus-signal', signal='SendError'),
EventPattern('dbus-signal', signal='Received'),
EventPattern('dbus-signal', signal='MessageReceived'),
)
PERMISSION_DENIED = 3
err, timestamp, type, text = send_error.args
assert err == PERMISSION_DENIED, send_error.args
# there's no way to tell when the original message was sent from the error stanza
assert timestamp == 0, send_error.args
# Gabble can't determine the type of the original message; see muc/test-muc.py
# assert type == 0, send_error.args
assert text == content, send_error.args
# The Text.Received signal should be a "you're not tall enough" stub
id, timestamp, sender, type, flags, text = received.args
assert sender == 0, received.args
assert type == 4, received.args # Message_Type_Delivery_Report
assert flags == 2, received.args # Non_Text_Content
assert text == '', received.args
# Check that the Messages.MessageReceived signal was a failed delivery report
assert len(message_received.args) == 1, message_received.args
parts = message_received.args[0]
# The delivery report should just be a header, no body.
assert len(parts) == 1, parts
part = parts[0]
# The intended recipient was the MUC, so there's no contact handle
# suitable for being 'message-sender'.
assert 'message-sender' not in part or part['message-sender'] == 0, part
assert part['message-type'] == 4, part # Message_Type_Delivery_Report
assert part['delivery-status'] == 3, part # Delivery_Status_Permanently_Failed
assert part['delivery-error'] == PERMISSION_DENIED, part
assert part['delivery-token'] == sent_token, part
# Check that the included echo is from us, and matches all the keys in the
# message we sent.
assert 'delivery-echo' in part, part
echo = part['delivery-echo']
assert len(echo) == len(greeting), (echo, greeting)
assert echo[0]['message-sender'] == test_handle, echo[0]
assert echo[0]['message-token'] == sent_token, echo[0]
for i in range(0, len(echo)):
for key in greeting[i]:
assert key in echo[i], (i, key, echo)
assert echo[i][key] == greeting[i][key], (i, key, echo, greeting)
if __name__ == '__main__':
exec_test(test)
|
popoffka/ponydraw
|
refs/heads/master
|
server/server.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# © 2012 Aleksejs Popovs <me@popoffka.ru>
# Licensed under MIT License. See ../LICENSE for more info.
import sys, json
import config
import argparse, os
from twisted.internet import reactor
from twisted.python import log
from autobahn.websocket import WebSocketServerFactory, WebSocketServerProtocol, listenWS
from hashlib import sha256
from cgi import escape
from entities.room import DrawingRoom
from storage.FileStorage import FileStorage
class PonyDrawServerProtocol(WebSocketServerProtocol):
def __init__(self):
self.room = None
self.name = None
def getChatMessage(self, text):
msg = {}
msg['type'] = 'chat'
msg['name'] = self.name
msg['msg'] = text
return json.dumps(msg)
def getNewLayerFailMessage(self):
msg = {}
msg['type'] = 'announcement'
msg['msg'] = 'Could not create new layer.'
return json.dumps(msg)
def onOpen(self):
self.factory.register(self)
def onMessage(self, raw, binary):
if binary:
return
message = json.loads(raw)
for i in message:
if isinstance(message[i], basestring):
message[i] = escape(message[i])
if (message['type'] != 'line'):
print message
if (message['type'] == 'register'):
if self.factory.canJoin(message['name'], sha256(message['password']).hexdigest(), message['room']):
self.room = self.factory.rooms[message['room']]
self.name = message['name']
self.factory.broadcast(self.room.setOnline(self.name, True), self.room.name, self)
self.sendMessage(json.dumps(dict(self.room.getRoomInfo(self.name).items() + {'name': self.name}.items())))
for i in self.room.layers:
msg = {}
msg['type'] = 'bunch'
msg['contents'] = []
for j in self.room.layers[i].history:
msg['contents'].append(json.loads(j))
self.sendMessage(json.dumps(msg))
else:
self.sendMessage(self.factory.getAuthFailMessage(message['room']))
self.sendClose()
elif (message['type'] == 'chat'):
if (not self.room):
return
if message['msg'].startswith('/'):
command = message['msg'][1:].split()[0]
options = (len(message['msg'][1:].split()) > 1 and message['msg'][1:].split()[1]) or ''
if (command == 'me'):
msg = {}
msg['type'] = 'announcement'
msg['msg'] = '* ' + self.name + ' ' + options
self.factory.broadcast(json.dumps(msg), self.room.name, None)
elif (command == 'passwd'):
self.room.changePassword(self.name, sha256(options).hexdigest())
msg = {}
msg['type'] = 'announcement'
msg['msg'] = 'Your password was successfully changed.'
self.sendMessage(json.dumps(msg))
else:
self.factory.broadcast(self.getChatMessage(message['msg']), self.room.name, None)
elif (message['type'] == 'line'):
if (self.room) and (self.room.canDrawOn(self.name, message['opts']['layer'])):
self.room.addToHistory(raw, message['opts']['layer'])
self.factory.broadcast(raw, self.room.name, self)
elif (message['type'] == 'newLayer'):
res = self.room.addLayer(self.name)
if res:
self.factory.broadcastDynamic(lambda user: json.dumps(dict(res.getDescription(user).items() + {'type': 'newLayer'}.items())), self.room.name, None)
else:
self.sendMessage(self.getNewLayerFailMessage())
elif (message['type'] == 'removeLayer'):
res = self.room.removeLayer(message['id'], self.name)
if res:
self.factory.broadcastDynamic(lambda user: json.dumps(dict(res.getDescription(user).items() + {'type': 'removeLayer', 'who': self.name}.items())), self.room.name, None)
elif (message['type'] == 'swapLayers'):
res = self.room.swapLayers(message['aId'], message['bId'], self.name)
if res:
for i in res:
self.factory.broadcast(json.dumps(i), self.room.name, None)
self.factory.broadcast(raw, self.room.name, None)
def connectionLost(self, reason):
WebSocketServerProtocol.connectionLost(self, reason)
if self.room:
self.factory.broadcast(self.room.setOnline(self.name, False), self.room.name, self)
if self.room.empty():
self.factory.roomEmpty(self.room.name)
self.factory.unregister(self)
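# Server factory: tracks connected clients and active drawing rooms, loading
# rooms from storage on demand and saving them back when they empty or when
# the factory shuts down.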
class PonyDrawServerFactory(WebSocketServerFactory):
protocol = PonyDrawServerProtocol
def __init__(self, url, storage, storageArgs):
WebSocketServerFactory.__init__(self, url)
self.clients = []
self.rooms = {}
self.storage = storage()
self.storage.open(*storageArgs)
def stopFactory(self):
for room in self.rooms:
self.storage.saveRoom(self.rooms[room])
for client in self.clients:
self.clients.remove(client)
self.storage.close()
def register(self, client):
if not client in self.clients:
print "registered client " + client.peerstr
self.clients.append(client)
def unregister(self, client):
if client in self.clients:
print "unregistered client " + client.peerstr
self.clients.remove(client)
def broadcast(self, msg, room, exc):
for c in self.clients:
if (c.room != None) and (c.room.name == room) and (c != exc):
c.sendMessage(msg)
def broadcastDynamic(self, msg, room, exc):
for c in self.clients:
if (c.room != None) and (c.room.name == room) and (c != exc):
c.sendMessage(msg(c.name))
def canJoin(self, user, password, room):
if room not in self.rooms:
if self.storage.roomInStorage(room):
self.rooms[room] = self.storage.getRoom(room)
else:
self.rooms[room] = DrawingRoom(room, (user, password), 1024, 768)
if not self.rooms[room].getUser(user):
self.rooms[room].addUser((user, password))
return self.rooms[room].getUser(user)['password'] == password
def roomEmpty(self, room):
if (self.storage.isOpen):
self.storage.saveRoom(self.rooms[room])
del self.rooms[room]
def getAuthFailMessage(self, room):
msg = {}
msg['type'] = 'joinFailure'
msg['room'] = room
msg['error'] = 'Wrong password'
return json.dumps(msg)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Server component for PonyDraw, a WebSockets-based multi-user drawing application.')
parser.add_argument('-P', '--pid', help='store the PID of the running server in this file')
args = parser.parse_args()
log.startLogging(config.logFile)
if not args.pid is None:
try:
pidf = open(args.pid, 'w')
pidf.write(str(os.getpid()))
pidf.close()
except:
print 'Couldn\'t store the PID'
factory = PonyDrawServerFactory(config.wsListenIP, config.storage, config.storageArgs)
listenWS(factory)
reactor.run()
if not args.pid is None:
try:
os.remove(args.pid)
except:
print 'Couldn\'t remove the PID'
|
mPowering/django-orb
|
refs/heads/master
|
docs/settings.py
|
1
|
# Django settings for docs project.
# import source code dir
import os
import sys
sys.path.insert(0, os.getcwd())
sys.path.insert(0, os.path.join(os.getcwd(), os.pardir))
SITE_ID = 303
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SECRET_KEY = "foobar"
DATABASES = {"default": {
"NAME": ":memory:",
"ENGINE": "django.db.backends.sqlite3",
"USER": '',
"PASSWORD": '',
"PORT": '',
}}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'orb',
)
ORB_RESOURCE_DESCRIPTION_MAX_WORDS = 150
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
#'URL': 'http://127.0.0.1:8983/solr'
# ...or for multicore...
'URL': 'http://127.0.0.1:8983/solr/mpowering',
}
}
|
yuwei0927/python
|
refs/heads/master
|
读写文件练习.py
|
1
|
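# Practice script: split the dialogue transcript record.txt into per-speaker
# files. Lines spoken by '小甲鱼' are collected into boy_N.txt and lines by
# '小客服' into girl_N.txt; a line starting with '======' closes the current
# pair of files and starts the next one.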
f=open('record.txt')
boy=[]
girl=[]
count=1
for each_line in f:
if each_line[:6] != '======':
(role, line_spoken) = each_line.split(':',1)
if role == '小甲鱼':
boy.append(line_spoken)
if role == '小客服':
girl.append(line_spoken)
else:
file_name_boy = 'boy_' + str(count) + '.txt'
file_name_girl = 'girl_' + str(count) + '.txt'
boyFile = open(file_name_boy, 'w')
girlFile = open(file_name_girl, 'w')
boyFile.writelines(boy)
girlFile.writelines(girl)
boyFile.close()
girlFile.close()
boy=[]
girl=[]
count += 1
f.close()
|
takeflight/django
|
refs/heads/master
|
django/template/loaders/locmem.py
|
5
|
"""
Wrapper for loading templates from a plain Python dict.
"""
from django.template.base import TemplateDoesNotExist
from .base import Loader as BaseLoader
class Loader(BaseLoader):
is_usable = True
def __init__(self, templates_dict):
self.templates_dict = templates_dict
def load_template_source(self, template_name, template_dirs=None):
try:
return self.templates_dict[template_name], template_name
except KeyError:
raise TemplateDoesNotExist(template_name)
|
liucode/tempest-master
|
refs/heads/master
|
tools/colorizer.py
|
42
|
#!/usr/bin/env python
# Copyright (c) 2013, Nebula, Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Colorizer Code is borrowed from Twisted:
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Display a subunit stream through a colorized unittest test runner."""
import heapq
import sys
import unittest
import subunit
import testtools
class _AnsiColorizer(object):
"""
A colorizer is an object that loosely wraps around a stream, allowing
callers to write text to the stream in a particular color.
Colorizer classes must implement C{supported()} and C{write(text, color)}.
"""
_colors = dict(black=30, red=31, green=32, yellow=33,
blue=34, magenta=35, cyan=36, white=37)
def __init__(self, stream):
self.stream = stream
def supported(cls, stream=sys.stdout):
"""
A class method that returns True if the current platform supports
coloring terminal output using this method. Returns False otherwise.
"""
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
except ImportError:
return False
else:
try:
try:
return curses.tigetnum("colors") > 2
except curses.error:
curses.setupterm()
return curses.tigetnum("colors") > 2
except Exception:
# guess false in case of error
return False
supported = classmethod(supported)
def write(self, text, color):
"""
Write the given text to the stream in the given color.
@param text: Text to be written to the stream.
@param color: A string label for a color. e.g. 'red', 'white'.
"""
color = self._colors[color]
self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
class _Win32Colorizer(object):
"""
See _AnsiColorizer docstring.
"""
def __init__(self, stream):
import win32console
red, green, blue, bold = (win32console.FOREGROUND_RED,
win32console.FOREGROUND_GREEN,
win32console.FOREGROUND_BLUE,
win32console.FOREGROUND_INTENSITY)
self.stream = stream
self.screenBuffer = win32console.GetStdHandle(
win32console.STD_OUT_HANDLE)
self._colors = {'normal': red | green | blue,
'red': red | bold,
'green': green | bold,
'blue': blue | bold,
'yellow': red | green | bold,
'magenta': red | blue | bold,
'cyan': green | blue | bold,
'white': red | green | blue | bold}
def supported(cls, stream=sys.stdout):
try:
import win32console
screenBuffer = win32console.GetStdHandle(
win32console.STD_OUT_HANDLE)
except ImportError:
return False
import pywintypes
try:
screenBuffer.SetConsoleTextAttribute(
win32console.FOREGROUND_RED |
win32console.FOREGROUND_GREEN |
win32console.FOREGROUND_BLUE)
except pywintypes.error:
return False
else:
return True
supported = classmethod(supported)
def write(self, text, color):
color = self._colors[color]
self.screenBuffer.SetConsoleTextAttribute(color)
self.stream.write(text)
self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
class _NullColorizer(object):
"""
See _AnsiColorizer docstring.
"""
def __init__(self, stream):
self.stream = stream
def supported(cls, stream=sys.stdout):
return True
supported = classmethod(supported)
def write(self, text, color):
self.stream.write(text)
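# Map a test's elapsed time to a color: over 1 second is red, over 0.25
# seconds is yellow, anything faster is green.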
def get_elapsed_time_color(elapsed_time):
if elapsed_time > 1.0:
return 'red'
elif elapsed_time > 0.25:
return 'yellow'
else:
return 'green'
class NovaTestResult(testtools.TestResult):
def __init__(self, stream, descriptions, verbosity):
super(NovaTestResult, self).__init__()
self.stream = stream
self.showAll = verbosity > 1
self.num_slow_tests = 10
self.slow_tests = [] # this is a fixed-sized heap
self.colorizer = None
# NOTE(vish): reset stdout for the terminal check
stdout = sys.stdout
sys.stdout = sys.__stdout__
for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
if colorizer.supported():
self.colorizer = colorizer(self.stream)
break
sys.stdout = stdout
self.start_time = None
self.last_time = {}
self.results = {}
self.last_written = None
def _writeElapsedTime(self, elapsed):
color = get_elapsed_time_color(elapsed)
self.colorizer.write(" %.2f" % elapsed, color)
def _addResult(self, test, *args):
try:
name = test.id()
except AttributeError:
name = 'Unknown.unknown'
test_class, test_name = name.rsplit('.', 1)
elapsed = (self._now() - self.start_time).total_seconds()
item = (elapsed, test_class, test_name)
if len(self.slow_tests) >= self.num_slow_tests:
heapq.heappushpop(self.slow_tests, item)
else:
heapq.heappush(self.slow_tests, item)
self.results.setdefault(test_class, [])
self.results[test_class].append((test_name, elapsed) + args)
self.last_time[test_class] = self._now()
self.writeTests()
def _writeResult(self, test_name, elapsed, long_result, color,
short_result, success):
if self.showAll:
self.stream.write(' %s' % str(test_name).ljust(66))
self.colorizer.write(long_result, color)
if success:
self._writeElapsedTime(elapsed)
self.stream.writeln()
else:
self.colorizer.write(short_result, color)
def addSuccess(self, test):
super(NovaTestResult, self).addSuccess(test)
self._addResult(test, 'OK', 'green', '.', True)
def addFailure(self, test, err):
if test.id() == 'process-returncode':
return
super(NovaTestResult, self).addFailure(test, err)
self._addResult(test, 'FAIL', 'red', 'F', False)
def addError(self, test, err):
super(NovaTestResult, self).addError(test, err)
self._addResult(test, 'ERROR', 'red', 'E', False)
def addSkip(self, test, reason=None, details=None):
super(NovaTestResult, self).addSkip(test, reason, details)
self._addResult(test, 'SKIP', 'blue', 'S', True)
def startTest(self, test):
self.start_time = self._now()
super(NovaTestResult, self).startTest(test)
def writeTestCase(self, cls):
if not self.results.get(cls):
return
if cls != self.last_written:
self.colorizer.write(cls, 'white')
self.stream.writeln()
for result in self.results[cls]:
self._writeResult(*result)
del self.results[cls]
self.stream.flush()
self.last_written = cls
def writeTests(self):
time = self.last_time.get(self.last_written, self._now())
if not self.last_written or (self._now() - time).total_seconds() > 2.0:
diff = 3.0
while diff > 2.0:
classes = self.results.keys()
oldest = min(classes, key=lambda x: self.last_time[x])
diff = (self._now() - self.last_time[oldest]).total_seconds()
self.writeTestCase(oldest)
else:
self.writeTestCase(self.last_written)
def done(self):
self.stopTestRun()
def stopTestRun(self):
for cls in list(self.results.iterkeys()):
self.writeTestCase(cls)
self.stream.writeln()
self.writeSlowTests()
def writeSlowTests(self):
# Pare out 'fast' tests
slow_tests = [item for item in self.slow_tests
if get_elapsed_time_color(item[0]) != 'green']
if slow_tests:
slow_total_time = sum(item[0] for item in slow_tests)
slow = ("Slowest %i tests took %.2f secs:"
% (len(slow_tests), slow_total_time))
self.colorizer.write(slow, 'yellow')
self.stream.writeln()
last_cls = None
# sort by name
for elapsed, cls, name in sorted(slow_tests,
key=lambda x: x[1] + x[2]):
if cls != last_cls:
self.colorizer.write(cls, 'white')
self.stream.writeln()
last_cls = cls
self.stream.write(' %s' % str(name).ljust(68))
self._writeElapsedTime(elapsed)
self.stream.writeln()
def printErrors(self):
if self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavor, errors):
for test, err in errors:
self.colorizer.write("=" * 70, 'red')
self.stream.writeln()
self.colorizer.write(flavor, 'red')
self.stream.writeln(": %s" % test.id())
self.colorizer.write("-" * 70, 'red')
self.stream.writeln()
self.stream.writeln("%s" % err)
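# Module entry point: read a subunit stream from stdin and replay it through
# a TextTestRunner that uses NovaTestResult for colorized output (falling
# back to the plain runner on Python <= 2.6).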
test = subunit.ProtocolTestCase(sys.stdin, passthrough=None)
if sys.version_info[0:2] <= (2, 6):
runner = unittest.TextTestRunner(verbosity=2)
else:
runner = unittest.TextTestRunner(verbosity=2, resultclass=NovaTestResult)
if runner.run(test).wasSuccessful():
exit_code = 0
else:
exit_code = 1
sys.exit(exit_code)
|
mkaluza/external_chromium_org
|
refs/heads/kk44
|
third_party/markupsafe/_compat.py
|
390
|
# -*- coding: utf-8 -*-
"""
markupsafe._compat
~~~~~~~~~~~~~~~~~~
Compatibility module for different Python versions.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
PY2 = sys.version_info[0] == 2
if not PY2:
text_type = str
string_types = (str,)
unichr = chr
int_types = (int,)
else:
text_type = unicode
string_types = (str, unicode)
unichr = unichr
int_types = (int, long)
|
korrosivesec/crits
|
refs/heads/master
|
crits/signatures/handlers.py
|
3
|
import datetime
import hashlib
import json
import HTMLParser
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from mongoengine.base import ValidationError
from crits.core.crits_mongoengine import EmbeddedSource, create_embedded_source, json_handler
from crits.core.handlers import build_jtable, jtable_ajax_list, jtable_ajax_delete
from crits.core.class_mapper import class_from_id, class_from_type
from crits.core.handlers import csv_export
from crits.core.user_tools import is_admin, user_sources, is_user_favorite
from crits.core.user_tools import is_user_subscribed
from crits.notifications.handlers import remove_user_from_notification
from crits.signatures.signature import Signature, SignatureType, SignatureDependency
from crits.services.handlers import run_triage, get_supported_services
def generate_signature_csv(request):
"""
Generate a CSV file of the Signature information
:param request: The request for this CSV.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
response = csv_export(request,Signature)
return response
def get_id_from_link_and_version(link, version):
"""
Get the ObjectId from a link_id and version number.
:param link: The link_id of the Signature.
:type link: str
:param version: The version number of the Signature.
:type version: int
:returns: None, ObjectId
"""
signature = Signature.objects(link_id=link, version=version).only('id').first()
if not signature:
return None
else:
return signature.id
def get_signature_details(_id, analyst):
"""
Generate the data to render the Signature details template.
:param _id: The ObjectId of the Signature to get details for.
:type _id: str
:param analyst: The user requesting this information.
:type analyst: str
:returns: template (str), arguments (dict)
"""
template = None
sources = user_sources(analyst)
if not _id:
signature = None
else:
signature = Signature.objects(id=_id, source__name__in=sources).first()
if not signature:
template = "error.html"
args = {'error': 'signature not yet available or you do not have access to view it.'}
else:
signature.sanitize("%s" % analyst)
# remove pending notifications for user
remove_user_from_notification("%s" % analyst, signature.id, 'Signature')
# subscription
subscription = {
'type': 'Signature',
'id': signature.id,
'subscribed': is_user_subscribed("%s" % analyst,
'Signature', signature.id),
}
#objects
objects = signature.sort_objects()
#relationships
relationships = signature.sort_relationships("%s" % analyst, meta=True)
# relationship
relationship = {
'type': 'Signature',
'value': signature.id
}
versions = len(Signature.objects(link_id=signature.link_id).only('id'))
#comments
comments = {'comments': signature.get_comments(),
'url_key': _id}
#screenshots
screenshots = signature.get_screenshots(analyst)
# favorites
favorite = is_user_favorite("%s" % analyst, 'Signature', signature.id)
# services
service_list = get_supported_services('Signature')
# analysis results
service_results = signature.get_analysis_results()
args = {'service_list': service_list,
'objects': objects,
'relationships': relationships,
'comments': comments,
'favorite': favorite,
'relationship': relationship,
"subscription": subscription,
"screenshots": screenshots,
"versions": versions,
"service_results": service_results,
"signature": signature}
return template, args
def generate_signature_versions(_id):
"""
Generate a list of available versions for this Signature.
:param _id: The ObjectId of the Signature to generate versions for.
:type _id: str
:returns: list
"""
signature = Signature.objects(id=_id).only('link_id').first()
if not signature:
return []
else:
versions = []
rvs = Signature.objects(link_id=signature.link_id).only('id',
'title',
'version',
'data')
for rv in rvs:
link = reverse('crits.signatures.views.signature_detail',
args=(rv.id,))
versions.append({'title': rv.title,
'version': rv.version,
'data': rv.data,
'link': link})
return versions
def generate_signature_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
obj_type = Signature
type_ = "signature"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type,
details_url,
details_url_key,
request,
includes=fields)
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
if jtable_ajax_delete(obj_type, request):
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Signature",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits.%ss.views.%ss_listing' % (type_,
type_),
args=('jtlist',)),
'deleteurl': reverse('crits.%ss.views.%ss_listing' % (type_,
type_),
args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': mapper['jtopts_fields'],
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
jtable = build_jtable(jtopts, request)
jtable['toolbar'] = [
{
'tooltip': "'All Signatures'",
'text': "'All'",
'click': "function () {$('#signature_listing').jtable('load', {'refresh': 'yes'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'New Signature'",
'text': "'New'",
'click': "function () {$('#signature_listing').jtable('load', {'refresh': 'yes', 'status': 'New'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'In Progress Signatures'",
'text': "'In Progress'",
'click': "function () {$('#signature_listing').jtable('load', {'refresh': 'yes', 'status': 'In Progress'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Analyzed Signatures'",
'text': "'Analyzed'",
'click': "function () {$('#signature_listing').jtable('load', {'refresh': 'yes', 'status': 'Analyzed'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Deprecated Signatures'",
'text': "'Deprecated'",
'click': "function () {$('#signature_listing').jtable('load', {'refresh': 'yes', 'status': 'Deprecated'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Add Signature'",
'text': "'Add Signature'",
'click': "function () {$('#new-signature').click()}",
},
]
if option == "inline":
return render_to_response("jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button' : '%s_tab' % type_},
RequestContext(request))
else:
return render_to_response("%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
RequestContext(request))
def handle_signature_file(data, source_name, user=None,
description=None, title=None, data_type=None,
data_type_min_version=None, data_type_max_version=None,
data_type_dependency=None, link_id=None, method='', reference='',
copy_rels=False, bucket_list=None, ticket=None):
"""
Add Signature.
:param data: The data of the Signature.
:type data: str
:param source_name: The source which provided this Signature.
:type source_name: str,
:class:`crits.core.crits_mongoengine.EmbeddedSource`,
list of :class:`crits.core.crits_mongoengine.EmbeddedSource`
:param user: The user adding the Signature.
:type user: str
:param description: Description of the Signature.
:type description: str
:param title: Title of the Signature.
:type title: str
:param data_type: Datatype of the Signature.
:type data_type: str
:param data_type_min_version: Datatype tool minimum version.
:type data_type_min_version: str
:param data_type_max_version: Datatype tool maximum version.
:type data_type_max_version: str
:param data_type_dependency: Datatype tool dependencies to be run.
:type data_type_dependency: list
:param link_id: LinkId to tie this to another Signature as a new version.
:type link_id: str
:param method: The method of acquiring this Signature.
:type method: str
:param reference: A reference to the source of this Signature.
:type reference: str
:param copy_rels: Copy relationships from the previous version to this one.
:type copy_rels: bool
:param bucket_list: Bucket(s) to add to this Signature
:type bucket_list: str(comma separated) or list.
:param ticket: Ticket(s) to add to this Signature
:type ticket: str(comma separated) or list.
:returns: dict with keys:
'success' (boolean),
'message' (str),
'_id' (str) if successful.
"""
if not data or not title or not data_type:
status = {
'success': False,
'message': 'No data object, title, or data type passed in'
}
return status
if not source_name:
return {"success" : False, "message" : "Missing source information."}
rdt = SignatureType.objects(name=data_type).first()
if not rdt:
status = {
'success': False,
'message': 'Invalid data type passed in'
}
return status
if len(data) <= 0:
status = {
'success': False,
'message': 'Data length <= 0'
}
return status
# generate md5 and timestamp
md5 = hashlib.md5(data).hexdigest()
timestamp = datetime.datetime.now()
# generate signature
signature = Signature()
signature.created = timestamp
signature.description = description
signature.md5 = md5
signature.data = data
signature.title = title
signature.data_type = data_type
signature.data_type_min_version = data_type_min_version
signature.data_type_max_version = data_type_max_version
if data_type_dependency:
if type(data_type_dependency) == unicode:
data_type_dependency = data_type_dependency.split(",")
for item in data_type_dependency:
if item:
item = item.strip()
signature.data_type_dependency.append(str(item))
else:
data_type_dependency = []
# generate new source information and add to sample
if isinstance(source_name, basestring) and len(source_name) > 0:
source = create_embedded_source(source_name,
date=timestamp,
method=method,
reference=reference,
analyst=user)
# this will handle adding a new source, or an instance automatically
signature.add_source(source)
elif isinstance(source_name, EmbeddedSource):
signature.add_source(source_name, method=method, reference=reference)
elif isinstance(source_name, list) and len(source_name) > 0:
for s in source_name:
if isinstance(s, EmbeddedSource):
signature.add_source(s, method=method, reference=reference)
signature.version = len(Signature.objects(link_id=link_id)) + 1
if link_id:
signature.link_id = link_id
if copy_rels:
rd2 = Signature.objects(link_id=link_id).first()
if rd2:
if len(rd2.relationships):
signature.save(username=user)
signature.reload()
for rel in rd2.relationships:
# Get object to relate to.
rel_item = class_from_id(rel.rel_type, rel.object_id)
if rel_item:
signature.add_relationship(rel_item,
rel.relationship,
rel_date=rel.relationship_date,
analyst=user)
if bucket_list:
signature.add_bucket_list(bucket_list, user)
if ticket:
signature.add_ticket(ticket, user)
# save signature
signature.save(username=user)
signature.reload()
status = {
'success': True,
'message': 'Uploaded signature',
'_id': signature.id,
'object': signature
}
return status
def update_signature_type(type_, id_, data_type, user, **kwargs):
"""
Update the Signature data type.
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: ObjectId of the Signature to update.
:type id_: str
:param data_type: The data type to set.
:type data_type: str
:param user: The user updating the data type.
:type user: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
klass = class_from_type(type_)
if not klass:
return {'success': False, 'message': 'Could not find object.'}
if hasattr(klass, 'source'):
sources = user_sources(user)
obj = klass.objects(id=id_, source__name__in=sources).first()
else:
obj = klass.objects(id=id_).first()
if not obj:
return {'success': False, 'message': 'Could not find object.'}
signature = Signature.objects(id=id_).first()
data_type = SignatureType.objects(name=data_type).first()
if not data_type:
return None
else:
signature.data_type = data_type.name
try:
signature.save(username=user)
return {'success': True}
except ValidationError, e:
return {'success': False, 'message': str(e)}
def delete_signature_dependency(_id, username=None):
"""
Delete Signature Dependency from CRITs.
:param _id: The ObjectId of the signature dependency to delete.
:type _id: str
:param username: The user deleting this Signature dependency.
:type username: str
:returns: dict with key "success" (boolean)
"""
if is_admin(username):
signature_dependency = SignatureDependency.objects(id=_id).first()
if signature_dependency:
signature_dependency.delete(username=username)
return {'success': True}
else:
return {'success': False}
else:
return {'success': False}
def delete_signature(_id, username=None):
"""
Delete Signature from CRITs.
:param _id: The ObjectId of the Signature to delete.
:type _id: str
:param username: The user deleting this Signature.
:type username: str
:returns: bool
"""
if is_admin(username):
signature = Signature.objects(id=_id).first()
if signature:
signature.delete(username=username)
return True
else:
return False
else:
return False
def add_new_signature_dependency(data_type, analyst):
"""
Add a new signature dependency to CRITs.
:param data_type: The new dependency to add.
:type data_type: str
:param analyst: The user adding the dependency.
:type analyst: str
:return: bool
"""
if not data_type:
return False
data_type = str(data_type).strip()
try:
signature_dependency = SignatureDependency.objects(name=data_type).first()
if signature_dependency:
return False
signature_dependency = SignatureDependency()
signature_dependency.name = data_type
signature_dependency.save(username=analyst)
return True
except ValidationError:
return False
def add_new_signature_type(data_type, analyst):
"""
Add a new Signature datatype to CRITs.
:param data_type: The new datatype to add.
:type data_type: str
:param analyst: The user adding the new datatype.
:type analyst: str
:returns: bool
"""
data_type = data_type.strip()
try:
signature_type = SignatureType.objects(name=data_type).first()
if signature_type:
return False
signature_type = SignatureType()
signature_type.name = data_type
signature_type.save(username=analyst)
return True
except ValidationError:
return False
def update_dependency(type_, id_, dep, user, append=False, **kwargs):
"""
Change the dependencies needed for a signature
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:param dep: The new dependency list (comma separated).
:type dep: str
:param user: The user setting the dependency.
:type user: str
:param append: Should be appended to dependency list?
:type append: boolean
:returns: dict with keys "success" (boolean) and "message" (str)
"""
klass = class_from_type(type_)
if not klass:
return {'success': False, 'message': 'Could not find object.'}
if hasattr(klass, 'source'):
sources = user_sources(user)
obj = klass.objects(id=id_, source__name__in=sources).first()
else:
obj = klass.objects(id=id_).first()
if not obj:
return {'success': False, 'message': 'Could not find object.'}
# Have to unescape the submitted data. unescape() converts HTML
# entities such as &lt; back into their literal characters.
h = HTMLParser.HTMLParser()
data_type_dependency = h.unescape(dep)
try:
deps = data_type_dependency.split(',')
if append is False:
del obj.data_type_dependency[:]
for item in deps:
item = item.strip()
item = str(item)
if item:
add_new_signature_dependency(item, user)
obj.data_type_dependency.append(item)
obj.save(username=user)
return {'success': True, 'message': "Data type dependency set."}
except ValidationError, e:
return {'success': False, 'message': e}
def update_min_version(type_, id_, data_type_min_version, user, **kwargs):
"""
Change the min version of the data tool
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:param data_type_min_version: The new min version to use.
:type data_type_min_version: str
:param user: The user setting the min version.
:type user: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
klass = class_from_type(type_)
if not klass:
return {'success': False, 'message': 'Could not find object.'}
if hasattr(klass, 'source'):
sources = user_sources(user)
obj = klass.objects(id=id_, source__name__in=sources).first()
else:
obj = klass.objects(id=id_).first()
if not obj:
return {'success': False, 'message': 'Could not find object.'}
# Have to unescape the submitted data. unescape() converts HTML
# entities such as &lt; back into their literal characters.
h = HTMLParser.HTMLParser()
data_type_min_version = h.unescape(data_type_min_version)
try:
obj.data_type_min_version = data_type_min_version
obj.save(username=user)
return {'success': True, 'message': "Data type min version set."}
except ValidationError, e:
return {'success': False, 'message': e}
def update_max_version(type_, id_, data_type_max_version, user, **kwargs):
"""
Change the max version of the data tool
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:param data_type_max_version: The new max version to use.
:type data_type_max_version: str
:param user: The user setting the max version.
:type user: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
klass = class_from_type(type_)
if not klass:
return {'success': False, 'message': 'Could not find object.'}
if hasattr(klass, 'source'):
sources = user_sources(user)
obj = klass.objects(id=id_, source__name__in=sources).first()
else:
obj = klass.objects(id=id_).first()
if not obj:
return {'success': False, 'message': 'Could not find object.'}
# Have to unescape the submitted data. unescape() converts HTML
# entities such as &lt; back into their literal characters.
h = HTMLParser.HTMLParser()
data_type_max_version = h.unescape(data_type_max_version)
try:
obj.data_type_max_version = data_type_max_version
obj.save(username=user)
return {'success': True, 'message': "Data type max version set."}
except ValidationError, e:
return {'success': False, 'message': e}
def get_dependency_autocomplete(term):
"""
Get existing dependencies to autocomplete.
:param term: The current term (string) to look for autocomplete options.
:type term: str
:returns: list
"""
results = SignatureDependency.objects(name__istartswith=term)
deps = [b.name for b in results]
return HttpResponse(json.dumps(deps, default=json_handler),
content_type='application/json')
def update_signature_data(type_, id_, data, user, **kwargs):
"""
Change signature data for the current version
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:param data: The new signature value to use.
:type data: str
:param user: The user setting the data value.
:type user: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
klass = class_from_type(type_)
if not klass:
return {'success': False, 'message': 'Could not find object.'}
if hasattr(klass, 'source'):
sources = user_sources(user)
obj = klass.objects(id=id_, source__name__in=sources).first()
else:
obj = klass.objects(id=id_).first()
if not obj:
return {'success': False, 'message': 'Could not find object.'}
# Have to unescape the submitted data. unescape() converts HTML
# entities such as &lt; back into their literal characters.
h = HTMLParser.HTMLParser()
data = h.unescape(data)
try:
obj.data = data
obj.save(username=user)
return {'success': True, 'message': "Signature value updated."}
except ValidationError, e:
return {'success': False, 'message': e}
def update_title(type_, id_, title, user, **kwargs):
"""
Change the signature title for the current version
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:param title: The new signature title to use.
:type title: str
:param user: The user setting the title.
:type user: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
klass = class_from_type(type_)
if not klass:
return {'success': False, 'message': 'Could not find object.'}
if hasattr(klass, 'source'):
sources = user_sources(user)
obj = klass.objects(id=id_, source__name__in=sources).first()
else:
obj = klass.objects(id=id_).first()
if not obj:
return {'success': False, 'message': 'Could not find object.'}
# Have to unescape the submitted data. unescape() converts HTML
# entities such as &lt; back into their literal characters.
h = HTMLParser.HTMLParser()
data = h.unescape(title)
try:
obj.title = data
obj.save(username=user)
return {'success': True, 'message': "Signature title updated."}
except ValidationError, e:
return {'success': False, 'message': e}
|
liyun074/gooDay
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/common_test.py
|
2542
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
def test_Valid(self):
"""Test that sorting works on a valid graph with one possible order."""
graph = {
'a': ['b', 'c'],
'b': [],
'c': ['d'],
'd': ['b'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertEqual(
gyp.common.TopologicallySorted(graph.keys(), GetEdge),
['a', 'c', 'd', 'b'])
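# Illustrative note (not in the original test): GetEdge returns each node's
# dependencies, and the sort places a node before the nodes it depends on.
# Here 'a' must precede 'b' and 'c', 'c' must precede 'd', and 'd' must
# precede 'b', which forces the single order ['a', 'c', 'd', 'b'].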
def test_Cycle(self):
"""Test that an exception is thrown on a cyclic graph."""
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertRaises(
gyp.common.CycleError, gyp.common.TopologicallySorted,
graph.keys(), GetEdge)
class TestGetFlavor(unittest.TestCase):
"""Test that gyp.common.GetFlavor works as intended"""
original_platform = ''
def setUp(self):
self.original_platform = sys.platform
def tearDown(self):
sys.platform = self.original_platform
def assertFlavor(self, expected, argument, param):
sys.platform = argument
self.assertEqual(expected, gyp.common.GetFlavor(param))
def test_platform_default(self):
self.assertFlavor('freebsd', 'freebsd9' , {})
self.assertFlavor('freebsd', 'freebsd10', {})
self.assertFlavor('openbsd', 'openbsd5' , {})
self.assertFlavor('solaris', 'sunos5' , {})
self.assertFlavor('solaris', 'sunos' , {})
self.assertFlavor('linux' , 'linux2' , {})
self.assertFlavor('linux' , 'linux3' , {})
def test_param(self):
self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'})
if __name__ == '__main__':
unittest.main()
|
bowlofstew/changes
|
refs/heads/master
|
tests/changes/api/serializer/models/test_logchunk.py
|
3
|
from datetime import datetime
from uuid import UUID
from changes.api.serializer import serialize
from changes.models import LogSource, LogChunk
def test_simple():
logchunk = LogChunk(
id=UUID(hex='33846695b2774b29a71795a009e8168a'),
source_id=UUID(hex='0b61b8a47ec844918d372d5741187b1c'),
source=LogSource(id=UUID(hex='0b61b8a47ec844918d372d5741187b1c')),
offset=10,
size=7,
text='\x1b[0;36mnotice: foo bar',
date_created=datetime(2013, 9, 19, 22, 15, 22),
)
result = serialize(logchunk)
assert result['id'] == '33846695b2774b29a71795a009e8168a'
assert result['source']['id'] == '0b61b8a47ec844918d372d5741187b1c'
assert result['text'] == '<span class="ansi36">notice: foo bar</span>'
assert result['size'] == 7
assert result['offset'] == 10
|
KyleJamesWalker/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/centurylink/clc_modify_server.py
|
70
|
#!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_modify_server
short_description: modify servers in CenturyLink Cloud.
description:
- An Ansible module to modify servers in CenturyLink Cloud.
version_added: "2.0"
options:
server_ids:
description:
- A list of server Ids to modify.
required: True
cpu:
description:
- How many CPUs to update on the server
required: False
default: None
memory:
description:
- Memory (in GB) to set to the server.
required: False
default: None
anti_affinity_policy_id:
description:
- The anti affinity policy id to be set for a hyper scale server.
This is mutually exclusive with 'anti_affinity_policy_name'
required: False
default: None
anti_affinity_policy_name:
description:
- The anti affinity policy name to be set for a hyper scale server.
This is mutually exclusive with 'anti_affinity_policy_id'
required: False
default: None
alert_policy_id:
description:
- The alert policy id to be associated to the server.
This is mutually exclusive with 'alert_policy_name'
required: False
default: None
alert_policy_name:
description:
- The alert policy name to be associated to the server.
This is mutually exclusive with 'alert_policy_id'
required: False
default: None
state:
description:
- The state to ensure that the provided resources are in.
default: 'present'
required: False
choices: ['present', 'absent']
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
default: True
required: False
choices: [ True, False]
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
- name: set the cpu count to 4 on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
cpu: 4
state: present
- name: set the memory to 8GB on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
memory: 8
state: present
- name: set the anti affinity policy on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
anti_affinity_policy_name: 'aa_policy'
state: present
- name: remove the anti affinity policy on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
anti_affinity_policy_name: 'aa_policy'
state: absent
- name: add the alert policy on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
alert_policy_name: 'alert_policy'
state: present
- name: remove the alert policy on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
alert_policy_name: 'alert_policy'
state: absent
- name: set the memory to 16GB and cpu to 8 cores on a list of servers
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
cpu: 8
memory: 16
state: present
'''
RETURN = '''
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
servers:
description: The list of server objects that are changed
returned: success
type: list
sample:
[
{
"changeInfo":{
"createdBy":"service.wfad",
"createdDate":1438196820,
"modifiedBy":"service.wfad",
"modifiedDate":1438196820
},
"description":"test-server",
"details":{
"alertPolicies":[
],
"cpu":1,
"customFields":[
],
"diskCount":3,
"disks":[
{
"id":"0:0",
"partitionPaths":[
],
"sizeGB":1
},
{
"id":"0:1",
"partitionPaths":[
],
"sizeGB":2
},
{
"id":"0:2",
"partitionPaths":[
],
"sizeGB":14
}
],
"hostName":"",
"inMaintenanceMode":false,
"ipAddresses":[
{
"internal":"10.1.1.1"
}
],
"memoryGB":1,
"memoryMB":1024,
"partitions":[
],
"powerState":"started",
"snapshots":[
],
"storageGB":17
},
"groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
"id":"test-server",
"ipaddress":"10.120.45.23",
"isTemplate":false,
"links":[
{
"href":"/v2/servers/wfad/test-server",
"id":"test-server",
"rel":"self",
"verbs":[
"GET",
"PATCH",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
"id":"086ac1dfe0b6411989e8d1b77c4065f0",
"rel":"group"
},
{
"href":"/v2/accounts/wfad",
"id":"wfad",
"rel":"account"
},
{
"href":"/v2/billing/wfad/serverPricing/test-server",
"rel":"billing"
},
{
"href":"/v2/servers/wfad/test-server/publicIPAddresses",
"rel":"publicIPAddresses",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/credentials",
"rel":"credentials"
},
{
"href":"/v2/servers/wfad/test-server/statistics",
"rel":"statistics"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
"rel":"upcomingScheduledActivities"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
"rel":"scheduledActivities",
"verbs":[
"GET",
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/capabilities",
"rel":"capabilities"
},
{
"href":"/v2/servers/wfad/test-server/alertPolicies",
"rel":"alertPolicyMappings",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
"rel":"antiAffinityPolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
},
{
"href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
"rel":"cpuAutoscalePolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
}
],
"locationId":"UC1",
"name":"test-server",
"os":"ubuntu14_64Bit",
"osType":"Ubuntu 14 64-bit",
"status":"active",
"storageType":"standard",
"type":"standard"
}
]
'''
__version__ = '${version}'
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
from clc import APIFailedResponse
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
class ClcModifyServer:
clc = clc_sdk
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(
requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
self._set_clc_credentials_from_env()
p = self.module.params
cpu = p.get('cpu')
memory = p.get('memory')
state = p.get('state')
if state == 'absent' and (cpu or memory):
return self.module.fail_json(
msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
server_ids = p['server_ids']
if not isinstance(server_ids, list):
return self.module.fail_json(
msg='server_ids needs to be a list of instances to modify: %s' %
server_ids)
(changed, server_dict_array, changed_server_ids) = self._modify_servers(
server_ids=server_ids)
self.module.exit_json(
changed=changed,
server_ids=changed_server_ids,
servers=server_dict_array)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
server_ids=dict(type='list', required=True),
state=dict(default='present', choices=['present', 'absent']),
cpu=dict(),
memory=dict(),
anti_affinity_policy_id=dict(),
anti_affinity_policy_name=dict(),
alert_policy_id=dict(),
alert_policy_name=dict(),
wait=dict(type='bool', default=True)
)
mutually_exclusive = [
['anti_affinity_policy_id', 'anti_affinity_policy_name'],
['alert_policy_id', 'alert_policy_name']
]
return {"argument_spec": argument_spec,
"mutually_exclusive": mutually_exclusive}
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _get_servers_from_clc(self, server_list, message):
"""
Internal function to fetch list of CLC server objects from a list of server ids
:param server_list: The list of server ids
:param message: the error message to throw in case of any error
:return the list of CLC server objects
"""
try:
return self.clc.v2.Servers(server_list).servers
except CLCException as ex:
return self.module.fail_json(msg=message + ': %s' % ex.message)
def _modify_servers(self, server_ids):
"""
modify the servers configuration on the provided list
:param server_ids: list of servers to modify
:return: a list of dictionaries with server information about the servers that were modified
"""
p = self.module.params
state = p.get('state')
server_params = {
'cpu': p.get('cpu'),
'memory': p.get('memory'),
'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
'alert_policy_id': p.get('alert_policy_id'),
'alert_policy_name': p.get('alert_policy_name'),
}
changed = False
server_changed = False
aa_changed = False
ap_changed = False
server_dict_array = []
result_server_ids = []
request_list = []
changed_servers = []
if not isinstance(server_ids, list) or len(server_ids) < 1:
return self.module.fail_json(
msg='server_ids should be a list of servers, aborting')
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
for server in servers:
if state == 'present':
server_changed, server_result = self._ensure_server_config(
server, server_params)
if server_result:
request_list.append(server_result)
aa_changed = self._ensure_aa_policy_present(
server,
server_params)
ap_changed = self._ensure_alert_policy_present(
server,
server_params)
elif state == 'absent':
aa_changed = self._ensure_aa_policy_absent(
server,
server_params)
ap_changed = self._ensure_alert_policy_absent(
server,
server_params)
if server_changed or aa_changed or ap_changed:
changed_servers.append(server)
changed = True
self._wait_for_requests(self.module, request_list)
self._refresh_servers(self.module, changed_servers)
for server in changed_servers:
server_dict_array.append(server.data)
result_server_ids.append(server.id)
return changed, server_dict_array, result_server_ids
def _ensure_server_config(
self, server, server_params):
"""
ensures the server is updated with the provided cpu and memory
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
cpu = server_params.get('cpu')
memory = server_params.get('memory')
changed = False
result = None
if not cpu:
cpu = server.cpu
if not memory:
memory = server.memory
if memory != server.memory or cpu != server.cpu:
if not self.module.check_mode:
result = self._modify_clc_server(
self.clc,
self.module,
server.id,
cpu,
memory)
changed = True
return changed, result
@staticmethod
def _modify_clc_server(clc, module, server_id, cpu, memory):
"""
Modify the memory or CPU of a clc server.
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param server_id: id of the server to modify
:param cpu: the new cpu value
:param memory: the new memory value
:return: the result of CLC API call
"""
result = None
acct_alias = clc.v2.Account.GetAlias()
try:
# Update the server configuration
job_obj = clc.v2.API.Call('PATCH',
'servers/%s/%s' % (acct_alias,
server_id),
json.dumps([{"op": "set",
"member": "memory",
"value": memory},
{"op": "set",
"member": "cpu",
"value": cpu}]))
result = clc.v2.Requests(job_obj)
except APIFailedResponse as ex:
module.fail_json(
msg='Unable to update the server configuration for server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _wait_for_requests(module, request_list):
"""
Block until server provisioning requests are completed.
:param module: the AnsibleModule object
:param request_list: a list of clc-sdk.Request instances
:return: none
"""
wait = module.params.get('wait')
if wait:
# Requests.WaitUntilComplete() returns the count of failed requests
failed_requests_count = sum(
[request.WaitUntilComplete() for request in request_list])
if failed_requests_count > 0:
module.fail_json(
msg='Unable to process modify server request')
@staticmethod
def _refresh_servers(module, servers):
"""
Loop through a list of servers and refresh them.
:param module: the AnsibleModule object
:param servers: list of clc-sdk.Server instances to refresh
:return: none
"""
for server in servers:
try:
server.Refresh()
except CLCException as ex:
module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
server.id, ex.message
))
def _ensure_aa_policy_present(
self, server, server_params):
"""
ensures the server is updated with the provided anti affinity policy
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: changed: Boolean whether a change was made
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
aa_policy_id = server_params.get('anti_affinity_policy_id')
aa_policy_name = server_params.get('anti_affinity_policy_name')
if not aa_policy_id and aa_policy_name:
aa_policy_id = self._get_aa_policy_id_by_name(
self.clc,
self.module,
acct_alias,
aa_policy_name)
current_aa_policy_id = self._get_aa_policy_id_of_server(
self.clc,
self.module,
acct_alias,
server.id)
if aa_policy_id and aa_policy_id != current_aa_policy_id:
self._modify_aa_policy(
self.clc,
self.module,
acct_alias,
server.id,
aa_policy_id)
changed = True
return changed
def _ensure_aa_policy_absent(
self, server, server_params):
"""
ensures the provided anti affinity policy is removed from the server
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: changed: Boolean whether a change was made
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
aa_policy_id = server_params.get('anti_affinity_policy_id')
aa_policy_name = server_params.get('anti_affinity_policy_name')
if not aa_policy_id and aa_policy_name:
aa_policy_id = self._get_aa_policy_id_by_name(
self.clc,
self.module,
acct_alias,
aa_policy_name)
current_aa_policy_id = self._get_aa_policy_id_of_server(
self.clc,
self.module,
acct_alias,
server.id)
if aa_policy_id and aa_policy_id == current_aa_policy_id:
self._delete_aa_policy(
self.clc,
self.module,
acct_alias,
server.id)
changed = True
return changed
@staticmethod
def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
"""
modifies the anti affinity policy of the CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:param aa_policy_id: the anti affinity policy id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('PUT',
'servers/%s/%s/antiAffinityPolicy' % (
acct_alias,
server_id),
json.dumps({"id": aa_policy_id}))
except APIFailedResponse as ex:
module.fail_json(
msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _delete_aa_policy(clc, module, acct_alias, server_id):
"""
Delete the anti affinity policy of the CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('DELETE',
'servers/%s/%s/antiAffinityPolicy' % (
acct_alias,
server_id),
json.dumps({}))
except APIFailedResponse as ex:
module.fail_json(
msg='Unable to delete anti affinity policy to server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
"""
retrieves the anti affinity policy id of the server based on the name of the policy
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param aa_policy_name: the anti affinity policy name
:return: aa_policy_id: The anti affinity policy id
"""
aa_policy_id = None
try:
aa_policies = clc.v2.API.Call(method='GET',
url='antiAffinityPolicies/%s' % alias)
except APIFailedResponse as ex:
return module.fail_json(
msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
alias, str(ex.response_text)))
for aa_policy in aa_policies.get('items'):
if aa_policy.get('name') == aa_policy_name:
if not aa_policy_id:
aa_policy_id = aa_policy.get('id')
else:
return module.fail_json(
msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
if not aa_policy_id:
module.fail_json(
msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
return aa_policy_id
@staticmethod
def _get_aa_policy_id_of_server(clc, module, alias, server_id):
"""
retrieves the anti affinity policy id of the server based on the CLC server id
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param server_id: the CLC server id
:return: aa_policy_id: The anti affinity policy id
"""
aa_policy_id = None
try:
result = clc.v2.API.Call(
method='GET', url='servers/%s/%s/antiAffinityPolicy' %
(alias, server_id))
aa_policy_id = result.get('id')
except APIFailedResponse as ex:
if ex.response_status_code != 404:
module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
server_id, str(ex.response_text)))
return aa_policy_id
def _ensure_alert_policy_present(
self, server, server_params):
"""
ensures the server is updated with the provided alert policy
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: changed: Boolean whether a change was made
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
alert_policy_id = server_params.get('alert_policy_id')
alert_policy_name = server_params.get('alert_policy_name')
if not alert_policy_id and alert_policy_name:
alert_policy_id = self._get_alert_policy_id_by_name(
self.clc,
self.module,
acct_alias,
alert_policy_name)
if alert_policy_id and not self._alert_policy_exists(
server, alert_policy_id):
self._add_alert_policy_to_server(
self.clc,
self.module,
acct_alias,
server.id,
alert_policy_id)
changed = True
return changed
def _ensure_alert_policy_absent(
self, server, server_params):
"""
ensures the alert policy is removed from the server
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: changed: Boolean whether a change was made
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
alert_policy_id = server_params.get('alert_policy_id')
alert_policy_name = server_params.get('alert_policy_name')
if not alert_policy_id and alert_policy_name:
alert_policy_id = self._get_alert_policy_id_by_name(
self.clc,
self.module,
acct_alias,
alert_policy_name)
if alert_policy_id and self._alert_policy_exists(
server, alert_policy_id):
self._remove_alert_policy_to_server(
self.clc,
self.module,
acct_alias,
server.id,
alert_policy_id)
changed = True
return changed
@staticmethod
def _add_alert_policy_to_server(
clc, module, acct_alias, server_id, alert_policy_id):
"""
add the alert policy to CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:param alert_policy_id: the alert policy id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('POST',
'servers/%s/%s/alertPolicies' % (
acct_alias,
server_id),
json.dumps({"id": alert_policy_id}))
except APIFailedResponse as ex:
module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _remove_alert_policy_to_server(
clc, module, acct_alias, server_id, alert_policy_id):
"""
remove the alert policy from the CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:param alert_policy_id: the alert policy id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('DELETE',
'servers/%s/%s/alertPolicies/%s'
% (acct_alias, server_id, alert_policy_id))
except APIFailedResponse as ex:
module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
"""
retrieves the alert policy id of the server based on the name of the policy
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param alert_policy_name: the alert policy name
:return: alert_policy_id: The alert policy id
"""
alert_policy_id = None
try:
alert_policies = clc.v2.API.Call(method='GET',
url='alertPolicies/%s' % alias)
except APIFailedResponse as ex:
return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
alias, str(ex.response_text)))
for alert_policy in alert_policies.get('items'):
if alert_policy.get('name') == alert_policy_name:
if not alert_policy_id:
alert_policy_id = alert_policy.get('id')
else:
return module.fail_json(
msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
return alert_policy_id
@staticmethod
def _alert_policy_exists(server, alert_policy_id):
"""
Checks if the alert policy exists for the server
:param server: the clc server object
:param alert_policy_id: the alert policy
:return: True: if the given alert policy id associated to the server, False otherwise
"""
result = False
alert_policies = server.alertPolicies
if alert_policies:
for alert_policy in alert_policies:
if alert_policy.get('id') == alert_policy_id:
result = True
return result
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
argument_dict = ClcModifyServer._define_module_argument_spec()
module = AnsibleModule(supports_check_mode=True, **argument_dict)
clc_modify_server = ClcModifyServer(module)
clc_modify_server.process_request()
from ansible.module_utils.basic import * # pylint: disable=W0614
if __name__ == '__main__':
main()
|
potassco/clingo
|
refs/heads/master
|
examples/clingo/controller-threads/controller.py
|
1
|
#!/usr/bin/env python
import os
import readline
import atexit
import signal
from clingo import Control, Function, Number
from threading import Thread, Condition
class Connection:
def __init__(self):
self.condition = Condition()
self.messages = []
def receive(self, timeout=None):
self.condition.acquire()
while len(self.messages) == 0:
self.condition.wait(timeout)
message = self.messages.pop()
self.condition.release()
return message
def send(self, message):
self.condition.acquire()
self.messages.append(message)
self.condition.notify()
self.condition.release()
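# Illustrative sketch (not part of the example itself): Connection acts as a
# small thread-safe mailbox; one thread blocks in receive() until another
# thread calls send():
#   conn = Connection()
#   Thread(target=lambda: conn.send("ping")).start()
#   print(conn.receive())  # -> "ping"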
class Controller:
def __init__(self):
histfile = os.path.join(os.path.expanduser("~"), ".controller")
try: readline.read_history_file(histfile)
except IOError: pass
readline.parse_and_bind('tab: complete')
def complete(commands, text, state):
matches = []
if state == 0: matches = [ c for c in commands if c.startswith(text) ]
return matches[state] if state < len(matches) else None
readline.set_completer(lambda text, state: complete(['more_pigeon_please', 'less_pigeon_please', 'solve', 'exit'], text, state))
atexit.register(readline.write_history_file, histfile)
self.input = Connection()
self.output = None
def register_connection(self, connection):
self.output = connection
def interrupt(self, a, b):
signal.signal(signal.SIGINT, signal.SIG_IGN)
self.output.send("interrupt")
def run(self):
print("")
print("this prompt accepts the following commands:")
print(" solve - start solving")
print(" exit/EOF - terminate the solver")
print(" Ctrl-C - interrupt current search")
print(" less_pigeon_please - select an easy problem")
print(" more_pigeon_please - select a difficult problem")
print("")
pyInt = signal.getsignal(signal.SIGINT)
while True:
signal.signal(signal.SIGINT, pyInt)
try:
try: input_ = raw_input
except NameError: input_ = input
line = input_('> ')
signal.signal(signal.SIGINT, signal.SIG_IGN)
except EOFError:
signal.signal(signal.SIGINT, signal.SIG_IGN)
line = "exit"
print(line)
except KeyboardInterrupt:
signal.signal(signal.SIGINT, signal.SIG_IGN)
print("")
continue
if line == "solve":
print("Solving...")
self.output.send("solve")
signal.signal(signal.SIGINT, self.interrupt)
# NOTE: we need a timeout to catch signals
msg = self.input.receive(1000)
print(msg)
elif line == "exit":
self.output.send("exit")
break
elif line in ["less_pigeon_please", "more_pigeon_please"]:
self.output.send(line)
else:
print("unknown command: " + line)
class SolveThread(Thread):
STATE_SOLVE = 1
STATE_IDLE = 2
STATE_EXIT = 3
def __init__(self, connection):
Thread.__init__(self)
self.k = 0
self.prg = Control()
self.prg.load("client.lp")
self.prg.ground([("pigeon", []), ("sleep", [Number(self.k)])])
self.prg.assign_external(Function("sleep", [Number(self.k)]), True)
self.state = SolveThread.STATE_IDLE
self.input = Connection()
self.output = connection
def on_model(self, model):
self.output.send("answer: " + str(model)),
def on_finish(self, ret):
self.output.send("finish: " + str(ret) + (" (INTERRUPTED)" if ret.interrupted else ""))
def handle_message(self, msg):
if msg == "interrupt":
self.state = SolveThread.STATE_IDLE
elif msg == "exit":
self.state = SolveThread.STATE_EXIT
elif msg == "less_pigeon_please":
self.prg.assign_external(Function("p"), False)
self.state = SolveThread.STATE_IDLE
elif msg == "more_pigeon_please":
self.prg.assign_external(Function("p"), True)
self.state = SolveThread.STATE_IDLE
elif msg == "solve":
self.state = SolveThread.STATE_SOLVE
else: raise(RuntimeError("unexpected message: " + msg))
def run(self):
while True:
if self.state == SolveThread.STATE_SOLVE:
f = self.prg.solve(on_model=self.on_model, on_finish=self.on_finish, async_=True)
msg = self.input.receive()
if self.state == SolveThread.STATE_SOLVE:
f.cancel()
ret = f.get()
else:
ret = None
self.handle_message(msg)
if self.state == SolveThread.STATE_EXIT:
return
elif ret is not None and not ret.unknown:
self.k = self.k + 1
self.prg.ground([("sleep", [Number(self.k)])])
self.prg.release_external(Function("sleep", [Number(self.k-1)]))
self.prg.assign_external(Function("sleep", [Number(self.k)]), True)
ct = Controller()
st = SolveThread(ct.input)
ct.register_connection(st.input)
st.start()
ct.run()
st.join()
|
dhermes/google-cloud-python
|
refs/heads/master
|
spanner/tests/system/test_system.py
|
2
|
# Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import datetime
import math
import operator
import os
import struct
import threading
import time
import unittest
import uuid
import pytest
from google.api_core import exceptions
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
from google.cloud.spanner_v1 import param_types
from google.cloud.spanner_v1.proto.type_pb2 import ARRAY
from google.cloud.spanner_v1.proto.type_pb2 import BOOL
from google.cloud.spanner_v1.proto.type_pb2 import BYTES
from google.cloud.spanner_v1.proto.type_pb2 import DATE
from google.cloud.spanner_v1.proto.type_pb2 import FLOAT64
from google.cloud.spanner_v1.proto.type_pb2 import INT64
from google.cloud.spanner_v1.proto.type_pb2 import STRING
from google.cloud.spanner_v1.proto.type_pb2 import TIMESTAMP
from google.cloud.spanner_v1.proto.type_pb2 import Type
from google.cloud._helpers import UTC
from google.cloud.spanner import Client
from google.cloud.spanner import KeyRange
from google.cloud.spanner import KeySet
from google.cloud.spanner import BurstyPool
from google.cloud.spanner import COMMIT_TIMESTAMP
from test_utils.retry import RetryErrors
from test_utils.retry import RetryInstanceState
from test_utils.retry import RetryResult
from test_utils.system import unique_resource_id
from tests._fixtures import DDL_STATEMENTS
CREATE_INSTANCE = os.getenv("GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE") is not None
if CREATE_INSTANCE:
INSTANCE_ID = "google-cloud" + unique_resource_id("-")
else:
INSTANCE_ID = os.environ.get(
"GOOGLE_CLOUD_TESTS_SPANNER_INSTANCE", "google-cloud-python-systest"
)
EXISTING_INSTANCES = []
COUNTERS_TABLE = "counters"
COUNTERS_COLUMNS = ("name", "value")
class Config(object):
"""Run-time configuration to be modified at set-up.
This is a mutable stand-in to allow test set-up to modify
global state.
"""
CLIENT = None
INSTANCE_CONFIG = None
INSTANCE = None
def _has_all_ddl(database):
return len(database.ddl_statements) == len(DDL_STATEMENTS)
def _list_instances():
return list(Config.CLIENT.list_instances())
def setUpModule():
Config.CLIENT = Client()
retry = RetryErrors(exceptions.ServiceUnavailable)
configs = list(retry(Config.CLIENT.list_instance_configs)())
instances = retry(_list_instances)()
EXISTING_INSTANCES[:] = instances
if CREATE_INSTANCE:
# Defend against back-end returning configs for regions we aren't
# actually allowed to use.
configs = [config for config in configs if "-us-" in config.name]
if not configs:
raise ValueError("List instance configs failed in module set up.")
Config.INSTANCE_CONFIG = configs[0]
config_name = configs[0].name
Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, config_name)
created_op = Config.INSTANCE.create()
created_op.result(30) # block until completion
else:
Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID)
Config.INSTANCE.reload()
def tearDownModule():
if CREATE_INSTANCE:
Config.INSTANCE.delete()
class TestInstanceAdminAPI(unittest.TestCase):
def setUp(self):
self.instances_to_delete = []
def tearDown(self):
for instance in self.instances_to_delete:
instance.delete()
def test_list_instances(self):
instances = list(Config.CLIENT.list_instances())
# We have added one new instance in `setUpModule`.
if CREATE_INSTANCE:
self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1)
for instance in instances:
instance_existence = (
instance in EXISTING_INSTANCES or instance == Config.INSTANCE
)
self.assertTrue(instance_existence)
def test_reload_instance(self):
# Use same arguments as Config.INSTANCE (created in `setUpModule`)
# so we can use reload() on a fresh instance.
instance = Config.CLIENT.instance(INSTANCE_ID)
# Make sure metadata unset before reloading.
instance.display_name = None
instance.reload()
self.assertEqual(instance.display_name, Config.INSTANCE.display_name)
@unittest.skipUnless(CREATE_INSTANCE, "Skipping instance creation")
def test_create_instance(self):
ALT_INSTANCE_ID = "new" + unique_resource_id("-")
instance = Config.CLIENT.instance(ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name)
operation = instance.create()
# Make sure this instance gets deleted after the test case.
self.instances_to_delete.append(instance)
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
# Create a new instance object and make sure it is the same.
instance_alt = Config.CLIENT.instance(
ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name
)
instance_alt.reload()
self.assertEqual(instance, instance_alt)
self.assertEqual(instance.display_name, instance_alt.display_name)
def test_update_instance(self):
OLD_DISPLAY_NAME = Config.INSTANCE.display_name
NEW_DISPLAY_NAME = "Foo Bar Baz"
Config.INSTANCE.display_name = NEW_DISPLAY_NAME
operation = Config.INSTANCE.update()
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
# Create a new instance object and reload it.
instance_alt = Config.CLIENT.instance(INSTANCE_ID, None)
self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
instance_alt.reload()
self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
# Make sure to put the instance back the way it was for the
# other test cases.
Config.INSTANCE.display_name = OLD_DISPLAY_NAME
Config.INSTANCE.update()
class _TestData(object):
TABLE = "contacts"
COLUMNS = ("contact_id", "first_name", "last_name", "email")
ROW_DATA = (
(1, u"Phred", u"Phlyntstone", u"phred@example.com"),
(2, u"Bharney", u"Rhubble", u"bharney@example.com"),
(3, u"Wylma", u"Phlyntstone", u"wylma@example.com"),
)
ALL = KeySet(all_=True)
SQL = "SELECT * FROM contacts ORDER BY contact_id"
_recurse_into_lists = True
def _assert_timestamp(self, value, nano_value):
self.assertIsInstance(value, datetime.datetime)
self.assertIsNone(value.tzinfo)
self.assertIs(nano_value.tzinfo, UTC)
self.assertEqual(value.year, nano_value.year)
self.assertEqual(value.month, nano_value.month)
self.assertEqual(value.day, nano_value.day)
self.assertEqual(value.hour, nano_value.hour)
self.assertEqual(value.minute, nano_value.minute)
self.assertEqual(value.second, nano_value.second)
self.assertEqual(value.microsecond, nano_value.microsecond)
if isinstance(value, DatetimeWithNanoseconds):
self.assertEqual(value.nanosecond, nano_value.nanosecond)
else:
self.assertEqual(value.microsecond * 1000, nano_value.nanosecond)
def _check_rows_data(self, rows_data, expected=None):
if expected is None:
expected = self.ROW_DATA
self.assertEqual(len(rows_data), len(expected))
for row, expected in zip(rows_data, expected):
self._check_row_data(row, expected)
def _check_row_data(self, row_data, expected):
self.assertEqual(len(row_data), len(expected))
for found_cell, expected_cell in zip(row_data, expected):
self._check_cell_data(found_cell, expected_cell)
def _check_cell_data(self, found_cell, expected_cell):
if isinstance(found_cell, DatetimeWithNanoseconds):
self._assert_timestamp(expected_cell, found_cell)
elif isinstance(found_cell, float) and math.isnan(found_cell):
self.assertTrue(math.isnan(expected_cell))
elif isinstance(found_cell, list) and self._recurse_into_lists:
self.assertEqual(len(found_cell), len(expected_cell))
for found_item, expected_item in zip(found_cell, expected_cell):
self._check_cell_data(found_item, expected_item)
else:
self.assertEqual(found_cell, expected_cell)
class TestDatabaseAPI(unittest.TestCase, _TestData):
DATABASE_NAME = "test_database" + unique_resource_id("_")
@classmethod
def setUpClass(cls):
pool = BurstyPool(labels={"testcase": "database_api"})
cls._db = Config.INSTANCE.database(
cls.DATABASE_NAME, ddl_statements=DDL_STATEMENTS, pool=pool
)
operation = cls._db.create()
operation.result(30) # raises on failure / timeout.
@classmethod
def tearDownClass(cls):
cls._db.drop()
def setUp(self):
self.to_delete = []
def tearDown(self):
for doomed in self.to_delete:
doomed.drop()
def test_list_databases(self):
# Since `Config.INSTANCE` is newly created in `setUpModule`, the
# database created in `setUpClass` here will be the only one.
database_names = [
database.name for database in Config.INSTANCE.list_databases()
]
self.assertTrue(self._db.name in database_names)
def test_create_database(self):
pool = BurstyPool(labels={"testcase": "create_database"})
temp_db_id = "temp_db" + unique_resource_id("_")
temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
operation = temp_db.create()
self.to_delete.append(temp_db)
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
database_ids = [
database.database_id for database in Config.INSTANCE.list_databases()
]
self.assertIn(temp_db_id, database_ids)
def test_table_not_found(self):
temp_db_id = "temp_db" + unique_resource_id("_")
correct_table = "MyTable"
incorrect_table = "NotMyTable"
self.assertNotEqual(correct_table, incorrect_table)
create_table = (
"CREATE TABLE {} (\n"
" Id STRING(36) NOT NULL,\n"
" Field1 STRING(36) NOT NULL\n"
") PRIMARY KEY (Id)"
).format(correct_table)
index = "CREATE INDEX IDX ON {} (Field1)".format(incorrect_table)
temp_db = Config.INSTANCE.database(
temp_db_id, ddl_statements=[create_table, index]
)
self.to_delete.append(temp_db)
with self.assertRaises(exceptions.NotFound) as exc_info:
temp_db.create()
expected = "Table not found: {0}".format(incorrect_table)
self.assertEqual(exc_info.exception.args, (expected,))
@pytest.mark.skip(
reason=(
"update_dataset_ddl() has a flaky timeout"
"https://github.com/GoogleCloudPlatform/google-cloud-python/issues/"
"5629"
)
)
def test_update_database_ddl_with_operation_id(self):
pool = BurstyPool(labels={"testcase": "update_database_ddl"})
temp_db_id = "temp_db" + unique_resource_id("_")
temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
create_op = temp_db.create()
self.to_delete.append(temp_db)
# We want to make sure the operation completes.
create_op.result(240) # raises on failure / timeout.
        # Random but short; operation IDs must always start with a letter.
operation_id = 'a' + str(uuid.uuid4())[:8]
operation = temp_db.update_ddl(DDL_STATEMENTS, operation_id=operation_id)
self.assertEqual(operation_id, operation.operation.name.split('/')[-1])
# We want to make sure the operation completes.
operation.result(240) # raises on failure / timeout.
temp_db.reload()
self.assertEqual(len(temp_db.ddl_statements), len(DDL_STATEMENTS))
def test_db_batch_insert_then_db_snapshot_read(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
from_snap = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(from_snap)
def test_db_run_in_transaction_then_snapshot_execute_sql(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
def _unit_of_work(transaction, test):
rows = list(transaction.read(test.TABLE, test.COLUMNS, self.ALL))
test.assertEqual(rows, [])
transaction.insert_or_update(test.TABLE, test.COLUMNS, test.ROW_DATA)
self._db.run_in_transaction(_unit_of_work, test=self)
with self._db.snapshot() as after:
rows = list(after.execute_sql(self.SQL))
self._check_rows_data(rows)
def test_db_run_in_transaction_twice(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
def _unit_of_work(transaction, test):
transaction.insert_or_update(test.TABLE, test.COLUMNS, test.ROW_DATA)
self._db.run_in_transaction(_unit_of_work, test=self)
self._db.run_in_transaction(_unit_of_work, test=self)
with self._db.snapshot() as after:
rows = list(after.execute_sql(self.SQL))
self._check_rows_data(rows)
def test_db_run_in_transaction_twice_4181(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(COUNTERS_TABLE, self.ALL)
def _unit_of_work(transaction, name):
transaction.insert(COUNTERS_TABLE, COUNTERS_COLUMNS, [[name, 0]])
self._db.run_in_transaction(_unit_of_work, name="id_1")
with self.assertRaises(exceptions.AlreadyExists):
self._db.run_in_transaction(_unit_of_work, name="id_1")
self._db.run_in_transaction(_unit_of_work, name="id_2")
with self._db.snapshot() as after:
rows = list(after.read(COUNTERS_TABLE, COUNTERS_COLUMNS, self.ALL))
self.assertEqual(len(rows), 2)
SOME_DATE = datetime.date(2011, 1, 17)
SOME_TIME = datetime.datetime(1989, 1, 17, 17, 59, 12, 345612)
NANO_TIME = DatetimeWithNanoseconds(1995, 8, 31, nanosecond=987654321)
POS_INF = float("+inf")
NEG_INF = float("-inf")
OTHER_NAN, = struct.unpack("<d", b"\x01\x00\x01\x00\x00\x00\xf8\xff")
BYTES_1 = b"Ymlu"
BYTES_2 = b"Ym9vdHM="
ALL_TYPES_TABLE = "all_types"
ALL_TYPES_COLUMNS = (
"pkey",
"int_value",
"int_array",
"bool_value",
"bool_array",
"bytes_value",
"bytes_array",
"date_value",
"date_array",
"float_value",
"float_array",
"string_value",
"string_array",
"timestamp_value",
"timestamp_array",
)
AllTypesRowData = collections.namedtuple("AllTypesRowData", ALL_TYPES_COLUMNS)
AllTypesRowData.__new__.__defaults__ = tuple([None for column in ALL_TYPES_COLUMNS])
ALL_TYPES_ROWDATA = (
# all nulls
AllTypesRowData(pkey=0),
# Non-null values
AllTypesRowData(pkey=101, int_value=123),
AllTypesRowData(pkey=102, bool_value=False),
AllTypesRowData(pkey=103, bytes_value=BYTES_1),
AllTypesRowData(pkey=104, date_value=SOME_DATE),
AllTypesRowData(pkey=105, float_value=1.4142136),
AllTypesRowData(pkey=106, string_value=u"VALUE"),
AllTypesRowData(pkey=107, timestamp_value=SOME_TIME),
AllTypesRowData(pkey=108, timestamp_value=NANO_TIME),
# empty array values
AllTypesRowData(pkey=201, int_array=[]),
AllTypesRowData(pkey=202, bool_array=[]),
AllTypesRowData(pkey=203, bytes_array=[]),
AllTypesRowData(pkey=204, date_array=[]),
AllTypesRowData(pkey=205, float_array=[]),
AllTypesRowData(pkey=206, string_array=[]),
AllTypesRowData(pkey=207, timestamp_array=[]),
# non-empty array values, including nulls
AllTypesRowData(pkey=301, int_array=[123, 456, None]),
AllTypesRowData(pkey=302, bool_array=[True, False, None]),
AllTypesRowData(pkey=303, bytes_array=[BYTES_1, BYTES_2, None]),
AllTypesRowData(pkey=304, date_array=[SOME_DATE, None]),
AllTypesRowData(pkey=305, float_array=[3.1415926, 2.71828, None]),
AllTypesRowData(pkey=306, string_array=[u"One", u"Two", None]),
AllTypesRowData(pkey=307, timestamp_array=[SOME_TIME, NANO_TIME, None]),
)
class TestSessionAPI(unittest.TestCase, _TestData):
DATABASE_NAME = "test_sessions" + unique_resource_id("_")
@classmethod
def setUpClass(cls):
pool = BurstyPool(labels={"testcase": "session_api"})
cls._db = Config.INSTANCE.database(
cls.DATABASE_NAME, ddl_statements=DDL_STATEMENTS, pool=pool
)
operation = cls._db.create()
operation.result(30) # raises on failure / timeout.
@classmethod
def tearDownClass(cls):
cls._db.drop()
def setUp(self):
self.to_delete = []
def tearDown(self):
for doomed in self.to_delete:
doomed.delete()
def test_session_crud(self):
retry_true = RetryResult(operator.truth)
retry_false = RetryResult(operator.not_)
session = self._db.session()
self.assertFalse(session.exists())
session.create()
retry_true(session.exists)()
session.delete()
retry_false(session.exists)()
def test_batch_insert_then_read(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
def test_batch_insert_then_read_string_array_of_string(self):
TABLE = "string_plus_array_of_string"
COLUMNS = ["id", "name", "tags"]
ROWDATA = [
(0, None, None),
(1, "phred", ["yabba", "dabba", "do"]),
(2, "bharney", []),
(3, "wylma", ["oh", None, "phred"]),
]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(TABLE, self.ALL)
batch.insert(TABLE, COLUMNS, ROWDATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(TABLE, COLUMNS, self.ALL))
self._check_rows_data(rows, expected=ROWDATA)
def test_batch_insert_then_read_all_datatypes(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(ALL_TYPES_TABLE, self.ALL)
batch.insert(ALL_TYPES_TABLE, ALL_TYPES_COLUMNS, ALL_TYPES_ROWDATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(ALL_TYPES_TABLE, ALL_TYPES_COLUMNS, self.ALL))
self._check_rows_data(rows, expected=ALL_TYPES_ROWDATA)
def test_batch_insert_or_update_then_query(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.insert_or_update(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.execute_sql(self.SQL))
self._check_rows_data(rows)
def test_batch_insert_w_commit_timestamp(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
table = "users_history"
columns = ["id", "commit_ts", "name", "email", "deleted"]
user_id = 1234
name = "phred"
email = "phred@example.com"
row_data = [[user_id, COMMIT_TIMESTAMP, name, email, False]]
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, row_data)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(table, columns, self.ALL))
self.assertEqual(len(rows), 1)
r_id, commit_ts, r_name, r_email, deleted = rows[0]
self.assertEqual(r_id, user_id)
self.assertEqual(commit_ts, batch.committed)
self.assertEqual(r_name, name)
self.assertEqual(r_email, email)
self.assertFalse(deleted)
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Aborted)
def test_transaction_read_and_insert_then_rollback(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
transaction = session.transaction()
transaction.begin()
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
# Inserted rows can't be read until after commit.
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.rollback()
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
def _transaction_read_then_raise(self, transaction):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(len(rows), 0)
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
raise CustomException()
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_read_and_insert_then_exception(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with self.assertRaises(CustomException):
self._db.run_in_transaction(self._transaction_read_then_raise)
# Transaction was rolled back.
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_read_and_insert_or_update_then_commit(self):
# [START spanner_test_dml_read_your_writes]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.insert_or_update(self.TABLE, self.COLUMNS, self.ROW_DATA)
# Inserted rows can't be read until after commit.
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
# [END spanner_test_dml_read_your_writes]
def _generate_insert_statements(self):
insert_template = "INSERT INTO {table} ({column_list}) " "VALUES ({row_data})"
for row in self.ROW_DATA:
yield insert_template.format(
table=self.TABLE,
column_list=", ".join(self.COLUMNS),
row_data='{}, "{}", "{}", "{}"'.format(*row),
)
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_execute_sql_w_dml_read_rollback(self):
# [START spanner_test_dml_rollback_txn_not_committed]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
transaction = session.transaction()
transaction.begin()
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
for insert_statement in self._generate_insert_statements():
result = transaction.execute_sql(insert_statement)
list(result) # iterate to get stats
self.assertEqual(result.stats.row_count_exact, 1)
# Rows inserted via DML *can* be read before commit.
during_rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(during_rows)
transaction.rollback()
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows, [])
# [END spanner_test_dml_rollback_txn_not_committed]
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_execute_update_read_commit(self):
# [START spanner_test_dml_read_your_writes]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
for insert_statement in self._generate_insert_statements():
row_count = transaction.execute_update(insert_statement)
self.assertEqual(row_count, 1)
# Rows inserted via DML *can* be read before commit.
during_rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(during_rows)
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
# [END spanner_test_dml_read_your_writes]
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_execute_update_then_insert_commit(self):
# [START spanner_test_dml_with_mutation]
# [START spanner_test_dml_update]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
row_count = transaction.execute_update(insert_statement)
self.assertEqual(row_count, 1)
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA[1:])
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
# [END spanner_test_dml_update]
# [END spanner_test_dml_with_mutation]
def test_execute_partitioned_dml(self):
# [START spanner_test_dml_partioned_dml_update]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
delete_statement = "DELETE FROM {} WHERE true".format(self.TABLE)
def _setup_table(txn):
txn.execute_update(delete_statement)
for insert_statement in self._generate_insert_statements():
txn.execute_update(insert_statement)
committed = self._db.run_in_transaction(_setup_table)
with self._db.snapshot(read_timestamp=committed) as snapshot:
before_pdml = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(before_pdml)
nonesuch = "nonesuch@example.com"
target = "phred@example.com"
update_statement = (
"UPDATE {table} SET {table}.email = @email " "WHERE {table}.email = @target"
).format(table=self.TABLE)
row_count = self._db.execute_partitioned_dml(
update_statement,
params={"email": nonesuch, "target": target},
param_types={"email": Type(code=STRING), "target": Type(code=STRING)},
)
self.assertEqual(row_count, 1)
row = self.ROW_DATA[0]
updated = [row[:3] + (nonesuch,)] + list(self.ROW_DATA[1:])
with self._db.snapshot(read_timestamp=committed) as snapshot:
after_update = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(after_update, updated)
row_count = self._db.execute_partitioned_dml(delete_statement)
self.assertEqual(row_count, len(self.ROW_DATA))
with self._db.snapshot(read_timestamp=committed) as snapshot:
after_delete = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(after_delete, [])
# [END spanner_test_dml_partioned_dml_update]
def _transaction_concurrency_helper(self, unit_of_work, pkey):
INITIAL_VALUE = 123
NUM_THREADS = 3 # conforms to equivalent Java systest.
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.insert_or_update(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, INITIAL_VALUE]]
)
        # The threads must not run their transactions in the test's current
        # session, which supports only one transaction at a time;
        # run_in_transaction checks out a separate session from the pool.
txn_sessions = []
for _ in range(NUM_THREADS):
txn_sessions.append(self._db)
threads = [
threading.Thread(
target=txn_session.run_in_transaction, args=(unit_of_work, pkey)
)
for txn_session in txn_sessions
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
with self._db.snapshot() as snapshot:
keyset = KeySet(keys=[(pkey,)])
rows = list(snapshot.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
_, value = rows[0]
self.assertEqual(value, INITIAL_VALUE + len(threads))
def _read_w_concurrent_update(self, transaction, pkey):
keyset = KeySet(keys=[(pkey,)])
rows = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])
def test_transaction_read_w_concurrent_updates(self):
PKEY = "read_w_concurrent_updates"
self._transaction_concurrency_helper(self._read_w_concurrent_update, PKEY)
def _query_w_concurrent_update(self, transaction, pkey):
SQL = "SELECT * FROM counters WHERE name = @name"
rows = list(
transaction.execute_sql(
SQL, params={"name": pkey}, param_types={"name": Type(code=STRING)}
)
)
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])
def test_transaction_query_w_concurrent_updates(self):
PKEY = "query_w_concurrent_updates"
self._transaction_concurrency_helper(self._query_w_concurrent_update, PKEY)
def test_transaction_read_w_abort(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
trigger = _ReadAbortTrigger()
with self._db.batch() as batch:
batch.delete(COUNTERS_TABLE, self.ALL)
batch.insert(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[trigger.KEY1, 0], [trigger.KEY2, 0]]
)
provoker = threading.Thread(target=trigger.provoke_abort, args=(self._db,))
handler = threading.Thread(target=trigger.handle_abort, args=(self._db,))
provoker.start()
trigger.provoker_started.wait()
handler.start()
trigger.handler_done.wait()
provoker.join()
handler.join()
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(COUNTERS_TABLE, COUNTERS_COLUMNS, self.ALL))
self._check_row_data(rows, expected=[[trigger.KEY1, 1], [trigger.KEY2, 1]])
@staticmethod
def _row_data(max_index):
for index in range(max_index):
yield [
index,
"First%09d" % (index,),
"Last%09d" % (max_index - index),
"test-%09d@example.com" % (index,),
]
def _set_up_table(self, row_count, database=None):
if database is None:
database = self._db
retry = RetryInstanceState(_has_all_ddl)
retry(database.reload)()
def _unit_of_work(transaction, test):
transaction.delete(test.TABLE, test.ALL)
transaction.insert(test.TABLE, test.COLUMNS, test._row_data(row_count))
committed = database.run_in_transaction(_unit_of_work, test=self)
return committed
def test_read_with_single_keys_index(self):
# [START spanner_test_single_key_index_read]
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
row = 5
keyset = [[expected[row][0], expected[row][1]]]
with self._db.snapshot() as snapshot:
results_iter = snapshot.read(
self.TABLE, columns, KeySet(keys=keyset), index="name"
)
rows = list(results_iter)
self.assertEqual(rows, [expected[row]])
# [END spanner_test_single_key_index_read]
def test_empty_read_with_single_keys_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
keyset = [["Non", "Existent"]]
with self._db.snapshot() as snapshot:
results_iter = snapshot.read(
self.TABLE, columns, KeySet(keys=keyset), index="name"
)
rows = list(results_iter)
self.assertEqual(rows, [])
def test_read_with_multiple_keys_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, KeySet(keys=expected), index="name")
)
self.assertEqual(rows, expected)
def test_snapshot_read_w_various_staleness(self):
from datetime import datetime
from google.cloud._helpers import UTC
ROW_COUNT = 400
committed = self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
before_reads = datetime.utcnow().replace(tzinfo=UTC)
# Test w/ read timestamp
with self._db.snapshot(read_timestamp=committed) as read_tx:
rows = list(read_tx.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ min read timestamp
with self._db.snapshot(min_read_timestamp=committed) as min_read_ts:
rows = list(min_read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
staleness = datetime.utcnow().replace(tzinfo=UTC) - before_reads
# Test w/ max staleness
with self._db.snapshot(max_staleness=staleness) as max_staleness:
rows = list(max_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ exact staleness
with self._db.snapshot(exact_staleness=staleness) as exact_staleness:
rows = list(exact_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ strong
with self._db.snapshot() as strong:
rows = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
def test_multiuse_snapshot_read_isolation_strong(self):
ROW_COUNT = 40
self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
with self._db.snapshot(multi_use=True) as strong:
before = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_multiuse_snapshot_read_isolation_read_timestamp(self):
ROW_COUNT = 40
committed = self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
with self._db.snapshot(read_timestamp=committed, multi_use=True) as read_ts:
before = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_multiuse_snapshot_read_isolation_exact_staleness(self):
ROW_COUNT = 40
self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
time.sleep(1)
delta = datetime.timedelta(microseconds=1000)
with self._db.snapshot(exact_staleness=delta, multi_use=True) as exact:
before = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_read_w_index(self):
ROW_COUNT = 2000
# Indexed reads cannot return non-indexed columns
MY_COLUMNS = self.COLUMNS[0], self.COLUMNS[2]
EXTRA_DDL = ["CREATE INDEX contacts_by_last_name ON contacts(last_name)"]
pool = BurstyPool(labels={"testcase": "read_w_index"})
temp_db = Config.INSTANCE.database(
"test_read" + unique_resource_id("_"),
ddl_statements=DDL_STATEMENTS + EXTRA_DDL,
pool=pool,
)
operation = temp_db.create()
self.to_delete.append(_DatabaseDropper(temp_db))
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
committed = self._set_up_table(ROW_COUNT, database=temp_db)
with temp_db.snapshot(read_timestamp=committed) as snapshot:
rows = list(
snapshot.read(
self.TABLE, MY_COLUMNS, self.ALL, index="contacts_by_last_name"
)
)
expected = list(
reversed([(row[0], row[2]) for row in self._row_data(ROW_COUNT)])
)
self._check_rows_data(rows, expected)
def test_read_w_single_key(self):
# [START spanner_test_single_key_read]
ROW_COUNT = 40
committed = self._set_up_table(ROW_COUNT)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, KeySet(keys=[(0,)])))
all_data_rows = list(self._row_data(ROW_COUNT))
expected = [all_data_rows[0]]
self._check_row_data(rows, expected)
# [END spanner_test_single_key_read]
def test_empty_read(self):
# [START spanner_test_empty_read]
ROW_COUNT = 40
self._set_up_table(ROW_COUNT)
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, KeySet(keys=[(40,)])))
self._check_row_data(rows, [])
# [END spanner_test_empty_read]
def test_read_w_multiple_keys(self):
ROW_COUNT = 40
indices = [0, 5, 17]
committed = self._set_up_table(ROW_COUNT)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(
snapshot.read(
self.TABLE,
self.COLUMNS,
KeySet(keys=[(index,) for index in indices]),
)
)
all_data_rows = list(self._row_data(ROW_COUNT))
expected = [row for row in all_data_rows if row[0] in indices]
self._check_row_data(rows, expected)
def test_read_w_limit(self):
ROW_COUNT = 3000
LIMIT = 100
committed = self._set_up_table(ROW_COUNT)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL, limit=LIMIT))
all_data_rows = list(self._row_data(ROW_COUNT))
expected = all_data_rows[:LIMIT]
self._check_row_data(rows, expected)
def test_read_w_ranges(self):
ROW_COUNT = 3000
START = 1000
END = 2000
committed = self._set_up_table(ROW_COUNT)
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
all_data_rows = list(self._row_data(ROW_COUNT))
single_key = KeyRange(start_closed=[START], end_open=[START + 1])
keyset = KeySet(ranges=(single_key,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[START : START + 1]
self._check_rows_data(rows, expected)
closed_closed = KeyRange(start_closed=[START], end_closed=[END])
keyset = KeySet(ranges=(closed_closed,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[START : END + 1]
self._check_row_data(rows, expected)
closed_open = KeyRange(start_closed=[START], end_open=[END])
keyset = KeySet(ranges=(closed_open,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[START:END]
self._check_row_data(rows, expected)
open_open = KeyRange(start_open=[START], end_open=[END])
keyset = KeySet(ranges=(open_open,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[START + 1 : END]
self._check_row_data(rows, expected)
open_closed = KeyRange(start_open=[START], end_closed=[END])
keyset = KeySet(ranges=(open_closed,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[START + 1 : END + 1]
self._check_row_data(rows, expected)
def test_read_partial_range_until_end(self):
row_count = 3000
start = 1000
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
all_data_rows = list(self._row_data(row_count))
expected_map = {
("start_closed", "end_closed"): all_data_rows[start:],
("start_closed", "end_open"): [],
("start_open", "end_closed"): all_data_rows[start + 1 :],
("start_open", "end_open"): [],
}
for start_arg in ("start_closed", "start_open"):
for end_arg in ("end_closed", "end_open"):
range_kwargs = {start_arg: [start], end_arg: []}
keyset = KeySet(ranges=(KeyRange(**range_kwargs),))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = expected_map[(start_arg, end_arg)]
self._check_row_data(rows, expected)
def test_read_partial_range_from_beginning(self):
row_count = 3000
end = 2000
committed = self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
expected_map = {
("start_closed", "end_closed"): all_data_rows[: end + 1],
("start_closed", "end_open"): all_data_rows[:end],
("start_open", "end_closed"): [],
("start_open", "end_open"): [],
}
for start_arg in ("start_closed", "start_open"):
for end_arg in ("end_closed", "end_open"):
range_kwargs = {start_arg: [], end_arg: [end]}
keyset = KeySet(ranges=(KeyRange(**range_kwargs),))
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = expected_map[(start_arg, end_arg)]
self._check_row_data(rows, expected)
def test_read_with_range_keys_index_single_key(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start = 3
krange = KeyRange(start_closed=data[start], end_open=data[start + 1])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start : start + 1])
def test_read_with_range_keys_index_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_closed=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start : end + 1])
def test_read_with_range_keys_index_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_closed=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start:end])
def test_read_with_range_keys_index_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_open=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start + 1 : end + 1])
def test_read_with_range_keys_index_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_open=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start + 1 : end])
def test_read_with_range_keys_index_limit_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_closed=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start : end + 1]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_closed=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start:end]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_open=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start + 1 : end + 1]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_open=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start + 1 : end]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_and_index_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
closed_closed = KeyRange(start_closed=data[start], end_closed=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(closed_closed,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start : end + 1]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
closed_open = KeyRange(start_closed=data[start], end_open=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(closed_open,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start:end]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
open_closed = KeyRange(start_open=data[start], end_closed=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(open_closed,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start + 1 : end + 1]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
open_open = KeyRange(start_open=data[start], end_open=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(open_open,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start + 1 : end]
self.assertEqual(rows, expected)
def test_partition_read_w_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
committed = self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
union = []
batch_txn = self._db.batch_snapshot(read_timestamp=committed)
batches = batch_txn.generate_read_batches(
self.TABLE, columns, KeySet(all_=True), index="name"
)
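        # Each batch describes an independently-processable partition of the
        # read; processing every batch and concatenating the results should
        # reproduce the full, index-ordered result set.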
for batch in batches:
p_results_iter = batch_txn.process(batch)
union.extend(list(p_results_iter))
self.assertEqual(union, expected)
batch_txn.close()
def test_execute_sql_w_manual_consume(self):
ROW_COUNT = 3000
committed = self._set_up_table(ROW_COUNT)
with self._db.snapshot(read_timestamp=committed) as snapshot:
streamed = snapshot.execute_sql(self.SQL)
keyset = KeySet(all_=True)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
self.assertEqual(list(streamed), rows)
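        # Peek at private streamed-result state: a fully-consumed stream
        # should hold no partial row and no pending chunk.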
self.assertEqual(streamed._current_row, [])
self.assertEqual(streamed._pending_chunk, None)
def _check_sql_results(
self, database, sql, params, param_types, expected, order=True
):
if order and "ORDER" not in sql:
sql += " ORDER BY pkey"
with database.snapshot() as snapshot:
rows = list(
snapshot.execute_sql(sql, params=params, param_types=param_types)
)
self._check_rows_data(rows, expected=expected)
def test_multiuse_snapshot_execute_sql_isolation_strong(self):
ROW_COUNT = 40
SQL = "SELECT * FROM {}".format(self.TABLE)
self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
with self._db.snapshot(multi_use=True) as strong:
before = list(strong.execute_sql(SQL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(strong.execute_sql(SQL))
self._check_row_data(after, all_data_rows)
def test_execute_sql_returning_array_of_struct(self):
SQL = (
"SELECT ARRAY(SELECT AS STRUCT C1, C2 "
"FROM (SELECT 'a' AS C1, 1 AS C2 "
"UNION ALL SELECT 'b' AS C1, 2 AS C2) "
"ORDER BY C1 ASC)"
)
self._check_sql_results(
self._db,
sql=SQL,
params=None,
param_types=None,
expected=[[[["a", 1], ["b", 2]]]],
)
def test_execute_sql_returning_empty_array_of_struct(self):
SQL = (
"SELECT ARRAY(SELECT AS STRUCT C1, C2 "
"FROM (SELECT 2 AS C1) X "
"JOIN (SELECT 1 AS C2) Y "
"ON X.C1 = Y.C2 "
"ORDER BY C1 ASC)"
)
self._db.snapshot(multi_use=True)
self._check_sql_results(
self._db, sql=SQL, params=None, param_types=None, expected=[[[]]]
)
def test_invalid_type(self):
table = "counters"
columns = ("name", "value")
valid_input = (("", 0),)
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, valid_input)
invalid_input = ((0, ""),)
with self.assertRaises(exceptions.FailedPrecondition) as exc_info:
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, invalid_input)
error_msg = (
"Invalid value for column value in table " "counters: Expected INT64."
)
self.assertIn(error_msg, str(exc_info.exception))
def test_execute_sql_select_1(self):
self._db.snapshot(multi_use=True)
# Hello, world query
self._check_sql_results(
self._db,
sql="SELECT 1",
params=None,
param_types=None,
expected=[(1,)],
order=False,
)
def _bind_test_helper(
self, type_name, single_value, array_value, expected_array_value=None
):
self._db.snapshot(multi_use=True)
# Bind a non-null <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": single_value},
param_types={"v": Type(code=type_name)},
expected=[(single_value,)],
order=False,
)
# Bind a null <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": None},
param_types={"v": Type(code=type_name)},
expected=[(None,)],
order=False,
)
# Bind an array of <type_name>
array_type = Type(code=ARRAY, array_element_type=Type(code=type_name))
if expected_array_value is None:
expected_array_value = array_value
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": array_value},
param_types={"v": array_type},
expected=[(expected_array_value,)],
order=False,
)
# Bind an empty array of <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": []},
param_types={"v": array_type},
expected=[([],)],
order=False,
)
# Bind a null array of <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": None},
param_types={"v": array_type},
expected=[(None,)],
order=False,
)
def test_execute_sql_w_string_bindings(self):
self._bind_test_helper(STRING, "Phred", ["Phred", "Bharney"])
def test_execute_sql_w_bool_bindings(self):
self._bind_test_helper(BOOL, True, [True, False, True])
def test_execute_sql_w_int64_bindings(self):
self._bind_test_helper(INT64, 42, [123, 456, 789])
def test_execute_sql_w_float64_bindings(self):
self._bind_test_helper(FLOAT64, 42.3, [12.3, 456.0, 7.89])
def test_execute_sql_w_float_bindings_transfinite(self):
# Find -inf
self._check_sql_results(
self._db,
sql="SELECT @neg_inf",
params={"neg_inf": NEG_INF},
param_types={"neg_inf": Type(code=FLOAT64)},
expected=[(NEG_INF,)],
order=False,
)
# Find +inf
self._check_sql_results(
self._db,
sql="SELECT @pos_inf",
params={"pos_inf": POS_INF},
param_types={"pos_inf": Type(code=FLOAT64)},
expected=[(POS_INF,)],
order=False,
)
def test_execute_sql_w_bytes_bindings(self):
self._bind_test_helper(BYTES, b"DEADBEEF", [b"FACEDACE", b"DEADBEEF"])
def test_execute_sql_w_timestamp_bindings(self):
import pytz
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
timestamp_1 = DatetimeWithNanoseconds(
1989, 1, 17, 17, 59, 12, nanosecond=345612789
)
timestamp_2 = DatetimeWithNanoseconds(
1989, 1, 17, 17, 59, 13, nanosecond=456127893
)
timestamps = [timestamp_1, timestamp_2]
# In round-trip, timestamps acquire a timezone value.
expected_timestamps = [
timestamp.replace(tzinfo=pytz.UTC) for timestamp in timestamps
]
self._recurse_into_lists = False
self._bind_test_helper(TIMESTAMP, timestamp_1, timestamps, expected_timestamps)
def test_execute_sql_w_date_bindings(self):
import datetime
dates = [SOME_DATE, SOME_DATE + datetime.timedelta(days=1)]
self._bind_test_helper(DATE, SOME_DATE, dates)
def test_execute_sql_w_query_param_struct(self):
NAME = "Phred"
COUNT = 123
SIZE = 23.456
HEIGHT = 188.0
WEIGHT = 97.6
record_type = param_types.Struct(
[
param_types.StructField("name", param_types.STRING),
param_types.StructField("count", param_types.INT64),
param_types.StructField("size", param_types.FLOAT64),
param_types.StructField(
"nested",
param_types.Struct(
[
param_types.StructField("height", param_types.FLOAT64),
param_types.StructField("weight", param_types.FLOAT64),
]
),
),
]
)
# Query with null struct, explicit type
self._check_sql_results(
self._db,
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
params={"r": None},
param_types={"r": record_type},
expected=[(None, None, None, None)],
order=False,
)
# Query with non-null struct, explicit type, NULL values
self._check_sql_results(
self._db,
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
params={"r": (None, None, None, None)},
param_types={"r": record_type},
expected=[(None, None, None, None)],
order=False,
)
# Query with non-null struct, explicit type, nested NULL values
self._check_sql_results(
self._db,
sql="SELECT @r.nested.weight",
params={"r": (None, None, None, (None, None))},
param_types={"r": record_type},
expected=[(None,)],
order=False,
)
# Query with non-null struct, explicit type
self._check_sql_results(
self._db,
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
params={"r": (NAME, COUNT, SIZE, (HEIGHT, WEIGHT))},
param_types={"r": record_type},
expected=[(NAME, COUNT, SIZE, WEIGHT)],
order=False,
)
# Query with empty struct, explicitly empty type
empty_type = param_types.Struct([])
self._check_sql_results(
self._db,
sql="SELECT @r IS NULL",
params={"r": ()},
param_types={"r": empty_type},
expected=[(False,)],
order=False,
)
# Query with null struct, explicitly empty type
self._check_sql_results(
self._db,
sql="SELECT @r IS NULL",
params={"r": None},
param_types={"r": empty_type},
expected=[(True,)],
order=False,
)
# Query with equality check for struct value
struct_equality_query = (
"SELECT " '@struct_param=STRUCT<threadf INT64, userf STRING>(1,"bob")'
)
struct_type = param_types.Struct(
[
param_types.StructField("threadf", param_types.INT64),
param_types.StructField("userf", param_types.STRING),
]
)
self._check_sql_results(
self._db,
sql=struct_equality_query,
params={"struct_param": (1, "bob")},
param_types={"struct_param": struct_type},
expected=[(True,)],
order=False,
)
# Query with nullness test for struct
self._check_sql_results(
self._db,
sql="SELECT @struct_param IS NULL",
params={"struct_param": None},
param_types={"struct_param": struct_type},
expected=[(True,)],
order=False,
)
# Query with null array-of-struct
array_elem_type = param_types.Struct(
[param_types.StructField("threadid", param_types.INT64)]
)
array_type = param_types.Array(array_elem_type)
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_arr_param) a",
params={"struct_arr_param": None},
param_types={"struct_arr_param": array_type},
expected=[],
order=False,
)
# Query with non-null array-of-struct
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_arr_param) a",
params={"struct_arr_param": [(123,), (456,)]},
param_types={"struct_arr_param": array_type},
expected=[(123,), (456,)],
order=False,
)
# Query with null array-of-struct field
struct_type_with_array_field = param_types.Struct(
[
param_types.StructField("intf", param_types.INT64),
param_types.StructField("arraysf", array_type),
]
)
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_param.arraysf) a",
params={"struct_param": (123, None)},
param_types={"struct_param": struct_type_with_array_field},
expected=[],
order=False,
)
# Query with non-null array-of-struct field
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_param.arraysf) a",
params={"struct_param": (123, ((456,), (789,)))},
param_types={"struct_param": struct_type_with_array_field},
expected=[(456,), (789,)],
order=False,
)
# Query with anonymous / repeated-name fields
anon_repeated_array_elem_type = param_types.Struct(
[
param_types.StructField("", param_types.INT64),
param_types.StructField("", param_types.STRING),
]
)
anon_repeated_array_type = param_types.Array(anon_repeated_array_elem_type)
self._check_sql_results(
self._db,
sql="SELECT CAST(t as STRUCT<threadid INT64, userid STRING>).* "
"FROM UNNEST(@struct_param) t",
params={"struct_param": [(123, "abcdef")]},
param_types={"struct_param": anon_repeated_array_type},
expected=[(123, "abcdef")],
order=False,
)
# Query and return a struct parameter
value_type = param_types.Struct(
[
param_types.StructField("message", param_types.STRING),
param_types.StructField("repeat", param_types.INT64),
]
)
value_query = (
"SELECT ARRAY(SELECT AS STRUCT message, repeat "
"FROM (SELECT @value.message AS message, "
"@value.repeat AS repeat)) AS value"
)
self._check_sql_results(
self._db,
sql=value_query,
params={"value": ("hello", 1)},
param_types={"value": value_type},
expected=[([["hello", 1]],)],
order=False,
)
def test_execute_sql_returning_transfinite_floats(self):
with self._db.snapshot(multi_use=True) as snapshot:
# Query returning -inf, +inf, NaN as column values
rows = list(
snapshot.execute_sql(
"SELECT "
'CAST("-inf" AS FLOAT64), '
'CAST("+inf" AS FLOAT64), '
'CAST("NaN" AS FLOAT64)'
)
)
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0][0], float("-inf"))
self.assertEqual(rows[0][1], float("+inf"))
# NaNs cannot be compared by equality.
self.assertTrue(math.isnan(rows[0][2]))
# Query returning array of -inf, +inf, NaN as one column
rows = list(
snapshot.execute_sql(
"SELECT"
' [CAST("-inf" AS FLOAT64),'
' CAST("+inf" AS FLOAT64),'
' CAST("NaN" AS FLOAT64)]'
)
)
self.assertEqual(len(rows), 1)
float_array, = rows[0]
self.assertEqual(float_array[0], float("-inf"))
self.assertEqual(float_array[1], float("+inf"))
# NaNs cannot be searched for by equality.
self.assertTrue(math.isnan(float_array[2]))
def test_partition_query(self):
row_count = 40
sql = "SELECT * FROM {}".format(self.TABLE)
committed = self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
union = []
batch_txn = self._db.batch_snapshot(read_timestamp=committed)
for batch in batch_txn.generate_query_batches(sql):
p_results_iter = batch_txn.process(batch)
union.extend(list(p_results_iter))
self.assertEqual(union, all_data_rows)
batch_txn.close()
class TestStreamingChunking(unittest.TestCase, _TestData):
@classmethod
def setUpClass(cls):
from tests.system.utils.streaming_utils import INSTANCE_NAME
from tests.system.utils.streaming_utils import DATABASE_NAME
instance = Config.CLIENT.instance(INSTANCE_NAME)
if not instance.exists():
raise unittest.SkipTest(
"Run 'tests/system/utils/populate_streaming.py' to enable."
)
database = instance.database(DATABASE_NAME)
        if not database.exists():
raise unittest.SkipTest(
"Run 'tests/system/utils/populate_streaming.py' to enable."
)
cls._db = database
def _verify_one_column(self, table_desc):
sql = "SELECT chunk_me FROM {}".format(table_desc.table)
with self._db.snapshot() as snapshot:
rows = list(snapshot.execute_sql(sql))
self.assertEqual(len(rows), table_desc.row_count)
expected = table_desc.value()
for row in rows:
self.assertEqual(row[0], expected)
def _verify_two_columns(self, table_desc):
sql = "SELECT chunk_me, chunk_me_2 FROM {}".format(table_desc.table)
with self._db.snapshot() as snapshot:
rows = list(snapshot.execute_sql(sql))
self.assertEqual(len(rows), table_desc.row_count)
expected = table_desc.value()
for row in rows:
self.assertEqual(row[0], expected)
self.assertEqual(row[1], expected)
def test_four_kay(self):
from tests.system.utils.streaming_utils import FOUR_KAY
self._verify_one_column(FOUR_KAY)
def test_forty_kay(self):
from tests.system.utils.streaming_utils import FORTY_KAY
self._verify_one_column(FORTY_KAY)
def test_four_hundred_kay(self):
from tests.system.utils.streaming_utils import FOUR_HUNDRED_KAY
self._verify_one_column(FOUR_HUNDRED_KAY)
def test_four_meg(self):
from tests.system.utils.streaming_utils import FOUR_MEG
self._verify_two_columns(FOUR_MEG)
class CustomException(Exception):
"""Placeholder for any user-defined exception."""
class _DatabaseDropper(object):
"""Helper for cleaning up databases created on-the-fly."""
def __init__(self, db):
self._db = db
def delete(self):
self._db.drop()
class _ReadAbortTrigger(object):
"""Helper for tests provoking abort-during-read."""
KEY1 = "key1"
KEY2 = "key2"
def __init__(self):
self.provoker_started = threading.Event()
self.provoker_done = threading.Event()
self.handler_running = threading.Event()
self.handler_done = threading.Event()
def _provoke_abort_unit_of_work(self, transaction):
keyset = KeySet(keys=[(self.KEY1,)])
rows = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
assert len(rows) == 1
row = rows[0]
value = row[1]
self.provoker_started.set()
self.handler_running.wait()
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY1, value + 1]])
def provoke_abort(self, database):
database.run_in_transaction(self._provoke_abort_unit_of_work)
self.provoker_done.set()
def _handle_abort_unit_of_work(self, transaction):
keyset_1 = KeySet(keys=[(self.KEY1,)])
rows_1 = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_1))
assert len(rows_1) == 1
row_1 = rows_1[0]
value_1 = row_1[1]
self.handler_running.set()
self.provoker_done.wait()
keyset_2 = KeySet(keys=[(self.KEY2,)])
rows_2 = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_2))
assert len(rows_2) == 1
row_2 = rows_2[0]
value_2 = row_2[1]
transaction.update(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY2, value_1 + value_2]]
)
def handle_abort(self, database):
database.run_in_transaction(self._handle_abort_unit_of_work)
self.handler_done.set()
|
Endika/django
|
refs/heads/master
|
tests/auth_tests/urls.py
|
80
|
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth import views
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.urls import urlpatterns as auth_urlpatterns
from django.contrib.messages.api import info
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.template import RequestContext, Template
from django.views.decorators.cache import never_cache
class CustomRequestAuthenticationForm(AuthenticationForm):
def __init__(self, request, *args, **kwargs):
assert isinstance(request, HttpRequest)
super(CustomRequestAuthenticationForm, self).__init__(request, *args, **kwargs)
@never_cache
def remote_user_auth_view(request):
"Dummy view for remote user tests"
t = Template("Username is {{ user }}.")
c = RequestContext(request, {})
return HttpResponse(t.render(c))
def auth_processor_no_attr_access(request):
render(request, 'context_processors/auth_attrs_no_access.html')
# *After* rendering, we check whether the session was accessed
return render(request,
'context_processors/auth_attrs_test_access.html',
{'session_accessed': request.session.accessed})
def auth_processor_attr_access(request):
render(request, 'context_processors/auth_attrs_access.html')
return render(request,
'context_processors/auth_attrs_test_access.html',
{'session_accessed': request.session.accessed})
def auth_processor_user(request):
return render(request, 'context_processors/auth_attrs_user.html')
def auth_processor_perms(request):
return render(request, 'context_processors/auth_attrs_perms.html')
def auth_processor_perm_in_perms(request):
return render(request, 'context_processors/auth_attrs_perm_in_perms.html')
def auth_processor_messages(request):
info(request, "Message 1")
return render(request, 'context_processors/auth_attrs_messages.html')
def userpage(request):
pass
def custom_request_auth_login(request):
return views.login(request, authentication_form=CustomRequestAuthenticationForm)
# special urls for auth test cases
urlpatterns = auth_urlpatterns + [
url(r'^logout/custom_query/$', views.logout, dict(redirect_field_name='follow')),
url(r'^logout/next_page/$', views.logout, dict(next_page='/somewhere/')),
url(r'^logout/next_page/named/$', views.logout, dict(next_page='password_reset')),
url(r'^remote_user/$', remote_user_auth_view),
url(r'^password_reset_from_email/$', views.password_reset, dict(from_email='staffmember@example.com')),
url(r'^password_reset_extra_email_context/$', views.password_reset,
dict(extra_email_context=dict(greeting='Hello!'))),
url(r'^password_reset/custom_redirect/$', views.password_reset, dict(post_reset_redirect='/custom/')),
url(r'^password_reset/custom_redirect/named/$', views.password_reset, dict(post_reset_redirect='password_reset')),
url(r'^password_reset/html_email_template/$', views.password_reset,
dict(html_email_template_name='registration/html_password_reset_email.html')),
url(r'^reset/custom/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.password_reset_confirm,
dict(post_reset_redirect='/custom/')),
url(r'^reset/custom/named/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.password_reset_confirm,
dict(post_reset_redirect='password_reset')),
url(r'^password_change/custom/$', views.password_change, dict(post_change_redirect='/custom/')),
url(r'^password_change/custom/named/$', views.password_change, dict(post_change_redirect='password_reset')),
url(r'^login_required/$', login_required(views.password_reset)),
url(r'^login_required_login_url/$', login_required(views.password_reset, login_url='/somewhere/')),
url(r'^auth_processor_no_attr_access/$', auth_processor_no_attr_access),
url(r'^auth_processor_attr_access/$', auth_processor_attr_access),
url(r'^auth_processor_user/$', auth_processor_user),
url(r'^auth_processor_perms/$', auth_processor_perms),
url(r'^auth_processor_perm_in_perms/$', auth_processor_perm_in_perms),
url(r'^auth_processor_messages/$', auth_processor_messages),
url(r'^custom_request_auth_login/$', custom_request_auth_login),
url(r'^userpage/(.+)/$', userpage, name="userpage"),
# This line is only required to render the password reset with is_admin=True
url(r'^admin/', admin.site.urls),
]
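# A hedged sketch (not part of the original module) of how one of the URL
# patterns above might be exercised; the test class name and the assumption
# that ROOT_URLCONF points at this module are illustrative only:
#
#     from django.test import TestCase, override_settings
#
#     @override_settings(ROOT_URLCONF=__name__)
#     class LogoutNextPageTests(TestCase):
#         def test_next_page_redirect(self):
#             response = self.client.get('/logout/next_page/')
#             self.assertRedirects(response, '/somewhere/',
#                                  fetch_redirect_response=False)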
|
heeraj123/oh-mainline
|
refs/heads/master
|
vendor/packages/scrapy/scrapy/http/response/text.py
|
16
|
"""
This module implements the TextResponse class, which adds encoding handling and
discovery (through HTTP headers) to the base Response class.
See documentation in docs/topics/request-response.rst
"""
import re
import codecs
from scrapy.http.response.dammit import UnicodeDammit
from scrapy.http.response import Response
from scrapy.utils.python import memoizemethod_noargs
from scrapy.utils.encoding import encoding_exists, resolve_encoding
from scrapy.conf import settings
# Python decoder doesn't follow unicode standard when handling
# bad utf-8 encoded strings. see http://bugs.python.org/issue8271
codecs.register_error('scrapy_replace', lambda exc: (u'\ufffd', exc.start+1))
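# Illustrative note (not in the original source): with the handler registered
# above, each undecodable byte becomes a single U+FFFD and decoding resumes at
# the very next byte, e.g. (Python 2 syntax, as used by this module):
#
#     '\xc3(abc'.decode('utf-8', 'scrapy_replace')   # -> u'\ufffd(abc'
#
# whereas the built-in 'replace' handler may swallow several bytes per
# replacement character for malformed multi-byte sequences.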
class TextResponse(Response):
_DEFAULT_ENCODING = settings['DEFAULT_RESPONSE_ENCODING']
_ENCODING_RE = re.compile(r'charset=([\w-]+)', re.I)
def __init__(self, *args, **kwargs):
self._encoding = kwargs.pop('encoding', None)
self._cached_benc = None
self._cached_ubody = None
super(TextResponse, self).__init__(*args, **kwargs)
def _set_url(self, url):
if isinstance(url, unicode):
if self.encoding is None:
raise TypeError('Cannot convert unicode url - %s has no encoding' %
type(self).__name__)
self._url = url.encode(self.encoding)
else:
super(TextResponse, self)._set_url(url)
def _set_body(self, body):
self._body = ''
if isinstance(body, unicode):
if self.encoding is None:
raise TypeError('Cannot convert unicode body - %s has no encoding' %
type(self).__name__)
self._body = body.encode(self._encoding)
else:
super(TextResponse, self)._set_body(body)
def replace(self, *args, **kwargs):
kwargs.setdefault('encoding', self.encoding)
return Response.replace(self, *args, **kwargs)
@property
def encoding(self):
return self._get_encoding(infer=True)
def _get_encoding(self, infer=False):
enc = self._declared_encoding()
if enc and not encoding_exists(enc):
enc = None
if not enc and infer:
enc = self._body_inferred_encoding()
if not enc:
enc = self._DEFAULT_ENCODING
return resolve_encoding(enc)
def _declared_encoding(self):
return self._encoding or self._headers_encoding() \
or self._body_declared_encoding()
def body_as_unicode(self):
"""Return body as unicode"""
if self._cached_ubody is None:
self._cached_ubody = self.body.decode(self.encoding, 'scrapy_replace')
return self._cached_ubody
@memoizemethod_noargs
def _headers_encoding(self):
content_type = self.headers.get('Content-Type')
if content_type:
m = self._ENCODING_RE.search(content_type)
if m:
encoding = m.group(1)
if encoding_exists(encoding):
return encoding
def _body_inferred_encoding(self):
if self._cached_benc is None:
enc = self._get_encoding()
dammit = UnicodeDammit(self.body, [enc])
benc = dammit.originalEncoding
self._cached_benc = benc
# UnicodeDammit is buggy decoding utf-16
if self._cached_ubody is None and benc != 'utf-16':
self._cached_ubody = dammit.unicode
return self._cached_benc
def _body_declared_encoding(self):
# implemented in subclasses (XmlResponse, HtmlResponse)
return None
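# A hedged usage sketch (not part of the original file). The decoding encoding
# is resolved in order from: the constructor's `encoding` argument, the
# Content-Type header, a declaration in the body (handled by subclasses), and
# finally inference over the raw bytes or the configured default. For example
# (URL and body are illustrative only):
#
#     r = TextResponse(url='http://www.example.com/',
#                      headers={'Content-Type': 'text/html; charset=iso-8859-1'},
#                      body='caf\xe9')
#     r.encoding            # resolved from the Content-Type header
#     r.body_as_unicode()   # -> u'caf\xe9'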
|
Jayflux/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/html5lib/html5lib/html5parser.py
|
423
|
from __future__ import absolute_import, division, unicode_literals
from six import with_metaclass
import types
from . import inputstream
from . import tokenizer
from . import treebuilders
from .treebuilders._base import Marker
from . import utils
from . import constants
from .constants import spaceCharacters, asciiUpper2Lower
from .constants import specialElements
from .constants import headingElements
from .constants import cdataElements, rcdataElements
from .constants import tokenTypes, ReparseException, namespaces
from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
from .constants import adjustForeignAttributes as adjustForeignAttributesMap
def parse(doc, treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
"""Parse a string or file-like object into a tree"""
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parseFragment(doc, container=container, encoding=encoding)
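# Hedged usage sketch (not part of the original source). These helpers are
# normally reached through the package-level API; "etree" is the default
# treebuilder used above:
#
#     import html5lib
#     document = html5lib.parse("<p>Hello<table>")                   # full document
#     fragment = html5lib.parseFragment("<b>bold", container="div")  # fragment
#
# Malformed markup is repaired according to the HTML5 parsing algorithm rather
# than rejected.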
def method_decorator_metaclass(function):
class Decorated(type):
def __new__(meta, classname, bases, classDict):
for attributeName, attribute in classDict.items():
if isinstance(attribute, types.FunctionType):
attribute = function(attribute)
classDict[attributeName] = attribute
return type.__new__(meta, classname, bases, classDict)
return Decorated
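# Illustrative sketch (not in the original source): the factory above builds a
# metaclass that wraps every plain function attribute of a class with the given
# decorator, e.g.:
#
#     def traced(fn):
#         def wrapper(*args, **kwargs):
#             print("calling " + fn.__name__)
#             return fn(*args, **kwargs)
#         return wrapper
#
#     class Example(with_metaclass(method_decorator_metaclass(traced))):
#         def greet(self):
#             return "hi"
#
#     Example().greet()   # prints "calling greet", then returns "hi"
#
# getPhases() below relies on this with its `log` decorator when debug is on.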
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer,
strict=False, namespaceHTMLElements=True, debug=False):
"""
strict - raise an exception when a parse error is encountered
tree - a treebuilder class controlling the type of tree that will be
        returned. Built-in treebuilders can be accessed through
html5lib.treebuilders.getTreeBuilder(treeType)
tokenizer - a class that provides a stream of tokens to the treebuilder.
        This may be replaced, for example, by a sanitizer which converts some
        tags to text
"""
# Raise an exception on the first error encountered
self.strict = strict
if tree is None:
tree = treebuilders.getTreeBuilder("etree")
self.tree = tree(namespaceHTMLElements)
self.tokenizer_class = tokenizer
self.errors = []
self.phases = dict([(name, cls(self, self.tree)) for name, cls in
getPhases(debug).items()])
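    # Hedged construction sketch (not part of the original source); the "dom"
    # treebuilder name is just one of the built-in options:
    #
    #     from html5lib import treebuilders
    #     parser = HTMLParser(tree=treebuilders.getTreeBuilder("dom"), strict=True)
    #     document = parser.parse("<!DOCTYPE html><p>hi")
    #
    # With strict=True the first parse error raises ParseError rather than just
    # being collected in parser.errors.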
def _parse(self, stream, innerHTML=False, container="div",
encoding=None, parseMeta=True, useChardet=True, **kwargs):
self.innerHTMLMode = innerHTML
self.container = container
self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
parseMeta=parseMeta,
useChardet=useChardet,
parser=self, **kwargs)
self.reset()
while True:
try:
self.mainLoop()
break
except ReparseException:
self.reset()
def reset(self):
self.tree.reset()
self.firstStartTag = False
self.errors = []
self.log = [] # only used with debug mode
# "quirks" / "limited quirks" / "no quirks"
self.compatMode = "no quirks"
if self.innerHTMLMode:
self.innerHTML = self.container.lower()
if self.innerHTML in cdataElements:
self.tokenizer.state = self.tokenizer.rcdataState
elif self.innerHTML in rcdataElements:
self.tokenizer.state = self.tokenizer.rawtextState
elif self.innerHTML == 'plaintext':
self.tokenizer.state = self.tokenizer.plaintextState
else:
# state already is data state
# self.tokenizer.state = self.tokenizer.dataState
pass
self.phase = self.phases["beforeHtml"]
self.phase.insertHtmlElement()
self.resetInsertionMode()
else:
self.innerHTML = False
self.phase = self.phases["initial"]
self.lastPhase = None
self.beforeRCDataPhase = None
self.framesetOK = True
@property
def documentEncoding(self):
"""The name of the character encoding
that was used to decode the input stream,
or :obj:`None` if that is not determined yet.
"""
if not hasattr(self, 'tokenizer'):
return None
return self.tokenizer.stream.charEncoding[0]
def isHTMLIntegrationPoint(self, element):
if (element.name == "annotation-xml" and
element.namespace == namespaces["mathml"]):
return ("encoding" in element.attributes and
element.attributes["encoding"].translate(
asciiUpper2Lower) in
("text/html", "application/xhtml+xml"))
else:
return (element.namespace, element.name) in htmlIntegrationPointElements
def isMathMLTextIntegrationPoint(self, element):
return (element.namespace, element.name) in mathmlTextIntegrationPointElements
def mainLoop(self):
CharactersToken = tokenTypes["Characters"]
SpaceCharactersToken = tokenTypes["SpaceCharacters"]
StartTagToken = tokenTypes["StartTag"]
EndTagToken = tokenTypes["EndTag"]
CommentToken = tokenTypes["Comment"]
DoctypeToken = tokenTypes["Doctype"]
ParseErrorToken = tokenTypes["ParseError"]
for token in self.normalizedTokens():
new_token = token
while new_token is not None:
currentNode = self.tree.openElements[-1] if self.tree.openElements else None
currentNodeNamespace = currentNode.namespace if currentNode else None
currentNodeName = currentNode.name if currentNode else None
type = new_token["type"]
if type == ParseErrorToken:
self.parseError(new_token["data"], new_token.get("datavars", {}))
new_token = None
else:
if (len(self.tree.openElements) == 0 or
currentNodeNamespace == self.tree.defaultNamespace or
(self.isMathMLTextIntegrationPoint(currentNode) and
((type == StartTagToken and
token["name"] not in frozenset(["mglyph", "malignmark"])) or
type in (CharactersToken, SpaceCharactersToken))) or
(currentNodeNamespace == namespaces["mathml"] and
currentNodeName == "annotation-xml" and
token["name"] == "svg") or
(self.isHTMLIntegrationPoint(currentNode) and
type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
phase = self.phase
else:
phase = self.phases["inForeignContent"]
if type == CharactersToken:
new_token = phase.processCharacters(new_token)
elif type == SpaceCharactersToken:
new_token = phase.processSpaceCharacters(new_token)
elif type == StartTagToken:
new_token = phase.processStartTag(new_token)
elif type == EndTagToken:
new_token = phase.processEndTag(new_token)
elif type == CommentToken:
new_token = phase.processComment(new_token)
elif type == DoctypeToken:
new_token = phase.processDoctype(new_token)
if (type == StartTagToken and token["selfClosing"]
and not token["selfClosingAcknowledged"]):
self.parseError("non-void-element-with-trailing-solidus",
{"name": token["name"]})
# When the loop finishes it's EOF
reprocess = True
phases = []
while reprocess:
phases.append(self.phase)
reprocess = self.phase.processEOF()
if reprocess:
assert self.phase not in phases
def normalizedTokens(self):
for token in self.tokenizer:
yield self.normalizeToken(token)
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
"""Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, innerHTML=False, encoding=encoding,
parseMeta=parseMeta, useChardet=useChardet)
return self.tree.getDocument()
def parseFragment(self, stream, container="div", encoding=None,
parseMeta=False, useChardet=True):
"""Parse a HTML fragment into a well-formed tree fragment
container - name of the element we're setting the innerHTML property
if set to None, default to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, True, container=container, encoding=encoding)
return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars={}):
# XXX The idea is to make errorcode mandatory.
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
if self.strict:
raise ParseError
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
token["data"] = dict(token["data"][::-1])
return token
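    # Illustrative note (added): building the dict from the reversed attribute
    # list means that, for duplicated attributes, the *first* occurrence wins,
    # as the HTML5 specification requires.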
def adjustMathMLAttributes(self, token):
replacements = {"definitionurl": "definitionURL"}
for k, v in replacements.items():
if k in token["data"]:
token["data"][v] = token["data"][k]
del token["data"][k]
def adjustSVGAttributes(self, token):
replacements = {
"attributename": "attributeName",
"attributetype": "attributeType",
"basefrequency": "baseFrequency",
"baseprofile": "baseProfile",
"calcmode": "calcMode",
"clippathunits": "clipPathUnits",
"contentscripttype": "contentScriptType",
"contentstyletype": "contentStyleType",
"diffuseconstant": "diffuseConstant",
"edgemode": "edgeMode",
"externalresourcesrequired": "externalResourcesRequired",
"filterres": "filterRes",
"filterunits": "filterUnits",
"glyphref": "glyphRef",
"gradienttransform": "gradientTransform",
"gradientunits": "gradientUnits",
"kernelmatrix": "kernelMatrix",
"kernelunitlength": "kernelUnitLength",
"keypoints": "keyPoints",
"keysplines": "keySplines",
"keytimes": "keyTimes",
"lengthadjust": "lengthAdjust",
"limitingconeangle": "limitingConeAngle",
"markerheight": "markerHeight",
"markerunits": "markerUnits",
"markerwidth": "markerWidth",
"maskcontentunits": "maskContentUnits",
"maskunits": "maskUnits",
"numoctaves": "numOctaves",
"pathlength": "pathLength",
"patterncontentunits": "patternContentUnits",
"patterntransform": "patternTransform",
"patternunits": "patternUnits",
"pointsatx": "pointsAtX",
"pointsaty": "pointsAtY",
"pointsatz": "pointsAtZ",
"preservealpha": "preserveAlpha",
"preserveaspectratio": "preserveAspectRatio",
"primitiveunits": "primitiveUnits",
"refx": "refX",
"refy": "refY",
"repeatcount": "repeatCount",
"repeatdur": "repeatDur",
"requiredextensions": "requiredExtensions",
"requiredfeatures": "requiredFeatures",
"specularconstant": "specularConstant",
"specularexponent": "specularExponent",
"spreadmethod": "spreadMethod",
"startoffset": "startOffset",
"stddeviation": "stdDeviation",
"stitchtiles": "stitchTiles",
"surfacescale": "surfaceScale",
"systemlanguage": "systemLanguage",
"tablevalues": "tableValues",
"targetx": "targetX",
"targety": "targetY",
"textlength": "textLength",
"viewbox": "viewBox",
"viewtarget": "viewTarget",
"xchannelselector": "xChannelSelector",
"ychannelselector": "yChannelSelector",
"zoomandpan": "zoomAndPan"
}
for originalName in list(token["data"].keys()):
if originalName in replacements:
svgName = replacements[originalName]
token["data"][svgName] = token["data"][originalName]
del token["data"][originalName]
def adjustForeignAttributes(self, token):
replacements = adjustForeignAttributesMap
for originalName in token["data"].keys():
if originalName in replacements:
foreignName = replacements[originalName]
token["data"][foreignName] = token["data"][originalName]
del token["data"][originalName]
def reparseTokenNormal(self, token):
self.parser.phase()
def resetInsertionMode(self):
# The name of this method is mostly historical. (It's also used in the
# specification.)
last = False
newModes = {
"select": "inSelect",
"td": "inCell",
"th": "inCell",
"tr": "inRow",
"tbody": "inTableBody",
"thead": "inTableBody",
"tfoot": "inTableBody",
"caption": "inCaption",
"colgroup": "inColumnGroup",
"table": "inTable",
"head": "inBody",
"body": "inBody",
"frameset": "inFrameset",
"html": "beforeHead"
}
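        # Illustrative note (added): e.g. when fragment parsing with
        # container="td", the walk below reaches the root node, substitutes the
        # "td" container name and switches the parser to the "inCell" phase.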
for node in self.tree.openElements[::-1]:
nodeName = node.name
new_phase = None
if node == self.tree.openElements[0]:
assert self.innerHTML
last = True
nodeName = self.innerHTML
# Check for conditions that should only happen in the innerHTML
# case
if nodeName in ("select", "colgroup", "head", "html"):
assert self.innerHTML
if not last and node.namespace != self.tree.defaultNamespace:
continue
if nodeName in newModes:
new_phase = self.phases[newModes[nodeName]]
break
elif last:
new_phase = self.phases["inBody"]
break
self.phase = new_phase
def parseRCDataRawtext(self, token, contentType):
"""Generic RCDATA/RAWTEXT Parsing algorithm
contentType - RCDATA or RAWTEXT
"""
assert contentType in ("RAWTEXT", "RCDATA")
self.tree.insertElement(token)
if contentType == "RAWTEXT":
self.tokenizer.state = self.tokenizer.rawtextState
else:
self.tokenizer.state = self.tokenizer.rcdataState
self.originalPhase = self.phase
self.phase = self.phases["text"]
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
constants.tokenTypes.items())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
try:
info = {"type": type_names[token['type']]}
except:
raise
if token['type'] in constants.tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
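        # Illustrative note (added): with debug enabled, each entry appended to
        # parser.log is a tuple of roughly the form
        # (tokenizer state, current phase, handling phase class, method name,
        #  {"type": ..., "name": ...}).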
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
class Phase(with_metaclass(getMetaclass(debug, log))):
"""Base class for helper object that implements each phase of processing
"""
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
# For most phases the following is correct. Where it's not it will be
# overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
return self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
if not self.parser.firstStartTag and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].items():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
def processSpaceCharacters(self, token):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId is not None or
systemId is not None and systemId != "about:legacy-compat"):
self.parser.parseError("unknown-doctype")
if publicId is None:
publicId = ""
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
if (not correct or token["name"] != "html"
or publicId.startswith(
("+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//"))
or publicId in
("-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html")
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is None
or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
self.parser.compatMode = "quirks"
elif (publicId.startswith(
("-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//"))
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is not None):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def anythingElse(self):
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.anythingElse()
return token
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.anythingElse()
return True
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
return True
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
return token
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
return token
def processEndTag(self, token):
if token["name"] not in ("head", "body", "html", "br"):
self.parser.parseError("unexpected-end-tag-before-html",
{"name": token["name"]})
else:
self.insertHtmlElement()
return token
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("head", "body", "html", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
return True
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
("script", self.startTagScript),
(("base", "basefont", "bgsound", "command", "link"),
self.startTagBaseLinkCommand),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# the real thing
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif ("content" in attributes and
"http-equiv" in attributes and
attributes["http-equiv"].lower() == "content-type"):
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string, and just use the
# ContentAttrParser on that, but using UTF-8 allows all chars
                    # to be encoded and, as an ASCII superset, works.
data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
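            # Illustrative note (added): changeEncoding() may raise
            # ReparseException, which _parse() catches in order to restart
            # parsing with the newly discovered encoding.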
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoScriptNoFramesStyle(self, token):
# Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s" % node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
# XXX If we implement a parser for which scripting is disabled we need to
# implement this phase.
#
# class InHeadNoScriptPhase(Phase):
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
"style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
# Keep a ref to this for special handling of whitespace in <pre>
self.processSpaceCharactersNonPre = self.processSpaceCharacters
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"noframes", "script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "details",
"details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext", self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"), self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
(("noembed", "noframes", "noscript"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("body", self.endTagBody),
("html", self.endTagHtml),
(("address", "article", "aside", "blockquote", "button", "center",
"details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p", self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
if node1.name != node2.name or node1.namespace != node2.namespace:
return False
elif len(node1.attributes) != len(node2.attributes):
return False
else:
attributes1 = sorted(node1.attributes.items())
attributes2 = sorted(node2.attributes.items())
for attr1, attr2 in zip(attributes1, attributes2):
if attr1 != attr2:
return False
return True
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
element = self.tree.openElements[-1]
matchingElements = []
for node in self.tree.activeFormattingElements[::-1]:
if node is Marker:
break
elif self.isMatchingFormattingElement(node, element):
matchingElements.append(node)
assert len(matchingElements) <= 3
if len(matchingElements) == 3:
self.tree.activeFormattingElements.remove(matchingElements[-1])
self.tree.activeFormattingElements.append(element)
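            # Illustrative note (added): capping matching entries at three
            # implements the spec's "Noah's Ark" clause, which keeps the list of
            # active formatting elements from growing without bound.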
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
# Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea")
and not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == "\u0000":
# The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
# This must be bad for performance
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
def processSpaceCharacters(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1
or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
self.parser.framesetOK = False
for attr, value in token["data"].items():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
def startTagPreListing(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError("unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
self.parser.framesetOK = False
stopNamesMap = {"li": ["li"],
"dt": ["dt", "dd"],
"dd": ["dt", "dd"]}
stopNames = stopNamesMap[token["name"]]
for node in reversed(self.tree.openElements):
if node.name in stopNames:
self.parser.phase.processEndTag(
impliedTagToken(node.name, "EndTag"))
break
if (node.nameTuple in specialElements and
node.name not in ("address", "div", "p")):
break
if self.tree.elementInScope("p", variant="button"):
self.parser.phase.processEndTag(
impliedTagToken("p", "EndTag"))
self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagA(self, token):
afeAElement = self.tree.elementInActiveFormattingElements("a")
if afeAElement:
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "a", "endName": "a"})
self.endTagFormatting(impliedTagToken("a"))
if afeAElement in self.tree.openElements:
self.tree.openElements.remove(afeAElement)
if afeAElement in self.tree.activeFormattingElements:
self.tree.activeFormattingElements.remove(afeAElement)
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagNobr(self, token):
self.tree.reconstructActiveFormattingElements()
if self.tree.elementInScope("nobr"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "nobr", "endName": "nobr"})
self.processEndTag(impliedTagToken("nobr"))
# XXX Need tests that trigger the following
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagButton(self, token):
if self.tree.elementInScope("button"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "button", "endName": "button"})
self.processEndTag(impliedTagToken("button"))
return token
else:
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p", variant="button"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagInput(self, token):
framesetOK = self.parser.framesetOK
self.startTagVoidFormatting(token)
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
# input type=hidden doesn't change framesetOK
self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
# No really...
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
self.parser.parseError("deprecated-tag", {"name": "isindex"})
if self.tree.formPointer:
return
form_attrs = {}
if "action" in token["data"]:
form_attrs["action"] = token["data"]["action"]
self.processStartTag(impliedTagToken("form", "StartTag",
attributes=form_attrs))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processStartTag(impliedTagToken("label", "StartTag"))
# XXX Localization ...
if "prompt" in token["data"]:
prompt = token["data"]["prompt"]
else:
prompt = "This is a searchable index. Enter search keywords: "
self.processCharacters(
{"type": tokenTypes["Characters"], "data": prompt})
attributes = token["data"].copy()
if "action" in attributes:
del attributes["action"]
if "prompt" in attributes:
del attributes["prompt"]
attributes["name"] = "isindex"
self.processStartTag(impliedTagToken("input", "StartTag",
attributes=attributes,
selfClosing=token["selfClosing"]))
self.processEndTag(impliedTagToken("label"))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
def startTagIFrame(self, token):
self.parser.framesetOK = False
self.startTagRawtext(token)
def startTagRawtext(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
def startTagSelect(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
if self.parser.phase in (self.parser.phases["inTable"],
self.parser.phases["inCaption"],
self.parser.phases["inColumnGroup"],
self.parser.phases["inTableBody"],
self.parser.phases["inRow"],
self.parser.phases["inCell"]):
self.parser.phase = self.parser.phases["inSelectInTable"]
else:
self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
self.tree.insertElement(token)
def startTagMath(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustMathMLAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["mathml"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagSvg(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["svg"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
def endTagP(self, token):
if not self.tree.elementInScope("p", variant="button"):
self.startTagCloseP(impliedTagToken("p", "StartTag"))
self.parser.parseError("unexpected-end-tag", {"name": "p"})
self.endTagP(impliedTagToken("p", "EndTag"))
else:
self.tree.generateImpliedEndTags("p")
if self.tree.openElements[-1].name != "p":
self.parser.parseError("unexpected-end-tag", {"name": "p"})
node = self.tree.openElements.pop()
while node.name != "p":
node = self.tree.openElements.pop()
def endTagBody(self, token):
if not self.tree.elementInScope("body"):
self.parser.parseError()
return
elif self.tree.openElements[-1].name != "body":
for node in self.tree.openElements[2:]:
if node.name not in frozenset(("dd", "dt", "li", "optgroup",
"option", "p", "rp", "rt",
"tbody", "td", "tfoot",
"th", "thead", "tr", "body",
"html")):
# Not sure this is the correct name for the parse error
self.parser.parseError(
"expected-one-end-tag-but-got-another",
{"expectedName": "body", "gotName": node.name})
break
self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
# We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
return token
def endTagBlock(self, token):
# Put us back in the right whitespace handling mode
if token["name"] == "pre":
self.processSpaceCharacters = self.processSpaceCharactersNonPre
inScope = self.tree.elementInScope(token["name"])
if inScope:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if inScope:
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagForm(self, token):
node = self.tree.formPointer
self.tree.formPointer = None
if node is None or not self.tree.elementInScope(node):
self.parser.parseError("unexpected-end-tag",
{"name": "form"})
else:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1] != node:
self.parser.parseError("end-tag-too-early-ignored",
{"name": "form"})
self.tree.openElements.remove(node)
def endTagListItem(self, token):
if token["name"] == "li":
variant = "list"
else:
variant = None
if not self.tree.elementInScope(token["name"], variant=variant):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
else:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError(
"end-tag-too-early",
{"name": token["name"]})
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagHeading(self, token):
for item in headingElements:
if self.tree.elementInScope(item):
self.tree.generateImpliedEndTags()
break
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
for item in headingElements:
if self.tree.elementInScope(item):
item = self.tree.openElements.pop()
while item.name not in headingElements:
item = self.tree.openElements.pop()
break
def endTagFormatting(self, token):
"""The much-feared adoption agency algorithm"""
# http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
# XXX Better parseError messages appreciated.
# Step 1
outerLoopCounter = 0
# Step 2
while outerLoopCounter < 8:
# Step 3
outerLoopCounter += 1
# Step 4:
# Let the formatting element be the last element in
# the list of active formatting elements that:
# - is between the end of the list and the last scope
# marker in the list, if any, or the start of the list
# otherwise, and
# - has the same tag name as the token.
formattingElement = self.tree.elementInActiveFormattingElements(
token["name"])
if (not formattingElement or
(formattingElement in self.tree.openElements and
not self.tree.elementInScope(formattingElement.name))):
# If there is no such node, then abort these steps
# and instead act as described in the "any other
# end tag" entry below.
self.endTagOther(token)
return
# Otherwise, if there is such a node, but that node is
# not in the stack of open elements, then this is a
# parse error; remove the element from the list, and
# abort these steps.
elif formattingElement not in self.tree.openElements:
self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
self.tree.activeFormattingElements.remove(formattingElement)
return
# Otherwise, if there is such a node, and that node is
# also in the stack of open elements, but the element
# is not in scope, then this is a parse error; ignore
# the token, and abort these steps.
elif not self.tree.elementInScope(formattingElement.name):
self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
return
# Otherwise, there is a formatting element and that
# element is in the stack and is in scope. If the
# element is not the current node, this is a parse
# error. In any case, proceed with the algorithm as
# written in the following steps.
else:
if formattingElement != self.tree.openElements[-1]:
self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
# Step 5:
# Let the furthest block be the topmost node in the
# stack of open elements that is lower in the stack
# than the formatting element, and is an element in
# the special category. There might not be one.
afeIndex = self.tree.openElements.index(formattingElement)
furthestBlock = None
for element in self.tree.openElements[afeIndex:]:
if element.nameTuple in specialElements:
furthestBlock = element
break
# Step 6:
# If there is no furthest block, then the UA must
# first pop all the nodes from the bottom of the stack
# of open elements, from the current node up to and
# including the formatting element, then remove the
# formatting element from the list of active
# formatting elements, and finally abort these steps.
if furthestBlock is None:
element = self.tree.openElements.pop()
while element != formattingElement:
element = self.tree.openElements.pop()
self.tree.activeFormattingElements.remove(element)
return
# Step 7
commonAncestor = self.tree.openElements[afeIndex - 1]
# Step 8:
# The bookmark is supposed to help us identify where to reinsert
# nodes in step 15. We have to ensure that we reinsert nodes after
# the node before the active formatting element. Note the bookmark
# can move in step 9.7
bookmark = self.tree.activeFormattingElements.index(formattingElement)
# Step 9
lastNode = node = furthestBlock
innerLoopCounter = 0
index = self.tree.openElements.index(node)
while innerLoopCounter < 3:
innerLoopCounter += 1
# Node is element before node in open elements
index -= 1
node = self.tree.openElements[index]
if node not in self.tree.activeFormattingElements:
self.tree.openElements.remove(node)
continue
# Step 9.6
if node == formattingElement:
break
# Step 9.7
if lastNode == furthestBlock:
bookmark = self.tree.activeFormattingElements.index(node) + 1
# Step 9.8
clone = node.cloneNode()
# Replace node with clone
self.tree.activeFormattingElements[
self.tree.activeFormattingElements.index(node)] = clone
self.tree.openElements[
self.tree.openElements.index(node)] = clone
node = clone
# Step 9.9
# Remove lastNode from its parents, if any
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
node.appendChild(lastNode)
# Step 9.10
lastNode = node
# Step 10
# Foster parent lastNode if commonAncestor is a
# table, tbody, tfoot, thead, or tr we need to foster
# parent the lastNode
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
parent, insertBefore = self.tree.getTableMisnestedNodePosition()
parent.insertBefore(lastNode, insertBefore)
else:
commonAncestor.appendChild(lastNode)
# Step 11
clone = formattingElement.cloneNode()
# Step 12
furthestBlock.reparentChildren(clone)
# Step 13
furthestBlock.appendChild(clone)
# Step 14
self.tree.activeFormattingElements.remove(formattingElement)
self.tree.activeFormattingElements.insert(bookmark, clone)
# Step 15
self.tree.openElements.remove(formattingElement)
self.tree.openElements.insert(
self.tree.openElements.index(furthestBlock) + 1, clone)
def endTagAppletMarqueeObject(self, token):
if self.tree.elementInScope(token["name"]):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if self.tree.elementInScope(token["name"]):
element = self.tree.openElements.pop()
while element.name != token["name"]:
element = self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
def endTagOther(self, token):
for node in self.tree.openElements[::-1]:
if node.name == token["name"]:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while self.tree.openElements.pop() != node:
pass
break
else:
if node.nameTuple in specialElements:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
break
class TextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("script", self.endTagScript)])
self.endTagHandler.default = self.endTagOther
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processEOF(self):
self.parser.parseError("expected-named-closing-tag-but-got-eof",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
return True
def startTagOther(self, token):
assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
def endTagScript(self, token):
node = self.tree.openElements.pop()
assert node.name == "script"
self.parser.phase = self.parser.originalPhase
# The rest of this method is all stuff that only happens if
# document.write works
def endTagOther(self, token):
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("caption", self.startTagCaption),
("colgroup", self.startTagColgroup),
("col", self.startTagCol),
(("tbody", "tfoot", "thead"), self.startTagRowGroup),
(("td", "th", "tr"), self.startTagImplyTbody),
("table", self.startTagTable),
(("style", "script"), self.startTagStyleScript),
("input", self.startTagInput),
("form", self.startTagForm)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "tbody", "td",
"tfoot", "th", "thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableContext(self):
# "clear the stack back to a table context"
while self.tree.openElements[-1].name not in ("table", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
# When the current node is <html> it's an innerHTML case
# processing methods
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-table")
else:
assert self.parser.innerHTML
# Stop parsing
def processSpaceCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processSpaceCharacters(token)
def processCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processCharacters(token)
def insertText(self, token):
# If we get here there must be at least one non-whitespace character
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processCharacters(token)
self.tree.insertFromTable = False
def startTagCaption(self, token):
self.clearStackToTableContext()
self.tree.activeFormattingElements.append(Marker)
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCaption"]
def startTagColgroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inColumnGroup"]
def startTagCol(self, token):
self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
return token
def startTagRowGroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inTableBody"]
def startTagImplyTbody(self, token):
self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
return token
def startTagTable(self, token):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "table", "endName": "table"})
self.parser.phase.processEndTag(impliedTagToken("table"))
if not self.parser.innerHTML:
return token
def startTagStyleScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagInput(self, token):
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
self.parser.parseError("unexpected-hidden-input-in-table")
self.tree.insertElement(token)
# XXX associate with form
self.tree.openElements.pop()
else:
self.startTagOther(token)
def startTagForm(self, token):
self.parser.parseError("unexpected-form-in-table")
if self.tree.formPointer is None:
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
self.tree.openElements.pop()
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processStartTag(token)
self.tree.insertFromTable = False
def endTagTable(self, token):
if self.tree.elementInScope("table", variant="table"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "table":
self.parser.parseError("end-tag-too-early-named",
{"gotName": "table",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "table":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processEndTag(token)
self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type": tokenTypes["Characters"], "data": data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
# pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
class InCaptionPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableElement)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("caption", self.endTagCaption),
("table", self.endTagTable),
(("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableElement(self, token):
self.parser.parseError()
# XXX Have to duplicate logic here to find out if the tag is ignored
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagCaption(self, token):
if not self.ignoreEndTagCaption():
# AT this code is quite similar to endTagTable in "InTable"
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "caption":
self.parser.parseError("expected-one-end-tag-but-got-another",
{"gotName": "caption",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "caption":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inTable"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
self.parser.parseError()
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-column
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("col", self.startTagCol)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("colgroup", self.endTagColgroup),
("col", self.endTagCol)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagColgroup(self):
return self.tree.openElements[-1].name == "html"
def processEOF(self):
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
return
else:
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return True
def processCharacters(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def startTagCol(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def endTagColgroup(self, token):
if self.ignoreEndTagColgroup():
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
else:
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
def endTagCol(self, token):
self.parser.parseError("no-end-tag", {"name": "col"})
def endTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("tr", self.startTagTr),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
return token
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead",
"tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("tr", self.endTagTr),
("table", self.endTagTable),
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
(("body", "caption", "col", "colgroup", "html", "td", "th"),
self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
while self.tree.openElements[-1].name not in ("tr", "html"):
self.parser.parseError("unexpected-implied-end-tag-in-table-row",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
def ignoreEndTagTr(self):
return not self.tree.elementInScope("tr", variant="table")
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTableCell(self, token):
self.clearStackToTableRowContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCell"]
self.tree.activeFormattingElements.append(Marker)
def startTagTableOther(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTr(self, token):
if not self.ignoreEndTagTr():
self.clearStackToTableRowContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTableBody"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# Reprocess the current tag if the tr end tag was not ignored
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagTr(impliedTagToken("tr"))
return token
else:
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-row",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-cell
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("td", "th"), self.endTagTableCell),
(("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
(("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
])
self.endTagHandler.default = self.endTagOther
# helper
def closeCell(self):
if self.tree.elementInScope("td", variant="table"):
self.endTagTableCell(impliedTagToken("td"))
elif self.tree.elementInScope("th", variant="table"):
self.endTagTableCell(impliedTagToken("th"))
# the rest
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableOther(self, token):
if (self.tree.elementInScope("td", variant="table") or
self.tree.elementInScope("th", variant="table")):
self.closeCell()
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagTableCell(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.tree.generateImpliedEndTags(token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-cell-end-tag",
{"name": token["name"]})
while True:
node = self.tree.openElements.pop()
if node.name == token["name"]:
break
else:
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inRow"]
else:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagImply(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.closeCell()
return token
else:
# sometimes innerHTML case
self.parser.parseError()
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("option", self.startTagOption),
("optgroup", self.startTagOptgroup),
("select", self.startTagSelect),
(("input", "keygen", "textarea"), self.startTagInput),
("script", self.startTagScript)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("option", self.endTagOption),
("optgroup", self.endTagOptgroup),
("select", self.endTagSelect)
])
self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-select")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect(impliedTagToken("select"))
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="select"):
self.endTagSelect(impliedTagToken("select"))
return token
else:
assert self.parser.innerHTML
def startTagScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
# </optgroup> implicitly closes <option>
if (self.tree.openElements[-1].name == "option" and
self.tree.openElements[-2].name == "optgroup"):
self.tree.openElements.pop()
# It also closes </optgroup>
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
# But nothing else
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "optgroup"})
def endTagSelect(self, token):
if self.tree.elementInScope("select", variant="select"):
node = self.tree.openElements.pop()
while node.name != "select":
node = self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
class InSelectInTablePhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.startTagTable)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.endTagTable)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inSelect"].processCharacters(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
self.endTagOther(impliedTagToken("select"))
return token
def startTagOther(self, token):
return self.parser.phases["inSelect"].processStartTag(token)
def endTagTable(self, token):
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagOther(impliedTagToken("select"))
return token
def endTagOther(self, token):
return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
"center", "code", "dd", "div", "dl", "dt",
"em", "embed", "h1", "h2", "h3",
"h4", "h5", "h6", "head", "hr", "i", "img",
"li", "listing", "menu", "meta", "nobr",
"ol", "p", "pre", "ruby", "s", "small",
"span", "strong", "strike", "sub", "sup",
"table", "tt", "u", "ul", "var"])
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
def adjustSVGTagNames(self, token):
replacements = {"altglyph": "altGlyph",
"altglyphdef": "altGlyphDef",
"altglyphitem": "altGlyphItem",
"animatecolor": "animateColor",
"animatemotion": "animateMotion",
"animatetransform": "animateTransform",
"clippath": "clipPath",
"feblend": "feBlend",
"fecolormatrix": "feColorMatrix",
"fecomponenttransfer": "feComponentTransfer",
"fecomposite": "feComposite",
"feconvolvematrix": "feConvolveMatrix",
"fediffuselighting": "feDiffuseLighting",
"fedisplacementmap": "feDisplacementMap",
"fedistantlight": "feDistantLight",
"feflood": "feFlood",
"fefunca": "feFuncA",
"fefuncb": "feFuncB",
"fefuncg": "feFuncG",
"fefuncr": "feFuncR",
"fegaussianblur": "feGaussianBlur",
"feimage": "feImage",
"femerge": "feMerge",
"femergenode": "feMergeNode",
"femorphology": "feMorphology",
"feoffset": "feOffset",
"fepointlight": "fePointLight",
"fespecularlighting": "feSpecularLighting",
"fespotlight": "feSpotLight",
"fetile": "feTile",
"feturbulence": "feTurbulence",
"foreignobject": "foreignObject",
"glyphref": "glyphRef",
"lineargradient": "linearGradient",
"radialgradient": "radialGradient",
"textpath": "textPath"}
if token["name"] in replacements:
token["name"] = replacements[token["name"]]
def processCharacters(self, token):
if token["data"] == "\u0000":
token["data"] = "\uFFFD"
elif (self.parser.framesetOK and
any(char not in spaceCharacters for char in token["data"])):
self.parser.framesetOK = False
Phase.processCharacters(self, token)
def processStartTag(self, token):
currentNode = self.tree.openElements[-1]
if (token["name"] in self.breakoutElements or
(token["name"] == "font" and
set(token["data"].keys()) & set(["color", "face", "size"]))):
self.parser.parseError("unexpected-html-element-in-foreign-content",
{"name": token["name"]})
while (self.tree.openElements[-1].namespace !=
self.tree.defaultNamespace and
not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
self.tree.openElements.pop()
return token
else:
if currentNode.namespace == namespaces["mathml"]:
self.parser.adjustMathMLAttributes(token)
elif currentNode.namespace == namespaces["svg"]:
self.adjustSVGTagNames(token)
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = currentNode.namespace
self.tree.insertElement(token)
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def processEndTag(self, token):
nodeIndex = len(self.tree.openElements) - 1
node = self.tree.openElements[-1]
if node.name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while True:
if node.name.translate(asciiUpper2Lower) == token["name"]:
# XXX this isn't in the spec but it seems necessary
if self.parser.phase == self.parser.phases["inTableText"]:
self.parser.phase.flushCharacters()
self.parser.phase = self.parser.phase.originalPhase
while self.tree.openElements.pop() != node:
assert self.tree.openElements
new_token = None
break
nodeIndex -= 1
node = self.tree.openElements[nodeIndex]
if node.namespace != self.tree.defaultNamespace:
continue
else:
new_token = self.parser.phase.processEndTag(token)
break
return new_token
class AfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processComment(self, token):
# This is needed because data is to be appended to the <html> element
# here and not to whatever is currently open.
self.tree.insertComment(token, self.tree.openElements[0])
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-body")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
        def endTagHtml(self, token):
if self.parser.innerHTML:
self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
else:
self.parser.phase = self.parser.phases["afterAfterBody"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class InFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("frameset", self.startTagFrameset),
("frame", self.startTagFrame),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("frameset", self.endTagFrameset)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-frameset")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.parser.parseError("unexpected-char-in-frameset")
def startTagFrameset(self, token):
self.tree.insertElement(token)
def startTagFrame(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagNoframes(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-frameset",
{"name": token["name"]})
def endTagFrameset(self, token):
if self.tree.openElements[-1].name == "html":
# innerHTML case
self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
else:
self.tree.openElements.pop()
if (not self.parser.innerHTML and
self.tree.openElements[-1].name != "frameset"):
                # If we're not in innerHTML mode and the current node is not a
# "frameset" element (anymore) then switch.
self.parser.phase = self.parser.phases["afterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
class AfterFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#after3
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("html", self.endTagHtml)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-frameset")
def startTagNoframes(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-frameset",
{"name": token["name"]})
def endTagHtml(self, token):
self.parser.phase = self.parser.phases["afterAfterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-frameset",
{"name": token["name"]})
class AfterAfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class AfterAfterFramesetPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoFrames)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagNoFrames(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
# XXX "inHeadNoscript": InHeadNoScriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
# XXX after after frameset
}
def impliedTagToken(name, type="EndTag", attributes=None,
selfClosing=False):
if attributes is None:
attributes = {}
return {"type": tokenTypes[type], "name": name, "data": attributes,
"selfClosing": selfClosing}
class ParseError(Exception):
"""Error in parsed document"""
pass
|
midma101/m0du1ar
|
refs/heads/master
|
.venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langhebrewmodel.py
|
2762
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40
78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60
66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23,
12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253,
)
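# Illustrative reading of the table above (added note, not in the original
# source): byte 0x30 ('0') maps to order 252, the digit class from the
# legend, while bytes 0xE0-0xFA (the Hebrew letters in windows-1255) map to
# the small order numbers reserved for the most frequent letters.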
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
Win1255HebrewModel = {
'charToOrderMap': win1255_CharToOrderMap,
'precedenceMatrix': HebrewLangModel,
'mTypicalPositiveRatio': 0.984004,
'keepEnglishLetter': False,
'charsetName': "windows-1255"
}
# flake8: noqa
|
ohsu-computational-biology/server
|
refs/heads/g2p-2.5
|
ez_setup.py
|
30
|
#!/usr/bin/env python
"""Bootstrap setuptools installation
To use setuptools in your package's setup.py, include this
file in the same directory and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
To require a specific version of setuptools, set a download
mirror, or use an alternate download directory, simply supply
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import zipfile
import optparse
import subprocess
import platform
import textwrap
import contextlib
from distutils import log
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "5.4.1"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
"""
Return True if the command succeeded.
"""
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(archive_filename, install_args=()):
with archive_context(archive_filename):
# installing
log.warn('Installing Setuptools')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
# exitcode will be 2
return 2
def _build_egg(egg, archive_filename, to_dir):
with archive_context(archive_filename):
# building an egg
log.warn('Building a Setuptools egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
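# Usage sketch (illustrative; 'archive.zip' is a placeholder filename):
# ContextualZipFile behaves like ZipFile but also works as a context
# manager on Python 2.6, e.g.
#
#     with ContextualZipFile('archive.zip') as archive:
#         archive.extractall()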
@contextlib.contextmanager
def archive_context(filename):
# extracting the archive
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
with ContextualZipFile(filename) as archive:
archive.extractall()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
yield
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
archive = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, archive, to_dir)
sys.path.insert(0, egg)
# Remove previously-imported pkg_resources if present (see
# https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
if 'pkg_resources' in sys.modules:
del sys.modules['pkg_resources']
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15):
to_dir = os.path.abspath(to_dir)
rep_modules = 'pkg_resources', 'setuptools'
imported = set(sys.modules).intersection(rep_modules)
try:
import pkg_resources
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("setuptools>=" + version)
return
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir, download_delay)
except pkg_resources.VersionConflict as VC_err:
if imported:
msg = textwrap.dedent("""
The required version of setuptools (>={version}) is not available,
and can't be installed while this script is running. Please
install a more recent version first, using
'easy_install -U setuptools'.
(Currently using {VC_err.args[0]!r})
""").format(VC_err=VC_err, version=version)
sys.stderr.write(msg)
sys.exit(2)
# otherwise, reload ok
del pkg_resources, sys.modules['pkg_resources']
return _do_download(version, download_base, to_dir, download_delay)
def _clean_check(cmd, target):
"""
Run the command to download target. If the command fails, clean up before
re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
"""
Download the file at url to target using Powershell (which will validate
trust). Raise an exception if the command cannot complete.
"""
target = os.path.abspath(target)
ps_cmd = (
"[System.Net.WebRequest]::DefaultWebProxy.Credentials = "
"[System.Net.CredentialCache]::DefaultCredentials; "
"(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)"
% vars()
)
cmd = [
'powershell',
'-Command',
ps_cmd,
]
_clean_check(cmd, target)
def has_powershell():
if platform.system() != 'Windows':
return False
cmd = ['powershell', '-Command', 'echo test']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
cmd = ['curl', url, '--silent', '--output', target]
_clean_check(cmd, target)
def has_curl():
cmd = ['curl', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
cmd = ['wget', url, '--quiet', '--output-document', target]
_clean_check(cmd, target)
def has_wget():
cmd = ['wget', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
"""
Use Python to download the file, even though it cannot authenticate the
connection.
"""
src = urlopen(url)
try:
# Read all the data in one block.
data = src.read()
finally:
src.close()
# Write all the data in one block to avoid creating a partial file.
with open(target, "wb") as dst:
dst.write(data)
download_file_insecure.viable = lambda: True
def get_best_downloader():
downloaders = (
download_file_powershell,
download_file_curl,
download_file_wget,
download_file_insecure,
)
viable_downloaders = (dl for dl in downloaders if dl.viable())
return next(viable_downloaders, None)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15, downloader_factory=get_best_downloader):
"""
Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
``downloader_factory`` should be a function taking no arguments and
returning a function for downloading a URL to a target.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
zip_name = "setuptools-%s.zip" % version
url = download_base + zip_name
saveto = os.path.join(to_dir, zip_name)
if not os.path.exists(saveto): # Avoid repeated downloads
log.warn("Downloading %s", url)
downloader = downloader_factory()
downloader(url, saveto)
return os.path.realpath(saveto)
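# Example call (sketch; the target directory is an illustrative placeholder):
#
#     archive_path = download_setuptools(version=DEFAULT_VERSION,
#                                        to_dir="/tmp/setuptools-bootstrap")
#     # archive_path is the absolute path of the downloaded
#     # setuptools-<version>.zip archive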
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
return ['--user'] if options.user_install else []
def _parse_args():
"""
Parse the command line for options
"""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package (requires Python 2.6 or later)')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
parser.add_option(
'--insecure', dest='downloader_factory', action='store_const',
const=lambda: download_file_insecure, default=get_best_downloader,
help='Use internal, non-validating downloader'
)
parser.add_option(
'--version', help="Specify which version to download",
default=DEFAULT_VERSION,
)
options, args = parser.parse_args()
# positional arguments are ignored
return options
def main():
"""Install or upgrade setuptools and EasyInstall"""
options = _parse_args()
archive = download_setuptools(
version=options.version,
download_base=options.download_base,
downloader_factory=options.downloader_factory,
)
return _install(archive, _build_install_args(options))
if __name__ == '__main__':
sys.exit(main())
|
Shiroy/servo
|
refs/heads/master
|
components/script/dom/bindings/codegen/parser/tests/test_array_of_interface.py
|
158
|
import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
interface A {
attribute long a;
};
interface B {
attribute A[] b;
};
""");
parser.finish()
|
t794104/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/netscaler/netscaler_ssl_certkey.py
|
31
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_ssl_certkey
short_description: Manage SSL certificate keys.
description:
    - Manage SSL certificate keys.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
certkey:
description:
- >-
Name for the certificate and private-key pair. Must begin with an ASCII alphanumeric or underscore
C(_) character, and must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ),
colon C(:), at C(@), equals C(=), and hyphen C(-) characters. Cannot be changed after the certificate-key
pair is created.
- "The following requirement applies only to the NetScaler CLI:"
- >-
If the name includes one or more spaces, enclose the name in double or single quotation marks (for
example, "my cert" or 'my cert').
- "Minimum length = 1"
cert:
description:
- >-
Name of and, optionally, path to the X509 certificate file that is used to form the certificate-key
pair. The certificate file should be present on the appliance's hard-disk drive or solid-state drive.
Storing a certificate in any location other than the default might cause inconsistency in a high
availability setup. /nsconfig/ssl/ is the default path.
- "Minimum length = 1"
key:
description:
- >-
Name of and, optionally, path to the private-key file that is used to form the certificate-key pair.
The certificate file should be present on the appliance's hard-disk drive or solid-state drive.
Storing a certificate in any location other than the default might cause inconsistency in a high
availability setup. /nsconfig/ssl/ is the default path.
- "Minimum length = 1"
password:
description:
- >-
Passphrase that was used to encrypt the private-key. Use this option to load encrypted private-keys
in PEM format.
inform:
choices:
- 'DER'
- 'PEM'
- 'PFX'
description:
- >-
Input format of the certificate and the private-key files. The three formats supported by the
appliance are:
- "PEM - Privacy Enhanced Mail"
- "DER - Distinguished Encoding Rule"
- "PFX - Personal Information Exchange."
passplain:
description:
- >-
Pass phrase used to encrypt the private-key. Required when adding an encrypted private-key in PEM
format.
- "Minimum length = 1"
expirymonitor:
choices:
- 'enabled'
- 'disabled'
description:
- "Issue an alert when the certificate is about to expire."
notificationperiod:
description:
- >-
Time, in number of days, before certificate expiration, at which to generate an alert that the
certificate is about to expire.
- "Minimum value = C(10)"
- "Maximum value = C(100)"
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
- name: Setup ssl certkey
delegate_to: localhost
netscaler_ssl_certkey:
nitro_user: nsroot
nitro_pass: nsroot
nsip: 172.18.0.2
certkey: certirificate_1
cert: server.crt
key: server.key
expirymonitor: enabled
notificationperiod: 30
inform: PEM
password: False
passplain: somesecret
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: "['message 1', 'message 2']"
msg:
description: Message detailing the failure reason
returned: failure
type: str
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: "{ 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }"
'''
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslcertkey import sslcertkey
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, \
get_immutables_intersection
def key_exists(client, module):
log('Checking if key exists')
log('certkey is %s' % module.params['certkey'])
all_certificates = sslcertkey.get(client)
certkeys = [item.certkey for item in all_certificates]
if module.params['certkey'] in certkeys:
return True
else:
return False
def key_identical(client, module, sslcertkey_proxy):
log('Checking if configured key is identical')
sslcertkey_list = sslcertkey.get_filtered(client, 'certkey:%s' % module.params['certkey'])
diff_dict = sslcertkey_proxy.diff_object(sslcertkey_list[0])
if 'password' in diff_dict:
del diff_dict['password']
if 'passplain' in diff_dict:
del diff_dict['passplain']
if len(diff_dict) == 0:
return True
else:
return False
def diff_list(client, module, sslcertkey_proxy):
sslcertkey_list = sslcertkey.get_filtered(client, 'certkey:%s' % module.params['certkey'])
return sslcertkey_proxy.diff_object(sslcertkey_list[0])
def main():
module_specific_arguments = dict(
certkey=dict(type='str'),
cert=dict(type='str'),
key=dict(type='str'),
password=dict(type='bool'),
inform=dict(
type='str',
choices=[
'DER',
'PEM',
'PFX',
]
),
passplain=dict(
type='str',
no_log=True,
),
expirymonitor=dict(
type='str',
choices=[
'enabled',
'disabled',
]
),
notificationperiod=dict(type='float'),
)
argument_spec = dict()
argument_spec.update(netscaler_common_arguments)
argument_spec.update(module_specific_arguments)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
module_result = dict(
changed=False,
failed=False,
loglines=loglines,
)
# Fail the module if imports failed
if not PYTHON_SDK_IMPORTED:
module.fail_json(msg='Could not load nitro python sdk')
# Fallthrough to rest of execution
client = get_nitro_client(module)
try:
client.login()
except nitro_exception as e:
msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg)
except Exception as e:
if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
module.fail_json(msg='Connection error %s' % str(e))
elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
module.fail_json(msg='SSL Error %s' % str(e))
else:
module.fail_json(msg='Unexpected error during login %s' % str(e))
readwrite_attrs = [
'certkey',
'cert',
'key',
'password',
'inform',
'passplain',
'expirymonitor',
'notificationperiod',
]
readonly_attrs = [
'signaturealg',
'certificatetype',
'serial',
'issuer',
'clientcertnotbefore',
'clientcertnotafter',
'daystoexpiration',
'subject',
'publickey',
'publickeysize',
'version',
'priority',
'status',
'passcrypt',
'data',
'servicename',
]
immutable_attrs = [
'certkey',
'cert',
'key',
'password',
'inform',
'passplain',
]
transforms = {
'expirymonitor': [lambda v: v.upper()],
}
# Instantiate config proxy
sslcertkey_proxy = ConfigProxy(
actual=sslcertkey(),
client=client,
attribute_values_dict=module.params,
readwrite_attrs=readwrite_attrs,
readonly_attrs=readonly_attrs,
immutable_attrs=immutable_attrs,
transforms=transforms,
)
try:
if module.params['state'] == 'present':
log('Applying actions for state present')
if not key_exists(client, module):
if not module.check_mode:
log('Adding certificate key')
sslcertkey_proxy.add()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
elif not key_identical(client, module, sslcertkey_proxy):
# Check if we try to change value of immutable attributes
immutables_changed = get_immutables_intersection(sslcertkey_proxy, diff_list(client, module, sslcertkey_proxy).keys())
if immutables_changed != []:
module.fail_json(
msg='Cannot update immutable attributes %s' % (immutables_changed,),
diff=diff_list(client, module, sslcertkey_proxy),
**module_result
)
if not module.check_mode:
sslcertkey_proxy.update()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for state
if not module.check_mode:
log('Sanity checks for state present')
if not key_exists(client, module):
module.fail_json(msg='SSL certkey does not exist')
if not key_identical(client, module, sslcertkey_proxy):
module.fail_json(msg='SSL certkey differs from configured', diff=diff_list(client, module, sslcertkey_proxy))
elif module.params['state'] == 'absent':
log('Applying actions for state absent')
if key_exists(client, module):
if not module.check_mode:
sslcertkey_proxy.delete()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for state
if not module.check_mode:
log('Sanity checks for state absent')
if key_exists(client, module):
module.fail_json(msg='SSL certkey still exists')
except nitro_exception as e:
msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg, **module_result)
client.logout()
module.exit_json(**module_result)
if __name__ == "__main__":
main()
|
dymkowsk/mantid
|
refs/heads/master
|
scripts/test/ReductionSettingsTest.py
|
3
|
import unittest
from mantid.simpleapi import *
from reduction_settings import *
class BasicSettingsObjectUsageTest(unittest.TestCase):
def setUp(self):
self.settings = get_settings_object("BasicSettingsObjectUsageTest")
def tearDown(self):
for prop_man_name in PropertyManagerDataService.getObjectNames():
PropertyManagerDataService.remove(prop_man_name)
def test_string_roundtrip(self):
self.settings["name"] = "value"
self.assertEquals(self.settings["name"], "value")
def test_float_roundtrip(self):
self.settings["name"] = 0.1
self.assertEquals(self.settings["name"], 0.1)
def test_int_roundtrip(self):
self.settings["name"] = 1
self.assertEquals(self.settings["name"], 1)
def test_keys(self):
self.settings["A"] = 1
self.settings["B"] = 2
self.assertEquals(self.settings.keys(), ["A", "B"])
def test_values(self):
self.settings["A"] = 1
self.settings["B"] = 2
self.assertEquals(self.settings.values(), [1, 2])
def test_items(self):
self.settings["A"] = 1
self.settings["B"] = 2
self.assertEquals(self.settings.items(), [("A", 1), ("B", 2)])
def test_size(self):
self.settings["A"] = 1
self.settings["B"] = 2
self.assertEquals(len(self.settings), 2)
def test_contains(self):
self.settings["name"] = 1
self.assertTrue("name" in self.settings)
def test_clear(self):
settings = get_settings_object("test_clear")
settings["name"] = "value"
self.assertEquals(len(settings), 1)
settings.clear()
self.assertEquals(len(settings), 0)
def test_clone(self):
self.settings["A"] = 1
self.settings["B"] = 2
cloned = self.settings.clone("ClonedManager")
self.assertTrue("A" in cloned)
self.assertTrue("B" in cloned)
def test_clone_same_name_throws(self):
self.assertRaises(RuntimeError, self.settings.clone, "BasicSettingsObjectUsageTest")
def test_clone_name_already_exists_is_cleared(self):
a = get_settings_object("A")
a["a"] = 1
b = get_settings_object("B")
b["b"] = 2
c = b.clone("A")
c["c"] = 3
self.assertFalse("a" in c)
if __name__ == '__main__':
unittest.main()
|
lkostler/AME60649_project_final
|
refs/heads/master
|
moltemplate/moltemplate/src/remove_duplicates_nbody.py
|
30
|
#!/usr/bin/env python
"""
Get rid of lines containing duplicate bonded nbody interactions in the
corresponding section of a LAMMPS data file (such as bonds, angles,
dihedrals and impropers). Duplicate lines which occur later are
preserved and the earlier lines are erased.
(This program reads from sys.stdin. This program does not parse the entire
data file. The text from the relevant section of the LAMMPS file should be
extracted in advance before it is sent to this program.)
"""
import sys
in_stream = sys.stdin
if len(sys.argv) == 2:
n = int(sys.argv[1])
if (len(sys.argv) != 2) or (n < 1):
sys.stderr.write('Error (remove_duplicates_nbody.py): expected a positive integer argument.\n')
sys.exit(-1)
atom_ids_in_use = set([])
lines = in_stream.readlines()
# Start at the end of the file and read backwards.
# If duplicate lines exist, eliminate the ones that occur earlier in the file.
i = len(lines)
while i > 0:
i -= 1
line_orig = lines[i]
line = line_orig.rstrip('\n')
if '#' in line_orig:
ic = line.find('#')
line = line_orig[:ic]
tokens = line.strip().split()
if len(tokens) == 2+n:
atom_ids = tuple(tokens[2:2+n])
if atom_ids in atom_ids_in_use:
del lines[i]
else:
atom_ids_in_use.add(atom_ids)
elif len(tokens) == 0:
del lines[i]
for line in lines:
sys.stdout.write(line)
|
hejq0310/git-repo
|
refs/heads/master
|
error.py
|
48
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ManifestParseError(Exception):
"""Failed to parse the manifest file.
"""
class ManifestInvalidRevisionError(Exception):
"""The revision value in a project is incorrect.
"""
class EditorError(Exception):
"""Unspecified error from the user's text editor.
"""
def __init__(self, reason):
super(EditorError, self).__init__()
self.reason = reason
def __str__(self):
return self.reason
class GitError(Exception):
"""Unspecified internal error from git.
"""
def __init__(self, command):
super(GitError, self).__init__()
self.command = command
def __str__(self):
return self.command
class UploadError(Exception):
"""A bundle upload to Gerrit did not succeed.
"""
def __init__(self, reason):
super(UploadError, self).__init__()
self.reason = reason
def __str__(self):
return self.reason
class DownloadError(Exception):
"""Cannot download a repository.
"""
def __init__(self, reason):
super(DownloadError, self).__init__()
self.reason = reason
def __str__(self):
return self.reason
class NoSuchProjectError(Exception):
"""A specified project does not exist in the work tree.
"""
def __init__(self, name=None):
super(NoSuchProjectError, self).__init__()
self.name = name
def __str__(self):
    if self.name is None:
return 'in current directory'
return self.name
class InvalidProjectGroupsError(Exception):
"""A specified project is not suitable for the specified groups
"""
def __init__(self, name=None):
super(InvalidProjectGroupsError, self).__init__()
self.name = name
def __str__(self):
    if self.name is None:
return 'in current directory'
return self.name
class RepoChangedException(Exception):
"""Thrown if 'repo sync' results in repo updating its internal
repo or manifest repositories. In this special case we must
use exec to re-execute repo with the new code and manifest.
"""
def __init__(self, extra_args=None):
super(RepoChangedException, self).__init__()
self.extra_args = extra_args or []
class HookError(Exception):
"""Thrown if a 'repo-hook' could not be run.
The common case is that the file wasn't present when we tried to run it.
"""
|
jgabriellima/self_organization_map
|
refs/heads/master
|
som.py
|
1
|
"""
Self Organizing maps neural networks.
"""
import random
import math
class Map(object):
#Class Constructor
#@param dimensions: Number of input dimensions
#@param length: Length of the output grid
#@param filePath: The file path with the input data.
def __init__(self,dimensions,length,filePath):
#Collection of weights
self.outputs = []
#Current iteration
self.iteration = 0
#Side length of output grid.
self.length = length
#Number of input dimensions
self.dimensions = dimensions
#Random Generator
self.rand = random.Random()
#Label<Patterns> dict
self.patterns = {}
#Initialise the SOM
self.initialise()
#Load the dataset
self.loadData(filePath)
#Normalise the patterns
self.normalisePatterns()
#Training method
self.train(0.0000001)
#Dump the coordinates
self.dumpCoordinates()
#Initialise the Network
def initialise(self):
#10x10 Dimensional Grid
for i in xrange(self.length):
self.outputs.append([])
for j in xrange(self.length):
self.outputs[i].append(Neuron(i,j,self.length))
for k in xrange(self.dimensions):
self.outputs[i][j].weights.append(self.rand.random())
#Load the dataset
#@param filePath: The file path
def loadData(self,filePath):
fileHandle = open(filePath)
#Ignore the first line (header)
fileList = fileHandle.readlines()[1:]
for line in fileList:
line = line.strip()
lineSet = line.split(',')
self.patterns[lineSet[0]] = []
for i in xrange(self.dimensions):
self.patterns[lineSet[0]].append(float(lineSet[i+1]))
fileHandle.close()
#Normalise the patterns
def normalisePatterns(self):
for j in xrange(self.dimensions):
sum = 0.0
for key in self.patterns.keys():
sum += self.patterns[key][j]
average = sum / float(len(self.patterns))
for key in self.patterns.keys():
self.patterns[key][j] = self.patterns[key][j] / average
#The training method
#@param maxError: the error treshold
def train(self,maxError):
currentError = 100000.0
while currentError > maxError:
currentError = 0.0
trainingSet = []
for pattern in self.patterns.values():
trainingSet.append(pattern)
for i in xrange(len(self.patterns)):
pattern = trainingSet[self.rand.randrange(len(self.patterns)-i)]
currentError += self.trainPattern(pattern)
trainingSet.remove(pattern)
print "Current Error: %.7f" % (currentError,)
#Train Pattern
#@param pattern: The input pattern
def trainPattern(self,pattern):
error = 0.0
winner = self.winner(pattern)
for i in xrange(self.length):
for j in xrange(self.length):
error += self.outputs[i][j].updateWeights(pattern,winner,self.iteration)
self.iteration+=1
return abs(error / (self.length * self.length))
#The winner takes all rule
#@param pattern: the input pattern
#@return the neuron winner
def winner(self,pattern):
winner = None
minD = 10000000.0
for i in xrange(self.length):
for j in xrange(self.length):
d = self.distance(pattern,self.outputs[i][j].weights)
if d < minD:
minD = d
winner = self.outputs[i][j]
return winner
#The euclidian distance
#@param inVector: the input Pattern
#@param outVector: the output neurons vector
def distance(self,inVector,outVector):
value = 0.0
for i in xrange(len(inVector)):
value += pow((inVector[i] - outVector[i]),2)
return math.sqrt(value)
#Dump the coordinates
def dumpCoordinates(self):
for key in self.patterns.keys():
n = self.winner(self.patterns[key])
print "%s,%d,%d" % (key,n.X,n.Y)
#Simple SOM Neuron
class Neuron(object):
#Class Constructor
#@param x : X Coordinate
#@param y : Y Coordinate
    #@param length: The side length of the output grid
def __init__(self,x,y,length):
#Neuron weights
self.weights = []
#X Coordinate
self.X = x
#Y Coordinate
self.Y = y
#Length
self.length = length
#nf
self.nf = 1000 / math.log(length)
#Gaussian Equation
#@param winner: The neuron winner
#@param iteration: current iteration
def gauss(self,winner,iteration):
distance = math.sqrt(pow(winner.X - self.X,2) + pow(winner.Y - self.Y,2))
return math.exp(-pow(distance,2) / (pow(self.strength(iteration),2)))
#Set the learning rate
#@param iteration: The current iteration
def learningRate(self,iteration):
return math.exp(-iteration/1000) * 0.1
#Set the strength (Neighborhood rate)
#@param iteration : the current iteration
def strength(self,iteration):
return math.exp(-iteration/self.nf) * self.length
#Update the weights
#@param pattern: The input pattern
#@param winner: The neuron winner
#@param iteration: The current iteration
def updateWeights(self,pattern,winner,iteration):
sum = 0.0
for i in xrange(len(self.weights)):
delta = self.learningRate(iteration) * self.gauss(winner,iteration) * (pattern[i] - self.weights[i])
self.weights[i] += delta
sum += delta
return sum / len(self.weights)
#####################################
#import psyco
#psyco.full()
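# Usage sketch (the dataset path and dimension count are assumptions -- the CSV is
# expected to have a header row followed by "label,feature1,...,featureN" rows;
# training and the coordinate dump run from the constructor):
#   som = Map(dimensions=4, length=10, filePath="patterns.csv")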
|
ylando2/pysol-with-easy-gaps
|
refs/heads/master
|
pysollib/games/ultra/tarock.py
|
2
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
##---------------------------------------------------------------------------##
##
## Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
## Copyright (C) 2003 Mt. Hood Playing Card Co.
## Copyright (C) 2005-2009 Skomoroh
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
##---------------------------------------------------------------------------##
__all__ = []
# Imports
import sys
# Ultrasol imports
from pysollib.gamedb import registerGame, GameInfo, GI
from pysollib.util import *
from pysollib.mfxutil import kwdefault
from pysollib.stack import *
from pysollib.game import Game
from pysollib.layout import Layout
from pysollib.games.special.tarock import AbstractTarockGame, Grasshopper
from pysollib.games.threepeaks import ThreePeaksNoScore
# ************************************************************************
# *
# ************************************************************************
class Tarock_OpenStack(OpenStack):
def __init__(self, x, y, game, yoffset=-1, **cap):
kwdefault(cap, max_move=UNLIMITED_MOVES, max_accept=UNLIMITED_ACCEPTS, dir=-1)
OpenStack.__init__(self, x, y, game, **cap)
if yoffset < 0:
yoffset = game.app.images.CARD_YOFFSET
self.CARD_YOFFSET = yoffset
def isRankSequence(self, cards, dir=None):
if not dir:
dir = self.cap.dir
c1 = cards[0]
for c2 in cards[1:]:
if not c1.rank + dir == c2.rank:
return 0
c1 = c2
return 1
def isAlternateColorSequence(self, cards, dir=None):
if not dir:
dir = self.cap.dir
c1 = cards[0]
for c2 in cards[1:]:
if (c1.color < 2 and c1.color == c2.color
or not c1.rank + dir == c2.rank):
return 0
c1 = c2
return 1
def isSuitSequence(self, cards, dir=None):
if not dir:
dir = self.cap.dir
c1 = cards[0]
for c2 in cards[1:]:
if not (c1.suit == c2.suit
and c1.rank + dir == c2.rank):
return 0
c1 = c2
return 1
def isHighRankCard(self, card):
        if card.suit == len(self.game.gameinfo.suits):
            maxcard = self.game.gameinfo.trumps[-1]
        else:
            maxcard = self.game.gameinfo.ranks[-1]
return card.rank == maxcard or self.cap.base_rank == ANY_RANK
class Tarock_RK_RowStack(Tarock_OpenStack):
def acceptsCards(self, from_stack, cards):
if (not self.basicAcceptsCards(from_stack, cards)
or not self.isRankSequence(cards)):
return 0
if not self.cards:
return self.isHighRankCard(cards[0])
return self.isRankSequence([self.cards[-1], cards[0]])
def canMoveCards(self, cards):
return (self.basicCanMoveCards(cards)
and self.isRankSequence(cards))
class Tarock_SS_RowStack(Tarock_OpenStack):
def acceptsCards(self, from_stack, cards):
if (not self.basicAcceptsCards(from_stack, cards)
or not self.isSuitSequence(cards)):
return 0
if not self.cards:
return self.isHighRankCard(cards[0])
return self.isSuitSequence([self.cards[-1], cards[0]])
def canMoveCards(self, cards):
return (self.basicCanMoveCards(cards)
and self.isSuitSequence(cards))
class Tarock_AC_RowStack(Tarock_OpenStack):
def acceptsCards(self, from_stack, cards):
if (not self.basicAcceptsCards(from_stack, cards)
or not self.isAlternateColorSequence(cards)):
return 0
if not self.cards:
return self.isHighRankCard(cards[0])
return self.isAlternateColorSequence([self.cards[-1], cards[0]])
def canMoveCards(self, cards):
return (self.basicCanMoveCards(cards)
and self.isAlternateColorSequence(cards))
# ************************************************************************
# *
# ************************************************************************
class Cockroach(Grasshopper):
MAX_ROUNDS = 1
class DoubleCockroach(Grasshopper):
MAX_ROUNDS = 1
# ************************************************************************
# *
# ************************************************************************
class Corkscrew(AbstractTarockGame):
RowStack_Class = StackWrapper(Tarock_RK_RowStack, base_rank=NO_RANK)
#
# game layout
#
def createGame(self, rows=11, reserves=10):
# create layout
l, s = Layout(self), self.s
# set size
maxrows = max(rows, reserves)
self.setSize(l.XM + (maxrows + 2) * l.XS, l.YM + 6 * l.YS)
#
playcards = 4 * l.YS / l.YOFFSET
xoffset, yoffset = [], []
for i in range(playcards):
xoffset.append(0)
yoffset.append(l.YOFFSET)
for i in range(78 * self.gameinfo.decks - playcards):
xoffset.append(l.XOFFSET)
yoffset.append(0)
# create stacks
x, y = l.XM + (maxrows - reserves) * l.XS / 2, l.YM
for i in range(reserves):
s.reserves.append(ReserveStack(x, y, self))
x = x + l.XS
x, y = l.XM + (maxrows - rows) * l.XS / 2, l.YM + l.YS
self.setRegion(s.reserves, (-999, -999, 999999, y - l.YM / 2))
for i in range(rows):
stack = self.RowStack_Class(x, y, self, yoffset=l.YOFFSET)
stack.CARD_XOFFSET = xoffset
stack.CARD_YOFFSET = yoffset
s.rows.append(stack)
x = x + l.XS
x, y = l.XM + maxrows * l.XS, l.YM
for i in range(2):
for suit in range(5):
s.foundations.append(SS_FoundationStack(x, y, self, suit=suit,
max_cards=14 + 8 * (suit == 4)))
y = y + l.YS
x, y = x + l.XS, l.YM
self.setRegion(self.s.foundations, (x - l.XS * 2, -999, 999999,
self.height - (l.YS + l.YM)), priority=1)
s.talon = InitialDealTalonStack(self.width - 3 * l.XS / 2, self.height - l.YS, self)
# define stack-groups
l.defaultStackGroups()
#
# game overrides
#
def startGame(self):
self.startDealSample()
i = 0
while self.s.talon.cards:
card = self.s.talon.cards[-1]
if card.rank == 13 + 8 * (card.suit == 4):
if self.s.rows[i].cards:
i = i + 1
self.s.talon.dealRow(rows=[self.s.rows[i]], frames=4)
# must look at cards
def _getClosestStack(self, cx, cy, stacks, dragstack):
closest, cdist = None, 999999999
for stack in stacks:
if stack.cards and stack is not dragstack:
dist = (stack.cards[-1].x - cx)**2 + (stack.cards[-1].y - cy)**2
else:
dist = (stack.x - cx)**2 + (stack.y - cy)**2
if dist < cdist:
closest, cdist = stack, dist
return closest
def shallHighlightMatch(self, stack1, card1, stack2, card2):
row = self.s.rows[0]
sequence = row.isRankSequence
return (sequence([card1, card2]) or sequence([card2, card1]))
# ************************************************************************
# *
# ************************************************************************
class Serpent(Corkscrew):
RowStack_Class = StackWrapper(Tarock_AC_RowStack, base_rank=NO_RANK)
def shallHighlightMatch(self, stack1, card1, stack2, card2):
row = self.s.rows[0]
sequence = row.isAlternateColorSequence
return (sequence([card1, card2]) or sequence([card2, card1]))
# ************************************************************************
# *
# ************************************************************************
class Rambling(Corkscrew):
RowStack_Class = StackWrapper(Tarock_SS_RowStack, base_rank=NO_RANK)
def shallHighlightMatch(self, stack1, card1, stack2, card2):
row = self.s.rows[0]
sequence = row.isSuitSequence
return (sequence([card1, card2]) or sequence([card2, card1]))
# ************************************************************************
# * Le Grande Teton
# ************************************************************************
class LeGrandeTeton(ThreePeaksNoScore):
pass
# ************************************************************************
# * register the games
# ************************************************************************
def r(id, gameclass, name, game_type, decks, redeals, skill_level):
game_type = game_type | GI.GT_TAROCK | GI.GT_CONTRIB | GI.GT_ORIGINAL
gi = GameInfo(id, gameclass, name, game_type, decks, redeals, skill_level,
ranks=range(14), trumps=range(22))
registerGame(gi)
return gi
r(13163, Cockroach, 'Cockroach', GI.GT_TAROCK, 1, 0, GI.SL_MOSTLY_SKILL)
r(13164, DoubleCockroach, 'Double Cockroach', GI.GT_TAROCK, 2, 0, GI.SL_MOSTLY_SKILL)
r(13165, Corkscrew, 'Corkscrew', GI.GT_TAROCK, 2, 0, GI.SL_MOSTLY_SKILL)
r(13166, Serpent, 'Serpent', GI.GT_TAROCK, 2, 0, GI.SL_MOSTLY_SKILL)
r(13167, Rambling, 'Rambling', GI.GT_TAROCK, 2, 0, GI.SL_MOSTLY_SKILL)
r(22232, LeGrandeTeton, 'Le Grande Teton', GI.GT_TAROCK, 1, 0, GI.SL_BALANCED)
|
HackLinux/goblin-core
|
refs/heads/master
|
llvm/3.4.2/llvm-3.4.2.src/test/CodeGen/SystemZ/Large/branch-range-03.py
|
10
|
# Test 32-bit COMPARE AND BRANCH in cases where the sheer number of
# instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffcc bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 12 bytes if it uses a short
# branch and 14 if it uses a long one. The ones before "main:" have to
# take the branch length into account, which is 6 for short branches,
# so the final (0x34 - 6) / 12 == 3 blocks can use short branches.
# The ones after "main:" do not, so the first 0x34 / 12 == 4 blocks
# can use short branches.
#
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL:\.L[^ ]*]]
# CHECK: lb [[REG:%r[0-5]]], 1(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 2(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 3(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 4(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 5(%r3)
# CHECK: crje %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 6(%r3)
# CHECK: crje %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 7(%r3)
# CHECK: crje %r4, [[REG]], [[LABEL]]
# ...main goes here...
# CHECK: lb [[REG:%r[0-5]]], 25(%r3)
# CHECK: crje %r4, [[REG]], [[LABEL:\.L[^ ]*]]
# CHECK: lb [[REG:%r[0-5]]], 26(%r3)
# CHECK: crje %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 27(%r3)
# CHECK: crje %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 28(%r3)
# CHECK: crje %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 29(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 30(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 31(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 32(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
branch_blocks = 8
main_size = 0xffcc
print 'define void @f1(i8 *%base, i8 *%stop, i32 %limit) {'
print 'entry:'
print ' br label %before0'
print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bstop%d = getelementptr i8 *%%stop, i64 %d' % (i, i)
print ' %%bcur%d = load volatile i8 *%%bstop%d' % (i, i)
print ' %%bext%d = sext i8 %%bcur%d to i32' % (i, i)
print ' %%btest%d = icmp eq i32 %%limit, %%bext%d' % (i, i)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
print ''
print '%s:' % next
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%astop%d = getelementptr i8 *%%stop, i64 %d' % (i, i + 25)
print ' %%acur%d = load volatile i8 *%%astop%d' % (i, i)
print ' %%aext%d = sext i8 %%acur%d to i32' % (i, i)
print ' %%atest%d = icmp eq i32 %%limit, %%aext%d' % (i, i)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
print ''
print 'after%d:' % i
print ' ret void'
print '}'
|
TheoRettisch/p2pool-giarcoin
|
refs/heads/master
|
p2pool/web.py
|
47
|
from __future__ import division
import errno
import json
import os
import sys
import time
import traceback
from twisted.internet import defer, reactor
from twisted.python import log
from twisted.web import resource, static
import p2pool
from bitcoin import data as bitcoin_data
from . import data as p2pool_data, p2p
from util import deferral, deferred_resource, graph, math, memory, pack, variable
def _atomic_read(filename):
try:
with open(filename, 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
try:
with open(filename + '.new', 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
return None
def _atomic_write(filename, data):
with open(filename + '.new', 'wb') as f:
f.write(data)
f.flush()
try:
os.fsync(f.fileno())
except:
pass
try:
os.rename(filename + '.new', filename)
except: # XXX windows can't overwrite
os.remove(filename)
os.rename(filename + '.new', filename)
def get_web_root(wb, datadir_path, bitcoind_getinfo_var, stop_event=variable.Event()):
node = wb.node
start_time = time.time()
web_root = resource.Resource()
def get_users():
height, last = node.tracker.get_height_and_last(node.best_share_var.value)
weights, total_weight, donation_weight = node.tracker.get_cumulative_weights(node.best_share_var.value, min(height, 720), 65535*2**256)
res = {}
for script in sorted(weights, key=lambda s: weights[s]):
res[bitcoin_data.script2_to_address(script, node.net.PARENT)] = weights[script]/total_weight
return res
def get_current_scaled_txouts(scale, trunc=0):
txouts = node.get_current_txouts()
total = sum(txouts.itervalues())
results = dict((script, value*scale//total) for script, value in txouts.iteritems())
if trunc > 0:
total_random = 0
random_set = set()
for s in sorted(results, key=results.__getitem__):
if results[s] >= trunc:
break
total_random += results[s]
random_set.add(s)
if total_random:
winner = math.weighted_choice((script, results[script]) for script in random_set)
for script in random_set:
del results[script]
results[winner] = total_random
if sum(results.itervalues()) < int(scale):
results[math.weighted_choice(results.iteritems())] += int(scale) - sum(results.itervalues())
return results
def get_patron_sendmany(total=None, trunc='0.01'):
if total is None:
return 'need total argument. go to patron_sendmany/<TOTAL>'
total = int(float(total)*1e8)
trunc = int(float(trunc)*1e8)
return json.dumps(dict(
(bitcoin_data.script2_to_address(script, node.net.PARENT), value/1e8)
for script, value in get_current_scaled_txouts(total, trunc).iteritems()
if bitcoin_data.script2_to_address(script, node.net.PARENT) is not None
))
def get_global_stats():
# averaged over last hour
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)
nonstale_hash_rate = p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, lookbehind)
stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
diff = bitcoin_data.target_to_difficulty(wb.current_work.value['bits'].target)
return dict(
pool_nonstale_hash_rate=nonstale_hash_rate,
pool_hash_rate=nonstale_hash_rate/(1 - stale_prop),
pool_stale_prop=stale_prop,
min_difficulty=bitcoin_data.target_to_difficulty(node.tracker.items[node.best_share_var.value].max_target),
network_block_difficulty=diff,
network_hashrate=(diff * 2**32 // node.net.PARENT.BLOCK_PERIOD),
)
def get_local_stats():
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)
global_stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
my_unstale_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes)
my_orphan_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'orphan')
my_doa_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'doa')
my_share_count = my_unstale_count + my_orphan_count + my_doa_count
my_stale_count = my_orphan_count + my_doa_count
my_stale_prop = my_stale_count/my_share_count if my_share_count != 0 else None
my_work = sum(bitcoin_data.target_to_average_attempts(share.target)
for share in node.tracker.get_chain(node.best_share_var.value, lookbehind - 1)
if share.hash in wb.my_share_hashes)
actual_time = (node.tracker.items[node.best_share_var.value].timestamp -
node.tracker.items[node.tracker.get_nth_parent_hash(node.best_share_var.value, lookbehind - 1)].timestamp)
share_att_s = my_work / actual_time
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
(stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
miner_last_difficulties = {}
for addr in wb.last_work_shares.value:
miner_last_difficulties[addr] = bitcoin_data.target_to_difficulty(wb.last_work_shares.value[addr].target)
return dict(
my_hash_rates_in_last_hour=dict(
note="DEPRECATED",
nonstale=share_att_s,
rewarded=share_att_s/(1 - global_stale_prop),
actual=share_att_s/(1 - my_stale_prop) if my_stale_prop is not None else 0, # 0 because we don't have any shares anyway
),
my_share_counts_in_last_hour=dict(
shares=my_share_count,
unstale_shares=my_unstale_count,
stale_shares=my_stale_count,
orphan_stale_shares=my_orphan_count,
doa_stale_shares=my_doa_count,
),
my_stale_proportions_in_last_hour=dict(
stale=my_stale_prop,
orphan_stale=my_orphan_count/my_share_count if my_share_count != 0 else None,
dead_stale=my_doa_count/my_share_count if my_share_count != 0 else None,
),
miner_hash_rates=miner_hash_rates,
miner_dead_hash_rates=miner_dead_hash_rates,
miner_last_difficulties=miner_last_difficulties,
efficiency_if_miner_perfect=(1 - stale_orphan_shares/shares)/(1 - global_stale_prop) if shares else None, # ignores dead shares because those are miner's fault and indicated by pseudoshare rejection
efficiency=(1 - (stale_orphan_shares+stale_doa_shares)/shares)/(1 - global_stale_prop) if shares else None,
peers=dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
),
shares=dict(
total=shares,
orphan=stale_orphan_shares,
dead=stale_doa_shares,
),
uptime=time.time() - start_time,
attempts_to_share=bitcoin_data.target_to_average_attempts(node.tracker.items[node.best_share_var.value].max_target),
attempts_to_block=bitcoin_data.target_to_average_attempts(node.bitcoind_work.value['bits'].target),
block_value=node.bitcoind_work.value['subsidy']*1e-8,
warnings=p2pool_data.get_warnings(node.tracker, node.best_share_var.value, node.net, bitcoind_getinfo_var.value, node.bitcoind_work.value),
donation_proportion=wb.donation_percentage/100,
version=p2pool.__version__,
protocol_version=p2p.Protocol.VERSION,
fee=wb.worker_fee,
)
class WebInterface(deferred_resource.DeferredResource):
def __init__(self, func, mime_type='application/json', args=()):
deferred_resource.DeferredResource.__init__(self)
self.func, self.mime_type, self.args = func, mime_type, args
def getChild(self, child, request):
return WebInterface(self.func, self.mime_type, self.args + (child,))
@defer.inlineCallbacks
def render_GET(self, request):
request.setHeader('Content-Type', self.mime_type)
request.setHeader('Access-Control-Allow-Origin', '*')
res = yield self.func(*self.args)
defer.returnValue(json.dumps(res) if self.mime_type == 'application/json' else res)
def decent_height():
return min(node.tracker.get_height(node.best_share_var.value), 720)
web_root.putChild('rate', WebInterface(lambda: p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, decent_height())/(1-p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, decent_height()))))
web_root.putChild('difficulty', WebInterface(lambda: bitcoin_data.target_to_difficulty(node.tracker.items[node.best_share_var.value].max_target)))
web_root.putChild('users', WebInterface(get_users))
web_root.putChild('user_stales', WebInterface(lambda: dict((bitcoin_data.pubkey_hash_to_address(ph, node.net.PARENT), prop) for ph, prop in
p2pool_data.get_user_stale_props(node.tracker, node.best_share_var.value, node.tracker.get_height(node.best_share_var.value)).iteritems())))
web_root.putChild('fee', WebInterface(lambda: wb.worker_fee))
web_root.putChild('current_payouts', WebInterface(lambda: dict((bitcoin_data.script2_to_address(script, node.net.PARENT), value/1e8) for script, value in node.get_current_txouts().iteritems())))
web_root.putChild('patron_sendmany', WebInterface(get_patron_sendmany, 'text/plain'))
web_root.putChild('global_stats', WebInterface(get_global_stats))
web_root.putChild('local_stats', WebInterface(get_local_stats))
web_root.putChild('peer_addresses', WebInterface(lambda: ' '.join('%s%s' % (peer.transport.getPeer().host, ':'+str(peer.transport.getPeer().port) if peer.transport.getPeer().port != node.net.P2P_PORT else '') for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('peer_txpool_sizes', WebInterface(lambda: dict(('%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port), peer.remembered_txs_size) for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('pings', WebInterface(defer.inlineCallbacks(lambda: defer.returnValue(
dict([(a, (yield b)) for a, b in
[(
'%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port),
defer.inlineCallbacks(lambda peer=peer: defer.returnValue(
min([(yield peer.do_ping().addCallback(lambda x: x/0.001).addErrback(lambda fail: None)) for i in xrange(3)])
))()
) for peer in list(node.p2p_node.peers.itervalues())]
])
))))
web_root.putChild('peer_versions', WebInterface(lambda: dict(('%s:%i' % peer.addr, peer.other_sub_version) for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('payout_addr', WebInterface(lambda: bitcoin_data.pubkey_hash_to_address(wb.my_pubkey_hash, node.net.PARENT)))
web_root.putChild('recent_blocks', WebInterface(lambda: [dict(
ts=s.timestamp,
hash='%064x' % s.header_hash,
number=pack.IntType(24).unpack(s.share_data['coinbase'][1:4]) if len(s.share_data['coinbase']) >= 4 else None,
share='%064x' % s.hash,
) for s in node.tracker.get_chain(node.best_share_var.value, min(node.tracker.get_height(node.best_share_var.value), 24*60*60//node.net.SHARE_PERIOD)) if s.pow_hash <= s.header['bits'].target]))
web_root.putChild('uptime', WebInterface(lambda: time.time() - start_time))
web_root.putChild('stale_rates', WebInterface(lambda: p2pool_data.get_stale_counts(node.tracker, node.best_share_var.value, decent_height(), rates=True)))
new_root = resource.Resource()
web_root.putChild('web', new_root)
stat_log = []
if os.path.exists(os.path.join(datadir_path, 'stats')):
try:
with open(os.path.join(datadir_path, 'stats'), 'rb') as f:
stat_log = json.loads(f.read())
except:
log.err(None, 'Error loading stats:')
def update_stat_log():
while stat_log and stat_log[0]['time'] < time.time() - 24*60*60:
stat_log.pop(0)
lookbehind = 3600//node.net.SHARE_PERIOD
if node.tracker.get_height(node.best_share_var.value) < lookbehind:
return None
global_stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
(stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
stat_log.append(dict(
time=time.time(),
pool_hash_rate=p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, lookbehind)/(1-global_stale_prop),
pool_stale_prop=global_stale_prop,
local_hash_rates=miner_hash_rates,
local_dead_hash_rates=miner_dead_hash_rates,
shares=shares,
stale_shares=stale_orphan_shares + stale_doa_shares,
stale_shares_breakdown=dict(orphan=stale_orphan_shares, doa=stale_doa_shares),
current_payout=node.get_current_txouts().get(bitcoin_data.pubkey_hash_to_script2(wb.my_pubkey_hash), 0)*1e-8,
peers=dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
),
attempts_to_share=bitcoin_data.target_to_average_attempts(node.tracker.items[node.best_share_var.value].max_target),
attempts_to_block=bitcoin_data.target_to_average_attempts(node.bitcoind_work.value['bits'].target),
block_value=node.bitcoind_work.value['subsidy']*1e-8,
))
with open(os.path.join(datadir_path, 'stats'), 'wb') as f:
f.write(json.dumps(stat_log))
x = deferral.RobustLoopingCall(update_stat_log)
x.start(5*60)
stop_event.watch(x.stop)
new_root.putChild('log', WebInterface(lambda: stat_log))
def get_share(share_hash_str):
if int(share_hash_str, 16) not in node.tracker.items:
return None
share = node.tracker.items[int(share_hash_str, 16)]
return dict(
parent='%064x' % share.previous_hash,
children=['%064x' % x for x in sorted(node.tracker.reverse.get(share.hash, set()), key=lambda sh: -len(node.tracker.reverse.get(sh, set())))], # sorted from most children to least children
type_name=type(share).__name__,
local=dict(
verified=share.hash in node.tracker.verified.items,
time_first_seen=start_time if share.time_seen == 0 else share.time_seen,
peer_first_received_from=share.peer_addr,
),
share_data=dict(
timestamp=share.timestamp,
target=share.target,
max_target=share.max_target,
payout_address=bitcoin_data.script2_to_address(share.new_script, node.net.PARENT),
donation=share.share_data['donation']/65535,
stale_info=share.share_data['stale_info'],
nonce=share.share_data['nonce'],
desired_version=share.share_data['desired_version'],
absheight=share.absheight,
abswork=share.abswork,
),
block=dict(
hash='%064x' % share.header_hash,
header=dict(
version=share.header['version'],
previous_block='%064x' % share.header['previous_block'],
merkle_root='%064x' % share.header['merkle_root'],
timestamp=share.header['timestamp'],
target=share.header['bits'].target,
nonce=share.header['nonce'],
),
gentx=dict(
hash='%064x' % share.gentx_hash,
coinbase=share.share_data['coinbase'].ljust(2, '\x00').encode('hex'),
value=share.share_data['subsidy']*1e-8,
last_txout_nonce='%016x' % share.contents['last_txout_nonce'],
),
other_transaction_hashes=['%064x' % x for x in share.get_other_tx_hashes(node.tracker)],
),
)
new_root.putChild('share', WebInterface(lambda share_hash_str: get_share(share_hash_str)))
new_root.putChild('heads', WebInterface(lambda: ['%064x' % x for x in node.tracker.heads]))
new_root.putChild('verified_heads', WebInterface(lambda: ['%064x' % x for x in node.tracker.verified.heads]))
new_root.putChild('tails', WebInterface(lambda: ['%064x' % x for t in node.tracker.tails for x in node.tracker.reverse.get(t, set())]))
new_root.putChild('verified_tails', WebInterface(lambda: ['%064x' % x for t in node.tracker.verified.tails for x in node.tracker.verified.reverse.get(t, set())]))
new_root.putChild('best_share_hash', WebInterface(lambda: '%064x' % node.best_share_var.value))
new_root.putChild('my_share_hashes', WebInterface(lambda: ['%064x' % my_share_hash for my_share_hash in wb.my_share_hashes]))
def get_share_data(share_hash_str):
if int(share_hash_str, 16) not in node.tracker.items:
return ''
share = node.tracker.items[int(share_hash_str, 16)]
return p2pool_data.share_type.pack(share.as_share1a())
new_root.putChild('share_data', WebInterface(lambda share_hash_str: get_share_data(share_hash_str), 'application/octet-stream'))
new_root.putChild('currency_info', WebInterface(lambda: dict(
symbol=node.net.PARENT.SYMBOL,
block_explorer_url_prefix=node.net.PARENT.BLOCK_EXPLORER_URL_PREFIX,
address_explorer_url_prefix=node.net.PARENT.ADDRESS_EXPLORER_URL_PREFIX,
tx_explorer_url_prefix=node.net.PARENT.TX_EXPLORER_URL_PREFIX,
)))
new_root.putChild('version', WebInterface(lambda: p2pool.__version__))
hd_path = os.path.join(datadir_path, 'graph_db')
hd_data = _atomic_read(hd_path)
hd_obj = {}
if hd_data is not None:
try:
hd_obj = json.loads(hd_data)
except Exception:
log.err(None, 'Error reading graph database:')
dataview_descriptions = {
'last_hour': graph.DataViewDescription(150, 60*60),
'last_day': graph.DataViewDescription(300, 60*60*24),
'last_week': graph.DataViewDescription(300, 60*60*24*7),
'last_month': graph.DataViewDescription(300, 60*60*24*30),
'last_year': graph.DataViewDescription(300, 60*60*24*365.25),
}
hd = graph.HistoryDatabase.from_obj({
'local_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'local_dead_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'local_share_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False,
multivalues=True, multivalue_undefined_means_0=True,
default_func=graph.make_multivalue_migrator(dict(good='local_share_hash_rate', dead='local_dead_share_hash_rate', orphan='local_orphan_share_hash_rate'),
post_func=lambda bins: [dict((k, (v[0] - (sum(bin.get(rem_k, (0, 0))[0] for rem_k in ['dead', 'orphan']) if k == 'good' else 0), v[1])) for k, v in bin.iteritems()) for bin in bins])),
'pool_rates': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
multivalue_undefined_means_0=True),
'current_payout': graph.DataStreamDescription(dataview_descriptions),
'current_payouts': graph.DataStreamDescription(dataview_descriptions, multivalues=True),
'peers': graph.DataStreamDescription(dataview_descriptions, multivalues=True, default_func=graph.make_multivalue_migrator(dict(incoming='incoming_peers', outgoing='outgoing_peers'))),
'miner_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'miner_dead_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'desired_version_rates': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
multivalue_undefined_means_0=True),
'traffic_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'getwork_latency': graph.DataStreamDescription(dataview_descriptions),
'memory_usage': graph.DataStreamDescription(dataview_descriptions),
}, hd_obj)
x = deferral.RobustLoopingCall(lambda: _atomic_write(hd_path, json.dumps(hd.to_obj())))
x.start(100)
stop_event.watch(x.stop)
@wb.pseudoshare_received.watch
def _(work, dead, user):
t = time.time()
hd.datastreams['local_hash_rate'].add_datum(t, work)
if dead:
hd.datastreams['local_dead_hash_rate'].add_datum(t, work)
if user is not None:
hd.datastreams['miner_hash_rates'].add_datum(t, {user: work})
if dead:
hd.datastreams['miner_dead_hash_rates'].add_datum(t, {user: work})
@wb.share_received.watch
def _(work, dead, share_hash):
t = time.time()
if not dead:
hd.datastreams['local_share_hash_rates'].add_datum(t, dict(good=work))
else:
hd.datastreams['local_share_hash_rates'].add_datum(t, dict(dead=work))
def later():
res = node.tracker.is_child_of(share_hash, node.best_share_var.value)
if res is None: res = False # share isn't connected to sharechain? assume orphaned
if res and dead: # share was DOA, but is now in sharechain
# move from dead to good
hd.datastreams['local_share_hash_rates'].add_datum(t, dict(dead=-work, good=work))
elif not res and not dead: # share wasn't DOA, and isn't in sharechain
# move from good to orphan
hd.datastreams['local_share_hash_rates'].add_datum(t, dict(good=-work, orphan=work))
reactor.callLater(200, later)
@node.p2p_node.traffic_happened.watch
def _(name, bytes):
hd.datastreams['traffic_rate'].add_datum(time.time(), {name: bytes})
def add_point():
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.net.CHAIN_LENGTH, 60*60//node.net.SHARE_PERIOD, node.tracker.get_height(node.best_share_var.value))
t = time.time()
pool_rates = p2pool_data.get_stale_counts(node.tracker, node.best_share_var.value, lookbehind, rates=True)
pool_total = sum(pool_rates.itervalues())
hd.datastreams['pool_rates'].add_datum(t, pool_rates)
current_txouts = node.get_current_txouts()
hd.datastreams['current_payout'].add_datum(t, current_txouts.get(bitcoin_data.pubkey_hash_to_script2(wb.my_pubkey_hash), 0)*1e-8)
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
current_txouts_by_address = dict((bitcoin_data.script2_to_address(script, node.net.PARENT), amount) for script, amount in current_txouts.iteritems())
hd.datastreams['current_payouts'].add_datum(t, dict((user, current_txouts_by_address[user]*1e-8) for user in miner_hash_rates if user in current_txouts_by_address))
hd.datastreams['peers'].add_datum(t, dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
))
vs = p2pool_data.get_desired_version_counts(node.tracker, node.best_share_var.value, lookbehind)
vs_total = sum(vs.itervalues())
hd.datastreams['desired_version_rates'].add_datum(t, dict((str(k), v/vs_total*pool_total) for k, v in vs.iteritems()))
try:
hd.datastreams['memory_usage'].add_datum(t, memory.resident())
except:
if p2pool.DEBUG:
traceback.print_exc()
x = deferral.RobustLoopingCall(add_point)
x.start(5)
stop_event.watch(x.stop)
@node.bitcoind_work.changed.watch
def _(new_work):
hd.datastreams['getwork_latency'].add_datum(time.time(), new_work['latency'])
new_root.putChild('graph_data', WebInterface(lambda source, view: hd.datastreams[source].dataviews[view].get_data(time.time())))
web_root.putChild('static', static.File(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'web-static')))
return web_root
|
IPMITMO/statan
|
refs/heads/master
|
coala-bears/tests/verilog/VerilogLintBearTest.py
|
24
|
from bears.verilog.VerilogLintBear import VerilogLintBear
from coalib.testing.LocalBearTestHelper import verify_local_bear
good_file = """
module mux2to1 (w0, w1, s, f);
input w0, w1, s;
output f;
assign f = s ? w1 : w0;
endmodule
"""
bad_file = """
module updowncount(R, Clock, L, E, up_down, Q);
parameter n=8;
input [n-1:0] R;
input Clock, L, E, up_down;
output [n-1:0] Q;
reg [n-1:0] Q;
integer direction;
always @(posedge Clock)
begin
if (up_down)
direction = 1;
else
direction = -1;
if (L)
Q <= R;
else if (E)
Q <= Q + direction;
end
endmodule
"""
VerilogLintBearTest = verify_local_bear(VerilogLintBear,
valid_files=(good_file,),
invalid_files=(bad_file,))
|
xindus40223115/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/webbrowser.py
|
735
|
from browser import window
__all__ = ["Error", "open", "open_new", "open_new_tab"]
class Error(Exception):
pass
_target = { 0: '', 1: '_blank', 2: '_new' } # hack...
def open(url, new=0, autoraise=True):
"""
new window or tab is not controllable
on the client side. autoraise not available.
"""
if window.open(url, _target[new]):
return True
return False
def open_new(url):
return open(url, 1)
def open_new_tab(url):
return open(url, 2)
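# Usage sketch (only meaningful inside a Brython page; the URL is an assumption):
#   import webbrowser
#   webbrowser.open_new_tab("https://www.example.com/")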
|
lancezlin/pyjs
|
refs/heads/master
|
pyjs/lib/getopt.py
|
8
|
# -*- coding: iso-8859-1 -*-
"""Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <liw@iki.fi>.
#
# Gerrit Holl <gerrit@nl.linux.org> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Åstrand <astrand@lysator.liu.se> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - a option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
#import os
class GetoptError(Exception):
opt = ''
msg = ''
def __init__(self, msg, opt=''):
self.msg = msg
self.opt = opt
Exception.__init__(self, msg, opt)
def __str__(self):
return self.msg
error = GetoptError # backward compatibility
def getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
multiple occurrences. Long and short options may be mixed.
"""
opts = []
if type(longopts) == type(""):
longopts = [longopts]
else:
longopts = list(longopts)
while args and args[0].startswith('-') and args[0] != '-':
if args[0] == '--':
args = args[1:]
break
if args[0].startswith('--'):
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
else:
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
return opts, args
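# Usage sketch for getopt() (the argument values below are made up for illustration):
#   opts, args = getopt(['-a', '-b', 'foo', '--exclude', 'bar', 'x1'],
#                       'ab:', ['exclude='])
#   # opts == [('-a', ''), ('-b', 'foo'), ('--exclude', 'bar')], args == ['x1']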
def gnu_getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if isinstance(longopts, str):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
#elif os.environ.get("POSIXLY_CORRECT"):
# all_options_first = True
else:
all_options_first = False
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args
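# Usage sketch for gnu_getopt() (made-up arguments): unlike getopt(), option and
# non-option arguments may be intermixed:
#   opts, args = gnu_getopt(['x1', '-a', 'x2', '-b', 'foo'], 'ab:')
#   # opts == [('-a', ''), ('-b', 'foo')], args == ['x1', 'x2']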
def do_longs(opts, opt, longopts, args):
i = opt.find('=')
    #changed because pyjamas doesn't raise the expected ValueError from index();
    #it throws a javascript error instead
if i == -1: optarg = None
else : opt, optarg = opt[:i], opt[i+1:]
has_arg, opt = long_has_args(opt, longopts)
if has_arg:
if optarg is None:
if not args:
raise GetoptError('option --%s requires argument' % opt, opt)
optarg, args = args[0], args[1:]
elif optarg:
raise GetoptError('option --%s must not have an argument' % opt, opt)
opts.append(('--' + opt, optarg or ''))
return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
possibilities = [o for o in longopts if o.startswith(opt)]
if not possibilities:
raise GetoptError('option --%s not recognized' % opt, opt)
# Is there an exact match?
if opt in possibilities:
return False, opt
elif opt + '=' in possibilities:
return True, opt
# No exact match, so better be unique.
if len(possibilities) > 1:
# XXX since possibilities contains all valid continuations, might be
# nice to work them into the error msg
raise GetoptError('option --%s not a unique prefix' % opt, opt)
assert len(possibilities) == 1
unique_match = possibilities[0]
has_arg = unique_match.endswith('=')
if has_arg:
unique_match = unique_match[:-1]
return has_arg, unique_match
def do_shorts(opts, optstring, shortopts, args):
while optstring != '':
opt, optstring = optstring[0], optstring[1:]
if short_has_arg(opt, shortopts):
if optstring == '':
if not args:
raise GetoptError('option -%s requires argument' % opt,
opt)
optstring, args = args[0], args[1:]
optarg, optstring = optstring, ''
else:
optarg = ''
opts.append(('-' + opt, optarg))
return opts, args
def short_has_arg(opt, shortopts):
for i in range(len(shortopts)):
if opt == shortopts[i] != ':':
return shortopts.startswith(':', i+1)
raise GetoptError('option -%s not recognized' % opt, opt)
#__EOF__
|
michaelpacer/scikit-image
|
refs/heads/master
|
skimage/viewer/utils/dialogs.py
|
37
|
import os
from ..qt import QtGui
__all__ = ['open_file_dialog', 'save_file_dialog']
def _format_filename(filename):
if isinstance(filename, tuple):
# Handle discrepancy between PyQt4 and PySide APIs.
filename = filename[0]
if len(filename) == 0:
return None
return str(filename)
def open_file_dialog():
"""Return user-selected file path."""
filename = QtGui.QFileDialog.getOpenFileName()
filename = _format_filename(filename)
return filename
def save_file_dialog(default_format='png'):
"""Return user-selected file path."""
filename = QtGui.QFileDialog.getSaveFileName()
filename = _format_filename(filename)
if filename is None:
return None
#TODO: io plugins should assign default image formats
basename, ext = os.path.splitext(filename)
if not ext:
filename = '%s.%s' % (filename, default_format)
return filename
|
DinoCow/airflow
|
refs/heads/master
|
airflow/providers/apache/hive/example_dags/example_twitter_dag.py
|
7
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# --------------------------------------------------------------------------------
# Written By: Ekhtiar Syed
# Last Update: 8th April 2016
# Caveat: This Dag will not run because of missing scripts.
# The purpose of this is to give you a sample of a real world example DAG!
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# Load The Dependencies
# --------------------------------------------------------------------------------
"""
This is an example dag for managing twitter data.
"""
from datetime import date, timedelta
from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from airflow.providers.apache.hive.operators.hive import HiveOperator
from airflow.utils.dates import days_ago
# --------------------------------------------------------------------------------
# Create a few placeholder scripts. In practice these would be different python
# script files, which are imported in this section with absolute or relative imports
# --------------------------------------------------------------------------------
def fetchtweets():
"""
This is a placeholder for fetchtweets.
"""
def cleantweets():
"""
This is a placeholder for cleantweets.
"""
def analyzetweets():
"""
This is a placeholder for analyzetweets.
"""
def transfertodb():
"""
This is a placeholder for transfertodb.
"""
# --------------------------------------------------------------------------------
# set default arguments
# --------------------------------------------------------------------------------
default_args = {
'owner': 'Ekhtiar',
'depends_on_past': False,
'email': ['airflow@example.com'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
with DAG(
dag_id='example_twitter_dag',
default_args=default_args,
schedule_interval="@daily",
start_date=days_ago(5),
tags=['example'],
) as dag:
# --------------------------------------------------------------------------------
    # This task should call the Twitter API and retrieve yesterday's tweets, both from
    # and to, for the four twitter users (Twitter_A, ..., Twitter_D). There should be
    # eight CSV output files generated by this task, and the naming convention is
    # direction(from or to)_twitterHandle_date.csv
# --------------------------------------------------------------------------------
fetch_tweets = PythonOperator(task_id='fetch_tweets', python_callable=fetchtweets)
# --------------------------------------------------------------------------------
    # Clean the eight files. In this step you can drop or cherry-pick columns
    # and different parts of the text
# --------------------------------------------------------------------------------
clean_tweets = PythonOperator(task_id='clean_tweets', python_callable=cleantweets)
clean_tweets << fetch_tweets
# --------------------------------------------------------------------------------
# In this section you can use a script to analyze the twitter data. Could simply
# be a sentiment analysis through algorithms like bag of words or something more
# complicated. You can also take a look at Web Services to do such tasks
# --------------------------------------------------------------------------------
analyze_tweets = PythonOperator(task_id='analyze_tweets', python_callable=analyzetweets)
analyze_tweets << clean_tweets
# --------------------------------------------------------------------------------
    # Although this is the last task, we need to declare it before the next tasks as we
    # will use set_downstream. This task will extract a summary from the Hive data and
    # store it in MySQL
# --------------------------------------------------------------------------------
hive_to_mysql = PythonOperator(task_id='hive_to_mysql', python_callable=transfertodb)
# --------------------------------------------------------------------------------
    # The following tasks are generated using a for loop. The first task puts the eight
    # CSV files into HDFS. The second task loads these files from HDFS into the respective
    # Hive tables. These two for loops could be combined into one loop. However, in most
    # cases you will be running different analyses on your incoming and outgoing tweets,
    # and hence they are kept separate in this example.
# --------------------------------------------------------------------------------
from_channels = ['fromTwitter_A', 'fromTwitter_B', 'fromTwitter_C', 'fromTwitter_D']
to_channels = ['toTwitter_A', 'toTwitter_B', 'toTwitter_C', 'toTwitter_D']
yesterday = date.today() - timedelta(days=1)
dt = yesterday.strftime("%Y-%m-%d")
# define where you want to store the tweets csv file in your local directory
local_dir = "/tmp/"
    # define the location in HDFS where you want to store the files (the leading
    # space below separates source and destination in the hadoop fs -put command)
    hdfs_dir = " /tmp/"
for channel in to_channels:
file_name = "to_" + channel + "_" + yesterday.strftime("%Y-%m-%d") + ".csv"
load_to_hdfs = BashOperator(
task_id="put_" + channel + "_to_hdfs",
bash_command="HADOOP_USER_NAME=hdfs hadoop fs -put -f "
+ local_dir
+ file_name
+ hdfs_dir
+ channel
+ "/",
)
load_to_hdfs << analyze_tweets
load_to_hive = HiveOperator(
task_id="load_" + channel + "_to_hive",
hql="LOAD DATA INPATH '" + hdfs_dir + channel + "/" + file_name + "' "
"INTO TABLE " + channel + " "
"PARTITION(dt='" + dt + "')",
)
load_to_hive << load_to_hdfs
load_to_hive >> hive_to_mysql
for channel in from_channels:
file_name = "from_" + channel + "_" + yesterday.strftime("%Y-%m-%d") + ".csv"
load_to_hdfs = BashOperator(
task_id="put_" + channel + "_to_hdfs",
bash_command="HADOOP_USER_NAME=hdfs hadoop fs -put -f "
+ local_dir
+ file_name
+ hdfs_dir
+ channel
+ "/",
)
load_to_hdfs << analyze_tweets
load_to_hive = HiveOperator(
task_id="load_" + channel + "_to_hive",
hql="LOAD DATA INPATH '" + hdfs_dir + channel + "/" + file_name + "' "
"INTO TABLE " + channel + " "
"PARTITION(dt='" + dt + "')",
)
load_to_hive << load_to_hdfs
load_to_hive >> hive_to_mysql
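# --------------------------------------------------------------------------------
# Illustrative sketch (not part of the original DAG): for channel 'toTwitter_A'
# on a hypothetical date 2016-04-07, the loop above composes
#   file_name    == "to_toTwitter_A_2016-04-07.csv"
#   bash_command == "HADOOP_USER_NAME=hdfs hadoop fs -put -f "
#                   "/tmp/to_toTwitter_A_2016-04-07.csv /tmp/toTwitter_A/"
# --------------------------------------------------------------------------------
_example_file_name = "to_" + "toTwitter_A" + "_" + "2016-04-07" + ".csv"
assert _example_file_name == "to_toTwitter_A_2016-04-07.csv"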
|
cstipkovic/spidermonkey-research
|
refs/heads/master
|
js/src/tests/lib/tasks_win.py
|
4
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/. */
from __future__ import print_function, unicode_literals, division
import subprocess
import sys
from datetime import datetime, timedelta
from progressbar import ProgressBar
from results import NullTestOutput, TestOutput, escape_cmdline
from threading import Thread
from Queue import Queue, Empty
class EndMarker:
pass
class TaskFinishedMarker:
pass
def _do_work(qTasks, qResults, qWatch, prefix, run_skipped, timeout, show_cmd):
while True:
test = qTasks.get(block=True, timeout=sys.maxint)
if test is EndMarker:
qWatch.put(EndMarker)
qResults.put(EndMarker)
return
if not test.enable and not run_skipped:
qResults.put(NullTestOutput(test))
continue
# Spawn the test task.
cmd = test.get_command(prefix)
if show_cmd:
print(escape_cmdline(cmd))
tStart = datetime.now()
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Push the task to the watchdog -- it will kill the task
# if it goes over the timeout while we keep its stdout
# buffer clear on the "main" worker thread.
qWatch.put(proc)
out, err = proc.communicate()
qWatch.put(TaskFinishedMarker)
# Create a result record and forward to result processing.
dt = datetime.now() - tStart
result = TestOutput(test, cmd, out, err, proc.returncode, dt.total_seconds(),
dt > timedelta(seconds=timeout))
qResults.put(result)
def _do_watch(qWatch, timeout):
while True:
proc = qWatch.get(True)
if proc == EndMarker:
return
try:
fin = qWatch.get(block=True, timeout=timeout)
assert fin is TaskFinishedMarker, "invalid finish marker"
except Empty:
# Timed out, force-kill the test.
try:
proc.terminate()
except WindowsError as ex:
# If the process finishes after we time out but before we
# terminate, the terminate call will fail. We can safely
# ignore this.
if ex.winerror != 5:
raise
fin = qWatch.get(block=True, timeout=sys.maxint)
assert fin is TaskFinishedMarker, "invalid finish marker"
def run_all_tests(tests, prefix, pb, options):
"""
    Uses a scatter-gather pattern with a thread pool to manage child processes.
"""
qTasks, qResults = Queue(), Queue()
workers = []
watchdogs = []
for _ in range(options.worker_count):
qWatch = Queue()
watcher = Thread(target=_do_watch, args=(qWatch, options.timeout))
watcher.setDaemon(True)
watcher.start()
watchdogs.append(watcher)
worker = Thread(target=_do_work, args=(qTasks, qResults, qWatch,
prefix, options.run_skipped,
options.timeout, options.show_cmd))
worker.setDaemon(True)
worker.start()
workers.append(worker)
# Insert all jobs into the queue, followed by the queue-end
# marker, one per worker. This will not block on growing the
# queue, only on waiting for more items in the generator. The
# workers are already started, however, so this will process as
# fast as we can produce tests from the filesystem.
def _do_push(num_workers, qTasks):
for test in tests:
qTasks.put(test)
for _ in range(num_workers):
qTasks.put(EndMarker)
pusher = Thread(target=_do_push, args=(len(workers), qTasks))
pusher.setDaemon(True)
pusher.start()
# Read from the results.
ended = 0
delay = ProgressBar.update_granularity().total_seconds()
while ended < len(workers):
try:
result = qResults.get(block=True, timeout=delay)
if result is EndMarker:
ended += 1
else:
yield result
except Empty:
pb.poke()
# Cleanup and exit.
pusher.join()
for worker in workers:
worker.join()
for watcher in watchdogs:
watcher.join()
assert qTasks.empty(), "Send queue not drained"
assert qResults.empty(), "Result queue not drained"
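# Illustrative sketch (not part of the original module): the watchdog handshake
# used by _do_work/_do_watch in miniature. A worker first puts a process handle
# on the watch queue and later puts TaskFinishedMarker; the watcher only kills
# the handle if that marker does not arrive before the timeout.
if __name__ == '__main__':
    qExample = Queue()
    qExample.put('fake-process-handle')
    qExample.put(TaskFinishedMarker)
    handle = qExample.get(True)
    try:
        fin = qExample.get(block=True, timeout=1)
        assert fin is TaskFinishedMarker, "invalid finish marker"
    except Empty:
        pass  # _do_watch would call handle.terminate() here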
|
hivesolutions/appier
|
refs/heads/master
|
src/appier/test/typesf.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Appier Framework
# Copyright (c) 2008-2021 Hive Solutions Lda.
#
# This file is part of Hive Appier Framework.
#
# Hive Appier Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Appier Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Appier Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2021 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import calendar
import unittest
import datetime
import appier
from . import mock
class TypesfTest(unittest.TestCase):
def setUp(self):
self.app = appier.App()
self.app._register_models_m(mock, "Mocks")
def tearDown(self):
self.app.unload()
adapter = appier.get_adapter()
adapter.drop_db()
def test_reference(self):
person = mock.Person()
person.name = "Name"
person.car = mock.Person.car["type"]("")
self.assertEqual(person.car, None)
self.assertEqual(person.car, mock.Person.car["type"](""))
self.assertEqual(person.car, mock.Person.car["type"](b""))
self.assertEqual(person.car, mock.Person.car["type"](None))
self.assertNotEqual(person.car, mock.Person.father["type"](1))
self.assertNotEqual(person.car, mock.Person.car["type"](1))
self.assertNotEqual(person.car, "car")
self.assertEqual(isinstance(person.car, appier.Reference), True)
self.assertEqual(len(person.car), 0)
person = mock.Person()
person.name = "Name"
person.car = mock.Person.car["type"](b"")
self.assertEqual(person.car, None)
self.assertEqual(person.car, mock.Person.car["type"](""))
self.assertEqual(person.car, mock.Person.car["type"](b""))
self.assertEqual(person.car, mock.Person.car["type"](None))
self.assertNotEqual(person.car, mock.Person.father["type"](1))
self.assertNotEqual(person.car, mock.Person.car["type"](1))
self.assertNotEqual(person.car, "car")
self.assertEqual(isinstance(person.car, appier.Reference), True)
self.assertEqual(len(person.car), 0)
person = mock.Person()
person.name = "Name"
person.car = mock.Person.car["type"](None)
self.assertEqual(person.car, None)
self.assertEqual(person.car, mock.Person.car["type"](""))
self.assertEqual(person.car, mock.Person.car["type"](b""))
self.assertEqual(person.car, mock.Person.car["type"](None))
self.assertNotEqual(person.car, mock.Person.father["type"](1))
self.assertNotEqual(person.car, mock.Person.car["type"](1))
self.assertNotEqual(person.car, "car")
self.assertEqual(isinstance(person.car, appier.Reference), True)
self.assertEqual(len(person.car), 0)
person = mock.Person()
person.name = "Name"
person.car = mock.Person.car["type"](1)
self.assertEqual(person.car, mock.Person.car["type"](1))
self.assertNotEqual(person.car, None)
self.assertNotEqual(person.car, mock.Person.car["type"](""))
self.assertNotEqual(person.car, mock.Person.car["type"](b""))
self.assertNotEqual(person.car, mock.Person.car["type"](None))
self.assertNotEqual(person.car, mock.Person.father["type"](1))
self.assertNotEqual(person.car, "car")
self.assertEqual(isinstance(person.car, appier.Reference), True)
self.assertEqual(len(person.car), 1)
def test_references(self):
person = mock.Person()
person.name = "Name"
person.cats = mock.Person.cats["type"]([1, 2, 3])
self.assertEqual(mock.Cat(identifier = 1) in person.cats, True)
self.assertEqual(mock.Cat(identifier = 3) in person.cats, True)
self.assertNotEqual(mock.Cat(identifier = 4) in person.cats, True)
self.assertNotEqual(person.cats, None)
self.assertNotEqual(person.cats, [])
self.assertNotEqual(person.cats, "cars")
self.assertEqual(isinstance(person.cats, appier.References), True)
self.assertEqual(len(person.cats), 3)
def test_file(self):
file_m = dict(name = "hello", data = b"SGVsbG8gV29ybGQ=")
file = appier.File(file_m)
self.assertEqual(type(file.file_name), str)
self.assertEqual(type(file.data_b64), str)
self.assertEqual(type(file.data), appier.legacy.BYTES)
self.assertEqual(file.file_name, "hello")
self.assertEqual(file.data, b"Hello World")
self.assertEqual(file.data_b64, "SGVsbG8gV29ybGQ=")
file_d = b"Hello World"
file = appier.File(file_d)
self.assertEqual(type(file.file_name), str)
self.assertEqual(type(file.data_b64), str)
self.assertEqual(type(file.data), appier.legacy.BYTES)
self.assertEqual(file.file_name, "default")
self.assertEqual(file.data, b"Hello World")
self.assertEqual(file.data_b64, "SGVsbG8gV29ybGQ=")
def test_encrypted(self):
encrypted = appier.encrypted(key = b"hello key")
result = encrypted("hello world")
self.assertEqual(str(result), "hello world")
self.assertEqual(result.value, "hello world")
self.assertEqual(result.encrypted, "vGgMtFgyMVwH3uE=:encrypted")
result = encrypted("vGgMtFgyMVwH3uE=:encrypted")
self.assertEqual(str(result), "hello world")
self.assertEqual(result.value, "hello world")
self.assertEqual(result.encrypted, "vGgMtFgyMVwH3uE=:encrypted")
encrypted = appier.encrypted(key = None)
result = encrypted("hello world")
self.assertEqual(str(result), "hello world")
self.assertEqual(result.value, "hello world")
self.assertEqual(result.value, "hello world")
result = encrypted("vGgMtFgyMVwH3uE=:encrypted")
self.assertEqual(str(result), "vGgMtFgyMVwH3uE=:encrypted")
self.assertEqual(result.value, "vGgMtFgyMVwH3uE=:encrypted")
self.assertEqual(result.encrypted, "vGgMtFgyMVwH3uE=:encrypted")
def test_dumpall(self):
person = mock.Person()
person.name = "Name"
person.save()
car = mock.Car()
car.name = "Car"
car.save()
father = mock.Person()
father.name = "Father"
father.save()
brother = mock.Person()
brother.name = "Brother"
brother.save()
person.car = car
person.father = father
person.brother = brother
person.save()
person = mock.Person.get(identifier = 1)
result = person.json_v()
self.assertEqual(type(result), dict)
self.assertEqual(result["name"], "Name")
result = person.car.json_v()
self.assertEqual(type(result), int)
self.assertEqual(result, 1)
result = person.father.json_v()
self.assertEqual(type(result), mock.Person)
self.assertEqual(result.name, "Father")
result = person.brother.json_v()
self.assertEqual(type(result), int)
self.assertEqual(result, 3)
def test_custom(self):
class DateTime(appier.Type):
def loads(self, value):
cls = self.__class__
if isinstance(value, cls):
self._datetime = value._datetime
elif isinstance(value, datetime.datetime):
self._datetime = value
elif isinstance(value, (int, float)):
self._datetime = datetime.datetime.utcfromtimestamp(value)
else:
raise appier.OperationalError()
def dumps(self):
return self.timestamp()
def timestamp(self):
return calendar.timegm(self._datetime.utctimetuple())
class CustomPerson(mock.Person):
birth = appier.field(
type = DateTime
)
self.app._register_model(CustomPerson)
person = CustomPerson()
person.name = "Name"
person.birth = DateTime(0)
person.save()
self.assertEqual(person.name, "Name")
self.assertEqual(type(person.birth), DateTime)
self.assertEqual(person.birth.timestamp(), 0)
person = CustomPerson.get(name = "Name")
self.assertEqual(person.name, "Name")
self.assertEqual(type(person.birth), DateTime)
self.assertEqual(person.birth.timestamp(), 0)
person = CustomPerson(name = "New Name", birth = 1)
person.save()
self.assertEqual(person.name, "New Name")
self.assertEqual(type(person.birth), DateTime)
self.assertEqual(person.birth.timestamp(), 1)
person = CustomPerson.get(birth = 1)
self.assertEqual(person.name, "New Name")
self.assertEqual(type(person.birth), DateTime)
self.assertEqual(person.birth.timestamp(), 1)
|
hsiaoyi0504/scikit-learn
|
refs/heads/master
|
sklearn/externals/joblib/_memory_helpers.py
|
303
|
try:
# Available in Python 3
from tokenize import open as open_py_source
except ImportError:
# Copied from python3 tokenize
from codecs import lookup, BOM_UTF8
import re
from io import TextIOWrapper, open
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def _detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that
should be used to decode a Python source file. It requires one
        argument, readline, in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are
present, but disagree, a SyntaxError will be raised. If the encoding
cookie is an invalid charset, raise a SyntaxError. Note that if a
utf-8 bom is found, 'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be
returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
def open_py_source(filename):
"""Open a file in read only mode using the encoding detected by
detect_encoding().
"""
buffer = open(filename, 'rb')
encoding, lines = _detect_encoding(buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, line_buffering=True)
text.mode = 'r'
return text
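# Illustrative usage sketch (not part of the original module): open this very
# file with the encoding detected from its BOM or coding cookie. The call is
# the same whether the tokenize.open import above succeeded or the fallback
# definition is used.
if __name__ == '__main__':
    with open_py_source(__file__) as source_file:
        first_line = source_file.readline()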
|
alubbe/FrameworkBenchmarks
|
refs/heads/master
|
toolset/setup/linux/setup_util.py
|
40
|
import re
import os
import sys
import subprocess
import platform
from threading import Thread
from Queue import Queue, Empty
class NonBlockingStreamReader:
'''
Enables calling readline in a non-blocking manner with a blocking stream,
such as the ones returned from subprocess.Popen
Originally written by Eyal Arubas, who granted permission to use this inside TFB
See http://eyalarubas.com/python-subproc-nonblock.html
'''
def __init__(self, stream, eof_message = None):
'''
stream: the stream to read from.
Usually a process' stdout or stderr.
eof_message: A message to print to stdout as soon
as the stream's end is reached. Useful if you
want to track the exact moment a stream terminates
'''
self._s = stream
self._q = Queue()
self._eof_message = eof_message
self._poisonpill = 'MAGIC_POISONPILL_STRING'
def _populateQueue(stream, queue):
while True:
line = stream.readline()
if line: # 'data\n' or '\n'
queue.put(line)
else: # '' e.g. EOF
if self._eof_message:
sys.stdout.write(self._eof_message + '\n')
queue.put(self._poisonpill)
return
self._t = Thread(target = _populateQueue,
args = (self._s, self._q))
self._t.daemon = True
self._t.start()
def readline(self, timeout = None):
try:
line = self._q.get(block = timeout is not None,
timeout = timeout)
if line == self._poisonpill:
raise EndOfStream
return line
except Empty:
return None
class EndOfStream(Exception): pass
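# Illustrative usage sketch (not part of the original module): wrap a child
# process' stdout so that readline never blocks the calling thread.
if __name__ == '__main__':
    example_proc = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE)
    reader = NonBlockingStreamReader(example_proc.stdout, eof_message='echo finished')
    line = reader.readline(0.5)  # returns None if nothing arrived within 500 ms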
# Replaces all text found using the regular expression to_replace with the supplied replacement.
def replace_text(file, to_replace, replacement):
with open(file, "r") as conf:
contents = conf.read()
replaced_text = re.sub(to_replace, replacement, contents)
with open(file, "w") as f:
f.write(replaced_text)
# Replaces the current process environment with the one found in
# config file. Retains a few original vars (HOME, PATH, etc.) by default.
# Optionally allows specification of a command to be run before loading
# the environment, to allow the framework to set environment variables
# Note: This command *cannot* print to stdout!
#
# Note: This will not replace the sudo environment (e.g. subprocess.check_call("sudo <command>")).
# If you must use sudo, consider sudo sh -c ". <config> && your_command"
def replace_environ(config=None, root=None, print_result=False, command='true'):
if platform.system().lower() == 'windows':
pass
else:
# Clean up our current environment, preserving some important items
mini_environ = {}
for envname in ['HOME', 'PATH', 'LANG', 'USER', 'LD_LIBRARY_PATH', 'PYTHONPATH', 'FWROOT', 'TRAVIS']:
if envname in os.environ:
mini_environ[envname] = os.environ[envname]
for key in os.environ:
if key.startswith(('TFB_', 'TRAVIS_')): # Any TFB_* and TRAVIS_* variables are preserved
mini_environ[key] = os.environ[key]
os.environ.clear()
        # Use FWROOT if explicitly provided
if root is not None:
mini_environ['FWROOT']=root
# Run command, source config file, and store resulting environment
setup_env = "%s && . %s && env" % (command, config)
env = ""
try:
env = subprocess.check_output(setup_env, shell=True, env=mini_environ,
executable='/bin/bash')
except subprocess.CalledProcessError:
# Ensure that an error here does not crash the toolset
print "CRITICAL: Loading %s returned non-zero exit" % config
for key,value in mini_environ.iteritems():
os.environ[key]=value
return
for line in env.split('\n'):
try:
key, value = line.split('=', 1)
# If we already have this TFB_ variable, do not overwrite
if key.startswith('TFB_') and key in mini_environ:
os.environ[key]=mini_environ[key]
else:
os.environ[key]=value
except Exception:
if not line: # Don't warn for empty line
continue
print "WARN: Line '%s' from '%s' is not an environment variable" % (line, config)
continue
if print_result:
out = subprocess.check_output('env', shell=True, executable='/bin/bash')
print "Environment after loading %s" %config
print out
# Queries the shell for the value of FWROOT
def get_fwroot():
if platform.system().lower() == 'windows':
fwroot = "C:\FrameworkBenchmarks"
return fwroot
else:
try:
# Use printf to avoid getting a newline
# Redirect to avoid stderr printing
fwroot = subprocess.check_output('printf $FWROOT 2> /dev/null', shell=True, executable='/bin/bash')
return fwroot
except subprocess.CalledProcessError:
# Make a last-guess effort ;-)
return os.getcwd();
# Turns absolute path into path relative to FWROOT
# Assumes path is underneath FWROOT, not above
#
# Useful for clean presentation of paths
# e.g. /foo/bar/benchmarks/go/install.sh
# v.s. FWROOT/go/install.sh
def path_relative_to_root(path):
# Requires bash shell parameter expansion
return subprocess.check_output("D=%s && printf \"${D#%s}\""%(path, get_fwroot()), shell=True, executable='/bin/bash')
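# Illustrative usage sketch (not part of the original module): with FWROOT set
# to /home/user/FrameworkBenchmarks, the call below prints "/go/install.sh".
if __name__ == '__main__':
    print path_relative_to_root('/home/user/FrameworkBenchmarks/go/install.sh')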
|
slipcon/gitlint
|
refs/heads/master
|
qa/base.py
|
2
|
import os
from datetime import datetime
from uuid import uuid4
from unittest2 import TestCase
from sh import git, rm, touch # pylint: disable=no-name-in-module
class BaseTestCase(TestCase):
# In case of assert failures, print the full error message
maxDiff = None
tmp_git_repo = None
@classmethod
def setUpClass(cls):
""" Sets up the integration tests by creating a new temporary git repository """
cls.tmp_git_repo = os.path.realpath("/tmp/gitlint-test-%s" % datetime.now().strftime("%Y%m%d-%H%M%S"))
git("init", cls.tmp_git_repo)
        # configuring name and email is required in every git repo
git("config", "user.name", "gitlint-test-user", _cwd=cls.tmp_git_repo)
git("config", "user.email", "gitlint@test.com", _cwd=cls.tmp_git_repo)
@classmethod
def tearDownClass(cls):
""" Cleans up the temporary git repository """
rm("-rf", cls.tmp_git_repo)
def _create_simple_commit(self, message, out=None, ok_code=None, env=None):
""" Creates a simple commit with an empty test file.
:param message: Commit message for the commit. """
test_filename = "test-file-" + str(uuid4())
touch(test_filename, _cwd=self.tmp_git_repo)
git("add", test_filename, _cwd=self.tmp_git_repo)
# https://amoffat.github.io/sh/#interactive-callbacks
git("commit", "-m", message, _cwd=self.tmp_git_repo, _tty_in=True, _out=out, _ok_code=ok_code, _env=env)
return test_filename
@staticmethod
def get_sample_path(filename=""):
samples_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "samples")
return os.path.join(samples_dir, filename)
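# Illustrative sketch (not part of the original module): a minimal integration
# test built on BaseTestCase. The commit message used here is hypothetical.
class ExampleCommitTest(BaseTestCase):
    def test_create_simple_commit(self):
        filename = self._create_simple_commit(u"Sample title\n\nSample body")
        self.assertTrue(filename.startswith("test-file-"))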
|
xuegang/gpdb
|
refs/heads/master
|
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/pg_twophase/commit_drop_tests/post_sql/test_postsqls.py
|
63
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mpp.models import SQLTestCase
'''
Post sqls for create_tests
'''
class TestPostSQLClass(SQLTestCase):
sql_dir = 'sql/'
ans_dir = 'expected/'
out_dir = 'output/'
|
RubenKelevra/rethinkdb
|
refs/heads/next
|
external/v8_3.30.33.16/build/gyp/test/mac/gyptest-strip-default.py
|
232
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that the default STRIP_STYLEs match between different generators.
"""
import TestGyp
import re
import subprocess
import sys
import time
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR='strip'
test.run_gyp('test-defaults.gyp', chdir=CHDIR)
test.build('test-defaults.gyp', test.ALL, chdir=CHDIR)
# Lightweight check if stripping was done.
def OutPath(s):
return test.built_file_path(s, chdir=CHDIR)
def CheckNsyms(p, o_expected):
proc = subprocess.Popen(['nm', '-aU', p], stdout=subprocess.PIPE)
o = proc.communicate()[0]
# Filter out mysterious "00 0000 OPT radr://5614542" symbol which
# is apparently only printed on the bots (older toolchain?).
# Yes, "radr", not "rdar".
o = ''.join(filter(lambda s: 'radr://5614542' not in s, o.splitlines(True)))
o = o.replace('A', 'T')
o = re.sub(r'^[a-fA-F0-9]+', 'XXXXXXXX', o, flags=re.MULTILINE)
assert not proc.returncode
if o != o_expected:
print 'Stripping: Expected symbols """\n%s""", got """\n%s"""' % (
o_expected, o)
test.fail_test()
CheckNsyms(OutPath('libsingle_dylib.dylib'),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX t _the_hidden_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(OutPath('single_so.so'),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX t _the_hidden_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(OutPath('single_exe'),
"""\
XXXXXXXX T __mh_execute_header
""")
CheckNsyms(test.built_file_path(
'bundle_dylib.framework/Versions/A/bundle_dylib', chdir=CHDIR),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX t _the_hidden_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(test.built_file_path(
'bundle_so.bundle/Contents/MacOS/bundle_so', chdir=CHDIR),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(test.built_file_path(
'bundle_exe.app/Contents/MacOS/bundle_exe', chdir=CHDIR),
"""\
XXXXXXXX T __mh_execute_header
""")
test.pass_test()
|
Lynx187/script.module.urlresolver
|
refs/heads/master
|
lib/urlresolver/plugins/realdebrid.py
|
3
|
"""
urlresolver XBMC Addon
Copyright (C) 2013 t0mm0, JUL1EN094, bstrdsmkr
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import re
import urllib
import xbmcgui
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import SiteAuth
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
from t0mm0.common.net import Net
import simplejson as json
# SET ERROR_LOGO # THANKS TO VOINAGE, BSTRDMKR, ELDORADO
error_logo = os.path.join(common.addon_path, 'resources', 'images', 'redx.png')
class RealDebridResolver(Plugin, UrlResolver, SiteAuth, PluginSettings):
implements = [UrlResolver, SiteAuth, PluginSettings]
name = "realdebrid"
domains = ["*"]
profile_path = common.profile_path
cookie_file = os.path.join(profile_path, '%s.cookies' % name)
media_url = None
def __init__(self):
p = self.get_setting('priority') or 1
self.priority = int(p)
self.net = Net()
self.hosters = None
self.hosts = None
try:
os.makedirs(os.path.dirname(self.cookie_file))
except OSError:
pass
# UrlResolver methods
def get_media_url(self, host, media_id):
dialog = xbmcgui.Dialog()
url = 'https://real-debrid.com/ajax/unrestrict.php?link=%s' % media_id.replace('|User-Agent=Mozilla%2F5.0%20(Windows%20NT%206.1%3B%20rv%3A11.0)%20Gecko%2F20100101%20Firefox%2F11.0', '')
source = self.net.http_GET(url).content
jsonresult = json.loads(source)
if 'generated_links' in jsonresult:
generated_links = jsonresult['generated_links']
if len(generated_links) == 1:
return generated_links[0][2].encode('utf-8')
line = []
for link in generated_links:
extension = link[0].split('.')[-1]
line.append(extension.encode('utf-8'))
result = dialog.select('Choose the link', line)
if result != -1:
link = generated_links[result][2]
return link.encode('utf-8')
else:
raise UrlResolver.ResolverError('No generated_link')
elif 'main_link' in jsonresult:
return jsonresult['main_link'].encode('utf-8')
else:
if 'message' in jsonresult:
raise UrlResolver.ResolverError(jsonresult['message'].encode('utf-8'))
else:
raise UrlResolver.ResolverError('No generated_link and no main_link')
def get_url(self, host, media_id):
return media_id
def get_host_and_id(self, url):
return 'www.real-debrid.com', url
def get_all_hosters(self):
if self.hosters is None:
try:
url = 'http://www.real-debrid.com/api/regex.php?type=all'
response = self.net.http_GET(url).content.lstrip('/').rstrip('/g')
delim = '/g,/|/g\|-\|/'
self.hosters = [re.compile(host) for host in re.split(delim, response)]
except:
self.hosters = []
common.addon.log_debug('RealDebrid hosters : %s' % self.hosters)
return self.hosters
def get_hosts(self):
if self.hosts is None:
try:
url = 'https://real-debrid.com/api/hosters.php'
response = self.net.http_GET(url).content
response = response[1:-1]
self.hosts = response.split('","')
except:
self.hosts = []
common.addon.log_debug('RealDebrid hosts : %s' % self.hosts)
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
if self.get_setting('login') == 'false': return False
common.addon.log_debug('in valid_url %s : %s' % (url, host))
if url:
self.get_all_hosters()
for host in self.hosters:
# common.addon.log_debug('RealDebrid checking host : %s' %str(host))
if re.search(host, url):
common.addon.log_debug('RealDebrid Match found')
return True
elif host:
self.get_hosts()
if host in self.hosts or any(item in host for item in self.hosts):
return True
return False
def checkLogin(self):
url = 'https://real-debrid.com/api/account.php'
if not os.path.exists(self.cookie_file):
return True
self.net.set_cookies(self.cookie_file)
source = self.net.http_GET(url).content
common.addon.log_debug(source)
if re.search('expiration', source):
common.addon.log_debug('checkLogin returning False')
return False
else:
common.addon.log_debug('checkLogin returning True')
return True
# SiteAuth methods
def login(self):
if self.checkLogin():
try:
common.addon.log_debug('Need to login since session is invalid')
import hashlib
login_data = urllib.urlencode({'user': self.get_setting('username'), 'pass': hashlib.md5(self.get_setting('password')).hexdigest()})
url = 'https://real-debrid.com/ajax/login.php?' + login_data
source = self.net.http_GET(url).content
if re.search('OK', source):
self.net.save_cookies(self.cookie_file)
self.net.set_cookies(self.cookie_file)
return True
except:
common.addon.log_debug('error with http_GET')
dialog = xbmcgui.Dialog()
dialog.ok(' Real-Debrid ', ' Unexpected error, Please try again.', '', '')
else:
return False
else:
return True
# PluginSettings methods
def get_settings_xml(self):
xml = PluginSettings.get_settings_xml(self)
xml += '<setting id="%s_login" ' % (self.__class__.__name__)
xml += 'type="bool" label="login" default="false"/>\n'
xml += '<setting id="%s_username" enable="eq(-1,true)" ' % (self.__class__.__name__)
xml += 'type="text" label="username" default=""/>\n'
xml += '<setting id="%s_password" enable="eq(-2,true)" ' % (self.__class__.__name__)
xml += 'type="text" label="password" option="hidden" default=""/>\n'
return xml
# to indicate if this is a universal resolver
def isUniversal(self):
return True
|
JARR-aggregator/JARR
|
refs/heads/master
|
newspipe/bootstrap.py
|
1
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# required imports and code execution for basic functioning
import calendar
import logging
import os
from flask import Flask, request
from flask_migrate import Migrate
from flask_talisman import Talisman
from flask_babel import Babel, format_datetime
from flask_sqlalchemy import SQLAlchemy
def set_logging(
log_path=None,
log_level=logging.INFO,
modules=(),
log_format="%(asctime)s %(levelname)s %(message)s",
):
if not modules:
modules = (
"root",
"bootstrap",
"runserver",
"newspipe.crawler.default_crawler",
"manager",
"plugins",
)
if log_path:
if not os.path.exists(os.path.dirname(log_path)):
os.makedirs(os.path.dirname(log_path))
if not os.path.exists(log_path):
open(log_path, "w").close()
handler = logging.FileHandler(log_path)
else:
handler = logging.StreamHandler()
formater = logging.Formatter(log_format)
handler.setFormatter(formater)
for logger_name in modules:
logger = logging.getLogger(logger_name)
logger.addHandler(handler)
for handler in logger.handlers:
handler.setLevel(log_level)
logger.setLevel(log_level)
# Create Flask application
application = Flask(__name__, instance_relative_config=True)
configuration = os.environ.get("NEWSPIPE_CONFIG", False)
if configuration == "testing":
application.debug = logging.DEBUG
application.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
application.config["TESTING"] = True
elif configuration:
# if the configuration file is specified via an environment variable
application.config.from_pyfile(configuration, silent=False)
else:
try:
application.config.from_pyfile("development.py", silent=False)
except Exception:
application.config.from_pyfile("sqlite.py", silent=False)
set_logging(application.config["LOG_PATH"])
db = SQLAlchemy(application)
migrate = Migrate(application, db)
talisman = Talisman(application, content_security_policy=application.config["CONTENT_SECURITY_POLICY"])
babel = Babel(application)
@babel.localeselector
def get_locale():
# if a user is logged in, use the locale from the user settings
# user = getattr(g, 'user', None)
# if user is not None:
# return user.locale
    # otherwise try to guess the language from the Accept-Language
    # header the browser transmits. The best match among the
    # configured LANGUAGES wins.
return request.accept_languages.best_match(application.config["LANGUAGES"].keys())
# Jinja filters
def month_name(month_number):
return calendar.month_name[month_number]
def datetimeformat(value, format="%Y-%m-%d %H:%M"):
return value.strftime(format)
application.jinja_env.filters["month_name"] = month_name
application.jinja_env.filters["datetime"] = format_datetime
application.jinja_env.filters["datetimeformat"] = datetimeformat
# inject application in Jinja env
application.jinja_env.globals["application"] = application
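# Illustrative usage sketch (not part of the original module): set_logging can
# also be pointed at a dedicated file and a narrower set of loggers. The path
# below is a hypothetical example.
if __name__ == "__main__":
    set_logging("/tmp/newspipe-example.log", log_level=logging.DEBUG,
                modules=("manager", "newspipe.crawler.default_crawler"))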
|
jjmleiro/hue
|
refs/heads/master
|
desktop/core/ext-py/guppy-0.1.10/guppy/etc/xterm.py
|
37
|
#._cv_part xterm
# Run an xterm on the current process or a forked process
# Adapted from pty.py in the Python 1.5.2 distribution.
# The pty.fork() couldn't be used because it didn't return
# the pty name needed by xterm
# I couldn't import pty.py to use master_open because it didn't find termios.
import os, sys, FCNTL
# We couldn't find termios
STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO = 0, 1, 2
# Open pty master. Returns (master_fd, tty_name). SGI and Linux/BSD version.
# Copied from pty.py from Python 1.5.2. /SN
def master_open():
try:
import sgi
except ImportError:
pass
else:
try:
tty_name, master_fd = sgi._getpty(FCNTL.O_RDWR, 0666, 0)
except IOError, msg:
raise os.error, msg
return master_fd, tty_name
for x in 'pqrstuvwxyzPQRST':
for y in '0123456789abcdef':
pty_name = '/dev/pty' + x + y
try:
fd = os.open(pty_name, FCNTL.O_RDWR)
except os.error:
continue
return (fd, '/dev/tty' + x + y)
raise os.error, 'out of pty devices'
# Open the pty slave. Acquire the controlling terminal.
# Returns file descriptor. Linux version. (Should be universal? --Guido)
# Copied from pty.py from Python 1.5.2. /SN
def slave_open(tty_name):
return os.open(tty_name, FCNTL.O_RDWR)
def xterm(prog = None, options=''):
master_fd, tty_name = master_open()
pid = os.fork()
if pid:
# Acquire controlling terminal.
slave_fd = slave_open(tty_name)
# Slave becomes stdin/stdout/stderr of child.
os.dup2(slave_fd, STDIN_FILENO)
os.dup2(slave_fd, STDOUT_FILENO)
os.dup2(slave_fd, STDERR_FILENO)
if (slave_fd > STDERR_FILENO):
os.close (slave_fd)
os.close(master_fd)
sys.stdin.readline() # Throw away an init string from xterm
if prog is not None:
prog()
else:
os.setsid()
cmd = 'xterm %s -S%s%d'%(options, tty_name[-2:], master_fd)
os.system(cmd)
#os.waitpid(pid, 0)
return pid
def forkxterm(prog = None, options=''):
pid = os.fork()
if pid:
return pid
else:
os.setsid()
pid = xterm(prog, options)
if not pid:
os._exit(0)
def hello():
print 'hello'
while 1:
pass
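# Illustrative usage sketch (not part of the original module): run the demo
# above in an xterm attached to a forked process. This needs an X display and
# the legacy Python 2 modules the rest of this file already relies on.
if __name__ == '__main__':
    forkxterm(hello, options='-geometry 80x24')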
|
dnjohnstone/hyperspy
|
refs/heads/RELEASE_next_minor
|
hyperspy/learn/mva.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
import types
import warnings
import dask.array as da
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import FuncFormatter, MaxNLocator
import hyperspy.misc.io.tools as io_tools
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.learn.mlpca import mlpca
from hyperspy.learn.ornmf import ornmf
from hyperspy.learn.orthomax import orthomax
from hyperspy.learn.rpca import orpca, rpca_godec
from hyperspy.learn.svd_pca import svd_pca
from hyperspy.learn.whitening import whiten_data
from hyperspy.misc.machine_learning import import_sklearn
from hyperspy.misc.utils import ordinal, stack
try:
import mdp
mdp_installed = True
except ImportError:
mdp_installed = False
_logger = logging.getLogger(__name__)
if import_sklearn.sklearn_installed:
decomposition_algorithms = {
"sklearn_pca": import_sklearn.sklearn.decomposition.PCA,
"nmf": import_sklearn.sklearn.decomposition.NMF,
"sparse_pca": import_sklearn.sklearn.decomposition.SparsePCA,
"mini_batch_sparse_pca": import_sklearn.sklearn.decomposition.MiniBatchSparsePCA,
"sklearn_fastica": import_sklearn.sklearn.decomposition.FastICA,
}
def _get_derivative(signal, diff_axes, diff_order):
"""Calculate the derivative of a signal."""
if signal.axes_manager.signal_dimension == 1:
signal = signal.diff(order=diff_order, axis=-1)
else:
# n-d signal case.
# Compute the differences for each signal axis, unfold the
# signal axes and stack the differences over the signal
# axis.
if diff_axes is None:
diff_axes = signal.axes_manager.signal_axes
iaxes = [axis.index_in_axes_manager for axis in diff_axes]
else:
iaxes = diff_axes
diffs = [signal.derivative(order=diff_order, axis=i) for i in iaxes]
for signal in diffs:
signal.unfold()
signal = stack(diffs, axis=-1)
del diffs
return signal
def _normalize_components(target, other, function=np.sum):
"""Normalize components according to a function."""
coeff = function(target, axis=0)
target /= coeff
other *= coeff
class MVA:
"""Multivariate analysis capabilities for the Signal1D class."""
def __init__(self):
if not hasattr(self, "learning_results"):
self.learning_results = LearningResults()
def decomposition(
self,
normalize_poissonian_noise=False,
algorithm="svd",
output_dimension=None,
centre=None,
auto_transpose=True,
navigation_mask=None,
signal_mask=None,
var_array=None,
var_func=None,
reproject=None,
return_info=False,
print_info=True,
svd_solver="auto",
copy=True,
**kwargs,
):
"""Apply a decomposition to a dataset with a choice of algorithms.
The results are stored in ``self.learning_results``.
Read more in the :ref:`User Guide <mva.decomposition>`.
Parameters
----------
normalize_poissonian_noise : bool, default False
If True, scale the signal to normalize Poissonian noise using
the approach described in [Keenan2004]_.
algorithm : {"svd", "mlpca", "sklearn_pca", "nmf", "sparse_pca", "mini_batch_sparse_pca", "rpca", "orpca", "ornmf", custom object}, default "svd"
The decomposition algorithm to use. If algorithm is an object,
it must implement a ``fit_transform()`` method or ``fit()`` and
``transform()`` methods, in the same manner as a scikit-learn estimator.
output_dimension : None or int
Number of components to keep/calculate.
Default is None, i.e. ``min(data.shape)``.
centre : {None, "navigation", "signal"}, default None
* If None, the data is not centered prior to decomposition.
* If "navigation", the data is centered along the navigation axis.
Only used by the "svd" algorithm.
* If "signal", the data is centered along the signal axis.
Only used by the "svd" algorithm.
auto_transpose : bool, default True
If True, automatically transposes the data to boost performance.
Only used by the "svd" algorithm.
navigation_mask : boolean numpy array
The navigation locations marked as True are not used in the
            decomposition.
signal_mask : boolean numpy array
The signal locations marked as True are not used in the
decomposition.
var_array : numpy array
Array of variance for the maximum likelihood PCA algorithm.
Only used by the "mlpca" algorithm.
var_func : None or function or numpy array, default None
* If None, ignored
* If function, applies the function to the data to obtain ``var_array``.
Only used by the "mlpca" algorithm.
* If numpy array, creates ``var_array`` by applying a polynomial function
defined by the array of coefficients to the data. Only used by
the "mlpca" algorithm.
reproject : {None, "signal", "navigation", "both"}, default None
If not None, the results of the decomposition will be projected in
the selected masked area.
return_info: bool, default False
The result of the decomposition is stored internally. However,
some algorithms generate some extra information that is not
stored. If True, return any extra information if available.
In the case of sklearn.decomposition objects, this includes the
sklearn Estimator object.
print_info : bool, default True
If True, print information about the decomposition being performed.
In the case of sklearn.decomposition objects, this includes the
values of all arguments of the chosen sklearn algorithm.
svd_solver : {"auto", "full", "arpack", "randomized"}, default "auto"
If auto:
The solver is selected by a default policy based on `data.shape` and
`output_dimension`: if the input data is larger than 500x500 and the
number of components to extract is lower than 80% of the smallest
dimension of the data, then the more efficient "randomized"
method is enabled. Otherwise the exact full SVD is computed and
optionally truncated afterwards.
If full:
run exact SVD, calling the standard LAPACK solver via
:py:func:`scipy.linalg.svd`, and select the components by postprocessing
If arpack:
use truncated SVD, calling ARPACK solver via
:py:func:`scipy.sparse.linalg.svds`. It requires strictly
`0 < output_dimension < min(data.shape)`
If randomized:
use truncated SVD, calling :py:func:`sklearn.utils.extmath.randomized_svd`
to estimate a limited number of components
copy : bool, default True
* If True, stores a copy of the data before any pre-treatments
such as normalization in ``s._data_before_treatments``. The original
data can then be restored by calling ``s.undo_treatments()``.
* If False, no copy is made. This can be beneficial for memory
usage, but care must be taken since data will be overwritten.
**kwargs : extra keyword arguments
Any keyword arguments are passed to the decomposition algorithm.
Returns
-------
return_info : tuple(numpy array, numpy array) or sklearn.Estimator or None
* If True and 'algorithm' in ['rpca', 'orpca', 'ornmf'], returns
the low-rank (X) and sparse (E) matrices from robust PCA/NMF.
* If True and 'algorithm' is an sklearn Estimator, returns the
Estimator object.
* Otherwise, returns None
References
----------
.. [Keenan2004] M. Keenan and P. Kotula, "Accounting for Poisson noise
in the multivariate analysis of ToF-SIMS spectrum images", Surf.
Interface Anal 36(3) (2004): 203-212.
See Also
--------
* :py:meth:`~.signal.MVATools.plot_decomposition_factors`
* :py:meth:`~.signal.MVATools.plot_decomposition_loadings`
* :py:meth:`~.signal.MVATools.plot_decomposition_results`
* :py:meth:`~.learn.mva.MVA.plot_explained_variance_ratio`
* :py:meth:`~._signals.lazy.LazySignal.decomposition` for lazy signals
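        Examples
        --------
        A minimal sketch; the data file below is hypothetical::

            >>> import hyperspy.api as hs
            >>> s = hs.load("spectrum_image.hspy")
            >>> s.change_dtype("float64")
            >>> s.decomposition(algorithm="svd", output_dimension=10)
            >>> s.plot_explained_variance_ratio()

        A scikit-learn estimator (or a Pipeline/GridSearchCV wrapping one) can
        also be passed directly as ``algorithm``::

            >>> from sklearn.decomposition import PCA
            >>> s.decomposition(algorithm=PCA(n_components=10))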
"""
# Check data is suitable for decomposition
if self.data.dtype.char not in np.typecodes["AllFloat"]:
raise TypeError(
"To perform a decomposition the data must be of the "
f"float or complex type, but the current type is '{self.data.dtype}'. "
"To fix this issue, you can change the type using the "
"change_dtype method (e.g. s.change_dtype('float64')) "
"and then repeat the decomposition.\n"
"No decomposition was performed."
)
if self.axes_manager.navigation_size < 2:
raise AttributeError(
"It is not possible to decompose a dataset with navigation_size < 2"
)
# Check for deprecated algorithm arguments
algorithms_deprecated = {
"fast_svd": "svd",
"fast_mlpca": "mlpca",
"RPCA_GoDec": "rpca",
"ORPCA": "orpca",
"ORNMF": "ornmf",
}
new_algo = algorithms_deprecated.get(algorithm, None)
if new_algo:
if "fast" in algorithm:
warnings.warn(
f"The algorithm name `{algorithm}` has been deprecated and will be "
f"removed in HyperSpy 2.0. Please use `{new_algo}` along with the "
"argument `svd_solver='randomized'` instead.",
VisibleDeprecationWarning,
)
svd_solver = "randomized"
else:
warnings.warn(
f"The algorithm name `{algorithm}` has been deprecated and will be "
f"removed in HyperSpy 2.0. Please use `{new_algo}` instead.",
VisibleDeprecationWarning,
)
# Update algorithm name
algorithm = new_algo
# Check algorithms requiring output_dimension
algorithms_require_dimension = [
"mlpca",
"rpca",
"orpca",
"ornmf",
]
if algorithm in algorithms_require_dimension and output_dimension is None:
raise ValueError(f"`output_dimension` must be specified for '{algorithm}'")
# Check sklearn-like algorithms
is_sklearn_like = False
algorithms_sklearn = [
"sklearn_pca",
"nmf",
"sparse_pca",
"mini_batch_sparse_pca",
]
if algorithm in algorithms_sklearn:
if not import_sklearn.sklearn_installed:
raise ImportError(f"algorithm='{algorithm}' requires scikit-learn")
# Initialize the sklearn estimator
is_sklearn_like = True
estim = decomposition_algorithms[algorithm](
n_components=output_dimension, **kwargs
)
elif hasattr(algorithm, "fit_transform") or (
hasattr(algorithm, "fit") and hasattr(algorithm, "transform")
):
# Check properties of algorithm against typical sklearn objects
# If algorithm is an object that implements the methods fit(),
# transform() and fit_transform(), then we can use it like an
# sklearn estimator. This also allows us to, for example, use
# Pipeline and GridSearchCV objects.
is_sklearn_like = True
estim = algorithm
# MLPCA is designed to handle count data & Poisson noise
if algorithm == "mlpca" and normalize_poissonian_noise:
warnings.warn(
"It does not make sense to normalize Poisson noise with "
"the maximum-likelihood MLPCA algorithm. Therefore, "
"`normalize_poissonian_noise` is set to False.",
UserWarning,
)
normalize_poissonian_noise = False
# Check for deprecated polyfit
polyfit = kwargs.get("polyfit", False)
if polyfit:
warnings.warn(
"The `polyfit` argument has been deprecated and will be "
"removed in HyperSpy 2.0. Please use `var_func` instead.",
VisibleDeprecationWarning,
)
var_func = polyfit
# Initialize return_info and print_info
to_return = None
to_print = [
"Decomposition info:",
f" normalize_poissonian_noise={normalize_poissonian_noise}",
f" algorithm={algorithm}",
f" output_dimension={output_dimension}",
f" centre={centre}",
]
# Backup the original data (on by default to
# mimic previous behaviour)
if copy:
self._data_before_treatments = self.data.copy()
# set the output target (peak results or not?)
target = LearningResults()
# Apply pre-treatments
# Transform the data in a line spectrum
self._unfolded4decomposition = self.unfold()
try:
_logger.info("Performing decomposition analysis")
if hasattr(navigation_mask, "ravel"):
navigation_mask = navigation_mask.ravel()
if hasattr(signal_mask, "ravel"):
signal_mask = signal_mask.ravel()
# Normalize the poissonian noise
# TODO this function can change the masks and
# this can cause problems when reprojecting
if normalize_poissonian_noise:
if centre is not None:
raise ValueError(
"normalize_poissonian_noise=True is only compatible "
f"with `centre=None`, not `centre={centre}`."
)
self.normalize_poissonian_noise(
navigation_mask=navigation_mask, signal_mask=signal_mask,
)
# The rest of the code assumes that the first data axis
# is the navigation axis. We transpose the data if that
# is not the case.
if self.axes_manager[0].index_in_array == 0:
dc = self.data
else:
dc = self.data.T
# Transform the None masks in slices to get the right behaviour
if navigation_mask is None:
navigation_mask = slice(None)
else:
navigation_mask = ~navigation_mask
if signal_mask is None:
signal_mask = slice(None)
else:
signal_mask = ~signal_mask
# WARNING: signal_mask and navigation_mask values are now their
            # negatives, i.e. True -> False and vice versa. However, the
# stored value (at the end of the method) coincides with the
# input masks
data_ = dc[:, signal_mask][navigation_mask, :]
# Reset the explained_variance which is not set by all the
# algorithms
explained_variance = None
explained_variance_ratio = None
number_significant_components = None
mean = None
if algorithm == "svd":
factors, loadings, explained_variance, mean = svd_pca(
data_,
svd_solver=svd_solver,
output_dimension=output_dimension,
centre=centre,
auto_transpose=auto_transpose,
**kwargs,
)
elif algorithm == "mlpca":
if var_array is not None and var_func is not None:
raise ValueError(
"`var_func` and `var_array` cannot both be defined. "
"Please define just one of them."
)
elif var_array is None and var_func is None:
_logger.info(
"No variance array provided. Assuming Poisson-distributed data"
)
var_array = data_
elif var_array is not None:
if var_array.shape != data_.shape:
raise ValueError(
"`var_array` must have the same shape as input data"
)
elif var_func is not None:
if callable(var_func):
var_array = var_func(data_)
elif isinstance(var_func, (np.ndarray, list)):
var_array = np.polyval(var_func, data_)
else:
raise ValueError(
"`var_func` must be either a function or an array "
"defining the coefficients of a polynomial"
)
U, S, V, Sobj = mlpca(
data_, var_array, output_dimension, svd_solver=svd_solver, **kwargs,
)
loadings = U * S
factors = V
explained_variance = S ** 2 / len(factors)
elif algorithm == "rpca":
X, E, U, S, V = rpca_godec(data_, rank=output_dimension, **kwargs)
loadings = U * S
factors = V
explained_variance = S ** 2 / len(factors)
if return_info:
to_return = (X, E)
elif algorithm == "orpca":
if return_info:
X, E, U, S, V = orpca(
data_, rank=output_dimension, store_error=True, **kwargs
)
loadings = U * S
factors = V
explained_variance = S ** 2 / len(factors)
to_return = (X, E)
else:
L, R = orpca(data_, rank=output_dimension, **kwargs)
loadings = L
factors = R.T
elif algorithm == "ornmf":
if return_info:
X, E, W, H = ornmf(
data_, rank=output_dimension, store_error=True, **kwargs,
)
to_return = (X, E)
else:
W, H = ornmf(data_, rank=output_dimension, **kwargs)
loadings = W
factors = H.T
elif is_sklearn_like:
if hasattr(estim, "fit_transform"):
loadings = estim.fit_transform(data_)
elif hasattr(estim, "fit") and hasattr(estim, "transform"):
estim.fit(data_)
loadings = estim.transform(data_)
# Handle sklearn.pipeline.Pipeline objects
# by taking the last step
if hasattr(estim, "steps"):
estim_ = estim[-1]
# Handle GridSearchCV and related objects
# by taking the best estimator
elif hasattr(estim, "best_estimator_"):
estim_ = estim.best_estimator_
# Handle the "usual case"
else:
estim_ = estim
                # We need the components_ attribute to set the factors
if not hasattr(estim_, "components_"):
raise AttributeError(
f"Fitted estimator {str(estim_)} has no attribute 'components_'"
)
factors = estim_.components_.T
if hasattr(estim_, "explained_variance_"):
explained_variance = estim_.explained_variance_
if hasattr(estim_, "mean_"):
mean = estim_.mean_
centre = "samples"
# Return the full estimator object
to_print.extend(["scikit-learn estimator:", estim])
if return_info:
to_return = estim
else:
raise ValueError("'algorithm' not recognised")
# We must calculate the ratio here because otherwise the sum
# information can be lost if the user subsequently calls
# crop_decomposition_dimension()
if explained_variance is not None and explained_variance_ratio is None:
explained_variance_ratio = explained_variance / explained_variance.sum()
number_significant_components = (
self.estimate_elbow_position(explained_variance_ratio) + 1
)
# Store the results in learning_results
target.factors = factors
target.loadings = loadings
target.explained_variance = explained_variance
target.explained_variance_ratio = explained_variance_ratio
target.number_significant_components = number_significant_components
target.decomposition_algorithm = algorithm
target.poissonian_noise_normalized = normalize_poissonian_noise
target.output_dimension = output_dimension
target.unfolded = self._unfolded4decomposition
target.centre = centre
target.mean = mean
if output_dimension and factors.shape[1] != output_dimension:
target.crop_decomposition_dimension(output_dimension)
# Delete the unmixing information, as it will refer to a
# previous decomposition
target.unmixing_matrix = None
target.bss_algorithm = None
if self._unfolded4decomposition:
folding = self.metadata._HyperSpy.Folding
target.original_shape = folding.original_shape
# Reproject
if mean is None:
mean = 0
if reproject in ("navigation", "both"):
if not is_sklearn_like:
loadings_ = (dc[:, signal_mask] - mean) @ factors
else:
loadings_ = estim.transform(dc[:, signal_mask])
target.loadings = loadings_
if reproject in ("signal", "both"):
if not is_sklearn_like:
factors = (
np.linalg.pinv(loadings) @ (dc[navigation_mask, :] - mean)
).T
target.factors = factors
else:
warnings.warn(
"Reprojecting the signal is not yet "
f"supported for algorithm='{algorithm}'",
UserWarning,
)
if reproject == "both":
reproject = "signal"
else:
reproject = None
# Rescale the results if the noise was normalized
if normalize_poissonian_noise:
target.factors[:] *= self._root_bH.T
target.loadings[:] *= self._root_aG
# Set the pixels that were not processed to nan
if not isinstance(signal_mask, slice):
                # Store the (inverted, as input) signal mask
target.signal_mask = ~signal_mask.reshape(
self.axes_manager._signal_shape_in_array
)
if reproject not in ("both", "signal"):
factors = np.zeros((dc.shape[-1], target.factors.shape[1]))
factors[signal_mask, :] = target.factors
factors[~signal_mask, :] = np.nan
target.factors = factors
if not isinstance(navigation_mask, slice):
                # Store the (inverted, as input) navigation mask
target.navigation_mask = ~navigation_mask.reshape(
self.axes_manager._navigation_shape_in_array
)
if reproject not in ("both", "navigation"):
loadings = np.zeros((dc.shape[0], target.loadings.shape[1]))
loadings[navigation_mask, :] = target.loadings
loadings[~navigation_mask, :] = np.nan
target.loadings = loadings
finally:
if self._unfolded4decomposition:
self.fold()
self._unfolded4decomposition = False
self.learning_results.__dict__.update(target.__dict__)
# Undo any pre-treatments by restoring the copied data
if copy:
self.undo_treatments()
# Print details about the decomposition we just performed
if print_info:
print("\n".join([str(pr) for pr in to_print]))
return to_return
def blind_source_separation(
self,
number_of_components=None,
algorithm="sklearn_fastica",
diff_order=1,
diff_axes=None,
factors=None,
comp_list=None,
mask=None,
on_loadings=False,
reverse_component_criterion="factors",
whiten_method="pca",
return_info=False,
print_info=True,
**kwargs,
):
"""Apply blind source separation (BSS) to the result of a decomposition.
The results are stored in ``self.learning_results``.
Read more in the :ref:`User Guide <mva.blind_source_separation>`.
Parameters
----------
number_of_components : int or None
Number of principal components to pass to the BSS algorithm.
If None, you must specify the ``comp_list`` argument.
algorithm : {"sklearn_fastica", "orthomax", "FastICA", "JADE", "CuBICA", "TDSEP", custom object}, default "sklearn_fastica"
The BSS algorithm to use. If algorithm is an object,
it must implement a ``fit_transform()`` method or ``fit()`` and
``transform()`` methods, in the same manner as a scikit-learn estimator.
diff_order : int, default 1
Sometimes it is convenient to perform the BSS on the derivative of
the signal. If ``diff_order`` is 0, the signal is not differentiated.
diff_axes : None or list of ints or strings
* If None and `on_loadings` is False, when `diff_order` is greater than 1
and `signal_dimension` is greater than 1, the differences are calculated
across all signal axes
* If None and `on_loadings` is True, when `diff_order` is greater than 1
and `navigation_dimension` is greater than 1, the differences are calculated
across all navigation axes
* Otherwise the axes can be specified in a list.
factors : :py:class:`~hyperspy.signal.BaseSignal` or numpy array
Factors to decompose. If None, the BSS is performed on the
factors of a previous decomposition. If a Signal instance, the
navigation dimension must be 1 and the size greater than 1.
comp_list : None or list or numpy array
Choose the components to apply BSS to. Unlike ``number_of_components``,
this argument permits non-contiguous components.
mask : :py:class:`~hyperspy.signal.BaseSignal` or subclass
If not None, the signal locations marked as True are masked. The
mask shape must be equal to the signal shape
(navigation shape) when `on_loadings` is False (True).
on_loadings : bool, default False
If True, perform the BSS on the loadings of a previous
decomposition, otherwise, perform the BSS on the factors.
reverse_component_criterion : {"factors", "loadings"}, default "factors"
Use either the factors or the loadings to determine if the
component needs to be reversed.
whiten_method : {"pca", "zca", None}, default "pca"
How to whiten the data prior to blind source separation.
If None, no whitening is applied. See :py:func:`~.learn.whitening.whiten_data`
for more details.
return_info: bool, default False
The result of the decomposition is stored internally. However,
some algorithms generate some extra information that is not
stored. If True, return any extra information if available.
In the case of sklearn.decomposition objects, this includes the
sklearn Estimator object.
print_info : bool, default True
If True, print information about the decomposition being performed.
In the case of sklearn.decomposition objects, this includes the
values of all arguments of the chosen sklearn algorithm.
**kwargs : extra keyword arguments
Any keyword arguments are passed to the BSS algorithm.
Returns
-------
return_info : sklearn.Estimator or None
* If True and 'algorithm' is an sklearn Estimator, returns the
Estimator object.
* Otherwise, returns None
See Also
--------
* :py:meth:`~.signal.MVATools.plot_bss_factors`
* :py:meth:`~.signal.MVATools.plot_bss_loadings`
* :py:meth:`~.signal.MVATools.plot_bss_results`
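        Examples
        --------
        A minimal, illustrative workflow (the file name and the number of
        components used below are placeholders):
        >>> s = hs.load('some_file')
        >>> s.decomposition(output_dimension=10)
        >>> s.blind_source_separation(number_of_components=3)
        >>> s.plot_bss_factors()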
"""
from hyperspy.signal import BaseSignal
lr = self.learning_results
if factors is None:
if not hasattr(lr, "factors") or lr.factors is None:
raise AttributeError(
"A decomposition must be performed before blind "
"source separation, or factors must be provided."
)
else:
if on_loadings:
factors = self.get_decomposition_loadings()
else:
factors = self.get_decomposition_factors()
if hasattr(factors, "compute"):
            # If the factors are lazy, we compute them, which should be fine
            # since we have already reduced the dimensionality of the data.
factors.compute()
# Check factors
if not isinstance(factors, BaseSignal):
raise TypeError(
"`factors` must be a BaseSignal instance, but an object "
f"of type {type(factors)} was provided"
)
# Check factor dimensions
if factors.axes_manager.navigation_dimension != 1:
raise ValueError(
"`factors` must have navigation dimension == 1, "
"but the navigation dimension of the given factors "
f"is {factors.axes_manager.navigation_dimension}"
)
elif factors.axes_manager.navigation_size < 2:
            raise ValueError(
                "`factors` must have a navigation size "
                "greater than one, but the navigation "
"size of the given factors "
f"is {factors.axes_manager.navigation_size}"
)
# Check mask dimensions
if mask is not None:
ref_shape, space = (
factors.axes_manager.signal_shape,
"navigation" if on_loadings else "signal",
)
if isinstance(mask, BaseSignal):
if mask.axes_manager.signal_shape != ref_shape:
raise ValueError(
f"`mask` shape is not equal to {space} shape. "
f"Mask shape: {mask.axes_manager.signal_shape}\t"
f"{space} shape: {ref_shape}"
)
if hasattr(mask, "compute"):
                # If the mask is lazy, we compute it, which should be fine
                # since we have already reduced the dimensionality of the data.
mask.compute()
# Note that we don't check the factor's signal dimension. This is on
        # purpose as a user may like to apply pre-treatments that change their
# dimensionality.
# The diff_axes are given for the main signal. We need to compute
# the correct diff_axes for the factors.
# Get diff_axes index in axes manager
if diff_axes is not None:
diff_axes = [
1 + axis.index_in_axes_manager
for axis in [self.axes_manager[axis] for axis in diff_axes]
]
if not on_loadings:
diff_axes = [
index - self.axes_manager.navigation_dimension
for index in diff_axes
]
# Select components to separate
if number_of_components is not None:
comp_list = range(number_of_components)
elif comp_list is not None:
number_of_components = len(comp_list)
else:
if lr.output_dimension is not None:
number_of_components = lr.output_dimension
comp_list = range(number_of_components)
else:
raise ValueError("No `number_of_components` or `comp_list` provided")
factors = stack([factors.inav[i] for i in comp_list])
# Check sklearn-like algorithms
is_sklearn_like = False
algorithms_sklearn = ["sklearn_fastica"]
if algorithm in algorithms_sklearn:
if not import_sklearn.sklearn_installed:
raise ImportError(f"algorithm='{algorithm}' requires scikit-learn")
# Set smaller convergence tolerance than sklearn default
if not kwargs.get("tol", False):
kwargs["tol"] = 1e-10
# Initialize the sklearn estimator
is_sklearn_like = True
estim = decomposition_algorithms[algorithm](**kwargs)
# Check whiten argument
if estim.whiten and whiten_method is not None:
_logger.warning(
"HyperSpy already performs its own data whitening "
f"(whiten_method='{whiten_method}'), so it is ignored "
f"for algorithm='{algorithm}'"
)
estim.whiten = False
elif hasattr(algorithm, "fit_transform") or (
hasattr(algorithm, "fit") and hasattr(algorithm, "transform")
):
# Check properties of algorithm against typical sklearn objects
# If algorithm is an object that implements the methods fit(),
# transform() and fit_transform(), then we can use it like an
# sklearn estimator. This also allows us to, for example, use
# Pipeline and GridSearchCV objects.
is_sklearn_like = True
estim = algorithm
# Initialize return_info and print_info
to_return = None
to_print = [
"Blind source separation info:",
f" number_of_components={number_of_components}",
f" algorithm={algorithm}",
f" diff_order={diff_order}",
f" reverse_component_criterion={reverse_component_criterion}",
f" whiten_method={whiten_method}",
]
# Apply differences pre-processing if requested.
if diff_order > 0:
factors = _get_derivative(
factors, diff_axes=diff_axes, diff_order=diff_order
)
if mask is not None:
# The following is a little trick to dilate the mask as
# required when operation on the differences. It exploits the
                # fact that np.diff automatically "dilates" nans. The trick has
                # a memory penalty which should be low compared to the total
# memory required for the core application in most cases.
mask_diff_axes = (
[iaxis - 1 for iaxis in diff_axes]
if diff_axes is not None
else None
)
mask.change_dtype("float")
mask.data[mask.data == 1] = np.nan
mask = _get_derivative(
mask, diff_axes=mask_diff_axes, diff_order=diff_order
)
mask.data[np.isnan(mask.data)] = 1
mask.change_dtype("bool")
# Unfold in case the signal_dimension > 1
factors.unfold()
if mask is not None:
mask.unfold()
factors = factors.data.T[np.where(~mask.data)]
else:
factors = factors.data.T
# Center and whiten the data via PCA or ZCA methods
if whiten_method is not None:
_logger.info(f"Whitening the data with method '{whiten_method}'")
factors, invsqcovmat = whiten_data(
factors, centre=True, method=whiten_method
)
# Perform BSS
if algorithm == "orthomax":
_, unmixing_matrix = orthomax(factors, **kwargs)
lr.bss_node = None
elif algorithm in ["FastICA", "JADE", "CuBICA", "TDSEP"]:
if not mdp_installed:
raise ImportError(f"algorithm='{algorithm}' requires MDP toolbox")
temp_function = getattr(mdp.nodes, algorithm + "Node")
lr.bss_node = temp_function(**kwargs)
lr.bss_node.train(factors)
unmixing_matrix = lr.bss_node.get_recmatrix()
to_print.extend(["mdp estimator:", lr.bss_node])
if return_info:
to_return = lr.bss_node
elif is_sklearn_like:
if hasattr(estim, "fit_transform"):
_ = estim.fit_transform(factors)
elif hasattr(estim, "fit") and hasattr(estim, "transform"):
estim.fit(factors)
# Handle sklearn.pipeline.Pipeline objects
# by taking the last step
if hasattr(estim, "steps"):
estim_ = estim[-1]
# Handle GridSearchCV and related objects
# by taking the best estimator
elif hasattr(estim, "best_estimator_"):
estim_ = estim.best_estimator_
# Handle the "usual case"
else:
estim_ = estim
            # We need the components_ attribute to set the unmixing matrix
if hasattr(estim_, "components_"):
unmixing_matrix = estim_.components_
elif hasattr(estim_, "unmixing_matrix_"):
# unmixing_matrix_ was renamed to components_ for FastICA
# https://github.com/scikit-learn/scikit-learn/pull/858,
                # so this branch is kept for legacy support only
unmixing_matrix = estim_.unmixing_matrix_
else:
raise AttributeError(
f"Fitted estimator {str(estim_)} has no attribute 'components_'"
)
to_print.extend(["scikit-learn estimator:", estim])
if return_info:
to_return = estim
# Store the BSS node
lr.bss_node = estim
else:
raise ValueError("'algorithm' not recognised")
# Apply the whitening matrix to get the full unmixing matrix
if whiten_method is not None:
w = unmixing_matrix @ invsqcovmat
else:
w = unmixing_matrix
if lr.explained_variance is not None:
if hasattr(lr.explained_variance, "compute"):
lr.explained_variance = lr.explained_variance.compute()
            # The output of ICA is not sorted in any way, which makes it
# difficult to compare results from different unmixings. The
# following code is an experimental attempt to sort them in a
# more predictable way
sorting_indices = np.argsort(
lr.explained_variance[:number_of_components] @ np.abs(w.T)
)[::-1]
w[:] = w[sorting_indices, :]
lr.unmixing_matrix = w
lr.on_loadings = on_loadings
self._unmix_components()
self._auto_reverse_bss_component(reverse_component_criterion)
lr.bss_algorithm = algorithm
lr.bss_node = str(lr.bss_node)
# Print details about the BSS we just performed
if print_info:
print("\n".join([str(pr) for pr in to_print]))
return to_return
def normalize_decomposition_components(self, target="factors", function=np.sum):
"""Normalize decomposition components.
Parameters
----------
target : {"factors", "loadings"}
Normalize components based on the scale of either the factors or loadings.
function : numpy universal function, default np.sum
Each target component is divided by the output of ``function(target)``.
The function must return a scalar when operating on numpy arrays and
must have an `axis` argument.
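        Examples
        --------
        An illustrative call, normalising each factor by its maximum value
        rather than by the default sum:
        >>> s.decomposition()
        >>> s.normalize_decomposition_components(target="factors",
        ...                                      function=np.max)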
"""
if target == "factors":
target = self.learning_results.factors
other = self.learning_results.loadings
elif target == "loadings":
target = self.learning_results.loadings
other = self.learning_results.factors
else:
raise ValueError('target must be "factors" or "loadings"')
if target is None:
raise ValueError("This method can only be called after s.decomposition()")
_normalize_components(target=target, other=other, function=function)
def normalize_bss_components(self, target="factors", function=np.sum):
"""Normalize BSS components.
Parameters
----------
target : {"factors", "loadings"}
Normalize components based on the scale of either the factors or loadings.
function : numpy universal function, default np.sum
Each target component is divided by the output of ``function(target)``.
The function must return a scalar when operating on numpy arrays and
must have an `axis` argument.
"""
if target == "factors":
target = self.learning_results.bss_factors
other = self.learning_results.bss_loadings
elif target == "loadings":
target = self.learning_results.bss_loadings
other = self.learning_results.bss_factors
else:
raise ValueError('target must be "factors" or "loadings"')
if target is None:
raise ValueError(
"This method can only be called after s.blind_source_separation()"
)
_normalize_components(target=target, other=other, function=function)
def reverse_decomposition_component(self, component_number):
"""Reverse the decomposition component.
Parameters
----------
component_number : list or int
component index/es
Examples
--------
>>> s = hs.load('some_file')
>>> s.decomposition(True) # perform PCA
        >>> s.reverse_decomposition_component(1) # reverse component 1
        >>> s.reverse_decomposition_component((0, 2)) # reverse components 0 and 2
"""
if hasattr(self.learning_results.factors, "compute"):
_logger.warning(
f"Component(s) {component_number} not reversed, "
"feature not implemented for lazy computations"
)
else:
target = self.learning_results
for i in [component_number]:
_logger.info(f"Component {i} reversed")
target.factors[:, i] *= -1
target.loadings[:, i] *= -1
def reverse_bss_component(self, component_number):
"""Reverse the independent component.
Parameters
----------
component_number : list or int
component index/es
Examples
--------
>>> s = hs.load('some_file')
>>> s.decomposition(True) # perform PCA
>>> s.blind_source_separation(3) # perform ICA on 3 PCs
>>> s.reverse_bss_component(1) # reverse IC 1
>>> s.reverse_bss_component((0, 2)) # reverse ICs 0 and 2
"""
if hasattr(self.learning_results.bss_factors, "compute"):
_logger.warning(
f"Component(s) {component_number} not reversed, "
"feature not implemented for lazy computations"
)
else:
target = self.learning_results
for i in [component_number]:
_logger.info(f"Component {i} reversed")
target.bss_factors[:, i] *= -1
target.bss_loadings[:, i] *= -1
target.unmixing_matrix[i, :] *= -1
def _unmix_components(self, compute=False):
lr = self.learning_results
w = lr.unmixing_matrix
n = len(w)
try:
w_inv = np.linalg.inv(w)
except np.linalg.LinAlgError as e:
if "Singular matrix" in str(e):
warnings.warn(
"Cannot invert unmixing matrix as it is singular. "
"Will attempt to use np.linalg.pinv instead.",
UserWarning,
)
w_inv = np.linalg.pinv(w)
else:
raise
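        # Apply the unmixing matrix: the mixed components are multiplied by
        # w.T and the complementary set by the (pseudo-)inverse of w, so the
        # product factors @ loadings.T is left unchanged by the unmixing.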
if lr.on_loadings:
lr.bss_loadings = lr.loadings[:, :n] @ w.T
lr.bss_factors = lr.factors[:, :n] @ w_inv
else:
lr.bss_factors = lr.factors[:, :n] @ w.T
lr.bss_loadings = lr.loadings[:, :n] @ w_inv
if compute:
lr.bss_factors = lr.bss_factors.compute()
lr.bss_loadings = lr.bss_loadings.compute()
def _auto_reverse_bss_component(self, reverse_component_criterion):
n_components = self.learning_results.bss_factors.shape[1]
for i in range(n_components):
if reverse_component_criterion == "factors":
values = self.learning_results.bss_factors
elif reverse_component_criterion == "loadings":
values = self.learning_results.bss_loadings
else:
                raise ValueError(
                    "`reverse_component_criterion` can only take "
                    "`factors` or `loadings` as a parameter."
)
minimum = np.nanmin(values[:, i])
maximum = np.nanmax(values[:, i])
if minimum < 0 and -minimum > maximum:
self.reverse_bss_component(i)
_logger.info(
f"Independent component {i} reversed based "
f"on the {reverse_component_criterion}"
)
def _calculate_recmatrix(self, components=None, mva_type="decomposition"):
"""Rebuilds data from selected components.
Parameters
----------
components : None, int, or list of ints
* If None, rebuilds signal instance from all components
* If int, rebuilds signal instance from components in range 0-given int
* If list of ints, rebuilds signal instance from only components in given list
mva_type : str {'decomposition', 'bss'}
Decomposition type (not case sensitive)
Returns
-------
Signal instance
Data built from the given components.
"""
target = self.learning_results
if mva_type.lower() == "decomposition":
factors = target.factors
loadings = target.loadings.T
elif mva_type.lower() == "bss":
factors = target.bss_factors
loadings = target.bss_loadings.T
if components is None:
a = factors @ loadings
signal_name = f"model from {mva_type} with {factors.shape[1]} components"
elif hasattr(components, "__iter__"):
tfactors = np.zeros((factors.shape[0], len(components)))
tloadings = np.zeros((len(components), loadings.shape[1]))
for i in range(len(components)):
tfactors[:, i] = factors[:, components[i]]
tloadings[i, :] = loadings[components[i], :]
a = tfactors @ tloadings
signal_name = f"model from {mva_type} with components {components}"
else:
a = factors[:, :components] @ loadings[:components, :]
signal_name = f"model from {mva_type} with {components} components"
self._unfolded4decomposition = self.unfold()
try:
sc = self.deepcopy()
sc.data = a.T.reshape(self.data.shape)
sc.metadata.General.title += " " + signal_name
if target.mean is not None:
sc.data += target.mean
finally:
if self._unfolded4decomposition:
self.fold()
sc.fold()
self._unfolded4decomposition = False
return sc
def get_decomposition_model(self, components=None):
"""Generate model with the selected number of principal components.
Parameters
----------
components : {None, int, list of ints}, default None
* If None, rebuilds signal instance from all components
* If int, rebuilds signal instance from components in range 0-given int
* If list of ints, rebuilds signal instance from only components in given list
Returns
-------
Signal instance
A model built from the given components.
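        Examples
        --------
        An illustrative use, rebuilding the signal from the first three
        components and inspecting the residual:
        >>> s.decomposition()
        >>> sc = s.get_decomposition_model(3)
        >>> (s - sc).plot()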
"""
rec = self._calculate_recmatrix(components=components, mva_type="decomposition")
return rec
def get_bss_model(self, components=None, chunks="auto"):
"""Generate model with the selected number of independent components.
Parameters
----------
components : {None, int, list of ints}, default None
* If None, rebuilds signal instance from all components
* If int, rebuilds signal instance from components in range 0-given int
* If list of ints, rebuilds signal instance from only components in given list
Returns
-------
Signal instance
A model built from the given components.
"""
lr = self.learning_results
if self._lazy:
            if isinstance(lr.bss_factors, np.ndarray):
                lr.factors = da.from_array(lr.bss_factors, chunks=chunks)
            if isinstance(lr.bss_loadings, np.ndarray):
                lr.loadings = da.from_array(lr.bss_loadings, chunks=chunks)
rec = self._calculate_recmatrix(components=components, mva_type="bss")
return rec
def get_explained_variance_ratio(self):
"""Return explained variance ratio of the PCA components as a Signal1D.
Read more in the :ref:`User Guide <mva.scree_plot>`.
Returns
-------
s : Signal1D
Explained variance ratio.
See Also
--------
* :py:meth:`~.learn.mva.MVA.decomposition`
* :py:meth:`~.learn.mva.MVA.plot_explained_variance_ratio`
* :py:meth:`~.learn.mva.MVA.get_decomposition_loadings`
* :py:meth:`~.learn.mva.MVA.get_decomposition_factors`
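        Examples
        --------
        A short illustrative example:
        >>> s.decomposition()
        >>> s.get_explained_variance_ratio().plot()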
"""
from hyperspy._signals.signal1d import Signal1D
target = self.learning_results
if target.explained_variance_ratio is None:
raise AttributeError(
"The explained_variance_ratio attribute is "
"`None`, did you forget to perform a PCA "
"decomposition?"
)
s = Signal1D(target.explained_variance_ratio)
s.metadata.General.title = self.metadata.General.title + "\nPCA Scree Plot"
s.axes_manager[-1].name = "Principal component index"
s.axes_manager[-1].units = ""
return s
def plot_explained_variance_ratio(
self,
n=30,
log=True,
threshold=0,
hline="auto",
vline=False,
xaxis_type="index",
xaxis_labeling=None,
signal_fmt=None,
noise_fmt=None,
fig=None,
ax=None,
**kwargs,
):
"""Plot the decomposition explained variance ratio vs index number.
This is commonly known as a scree plot.
Read more in the :ref:`User Guide <mva.scree_plot>`.
Parameters
----------
n : int or None
            Number of components to plot. If None, all components will be plotted.
log : bool, default True
If True, the y axis uses a log scale.
threshold : float or int
Threshold used to determine how many components should be
highlighted as signal (as opposed to noise).
If a float (between 0 and 1), ``threshold`` will be
interpreted as a cutoff value, defining the variance at which to
draw a line showing the cutoff between signal and noise;
the number of signal components will be automatically determined
by the cutoff value.
If an int, ``threshold`` is interpreted as the number of
components to highlight as signal (and no cutoff line will be
drawn)
hline: {'auto', True, False}
Whether or not to draw a horizontal line illustrating the variance
cutoff for signal/noise determination. Default is to draw the line
at the value given in ``threshold`` (if it is a float) and not
draw in the case ``threshold`` is an int, or not given.
            If True (and ``threshold`` is an int), the line will be drawn
through the last component defined as signal.
If False, the line will not be drawn in any circumstance.
vline: bool, default False
Whether or not to draw a vertical line illustrating an estimate of
the number of significant components. If True, the line will be
            drawn at the knee or elbow position of the curve, indicating the
number of significant components.
If False, the line will not be drawn in any circumstance.
xaxis_type : {'index', 'number'}
Determines the type of labeling applied to the x-axis.
If ``'index'``, axis will be labeled starting at 0 (i.e.
"pythonic index" labeling); if ``'number'``, it will start at 1
(number labeling).
xaxis_labeling : {'ordinal', 'cardinal', None}
Determines the format of the x-axis tick labels. If ``'ordinal'``,
"1st, 2nd, ..." will be used; if ``'cardinal'``, "1, 2,
..." will be used. If None, an appropriate default will be
selected.
signal_fmt : dict
Dictionary of matplotlib formatting values for the signal
components
noise_fmt : dict
Dictionary of matplotlib formatting values for the noise
components
fig : matplotlib figure or None
If None, a default figure will be created, otherwise will plot
into fig
ax : matplotlib ax (subplot) or None
If None, a default ax will be created, otherwise will plot into ax
**kwargs
remaining keyword arguments are passed to ``matplotlib.figure()``
Returns
-------
ax : matplotlib.axes
Axes object containing the scree plot
Example
-------
To generate a scree plot with customized symbols for signal vs.
noise components and a modified cutoff threshold value:
>>> s = hs.load("some_spectrum_image")
>>> s.decomposition()
>>> s.plot_explained_variance_ratio(n=40,
>>> threshold=0.005,
>>> signal_fmt={'marker': 'v',
>>> 's': 150,
>>> 'c': 'pink'}
>>> noise_fmt={'marker': '*',
>>> 's': 200,
>>> 'c': 'green'})
See Also
--------
* :py:meth:`~.learn.mva.MVA.decomposition`
* :py:meth:`~.learn.mva.MVA.get_explained_variance_ratio`
* :py:meth:`~.signal.MVATools.get_decomposition_loadings`
* :py:meth:`~.signal.MVATools.get_decomposition_factors`
"""
s = self.get_explained_variance_ratio()
n_max = len(self.learning_results.explained_variance_ratio)
if n is None:
n = n_max
elif n > n_max:
_logger.info("n is too large, setting n to its maximal value.")
n = n_max
# Determine right number of components for signal and cutoff value
if isinstance(threshold, float):
if not 0 < threshold < 1:
                raise ValueError("Variance threshold should be between 0 and 1")
# Catch if the threshold is less than the minimum variance value:
if threshold < s.data.min():
n_signal_pcs = n
else:
n_signal_pcs = np.where((s < threshold).data)[0][0]
else:
n_signal_pcs = threshold
if n_signal_pcs == 0:
hline = False
if vline:
if self.learning_results.number_significant_components is None:
vline = False
else:
index_number_significant_components = (
self.learning_results.number_significant_components - 1
)
else:
vline = False
# Handling hline logic
if hline == "auto":
# Set cutoff to threshold if float
if isinstance(threshold, float):
cutoff = threshold
# Turn off the hline otherwise
else:
hline = False
# If hline is True and threshold is int, set cutoff at value of last
# signal component
elif hline:
if isinstance(threshold, float):
cutoff = threshold
elif n_signal_pcs > 0:
cutoff = s.data[n_signal_pcs - 1]
# Catches hline==False and hline==True (if threshold not given)
else:
hline = False
# Some default formatting for signal markers
if signal_fmt is None:
signal_fmt = {
"c": "#C24D52",
"linestyle": "",
"marker": "^",
"markersize": 10,
"zorder": 3,
}
# Some default formatting for noise markers
if noise_fmt is None:
noise_fmt = {
"c": "#4A70B0",
"linestyle": "",
"marker": "o",
"markersize": 10,
"zorder": 3,
}
# Sane defaults for xaxis labeling
if xaxis_labeling is None:
xaxis_labeling = "cardinal" if xaxis_type == "index" else "ordinal"
axes_titles = {
"y": "Proportion of variance",
"x": f"Principal component {xaxis_type}",
}
if n < s.axes_manager[-1].size:
s = s.isig[:n]
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
if log:
ax.set_yscale("log")
if hline:
ax.axhline(cutoff, linewidth=2, color="gray", linestyle="dashed", zorder=1)
if vline:
ax.axvline(
index_number_significant_components,
linewidth=2,
color="gray",
linestyle="dashed",
zorder=1,
)
index_offset = 0
if xaxis_type == "number":
index_offset = 1
if n_signal_pcs == n:
ax.plot(
range(index_offset, index_offset + n), s.isig[:n].data, **signal_fmt
)
elif n_signal_pcs > 0:
ax.plot(
range(index_offset, index_offset + n_signal_pcs),
s.isig[:n_signal_pcs].data,
**signal_fmt,
)
ax.plot(
range(index_offset + n_signal_pcs, index_offset + n),
s.isig[n_signal_pcs:n].data,
**noise_fmt,
)
else:
ax.plot(range(index_offset, index_offset + n), s.isig[:n].data, **noise_fmt)
if xaxis_labeling == "cardinal":
ax.xaxis.set_major_formatter(FuncFormatter(lambda x, p: ordinal(x)))
ax.set_ylabel(axes_titles["y"])
ax.set_xlabel(axes_titles["x"])
ax.xaxis.set_major_locator(MaxNLocator(integer=True, min_n_ticks=1))
ax.margins(0.05)
ax.autoscale()
ax.set_title(s.metadata.General.title, y=1.01)
return ax
def plot_cumulative_explained_variance_ratio(self, n=50):
"""Plot cumulative explained variance up to n principal components.
Parameters
----------
n : int
Number of principal components to show.
Returns
-------
ax : matplotlib.axes
Axes object containing the cumulative explained variance plot.
See Also
--------
:py:meth:`~.learn.mva.MVA.plot_explained_variance_ratio`,
"""
target = self.learning_results
if n > target.explained_variance.shape[0]:
n = target.explained_variance.shape[0]
cumu = np.cumsum(target.explained_variance) / np.sum(target.explained_variance)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(n), cumu[:n])
ax.set_xlabel("Principal component")
ax.set_ylabel("Cumulative explained variance ratio")
plt.draw()
return ax
def normalize_poissonian_noise(self, navigation_mask=None, signal_mask=None):
"""Normalize the signal under the assumption of Poisson noise.
        Scales the signal so as to "normalize" the Poisson data for
subsequent decomposition analysis [Keenan2004]_.
Parameters
----------
navigation_mask : {None, boolean numpy array}, default None
Optional mask applied in the navigation axis.
signal_mask : {None, boolean numpy array}, default None
Optional mask applied in the signal axis.
"""
_logger.info("Scaling the data to normalize Poissonian noise")
with self.unfolded():
# The rest of the code assumes that the first data axis
# is the navigation axis. We transpose the data if that
# is not the case.
if self.axes_manager[0].index_in_array == 0:
dc = self.data
else:
dc = self.data.T
if navigation_mask is None:
navigation_mask = slice(None)
else:
navigation_mask = ~navigation_mask.ravel()
if signal_mask is None:
signal_mask = slice(None)
else:
signal_mask = ~signal_mask
# Check non-negative
if dc[:, signal_mask][navigation_mask, :].min() < 0.0:
raise ValueError(
"Negative values found in data!\n"
"Are you sure that the data follow a Poisson distribution?"
)
# Rescale the data to normalize the Poisson noise
aG = dc[:, signal_mask][navigation_mask, :].sum(1).squeeze()
bH = dc[:, signal_mask][navigation_mask, :].sum(0).squeeze()
self._root_aG = np.sqrt(aG)[:, np.newaxis]
self._root_bH = np.sqrt(bH)[np.newaxis, :]
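            # aG and bH hold the total counts per pixel and per channel; each
            # datum is divided by sqrt(a_i * b_j), which approximately
            # equalises the Poisson noise before decomposition (see [Keenan2004]_).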
# We ignore numpy's warning when the result of an
# operation produces nans - instead we set 0/0 = 0
with np.errstate(divide="ignore", invalid="ignore"):
dc[:, signal_mask][navigation_mask, :] /= self._root_aG * self._root_bH
dc[:, signal_mask][navigation_mask, :] = np.nan_to_num(
dc[:, signal_mask][navigation_mask, :]
)
def undo_treatments(self):
"""Undo Poisson noise normalization and other pre-treatments.
Only valid if calling ``s.decomposition(..., copy=True)``.
"""
if hasattr(self, "_data_before_treatments"):
_logger.info("Undoing data pre-treatments")
self.data[:] = self._data_before_treatments
del self._data_before_treatments
else:
            raise AttributeError(
                "Unable to undo data pre-treatments! Be sure to "
                "set `copy=True` when calling s.decomposition()."
)
def estimate_elbow_position(self, explained_variance_ratio=None, max_points=20):
"""Estimate the elbow position of a scree plot curve.
Used to estimate the number of significant components in
a PCA variance ratio plot or other "elbow" type curves.
Find a line between first and last point on the scree plot.
With a classic elbow scree plot, this line more or less
defines a triangle. The elbow should be the point which
is the furthest distance from this line. For more details,
see [Satopää2011]_.
Parameters
----------
explained_variance_ratio : {None, numpy array}
Explained variance ratio values that form the scree plot.
If None, uses the ``explained_variance_ratio`` array stored
in ``s.learning_results``, so a decomposition must have
been performed first.
max_points : int
Maximum number of points to consider in the calculation.
Returns
-------
elbow position : int
Index of the elbow position in the input array. Due to
zero-based indexing, the number of significant components
is `elbow_position + 1`.
References
----------
.. [Satopää2011] V. Satopää, J. Albrecht, D. Irwin, and B. Raghavan.
"Finding a “Kneedle” in a Haystack: Detecting Knee Points in
           System Behavior," 31st International Conference on Distributed
Computing Systems Workshops, pp. 166-171, June 2011.
See Also
--------
* :py:meth:`~.learn.mva.MVA.get_explained_variance_ratio`,
* :py:meth:`~.learn.mva.MVA.plot_explained_variance_ratio`,
"""
if explained_variance_ratio is None:
if self.learning_results.explained_variance_ratio is None:
raise ValueError(
"A decomposition must be performed before calling "
"estimate_elbow_position(), or pass a numpy array directly."
)
curve_values = self.learning_results.explained_variance_ratio
else:
curve_values = explained_variance_ratio
max_points = min(max_points, len(curve_values) - 1)
# Clipping the curve_values from below with a v.small
# number avoids warnings below when taking np.log(0)
curve_values_adj = np.clip(curve_values, 1e-30, None)
x1 = 0
x2 = max_points
y1 = np.log(curve_values_adj[0])
y2 = np.log(curve_values_adj[max_points])
xs = np.arange(max_points)
ys = np.log(curve_values_adj[:max_points])
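        # Distance from each point (x, y) on the log-curve to the straight
        # line through (x1, y1) and (x2, y2):
        # |(x2 - x1)(y1 - y) - (x1 - x)(y2 - y1)| / sqrt((x2 - x1)^2 + (y2 - y1)^2)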
numer = np.abs((x2 - x1) * (y1 - ys) - (x1 - xs) * (y2 - y1))
denom = np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
distance = np.nan_to_num(numer / denom)
# Point with the largest distance is the "elbow"
# (remember that np.argmax returns the FIRST instance)
elbow_position = np.argmax(distance)
return elbow_position
class LearningResults(object):
"""Stores the parameters and results from a decomposition."""
# Decomposition
factors = None
loadings = None
explained_variance = None
explained_variance_ratio = None
number_significant_components = None
decomposition_algorithm = None
poissonian_noise_normalized = None
output_dimension = None
mean = None
centre = None
# Unmixing
bss_algorithm = None
unmixing_matrix = None
bss_factors = None
bss_loadings = None
# Shape
unfolded = None
original_shape = None
# Masks
navigation_mask = None
signal_mask = None
def save(self, filename, overwrite=None):
"""Save the result of the decomposition and demixing analysis.
Parameters
----------
filename : string
Path to save the results to.
overwrite : {True, False, None}, default None
If True, overwrite the file if it exists.
If None (default), prompt user if file exists.
"""
kwargs = {}
for attribute in [
v
for v in dir(self)
if not isinstance(getattr(self, v), types.MethodType)
and not v.startswith("_")
]:
kwargs[attribute] = self.__getattribute__(attribute)
# Check overwrite
if overwrite is None:
overwrite = io_tools.overwrite(filename)
# Save, if all went well!
if overwrite:
np.savez(filename, **kwargs)
_logger.info(f"Saved results to {filename}")
def load(self, filename):
"""Load the results of a previous decomposition and demixing analysis.
Parameters
----------
filename : string
Path to load the results from.
"""
decomposition = np.load(filename, allow_pickle=True)
for key, value in decomposition.items():
if value.dtype == np.dtype("object"):
value = None
# Unwrap values stored as 0D numpy arrays to raw datatypes
if isinstance(value, np.ndarray) and value.ndim == 0:
value = value.item()
setattr(self, key, value)
_logger.info(f"Loaded results from {filename}")
# For compatibility with old version
if hasattr(self, "algorithm"):
self.decomposition_algorithm = self.algorithm
del self.algorithm
if hasattr(self, "V"):
self.explained_variance = self.V
del self.V
if hasattr(self, "w"):
self.unmixing_matrix = self.w
del self.w
if hasattr(self, "variance2one"):
del self.variance2one
if hasattr(self, "centered"):
del self.centered
if hasattr(self, "pca_algorithm"):
self.decomposition_algorithm = self.pca_algorithm
del self.pca_algorithm
if hasattr(self, "ica_algorithm"):
self.bss_algorithm = self.ica_algorithm
del self.ica_algorithm
if hasattr(self, "v"):
self.loadings = self.v
del self.v
if hasattr(self, "scores"):
self.loadings = self.scores
del self.scores
if hasattr(self, "pc"):
self.loadings = self.pc
del self.pc
if hasattr(self, "ica_scores"):
self.bss_loadings = self.ica_scores
del self.ica_scores
if hasattr(self, "ica_factors"):
self.bss_factors = self.ica_factors
del self.ica_factors
# Log summary
self.summary()
def __repr__(self):
"""Summarize the decomposition and demixing parameters."""
return self.summary()
def summary(self):
"""Summarize the decomposition and demixing parameters.
Returns
-------
str
String summarizing the learning parameters.
"""
summary_str = (
"Decomposition parameters\n"
"------------------------\n"
f"normalize_poissonian_noise={self.poissonian_noise_normalized}\n"
f"algorithm={self.decomposition_algorithm}\n"
f"output_dimension={self.output_dimension}\n"
f"centre={self.centre}"
)
if self.bss_algorithm is not None:
summary_str += (
"\n\nDemixing parameters\n"
"-------------------\n"
f"algorithm={self.bss_algorithm}\n"
f"n_components={len(self.unmixing_matrix)}"
)
_logger.info(summary_str)
return summary_str
def crop_decomposition_dimension(self, n, compute=False):
"""Crop the score matrix up to the given number.
        It is mainly useful to save memory and reduce the storage size.
Parameters
----------
n : int
Number of components to keep.
compute : bool, default False
If True and the decomposition results are lazy,
also compute the results.
"""
_logger.info(f"Trimming results to {n} dimensions")
self.loadings = self.loadings[:, :n]
if self.explained_variance is not None:
self.explained_variance = self.explained_variance[:n]
self.factors = self.factors[:, :n]
if compute:
self.loadings = self.loadings.compute()
self.factors = self.factors.compute()
if self.explained_variance is not None:
self.explained_variance = self.explained_variance.compute()
def _transpose_results(self):
(self.factors, self.loadings, self.bss_factors, self.bss_loadings) = (
self.loadings,
self.factors,
self.bss_loadings,
self.bss_factors,
)
|
pcchenxi/baseline
|
refs/heads/master
|
baselines/common/cg.py
|
10
|
import numpy as np
def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
    """
    Conjugate gradient solver for A x = b, where the (symmetric positive
    definite) operator A is applied only through the callback f_Ax(p) = A p.
    See Demmel p 312.
    """
p = b.copy()
r = b.copy()
x = np.zeros_like(b)
rdotr = r.dot(r)
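    # With x initialised to zero the residual r = b - A x equals b; p is the
    # current search direction and rdotr caches r.r for the update ratios below.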
fmtstr = "%10i %10.3g %10.3g"
titlestr = "%10s %10s %10s"
if verbose: print(titlestr % ("iter", "residual norm", "soln norm"))
for i in range(cg_iters):
if callback is not None:
callback(x)
if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x)))
z = f_Ax(p)
v = rdotr / p.dot(z)
x += v*p
r -= v*z
newrdotr = r.dot(r)
mu = newrdotr/rdotr
p = r + mu*p
rdotr = newrdotr
if rdotr < residual_tol:
break
if callback is not None:
callback(x)
if verbose: print(fmtstr % (i+1, rdotr, np.linalg.norm(x))) # pylint: disable=W0631
return x
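# Illustrative usage (the names below are placeholders): for a symmetric
# positive-definite matrix A, an approximate solution of A x = b can be
# obtained without forming A explicitly, e.g. x = cg(lambda p: A.dot(p), b).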
|
ekiwi/tinyos-1.x
|
refs/heads/master
|
contrib/ucb/apps/Monstro/lib/Robot/Util.py
|
2
|
import os, re, time, Config
def findMotes() :
comList = []
moteList = os.popen("motelist").readlines()
if len(moteList) > 2 :
moteList = moteList[2:]
for moteDesc in moteList :
if Config.PLATFORM == "win32" :
results = re.search( "COM(?P<comNum>\d+)\s+Telos" , moteDesc )
if results :
comList.append( "serial@COM%s:telos" % results.group("comNum") )
elif Config.PLATFORM == "linux" :
results = re.search( "\s+(?P<dev>/dev/[^\s]+)\s+" , moteDesc )
if results :
comList.append( "serial@%s:telos" % results.group("dev") )
return comList
def getResources( dummy = False , retryPeriod = 0.5 ) :
    """Set retryPeriod = 0 to only try once"""
resources = {}
# --------------------------------------------------
# Add GPS
# --------------------------------------------------
import Gps
if Config.IS_MONSTRO :
resources.update({ "gps" : Gps.getGps( dummy = dummy ) })
else :
resources.update({ "gps" : Gps.getGps( dummy = True ) })
if not dummy :
# --------------------------------------------------
# Add roboMote
# --------------------------------------------------
import RoboMote
# find the com ports associated with motes
done = False
while not done :
moteComs = findMotes()
if (not moteComs) and (retryPeriod > 0) :
time.sleep(retryPeriod)
continue
roboMote = None
for i in range(len(moteComs)-1,-1,-1) :
moteCom = moteComs[i]
if RoboMote.isRoboMote( moteCom ) :
del moteComs[i]
roboMote = RoboMote.RoboMote( moteCom )
done = True
break
if not roboMote :
if ( retryPeriod > 0 ) :
time.sleep(retryPeriod)
continue
else :
raise RoboMote.RoboMoteException , "Could not connect to the mote providing RoboMote"
# Add the roboMote to the resource list
resources.update({ "roboMote" : roboMote })
return resources
if __name__ == "__main__" :
print findMotes()
|
zhjunlang/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/unittest/main.py
|
84
|
"""Unittest main program"""
import sys
import argparse
import os
from . import loader, runner
from .signals import installHandler
__unittest = True
MAIN_EXAMPLES = """\
Examples:
%(prog)s test_module - run tests from test_module
%(prog)s module.TestClass - run tests from module.TestClass
%(prog)s module.Class.test_method - run specified test method
"""
MODULE_EXAMPLES = """\
Examples:
%(prog)s - run default set of tests
%(prog)s MyTestSuite - run suite 'MyTestSuite'
%(prog)s MyTestCase.testSomething - run MyTestCase.testSomething
%(prog)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
def _convert_name(name):
# on Linux / Mac OS X 'foo.PY' is not importable, but on
# Windows it is. Simpler to do a case insensitive match
# a better check would be to check that the name is a
# valid Python module name.
if os.path.isfile(name) and name.lower().endswith('.py'):
if os.path.isabs(name):
rel_path = os.path.relpath(name, os.getcwd())
if os.path.isabs(rel_path) or rel_path.startswith(os.pardir):
return name
name = rel_path
# on Windows both '\' and '/' are used as path
# separators. Better to replace both than rely on os.path.sep
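        # e.g. 'pkg/tests/test_foo.py' -> 'pkg.tests.test_foo'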
return name[:-3].replace('\\', '.').replace('/', '.')
return name
def _convert_names(names):
return [_convert_name(name) for name in names]
class TestProgram(object):
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
# defaults for testing
module=None
verbosity = 1
failfast = catchbreak = buffer = progName = warnings = None
_discovery_parser = None
def __init__(self, module='__main__', defaultTest=None, argv=None,
testRunner=None, testLoader=loader.defaultTestLoader,
exit=True, verbosity=1, failfast=None, catchbreak=None,
buffer=None, warnings=None):
if isinstance(module, str):
self.module = __import__(module)
for part in module.split('.')[1:]:
self.module = getattr(self.module, part)
else:
self.module = module
if argv is None:
argv = sys.argv
self.exit = exit
self.failfast = failfast
self.catchbreak = catchbreak
self.verbosity = verbosity
self.buffer = buffer
if warnings is None and not sys.warnoptions:
            # even if DeprecationWarnings are ignored by default
# print them anyway unless other warnings settings are
# specified by the warnings arg or the -W python flag
self.warnings = 'default'
else:
# here self.warnings is set either to the value passed
# to the warnings args or to None.
# If the user didn't pass a value self.warnings will
# be None. This means that the behavior is unchanged
# and depends on the values passed to -W.
self.warnings = warnings
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.parseArgs(argv)
self.runTests()
def usageExit(self, msg=None):
if msg:
print(msg)
if self._discovery_parser is None:
self._initArgParsers()
self._print_help()
sys.exit(2)
def _print_help(self, *args, **kwargs):
if self.module is None:
print(self._main_parser.format_help())
print(MAIN_EXAMPLES % {'prog': self.progName})
self._discovery_parser.print_help()
else:
print(self._main_parser.format_help())
print(MODULE_EXAMPLES % {'prog': self.progName})
def parseArgs(self, argv):
self._initArgParsers()
if self.module is None:
if len(argv) > 1 and argv[1].lower() == 'discover':
self._do_discovery(argv[2:])
return
self._main_parser.parse_args(argv[1:], self)
if not self.tests:
# this allows "python -m unittest -v" to still work for
# test discovery.
self._do_discovery([])
return
else:
self._main_parser.parse_args(argv[1:], self)
if self.tests:
self.testNames = _convert_names(self.tests)
if __name__ == '__main__':
# to support python -m unittest ...
self.module = None
elif self.defaultTest is None:
# createTests will load tests from self.module
self.testNames = None
elif isinstance(self.defaultTest, str):
self.testNames = (self.defaultTest,)
else:
self.testNames = list(self.defaultTest)
self.createTests()
def createTests(self):
if self.testNames is None:
self.test = self.testLoader.loadTestsFromModule(self.module)
else:
self.test = self.testLoader.loadTestsFromNames(self.testNames,
self.module)
def _initArgParsers(self):
parent_parser = self._getParentArgParser()
self._main_parser = self._getMainArgParser(parent_parser)
self._discovery_parser = self._getDiscoveryArgParser(parent_parser)
def _getParentArgParser(self):
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-v', '--verbose', dest='verbosity',
action='store_const', const=2,
help='Verbose output')
parser.add_argument('-q', '--quiet', dest='verbosity',
action='store_const', const=0,
help='Quiet output')
if self.failfast is None:
parser.add_argument('-f', '--failfast', dest='failfast',
action='store_true',
help='Stop on first fail or error')
self.failfast = False
if self.catchbreak is None:
parser.add_argument('-c', '--catch', dest='catchbreak',
action='store_true',
help='Catch ctrl-C and display results so far')
self.catchbreak = False
if self.buffer is None:
parser.add_argument('-b', '--buffer', dest='buffer',
action='store_true',
help='Buffer stdout and stderr during tests')
self.buffer = False
return parser
def _getMainArgParser(self, parent):
parser = argparse.ArgumentParser(parents=[parent])
parser.prog = self.progName
parser.print_help = self._print_help
parser.add_argument('tests', nargs='*',
help='a list of any number of test modules, '
'classes and test methods.')
return parser
def _getDiscoveryArgParser(self, parent):
parser = argparse.ArgumentParser(parents=[parent])
parser.prog = '%s discover' % self.progName
parser.epilog = ('For test discovery all test modules must be '
'importable from the top level directory of the '
'project.')
parser.add_argument('-s', '--start-directory', dest='start',
help="Directory to start discovery ('.' default)")
parser.add_argument('-p', '--pattern', dest='pattern',
help="Pattern to match tests ('test*.py' default)")
parser.add_argument('-t', '--top-level-directory', dest='top',
help='Top level directory of project (defaults to '
'start directory)')
for arg in ('start', 'pattern', 'top'):
parser.add_argument(arg, nargs='?',
default=argparse.SUPPRESS,
help=argparse.SUPPRESS)
return parser
def _do_discovery(self, argv, Loader=None):
self.start = '.'
self.pattern = 'test*.py'
self.top = None
if argv is not None:
# handle command line args for test discovery
if self._discovery_parser is None:
# for testing
self._initArgParsers()
self._discovery_parser.parse_args(argv, self)
loader = self.testLoader if Loader is None else Loader()
self.test = loader.discover(self.start, self.pattern, self.top)
def runTests(self):
if self.catchbreak:
installHandler()
if self.testRunner is None:
self.testRunner = runner.TextTestRunner
if isinstance(self.testRunner, type):
try:
testRunner = self.testRunner(verbosity=self.verbosity,
failfast=self.failfast,
buffer=self.buffer,
warnings=self.warnings)
except TypeError:
# didn't accept the verbosity, buffer or failfast arguments
testRunner = self.testRunner()
else:
# it is assumed to be a TestRunner instance
testRunner = self.testRunner
self.result = testRunner.run(self.test)
if self.exit:
sys.exit(not self.result.wasSuccessful())
main = TestProgram
|
plotly/python-api
|
refs/heads/master
|
packages/python/plotly/plotly/validators/scattergeo/hoverlabel/_bgcolorsrc.py
|
1
|
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="bgcolorsrc", parent_name="scattergeo.hoverlabel", **kwargs
):
super(BgcolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
reyha/zulip
|
refs/heads/master
|
zilencer/management/commands/create_deployment.py
|
1
|
from __future__ import absolute_import
from __future__ import print_function
from optparse import make_option
import sys
from typing import Any
from django.core.management.base import BaseCommand, CommandParser
from zerver.models import get_realm_by_string_id
from zerver.lib.create_user import random_api_key
from zerver.management.commands.create_realm import Command as CreateRealm
from zilencer.models import Deployment
class Command(BaseCommand):
help = """Create a deployment and accompanying realm."""
def add_arguments(self, parser):
# type: (CommandParser) -> None
parser.add_argument('--no-realm',
dest='no_realm',
action='store_true',
default=False,
help='Do not create a new realm; associate with '
'an existing one. In this case, only the '
'realm and URLs need to be specified.')
parser.add_argument('-a', '--api-url', dest='api', type=str)
parser.add_argument('-w', '--web-url', dest='web', type=str)
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
if None in (options["api"], options["web"], options["string_id"]):
print("\033[1;31mYou must provide a subdomain or string_id, an API URL, and a web URL.\033[0m\n", file=sys.stderr)
self.print_help("python manage.py", "create_realm")
exit(1)
if not options["no_realm"]:
CreateRealm().handle(*args, **options)
print() # Newline
realm = get_realm_by_string_id(options["string_id"])
if realm is None:
print("\033[1;31mRealm does not exist!\033[0m\n", file=sys.stderr)
exit(2)
dep = Deployment()
dep.api_key = random_api_key()
dep.save()
old_dep = realm.deployment
if old_dep is not None:
old_dep.realms.remove(realm)
old_dep.save()
dep.realms = [realm]
dep.base_api_url = options["api"]
dep.base_site_url = options["web"]
dep.save()
print("Deployment %s created." % (dep.id,))
print("DEPLOYMENT_ROLE_NAME = %s" % (dep.name,))
print("DEPLOYMENT_ROLE_KEY = %s" % (dep.api_key,))
|
BaladiDogGames/baladidoggames.github.io
|
refs/heads/master
|
mingw/bin/lib/code.py
|
256
|
"""Utilities needed to emulate Python's interactive interpreter.
"""
# Inspired by similar code by Jeff Epler and Fredrik Lundh.
import sys
import traceback
from codeop import CommandCompiler, compile_command
__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact",
"compile_command"]
def softspace(file, newvalue):
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except (AttributeError, TypeError):
# "attribute-less object" or "read-only attributes"
pass
return oldvalue
class InteractiveInterpreter:
"""Base class for InteractiveConsole.
This class deals with parsing and interpreter state (the user's
namespace); it doesn't deal with input buffering or prompting or
input file naming (the filename is always passed in explicitly).
"""
def __init__(self, locals=None):
"""Constructor.
The optional 'locals' argument specifies the dictionary in
which code will be executed; it defaults to a newly created
dictionary with key "__name__" set to "__console__" and key
"__doc__" set to None.
"""
if locals is None:
locals = {"__name__": "__console__", "__doc__": None}
self.locals = locals
self.compile = CommandCompiler()
def runsource(self, source, filename="<input>", symbol="single"):
"""Compile and run some source in the interpreter.
Arguments are as for compile_command().
        One of several things can happen:
1) The input is incorrect; compile_command() raised an
exception (SyntaxError or OverflowError). A syntax traceback
will be printed by calling the showsyntaxerror() method.
2) The input is incomplete, and more input is required;
compile_command() returned None. Nothing happens.
3) The input is complete; compile_command() returned a code
object. The code is executed by calling self.runcode() (which
also handles run-time exceptions, except for SystemExit).
The return value is True in case 2, False in the other cases (unless
an exception is raised). The return value can be used to
decide whether to use sys.ps1 or sys.ps2 to prompt the next
line.
"""
try:
code = self.compile(source, filename, symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
self.showsyntaxerror(filename)
return False
if code is None:
# Case 2
return True
# Case 3
self.runcode(code)
return False
def runcode(self, code):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to
display a traceback. All exceptions are caught except
SystemExit, which is reraised.
A note about KeyboardInterrupt: this exception may occur
elsewhere in this code, and may not always be caught. The
caller should be prepared to deal with it.
"""
try:
exec code in self.locals
except SystemExit:
raise
except:
self.showtraceback()
else:
if softspace(sys.stdout, 0):
print
def showsyntaxerror(self, filename=None):
"""Display the syntax error that just occurred.
This doesn't display a stack trace because there isn't one.
If a filename is given, it is stuffed in the exception instead
of what was there before (because Python's parser always uses
"<string>" when reading from a string).
The output is written by self.write(), below.
"""
type, value, sys.last_traceback = sys.exc_info()
sys.last_type = type
sys.last_value = value
if filename and type is SyntaxError:
# Work hard to stuff the correct filename in the exception
try:
msg, (dummy_filename, lineno, offset, line) = value
except:
# Not the format we expect; leave it alone
pass
else:
# Stuff in the right filename
value = SyntaxError(msg, (filename, lineno, offset, line))
sys.last_value = value
list = traceback.format_exception_only(type, value)
map(self.write, list)
def showtraceback(self):
"""Display the exception that just occurred.
We remove the first stack item because it is our own code.
The output is written by self.write(), below.
"""
try:
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
tblist = traceback.extract_tb(tb)
del tblist[:1]
list = traceback.format_list(tblist)
if list:
list.insert(0, "Traceback (most recent call last):\n")
list[len(list):] = traceback.format_exception_only(type, value)
finally:
tblist = tb = None
map(self.write, list)
def write(self, data):
"""Write a string.
The base implementation writes to sys.stderr; a subclass may
replace this with a different implementation.
"""
sys.stderr.write(data)
class InteractiveConsole(InteractiveInterpreter):
"""Closely emulate the behavior of the interactive Python interpreter.
This class builds on InteractiveInterpreter and adds prompting
using the familiar sys.ps1 and sys.ps2, and input buffering.
"""
def __init__(self, locals=None, filename="<console>"):
"""Constructor.
The optional locals argument will be passed to the
InteractiveInterpreter base class.
The optional filename argument should specify the (file)name
of the input stream; it will show up in tracebacks.
"""
InteractiveInterpreter.__init__(self, locals)
self.filename = filename
self.resetbuffer()
def resetbuffer(self):
"""Reset the input buffer."""
self.buffer = []
def interact(self, banner=None):
"""Closely emulate the interactive Python console.
        The optional banner argument specifies the banner to print
before the first interaction; by default it prints a banner
similar to the one printed by the real Python interpreter,
followed by the current class name in parentheses (so as not
to confuse this with the real interpreter -- since it's so
close!).
"""
try:
sys.ps1
except AttributeError:
sys.ps1 = ">>> "
try:
sys.ps2
except AttributeError:
sys.ps2 = "... "
cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
if banner is None:
self.write("Python %s on %s\n%s\n(%s)\n" %
(sys.version, sys.platform, cprt,
self.__class__.__name__))
else:
self.write("%s\n" % str(banner))
more = 0
while 1:
try:
if more:
prompt = sys.ps2
else:
prompt = sys.ps1
try:
line = self.raw_input(prompt)
# Can be None if sys.stdin was redefined
encoding = getattr(sys.stdin, "encoding", None)
if encoding and not isinstance(line, unicode):
line = line.decode(encoding)
except EOFError:
self.write("\n")
break
else:
more = self.push(line)
except KeyboardInterrupt:
self.write("\nKeyboardInterrupt\n")
self.resetbuffer()
more = 0
def push(self, line):
"""Push a line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is appended to a buffer and the
interpreter's runsource() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is 1 if more input is required, 0 if the line was dealt
with in some way (this is the same as runsource()).
"""
self.buffer.append(line)
source = "\n".join(self.buffer)
more = self.runsource(source, self.filename)
if not more:
self.resetbuffer()
return more
def raw_input(self, prompt=""):
"""Write a prompt and read a line.
The returned line does not include the trailing newline.
When the user enters the EOF key sequence, EOFError is raised.
The base implementation uses the built-in function
raw_input(); a subclass may replace this with a different
implementation.
"""
return raw_input(prompt)
def interact(banner=None, readfunc=None, local=None):
"""Closely emulate the interactive Python interpreter.
This is a backwards compatible interface to the InteractiveConsole
class. When readfunc is not specified, it attempts to import the
readline module to enable GNU readline if it is available.
Arguments (all optional, all default to None):
banner -- passed to InteractiveConsole.interact()
readfunc -- if not None, replaces InteractiveConsole.raw_input()
local -- passed to InteractiveInterpreter.__init__()
"""
console = InteractiveConsole(local)
if readfunc is not None:
console.raw_input = readfunc
else:
try:
import readline
except ImportError:
pass
console.interact(banner)
if __name__ == "__main__":
interact()
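# A minimal usage sketch: embedding an InteractiveConsole with a prepopulated
# namespace. The helper name and the sample variables are illustrative only.
def _demo_embedded_console():
    ns = {"answer": 42}
    console = InteractiveConsole(locals=ns)
    # push() feeds one logical line at a time and returns a true value while
    # the buffered statement is still incomplete.
    console.push("answer * 2")        # complete expression; evaluated and echoed
    console.push("def double(x):")    # incomplete -> buffered
    console.push("    return x * 2")
    console.push("")                  # blank line closes the block and runs it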
|
scieloorg/elixir
|
refs/heads/master
|
elixir/utils.py
|
1
|
from io import StringIO, BytesIO
from zipfile import ZipFile
import codecs
import logging
class WrapFiles(object):
def __init__(self, *args):
self.memory_zip = BytesIO()
self.thezip = ZipFile(self.memory_zip, 'a')
if len(args) > 0:
self.append(*args)
def append(self, *args):
for item in args:
            try:
                if isinstance(item, MemoryFileLike):
                    x = item
                else:
                    x = codecs.open(item, 'rb')
                name = x.name.split('/')[-1]
                self.thezip.writestr(name, x.read())
            except FileNotFoundError:
                logging.info('Unable to prepare zip file, file not found (%s)' % item)
                raise
logging.info('Zip file prepared')
return self.thezip
def read(self):
self.thezip.close()
self.memory_zip.seek(0)
return self.memory_zip.read()
class MemoryFileLike(object):
def __init__(self, file_name, content=None, encoding='utf-8'):
if not isinstance(file_name, str):
raise TypeError('file_name must be a string')
self._encoding = encoding
self._file_name = file_name
        self._content = StringIO()
        if content:
self._content.write(content.strip())
@property
def name(self):
return self._file_name
def write(self, content):
self._content.write(content.strip())
def writelines(self, *args):
for line in args:
self._content.write('\r\n%s' % str(line).strip())
def read(self):
return self._content.getvalue()
def close(self):
self._content.close()
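# A minimal usage sketch: build an in-memory file and zip it without touching
# the filesystem. The file name and content below are illustrative.
def _demo_wrap_files():
    readme = MemoryFileLike('readme.txt', 'hello world')
    package = WrapFiles(readme)       # archive is assembled in a BytesIO buffer
    return package.read()             # closes the archive and returns its bytes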
|
ttyangf/pdfium_gyp
|
refs/heads/master
|
test/home_dot_gyp/gyptest-home-includes-config-env.py
|
260
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies inclusion of $HOME/.gyp_new/include.gypi works when GYP_CONFIG_DIR
is set.
"""
import os
import TestGyp
test = TestGyp.TestGyp()
os.environ['HOME'] = os.path.abspath('home')
os.environ['GYP_CONFIG_DIR'] = os.path.join(os.path.abspath('home2'),
'.gyp_new')
test.run_gyp('all.gyp', chdir='src')
# After relocating, we should still be able to build (build file shouldn't
# contain relative reference to ~/.gyp_new/include.gypi)
test.relocate('src', 'relocate/src')
test.build('all.gyp', test.ALL, chdir='relocate/src')
test.run_built_executable('printfoo',
chdir='relocate/src',
stdout='FOO is fromhome3\n')
test.pass_test()
|
festovalros/Examine_odoo8_accounting
|
refs/heads/master
|
account/project/report/__init__.py
|
427
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import analytic_journal
import analytic_balance
import inverted_analytic_balance
import cost_ledger
import quantity_cost_ledger
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sjbog/ajenti
|
refs/heads/master
|
ajenti/utils/PrioList.py
|
17
|
# encoding: utf-8
#
# Copyright (C) 2006-2010 Dmitry Zamaruev (dmitry.zamaruev@gmail.com)
from UserList import UserList
class PrioList(UserList):
def __init__(self, max_priority=100):
super(PrioList, self).__init__()
self.prio = []
self._max = max_priority
self._def = max_priority/2
def __delitem__(self, i):
del self.data[i]
del self.prio[i]
# Prohibit following operations
__setslice__ = None
__delslice__ = None
__add__ = None
__radd__ = None
__iadd__ = None
__mul__ = None
__imul__ = None
def _prio_index(self, prio):
i = None
for p, el in enumerate(self.prio):
if prio < el:
i = p
break
if i is None:
i = len(self.prio)
return i
def _append_prio(self, item, prio):
i = self._prio_index(prio)
super(PrioList, self).insert(i, item)
self.prio.insert(i, prio)
# Access methods
def append(self, item):
if isinstance(item, tuple):
self._append_prio(item[0], item[1])
else:
self._append_prio(item, self._def)
# Prohibit following methods
insert = None
pop = None
index = None
reverse = None
sort = None
extend = None
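# A minimal usage sketch: items are kept sorted by ascending priority, and a
# bare item gets the default priority (max_priority / 2). Values are examples.
def _demo_prio_list():
    handlers = PrioList()
    handlers.append(('first', 10))    # explicit priority
    handlers.append('middle')         # default priority (50)
    handlers.append(('last', 90))
    return list(handlers)             # ['first', 'middle', 'last']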
|
Chasego/codirit
|
refs/heads/master
|
leetcode/418-Sentence-Screen-Fitting/SentenceScreenFitting_001.py
|
5
|
class Solution(object):
def wordsTyping(self, sentence, rows, cols):
"""
:type sentence: List[str]
:type rows: int
:type cols: int
:rtype: int
"""
cnt = 0
start = 0
        # Width of one full sentence pass, counting one trailing space per word.
        row_circ = len(''.join(sentence)) + len(sentence)
        # Whole sentence repetitions that fit on a row, and the leftover width.
        nrc = (cols + 1) / row_circ
        cols = (cols + 1) % row_circ
circle = []
for i in range(rows):
j = len(sentence[start])
while j < cols:
tmp = (start + 1) % len(sentence)
if not tmp:
cnt += 1
start = tmp
j += 1 + len(sentence[start])
circle.append(cnt)
if not start:
break
        ncircle = len(circle)
        # Full repetitions per row, plus complete row cycles, plus leftover rows.
        cnt = nrc * rows
        cnt += rows / ncircle * circle[-1]
        cnt += circle[rows % ncircle - 1] if rows % ncircle > 0 else 0
return cnt
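# A minimal usage sketch with the classic example: on a 2 x 8 screen the
# sentence ["hello", "world"] fits exactly once ("hello---" / "world---").
def _demo_words_typing():
    return Solution().wordsTyping(["hello", "world"], 2, 8)  # -> 1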
|
feliperfranca/django-nonrel-example
|
refs/heads/master
|
django/utils/dates.py
|
488
|
"Commonly-used date structures"
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
WEEKDAYS = {
0:_('Monday'), 1:_('Tuesday'), 2:_('Wednesday'), 3:_('Thursday'), 4:_('Friday'),
5:_('Saturday'), 6:_('Sunday')
}
WEEKDAYS_ABBR = {
0:_('Mon'), 1:_('Tue'), 2:_('Wed'), 3:_('Thu'), 4:_('Fri'),
5:_('Sat'), 6:_('Sun')
}
WEEKDAYS_REV = {
'monday':0, 'tuesday':1, 'wednesday':2, 'thursday':3, 'friday':4,
'saturday':5, 'sunday':6
}
MONTHS = {
1:_('January'), 2:_('February'), 3:_('March'), 4:_('April'), 5:_('May'), 6:_('June'),
7:_('July'), 8:_('August'), 9:_('September'), 10:_('October'), 11:_('November'),
12:_('December')
}
MONTHS_3 = {
1:_('jan'), 2:_('feb'), 3:_('mar'), 4:_('apr'), 5:_('may'), 6:_('jun'),
7:_('jul'), 8:_('aug'), 9:_('sep'), 10:_('oct'), 11:_('nov'), 12:_('dec')
}
MONTHS_3_REV = {
'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8,
'sep':9, 'oct':10, 'nov':11, 'dec':12
}
MONTHS_AP = { # month names in Associated Press style
1: pgettext_lazy('abbrev. month', 'Jan.'),
2: pgettext_lazy('abbrev. month', 'Feb.'),
3: pgettext_lazy('abbrev. month', 'March'),
4: pgettext_lazy('abbrev. month', 'April'),
5: pgettext_lazy('abbrev. month', 'May'),
6: pgettext_lazy('abbrev. month', 'June'),
7: pgettext_lazy('abbrev. month', 'July'),
8: pgettext_lazy('abbrev. month', 'Aug.'),
9: pgettext_lazy('abbrev. month', 'Sept.'),
10: pgettext_lazy('abbrev. month', 'Oct.'),
11: pgettext_lazy('abbrev. month', 'Nov.'),
12: pgettext_lazy('abbrev. month', 'Dec.')
}
MONTHS_ALT = { # required for long date representation by some locales
1: pgettext_lazy('alt. month', 'January'),
2: pgettext_lazy('alt. month', 'February'),
3: pgettext_lazy('alt. month', 'March'),
4: pgettext_lazy('alt. month', 'April'),
5: pgettext_lazy('alt. month', 'May'),
6: pgettext_lazy('alt. month', 'June'),
7: pgettext_lazy('alt. month', 'July'),
8: pgettext_lazy('alt. month', 'August'),
9: pgettext_lazy('alt. month', 'September'),
10: pgettext_lazy('alt. month', 'October'),
11: pgettext_lazy('alt. month', 'November'),
12: pgettext_lazy('alt. month', 'December')
}
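# A minimal usage sketch: the tables above are keyed by the values that
# datetime.date exposes, so lookups compose directly. The date is illustrative.
def _demo_date_names():
    import datetime
    d = datetime.date(2013, 7, 4)                    # a Thursday in July
    return WEEKDAYS[d.weekday()], MONTHS[d.month]    # lazy translation proxies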
|
yaroslavprogrammer/django
|
refs/heads/master
|
django/db/models/fields/__init__.py
|
28
|
from __future__ import unicode_literals
import copy
import datetime
import decimal
import math
import warnings
from base64 import b64decode, b64encode
from itertools import tee
from django.db import connection
from django.db.models.loading import get_model
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import curry, total_ordering
from django.utils.itercompat import is_iterator
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text, force_text, force_bytes
from django.utils.ipv6 import clean_ipv6_address
from django.utils import six
class Empty(object):
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
@total_ordering
class Field(object):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
}
    # Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date, self.unique_for_month = (unique_for_date,
unique_for_month)
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self.validators = self.default_validators + validators
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
if hasattr(self.rel, 'field') and self.rel.field is self:
obj.rel.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, we use the app cache to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
return _empty, (self.__class__,), self.__dict__
if self.model._deferred:
# Deferred model will not be found from the app cache. This could
# be fixed by reconstructing the deferred model on unpickle.
raise RuntimeError("Fields of deferred models can't be reduced")
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific DATA_TYPES dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return (connection.creation.data_types[self.get_internal_type()]
% data)
except KeyError:
return None
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, virtual_only=False):
self.set_attributes_from_name(name)
self.model = cls
if virtual_only:
cls._meta.add_virtual_field(self)
else:
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
        Used by the default implementations of ``get_db_prep_save`` and
        ``get_db_prep_lookup``.
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
def get_prep_lookup(self, lookup_type, value):
"""
Perform preliminary non-db specific lookup checks and conversions
"""
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
if lookup_type in (
'iexact', 'contains', 'icontains',
'startswith', 'istartswith', 'endswith', 'iendswith',
'month', 'day', 'week_day', 'hour', 'minute', 'second',
'isnull', 'search', 'regex', 'iregex',
):
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
try:
return int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer "
"argument")
raise TypeError("Field has invalid lookup: %s" % lookup_type)
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Returns field's value prepared for database lookup.
"""
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
prepared = True
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabeled_clone method it means the
# value will be handled later on.
if hasattr(value, 'relabeled_clone'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute',
'second', 'search', 'regex', 'iregex'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection,
prepared=prepared)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v, connection=connection,
prepared=prepared) for v in value]
elif lookup_type in ('contains', 'icontains'):
return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'iexact':
return [connection.ops.prep_for_iexact_query(value)]
elif lookup_type in ('startswith', 'istartswith'):
return ["%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type in ('endswith', 'iendswith'):
return ["%%%s" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
if isinstance(self, DateTimeField):
return connection.ops.year_lookup_bounds_for_datetime_field(value)
elif isinstance(self, DateField):
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return [value] # this isn't supposed to happen
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
if self.has_default():
if callable(self.default):
return self.default()
return force_text(self.default, strings_only=True)
if (not self.empty_strings_allowed or (self.null and
not connection.features.interprets_empty_strings_as_nulls)):
return None
return ""
def get_validator_unique_lookup_type(self):
return '%s__exact' % self.name
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
first_choice = blank_choice if include_blank else []
if self.choices:
return first_choice + list(self.choices)
rel_model = self.rel.to
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname),
smart_text(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_text(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
return first_choice + lst
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True,
blank_choice=BLANK_CHOICE_DASH):
"""
Returns flattened choices with a default blank choice included.
"""
first_choice = blank_choice if include_blank else []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_text(self._get_val_from_obj(obj))
def bind(self, fieldmapping, original, bound_field_class):
return bound_field_class(self, fieldmapping, original)
def _get_choices(self):
if is_iterator(self._choices):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice,value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.CharField
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
assert kwargs.get('primary_key', False) is True, \
"%ss must have primary_key=True." % self.__class__.__name__
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name):
assert not cls._meta.has_auto_field, \
"A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
            # if value is 1 or 0 then it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = (self.null or
not (self.has_default() or 'initial' in kwargs))
defaults = {'choices': self.get_choices(
include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _('Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid date format. It must be "
"in YYYY-MM-DD format."),
'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name):
super(DateField,self).contribute_to_class(cls, name)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=False))
def get_prep_lookup(self, lookup_type, value):
# For dates lookups, convert the value to an int
# so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute', 'second'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime "
"(%s) while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_datetime'],
code='invalid_datetime',
params={'value': value},
)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
# get_prep_lookup is inherited from DateField
def get_prep_value(self, value):
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime (%s)"
" while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_datetime(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def _format(self, value):
if isinstance(value, six.string_types) or value is None:
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.util.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import util
return util.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value, connection):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length should be overridden to 254 characters to be fully
# compliant with RFCs 3696 and 5321
kwargs['max_length'] = kwargs.get('max_length', 75)
CharField.__init__(self, *args, **kwargs)
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs['max_length'] = kwargs.get('max_length', 100)
Field.__init__(self, verbose_name, name, **kwargs)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
description = _("Integer")
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if ((lookup_type == 'gte' or lookup_type == 'lt')
and isinstance(value, float)):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class GenericIPAddressField(Field):
empty_strings_allowed = True
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.protocol = protocol
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
Field.__init__(self, verbose_name, name, *args, **kwargs)
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value and ':' in value:
return clean_ipv6_address(value,
self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return value or None
def get_prep_value(self, value):
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return value
def formfield(self, **kwargs):
defaults = {
'protocol': self.protocol,
'form_class': forms.GenericIPAddressField,
}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type,
value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
default_validators = [validators.validate_slug]
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def get_prep_value(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def formfield(self, **kwargs):
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _("'%(value)s' value has the correct format "
"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_time'],
code='invalid_time',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_time(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
default_validators = [validators.URLValidator()]
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
CharField.__init__(self, verbose_name, name, **kwargs)
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
class BinaryField(Field):
description = _("Raw binary data")
empty_values = [None, b'']
def __init__(self, *args, **kwargs):
kwargs['editable'] = False
super(BinaryField, self).__init__(*args, **kwargs)
if self.max_length is not None:
self.validators.append(validators.MaxLengthValidator(self.max_length))
def get_internal_type(self):
return "BinaryField"
def get_default(self):
if self.has_default() and not callable(self.default):
return self.default
default = super(BinaryField, self).get_default()
if default == '':
return b''
return default
def get_db_prep_value(self, value, connection, prepared=False):
value = super(BinaryField, self
).get_db_prep_value(value, connection, prepared)
if value is not None:
return connection.Database.Binary(value)
return value
def value_to_string(self, obj):
"""Binary data is serialized as base64"""
return b64encode(force_bytes(self._get_val_from_obj(obj))).decode('ascii')
def to_python(self, value):
# If it's a string, it should be base64-encoded data
if isinstance(value, six.text_type):
return six.memoryview(b64decode(force_bytes(value)))
return value
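# A minimal sketch of a custom field built on the machinery above: it upper-
# cases values on save and reuses TextField's column type via the inherited
# get_internal_type(). The class name is illustrative, not part of Django.
class UpperCaseTextField(TextField):
    description = _("Text (stored upper-cased)")
    def get_prep_value(self, value):
        value = super(UpperCaseTextField, self).get_prep_value(value)
        return value.upper() if value is not None else value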
|
40223139/2015cdaa5-12
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/this.py
|
948
|
s = """Gur Mra bs Clguba, ol Gvz Crgref
Ornhgvshy vf orggre guna htyl.
Rkcyvpvg vf orggre guna vzcyvpvg.
Fvzcyr vf orggre guna pbzcyrk.
Pbzcyrk vf orggre guna pbzcyvpngrq.
Syng vf orggre guna arfgrq.
Fcnefr vf orggre guna qrafr.
Ernqnovyvgl pbhagf.
Fcrpvny pnfrf nera'g fcrpvny rabhtu gb oernx gur ehyrf.
Nygubhtu cenpgvpnyvgl orngf chevgl.
Reebef fubhyq arire cnff fvyragyl.
Hayrff rkcyvpvgyl fvyraprq.
Va gur snpr bs nzovthvgl, ershfr gur grzcgngvba gb thrff.
Gurer fubhyq or bar-- naq cersrenoyl bayl bar --boivbhf jnl gb qb vg.
Nygubhtu gung jnl znl abg or boivbhf ng svefg hayrff lbh'er Qhgpu.
Abj vf orggre guna arire.
Nygubhtu arire vf bsgra orggre guna *evtug* abj.
Vs gur vzcyrzragngvba vf uneq gb rkcynva, vg'f n onq vqrn.
Vs gur vzcyrzragngvba vf rnfl gb rkcynva, vg znl or n tbbq vqrn.
Anzrfcnprf ner bar ubaxvat terng vqrn -- yrg'f qb zber bs gubfr!"""
d = {}
for c in (65, 97):
for i in range(26):
d[chr(i+c)] = chr((i+13) % 26 + c)
print("".join([d.get(c, c) for c in s]))
|
cogmission/nupic
|
refs/heads/master
|
external/linux32/lib/python2.6/site-packages/matplotlib/projections/geo.py
|
69
|
import math
import numpy as np
import numpy.ma as ma
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.artist import kwdocd
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, NullLocator, FixedLocator, NullFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class GeoAxes(Axes):
"""
An abstract base class for geographic projections
"""
class ThetaFormatter(Formatter):
"""
Used to format the theta tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __init__(self, round_to=1.0):
self._round_to = round_to
def __call__(self, x, pos=None):
degrees = (x / np.pi) * 180.0
degrees = round(degrees / self._round_to) * self._round_to
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % degrees
else:
return u"%0.0f\u00b0" % degrees
RESOLUTION = 75
def cla(self):
Axes.cla(self)
self.set_longitude_grid(30)
self.set_latitude_grid(15)
self.set_longitude_grid_ends(75)
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.grid(rcParams['axes.grid'])
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
def _set_lim_and_transforms(self):
# A (possibly non-linear) projection on the (already scaled) data
self.transProjection = self._get_core_transform(self.RESOLUTION)
self.transAffine = self._get_affine_transform()
self.transAxes = BboxTransformTo(self.bbox)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
# This is the transform for longitude ticks.
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, 4.0)
self._xaxis_text2_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, -4.0)
# This is the transform for latitude ticks.
yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space + \
self.transAffine + \
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
def _get_affine_transform(self):
transform = self._get_core_transform(1)
xscale, _ = transform.transform_point((np.pi, 0))
_, yscale = transform.transform_point((0, np.pi / 2.0))
return Affine2D() \
.scale(0.5 / xscale, 0.5 / yscale) \
.translate(0.5, 0.5)
def get_xaxis_transform(self):
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'bottom', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'top', 'center'
def get_yaxis_transform(self):
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
return self._yaxis_text1_transform, 'center', 'right'
def get_yaxis_text2_transform(self, pad):
return self._yaxis_text2_transform, 'center', 'left'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
set_xscale = set_yscale
def set_xlim(self, *args, **kwargs):
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
set_ylim = set_xlim
def format_coord(self, long, lat):
'return a format string formatting the coordinate'
long = long * (180.0 / np.pi)
lat = lat * (180.0 / np.pi)
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if long >= 0.0:
ew = 'E'
else:
ew = 'W'
return u'%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(long), ew)
def set_longitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid line.
"""
number = (360.0 / degrees) + 1
self.xaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi, np.pi, number, True)[1:-1]))
self._longitude_degrees = degrees
self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_latitude_grid(self, degrees):
"""
Set the number of degrees between each latitude grid line.
"""
number = (180.0 / degrees) + 1
self.yaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))
self._latitude_degrees = degrees
self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_longitude_grid_ends(self, degrees):
"""
Set the latitude(s) at which to stop drawing the longitude grids.
"""
self._longitude_cap = degrees * (np.pi / 180.0)
self._xaxis_pretransform \
.clear() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself.
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return True if this axes support the zoom box
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
class AitoffAxes(GeoAxes):
name = 'aitoff'
class AitoffTransform(Transform):
"""
The base Aitoff transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Aitoff transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Aitoff space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
alpha = np.arccos(cos_latitude * np.cos(half_long))
# Mask this array, or we'll get divide-by-zero errors
alpha = ma.masked_where(alpha == 0.0, alpha)
# We want unnormalized sinc. numpy.sinc gives us normalized
sinc_alpha = ma.sin(alpha) / alpha
x = (cos_latitude * np.sin(half_long)) / sinc_alpha
y = (np.sin(latitude) / sinc_alpha)
x.set_fill_value(0.0)
y.set_fill_value(0.0)
return np.concatenate((x.filled(), y.filled()), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return AitoffAxes.InvertedAitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedAitoffTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return AitoffAxes.AitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.AitoffTransform(resolution)
class HammerAxes(GeoAxes):
name = 'hammer'
class HammerTransform(Transform):
"""
The base Hammer transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Hammer transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Hammer space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
sqrt2 = np.sqrt(2.0)
alpha = 1.0 + cos_latitude * np.cos(half_long)
x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
y = (sqrt2 * np.sin(latitude)) / alpha
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return HammerAxes.InvertedHammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedHammerTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
quarter_x = 0.25 * x
half_y = 0.5 * y
z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
latitude = np.arcsin(y*z)
return np.concatenate((longitude, latitude), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return HammerAxes.HammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.HammerTransform(resolution)
class MollweideAxes(GeoAxes):
name = 'mollweide'
class MollweideTransform(Transform):
"""
The base Mollweide transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Mollweide transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Mollweide space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
aux = 2.0 * np.arcsin((2.0 * latitude) / np.pi)
x = (2.0 * np.sqrt(2.0) * longitude * np.cos(aux)) / np.pi
y = (np.sqrt(2.0) * np.sin(aux))
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MollweideAxes.InvertedMollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedMollweideTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MollweideAxes.MollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.MollweideTransform(resolution)
class LambertAxes(GeoAxes):
name = 'lambert'
class LambertTransform(Transform):
"""
The base Lambert transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
"""
Create a new Lambert transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Lambert space.
"""
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
cos_lat = np.cos(latitude)
sin_lat = np.sin(latitude)
diff_long = longitude - clong
cos_diff_long = np.cos(diff_long)
inner_k = (1.0 +
np.sin(clat)*sin_lat +
np.cos(clat)*cos_lat*cos_diff_long)
# Prevent divide-by-zero problems
inner_k = np.where(inner_k == 0.0, 1e-15, inner_k)
k = np.sqrt(2.0 / inner_k)
x = k*cos_lat*np.sin(diff_long)
y = k*(np.cos(clat)*sin_lat -
np.sin(clat)*cos_lat*cos_diff_long)
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return LambertAxes.InvertedLambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedLambertTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
p = np.sqrt(x*x + y*y)
p = np.where(p == 0.0, 1e-9, p)
c = 2.0 * np.arcsin(0.5 * p)
sin_c = np.sin(c)
cos_c = np.cos(c)
lat = np.arcsin(cos_c*np.sin(clat) +
((y*sin_c*np.cos(clat)) / p))
long = clong + np.arctan(
(x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
return np.concatenate((long, lat), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return LambertAxes.LambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
self._center_longitude = kwargs.pop("center_longitude", 0.0)
self._center_latitude = kwargs.pop("center_latitude", 0.0)
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
def cla(self):
GeoAxes.cla(self)
self.yaxis.set_major_formatter(NullFormatter())
def _get_core_transform(self, resolution):
return self.LambertTransform(
self._center_longitude,
self._center_latitude,
resolution)
def _get_affine_transform(self):
return Affine2D() \
.scale(0.25) \
.translate(0.5, 0.5)
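# Editor's note: hedged usage sketch, not part of the original matplotlib
# module; it assumes these classes are registered under the names given by
# each class's `name` attribute (as matplotlib's projections package does).
def _example_geo_projection():
    # Create a Hammer-projection axes by name; data is then plotted in radians
    # within the [-pi, pi] x [-pi/2, pi/2] limits set in GeoAxes.cla().
    import matplotlib.pyplot as plt
    ax = plt.subplot(111, projection='hammer')  # or 'aitoff', 'mollweide', 'lambert'
    ax.grid(True)
    return ax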
|
leeseuljeong/leeseulstack_neutron
|
refs/heads/master
|
neutron/tests/unit/oneconvergence/test_nvsdlib.py
|
8
|
# Copyright 2014 OneConvergence, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from oslo.serialization import jsonutils
from neutron.plugins.oneconvergence.lib import nvsdlib
from neutron.tests import base
NETWORKS_URI = "/pluginhandler/ocplugin/tenant/%s/lnetwork/"
NETWORK_URI = NETWORKS_URI + "%s"
GET_ALL_NETWORKS = "/pluginhandler/ocplugin/tenant/getallnetworks"
SUBNETS_URI = NETWORK_URI + "/lsubnet/"
SUBNET_URI = SUBNETS_URI + "%s"
GET_ALL_SUBNETS = "/pluginhandler/ocplugin/tenant/getallsubnets"
PORTS_URI = NETWORK_URI + "/lport/"
PORT_URI = PORTS_URI + "%s"
EXT_URI = "/pluginhandler/ocplugin/ext/tenant/%s"
FLOATING_IPS_URI = EXT_URI + "/floatingip/"
FLOATING_IP_URI = FLOATING_IPS_URI + "%s"
ROUTERS_URI = EXT_URI + "/lrouter/"
ROUTER_URI = ROUTERS_URI + "%s"
TEST_NET = 'test-network'
TEST_SUBNET = 'test-subnet'
TEST_PORT = 'test-port'
TEST_FIP = 'test-floatingip'
TEST_ROUTER = 'test-router'
TEST_TENANT = 'test-tenant'
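# Editor's note: hedged illustration, not part of the original test module.
# The URI templates above are plain %-format strings, filled with the tenant
# and resource ids used throughout these tests, e.g.:
#
#     NETWORK_URI % (TEST_TENANT, TEST_NET)
#     # -> '/pluginhandler/ocplugin/tenant/test-tenant/lnetwork/test-network'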
class TestNVSDApi(base.BaseTestCase):
def setUp(self):
super(TestNVSDApi, self).setUp()
self.nvsdlib = nvsdlib.NVSDApi()
def test_create_network(self):
network_obj = {
"name": 'test-net',
"tenant_id": TEST_TENANT,
"shared": False,
"admin_state_up": True,
"router:external": False
}
resp = mock.Mock()
resp.json.return_value = {'id': 'uuid'}
with mock.patch.object(self.nvsdlib, 'send_request',
return_value=resp) as send_request:
uri = NETWORKS_URI % TEST_TENANT
net = self.nvsdlib.create_network(network_obj)
send_request.assert_called_once_with(
"POST", uri,
body=jsonutils.dumps(network_obj),
resource='network',
tenant_id=TEST_TENANT)
self.assertEqual(net, {'id': 'uuid'})
def test_update_network(self):
network = {'id': TEST_NET,
'tenant_id': TEST_TENANT}
update_network = {'name': 'new_name'}
uri = NETWORK_URI % (TEST_TENANT, TEST_NET)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.update_network(network, update_network)
send_request.assert_called_once_with(
"PUT", uri, body=jsonutils.dumps(update_network),
resource='network', tenant_id=TEST_TENANT,
resource_id=TEST_NET)
def test_delete_network(self):
network = {'id': TEST_NET,
'tenant_id': TEST_TENANT}
uri = NETWORK_URI % (TEST_TENANT, TEST_NET)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
with mock.patch.object(self.nvsdlib, '_get_ports'):
self.nvsdlib.delete_network(network)
send_request.assert_called_once_with(
"DELETE", uri, resource='network',
tenant_id=TEST_TENANT, resource_id=TEST_NET)
def test_create_port(self):
path = PORTS_URI % (TEST_TENANT, TEST_NET)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
fixed_ips = [{'ip_address': '10.0.0.2',
'subnet_id': TEST_SUBNET}]
lport = {
"id": TEST_PORT,
"name": 'test',
"device_id": "device_id",
"device_owner": "device_owner",
"mac_address": "mac_address",
"fixed_ips": fixed_ips,
"admin_state_up": True,
"network_id": TEST_NET,
"status": 'ACTIVE'
}
self.nvsdlib.create_port(TEST_TENANT, lport)
expected = {"id": TEST_PORT, "name": 'test',
"device_id": "device_id",
"device_owner": "device_owner",
"mac_address": "mac_address",
"ip_address": '10.0.0.2',
"subnet_id": TEST_SUBNET,
"admin_state_up": True,
"network_id": TEST_NET,
"status": 'ACTIVE'}
send_request.assert_called_once_with(
"POST", path,
body=jsonutils.dumps(expected),
resource='port',
tenant_id=TEST_TENANT)
def test_update_port(self):
port = {'id': TEST_PORT,
'network_id': TEST_NET}
port_update = {'name': 'new-name'}
uri = PORT_URI % (TEST_TENANT, TEST_NET, TEST_PORT)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.update_port(TEST_TENANT, port, port_update)
send_request.assert_called_once_with(
"PUT", uri,
body=jsonutils.dumps(port_update),
resource='port',
resource_id='test-port',
tenant_id=TEST_TENANT)
def test_delete_port(self):
port = {'network_id': TEST_NET,
'tenant_id': TEST_TENANT}
uri = PORT_URI % (TEST_TENANT, TEST_NET, TEST_PORT)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.delete_port(TEST_PORT, port)
send_request.assert_called_once_with("DELETE", uri,
resource='port',
tenant_id=TEST_TENANT,
resource_id=TEST_PORT)
def test_create_subnet(self):
subnet = {'id': TEST_SUBNET,
'tenant_id': TEST_TENANT,
'network_id': TEST_NET}
uri = SUBNETS_URI % (TEST_TENANT, TEST_NET)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.create_subnet(subnet)
send_request.assert_called_once_with("POST", uri,
body=jsonutils.dumps(subnet),
resource='subnet',
tenant_id=TEST_TENANT)
def test_update_subnet(self):
subnet = {'id': TEST_SUBNET,
'tenant_id': TEST_TENANT,
'network_id': TEST_NET}
subnet_update = {'name': 'new-name'}
uri = SUBNET_URI % (TEST_TENANT, TEST_NET, TEST_SUBNET)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.update_subnet(subnet, subnet_update)
send_request.assert_called_once_with(
"PUT", uri,
body=jsonutils.dumps(subnet_update), resource='subnet',
tenant_id=TEST_TENANT, resource_id=TEST_SUBNET)
def test_delete_subnet(self):
subnet = {'id': TEST_SUBNET,
'tenant_id': TEST_TENANT,
'network_id': TEST_NET}
uri = SUBNET_URI % (TEST_TENANT, TEST_NET, TEST_SUBNET)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.delete_subnet(subnet)
send_request.assert_called_once_with("DELETE", uri,
resource='subnet',
tenant_id=TEST_TENANT,
resource_id=TEST_SUBNET)
def test_create_floatingip(self):
floatingip = {'id': TEST_FIP,
'tenant_id': TEST_TENANT}
uri = FLOATING_IPS_URI % TEST_TENANT
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.create_floatingip(floatingip)
send_request.assert_called_once_with(
"POST", uri,
body=jsonutils.dumps(floatingip),
resource='floating_ip',
tenant_id=TEST_TENANT)
def test_update_floatingip(self):
floatingip = {'id': TEST_FIP,
'tenant_id': TEST_TENANT}
uri = FLOATING_IP_URI % (TEST_TENANT, TEST_FIP)
floatingip_update = {'floatingip': {'router_id': TEST_ROUTER}}
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.update_floatingip(floatingip, floatingip_update)
send_request.assert_called_once_with(
"PUT", uri,
body=jsonutils.dumps(floatingip_update['floatingip']),
resource='floating_ip', tenant_id=TEST_TENANT,
resource_id=TEST_FIP)
def test_delete_floatingip(self):
floatingip = {'id': TEST_FIP,
'tenant_id': TEST_TENANT}
uri = FLOATING_IP_URI % (TEST_TENANT, TEST_FIP)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.delete_floatingip(floatingip)
send_request.assert_called_once_with(
"DELETE", uri, resource='floating_ip', tenant_id=TEST_TENANT,
resource_id=TEST_FIP)
def test_create_router(self):
router = {'id': TEST_ROUTER, 'tenant_id': TEST_TENANT}
uri = ROUTERS_URI % TEST_TENANT
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.create_router(router)
send_request.assert_called_once_with(
"POST", uri, body=jsonutils.dumps(router), resource='router',
tenant_id=TEST_TENANT)
def test_update_router(self):
router = {'id': TEST_ROUTER, 'tenant_id': TEST_TENANT}
uri = ROUTER_URI % (TEST_TENANT, TEST_ROUTER)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.update_router(router)
send_request.assert_called_once_with(
"PUT", uri, body=jsonutils.dumps(router),
resource='router', tenant_id=TEST_TENANT,
resource_id=TEST_ROUTER)
def test_delete_router(self):
uri = ROUTER_URI % (TEST_TENANT, TEST_ROUTER)
with mock.patch.object(self.nvsdlib, 'send_request') as send_request:
self.nvsdlib.delete_router(TEST_TENANT, TEST_ROUTER)
send_request.assert_called_once_with(
"DELETE", uri, resource='router',
tenant_id=TEST_TENANT, resource_id=TEST_ROUTER)
|
biviosoftware/macutil
|
refs/heads/master
|
tests/test_rename.py
|
1
|
import macutil.dropbox_photos_rename
import os
import subprocess
import time
import datetime
_tmp = os.path.join(os.environ['PWD'], 'tmp')
subprocess.call(['rm', '-rf', _tmp])
os.mkdir(_tmp)
_dropbox = os.path.join(_tmp, 'Dropbox')
os.mkdir(_dropbox)
_photos = os.path.join(_dropbox, 'Photos')
os.mkdir(_photos)
_camera_uploads = os.path.join(_dropbox, 'Camera Uploads')
os.mkdir(_camera_uploads)
os.environ["HOME"] = _tmp
def create_file(f,mtime=None):
p = os.path.join(_camera_uploads, f)
fh = open(p, 'w')
fh.write('')
fh.close()
if mtime:
mtime = time.mktime(datetime.datetime.strptime(mtime, "%Y%m%d").timetuple())
os.utime(p, (mtime, mtime))
return p
def test_1():
p = create_file('x.txt', '20141223')
macutil.dropbox_photos_rename.move_one(p)
assert os.path.isfile(os.path.join(_photos, '2014/12-23', 'x.txt'))
p = create_file('2014-12-16-hello.txt', '20141223')
macutil.dropbox_photos_rename.move_one(p)
assert os.path.isfile(os.path.join(_photos, '2014/12-16', '2014-12-16-hello.txt'))
p = create_file('2014-12-16-hello.txt', '20141223')
macutil.dropbox_photos_rename.move_one(p)
assert os.path.isfile(os.path.join(_photos, '2014/12-16', '2014-12-16-hello.txt'))
p = create_file('2014-12-11-UPPER.JPEG')
macutil.dropbox_photos_rename.move_one(p)
assert os.path.isfile(os.path.join(_photos, '2014/12-11', '2014-12-11-upper.jpg'))
|
HonzaKral/django
|
refs/heads/master
|
tests/admin_autodiscover/tests.py
|
526
|
from unittest import TestCase
from django.contrib import admin
class AdminAutoDiscoverTests(TestCase):
"""
Test for bug #8245 - don't raise an AlreadyRegistered exception when using
autodiscover() and an admin.py module contains an error.
"""
def test_double_call_autodiscover(self):
# The first time autodiscover is called, we should get our real error.
with self.assertRaises(Exception) as cm:
admin.autodiscover()
self.assertEqual(str(cm.exception), "Bad admin module")
# Calling autodiscover again should raise the very same error it did
# the first time, not an AlreadyRegistered error.
with self.assertRaises(Exception) as cm:
admin.autodiscover()
self.assertEqual(str(cm.exception), "Bad admin module")
|
pkoutsias/SickRage
|
refs/heads/master
|
sickbeard/helpers.py
|
1
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os
import io
import ctypes
import random
import re
import socket
import stat
import tempfile
import time
import traceback
import urllib
import urllib2
import hashlib
import httplib
import urlparse
import uuid
import base64
import zipfile
import datetime
import errno
import ast
import operator
import platform
import sickbeard
import adba
import requests
import certifi
from contextlib import closing
from socket import timeout as SocketTimeout
from sickbeard import logger, classes
from sickbeard.common import USER_AGENT
from sickbeard import db
from sickbeard.notifiers import synoindex_notifier
from sickrage.helper.common import http_code_description, media_extensions, pretty_file_size, subtitle_extensions
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex
from sickrage.show.Show import Show
from itertools import izip, cycle
import shutil
import shutil_custom
import xml.etree.ElementTree as ET
import json
shutil.copyfile = shutil_custom.copyfile_custom
# pylint: disable=protected-access
# Access to a protected member of a client class
urllib._urlopener = classes.SickBeardURLopener()
def fixGlob(path):
path = re.sub(r'\[', '[[]', path)
return re.sub(r'(?<!\[)\]', '[]]', path)
def indentXML(elem, level=0):
"""
Does our pretty printing, makes Matt very happy
"""
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indentXML(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
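# Editor's note: hedged usage sketch, not part of the original module.
# indentXML() mutates an ElementTree in place so that serializing it yields
# one element per line instead of a single unbroken string.
def _example_indent_xml():
    import xml.etree.ElementTree as ElementTree
    root = ElementTree.Element('tvshow')
    ElementTree.SubElement(root, 'title').text = 'Example'
    indentXML(root)
    return ElementTree.tostring(root)  # newline and indent characters now present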
def remove_non_release_groups(name):
"""
Remove non release groups from name
"""
if not name:
return name
# Do not remove all [....] suffixes, or it will break anime releases ## Need to verify this is true now
# Check your database for funky release_names and add them here, to improve failed handling, archiving, and history.
# select release_name from tv_episodes WHERE LENGTH(release_name);
# [eSc], [SSG], [GWC] are valid release groups for non-anime
removeWordsList = {
r'\[rartv\]$': 'searchre',
r'\[rarbg\]$': 'searchre',
r'\[eztv\]$': 'searchre',
r'\[ettv\]$': 'searchre',
r'\[cttv\]$': 'searchre',
r'\[vtv\]$': 'searchre',
r'\[EtHD\]$': 'searchre',
r'\[GloDLS\]$': 'searchre',
r'\[silv4\]$': 'searchre',
r'\[Seedbox\]$': 'searchre',
r'\[PublicHD\]$': 'searchre',
r'\[AndroidTwoU\]$': 'searchre',
r'\[brassetv\]$': 'searchre',
r'\.\[BT\]$': 'searchre',
r' \[1044\]$': 'searchre',
r'\.RiPSaLoT$': 'searchre',
r'\.GiuseppeTnT$': 'searchre',
r'\.Renc$': 'searchre',
r'\.gz$': 'searchre',
r'(?<![57])\.1$': 'searchre',
r'-NZBGEEK$': 'searchre',
r'-Siklopentan$': 'searchre',
r'-Chamele0n$': 'searchre',
r'-Obfuscated$': 'searchre',
r'-\[SpastikusTV\]$': 'searchre',
r'-RP$': 'searchre',
r'-20-40$': 'searchre',
r'\.\[www\.usabit\.com\]$': 'searchre',
r'^\[www\.Cpasbien\.pe\] ': 'searchre',
r'^\[www\.Cpasbien\.com\] ': 'searchre',
r'^\[ www\.Cpasbien\.pw \] ': 'searchre',
r'^\.www\.Cpasbien\.pw': 'searchre',
r'^\[www\.newpct1\.com\]': 'searchre',
r'^\[ www\.Cpasbien\.com \] ': 'searchre',
r'- \{ www\.SceneTime\.com \}$': 'searchre',
r'^\{ www\.SceneTime\.com \} - ': 'searchre',
r'^\]\.\[www\.tensiontorrent.com\] - ': 'searchre',
r'^\]\.\[ www\.tensiontorrent.com \] - ': 'searchre',
r'- \[ www\.torrentday\.com \]$': 'searchre',
r'^\[ www\.TorrentDay\.com \] - ': 'searchre',
r'\[NO-RAR\] - \[ www\.torrentday\.com \]$': 'searchre',
}
_name = name
for remove_string, remove_type in removeWordsList.iteritems():
if remove_type == 'search':
_name = _name.replace(remove_string, '')
elif remove_type == 'searchre':
_name = re.sub(r'(?i)' + remove_string, '', _name)
return _name
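# Editor's note: hedged usage sketch, not part of the original module; the
# release name below is made up for illustration.
def _example_remove_non_release_groups():
    # The trailing "[rartv]" tracker tag matches one of the 'searchre' patterns
    # above and is stripped, leaving the real release group intact.
    return remove_non_release_groups('Show.Name.S01E01.720p.HDTV.x264-GRP[rartv]')
    # expected: 'Show.Name.S01E01.720p.HDTV.x264-GRP'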
def isMediaFile(filename):
"""
Check if named file may contain media
:param filename: Filename to check
:return: True if this is a known media file, False if not
"""
# ignore samples
try:
if re.search(r'(^|[\W_])(?<!shomin.)(sample\d*)[\W_]', filename, re.I):
return False
# ignore RARBG release intro
if re.search(r'^RARBG\.\w+\.(mp4|avi|txt)$', filename, re.I):
return False
# ignore Mac OS X "resource fork" files (names starting with "._")
if filename.startswith('._'):
return False
sepFile = filename.rpartition(".")
if re.search('extras?$', sepFile[0], re.I):
return False
if sepFile[2].lower() in media_extensions:
return True
else:
return False
except TypeError as error: # Not a string
logger.log('Invalid filename. Filename must be a string. %s' % error, logger.DEBUG) # pylint: disable=no-member
return False
def isRarFile(filename):
"""
Check if file is a RAR file, or part of a RAR set
:param filename: Filename to check
:return: True if this is RAR/Part file, False if not
"""
archive_regex = r'(?P<file>^(?P<base>(?:(?!\.part\d+\.rar$).)*)\.(?:(?:part0*1\.)?rar)$)'
if re.search(archive_regex, filename):
return True
return False
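# Editor's note: hedged illustration, not part of the original module.  The
# archive_regex above matches a plain .rar or the first volume of a multi-part
# set, but not later volumes.
def _example_is_rar_file():
    return [isRarFile(f) for f in ('show.rar', 'show.part01.rar', 'show.part02.rar')]
    # expected: [True, True, False]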
def isBeingWritten(filepath):
"""
Check if file has been written in last 60 seconds
:param filepath: Filename to check
:return: True if the file was modified within the last 60 seconds, False if not
"""
# Return True if file was modified within 60 seconds. it might still be being written to.
ctime = max(ek(os.path.getctime, filepath), ek(os.path.getmtime, filepath))
if ctime > time.time() - 60:
return True
return False
def remove_file_failed(failed_file):
"""
Remove file from filesystem
:param failed_file: File to remove
"""
try:
ek(os.remove, failed_file)
except Exception:
pass
def makeDir(path):
"""
Make a directory on the filesystem
:param path: directory to make
:return: True if success, False if failure
"""
if not ek(os.path.isdir, path):
try:
ek(os.makedirs, path)
# do the library update for synoindex
synoindex_notifier.addFolder(path)
except OSError:
return False
return True
def searchIndexerForShowID(regShowName, indexer=None, indexer_id=None, ui=None):
"""
Contacts indexer to check for information on shows by showid
:param regShowName: Name of show
:param indexer: Which indexer to use
:param indexer_id: Which indexer ID to look for
:param ui: Custom UI for indexer use
:return:
"""
showNames = [re.sub('[. -]', ' ', regShowName)]
# Query Indexers for each search term and build the list of results
for i in sickbeard.indexerApi().indexers if not indexer else [int(indexer)]:
# Query Indexers for each search term and build the list of results
lINDEXER_API_PARMS = sickbeard.indexerApi(i).api_params.copy()
if ui is not None:
lINDEXER_API_PARMS['custom_ui'] = ui
t = sickbeard.indexerApi(i).indexer(**lINDEXER_API_PARMS)
for name in showNames:
logger.log(u"Trying to find " + name + " on " + sickbeard.indexerApi(i).name, logger.DEBUG)
try:
search = t[indexer_id] if indexer_id else t[name]
except Exception:
continue
try:
seriesname = search[0]['seriesname']
except Exception:
seriesname = None
try:
series_id = search[0]['id']
except Exception:
series_id = None
if not (seriesname and series_id):
continue
ShowObj = Show.find(sickbeard.showList, int(series_id))
# Check if we can find the show in our list (if not, it's not the right show)
if (indexer_id is None) and (ShowObj is not None) and (ShowObj.indexerid == int(series_id)):
return seriesname, i, int(series_id)
elif (indexer_id is not None) and (int(indexer_id) == int(series_id)):
return seriesname, i, int(indexer_id)
if indexer:
break
return None, None, None
def listMediaFiles(path):
"""
Get a list of files possibly containing media in a path
:param path: Path to check for files
:return: list of files
"""
if not path or not ek(os.path.isdir, path):
return []
files = []
for curFile in ek(os.listdir, path):
fullCurFile = ek(os.path.join, path, curFile)
# if it's a folder do it recursively
if ek(os.path.isdir, fullCurFile) and not curFile.startswith('.') and not curFile == 'Extras':
files += listMediaFiles(fullCurFile)
elif isMediaFile(curFile):
files.append(fullCurFile)
return files
def copyFile(srcFile, destFile):
"""
Copy a file from source to destination
:param srcFile: Path of source file
:param destFile: Path of destination file
"""
ek(shutil.copyfile, srcFile, destFile)
try:
ek(shutil.copymode, srcFile, destFile)
except OSError:
pass
def moveFile(srcFile, destFile):
"""
Move a file from source to destination
:param srcFile: Path of source file
:param destFile: Path of destination file
"""
try:
ek(shutil.move, srcFile, destFile)
fixSetGroupID(destFile)
except OSError:
copyFile(srcFile, destFile)
ek(os.unlink, srcFile)
def link(src, dst):
"""
Create a file link from source to destination.
TODO: Make this unicode proof
:param src: Source file
:param dst: Destination file
"""
if os.name == 'nt':
if ctypes.windll.kernel32.CreateHardLinkW(unicode(dst), unicode(src), 0) == 0:
raise ctypes.WinError()
else:
ek(os.link, src, dst)
def hardlinkFile(srcFile, destFile):
"""
Create a hard-link (inside filesystem link) between source and destination
:param srcFile: Source file
:param destFile: Destination file
"""
try:
ek(link, srcFile, destFile)
fixSetGroupID(destFile)
except Exception as e:
logger.log(u"Failed to create hardlink of %s at %s. Error: %r. Copying instead"
% (srcFile, destFile, ex(e)), logger.WARNING)
copyFile(srcFile, destFile)
def symlink(src, dst):
"""
Create a soft/symlink between source and destination
:param src: Source file
:param dst: Destination file
"""
if os.name == 'nt':
if ctypes.windll.kernel32.CreateSymbolicLinkW(unicode(dst), unicode(src), 1 if ek(os.path.isdir, src) else 0) in [0, 1280]:
raise ctypes.WinError()
else:
ek(os.symlink, src, dst)
def moveAndSymlinkFile(srcFile, destFile):
"""
Move a file from source to destination, then create a symlink at the source location pointing to the destination.
If this fails, copy the file from source to destination instead
:param srcFile: Source file
:param destFile: Destination file
"""
try:
ek(shutil.move, srcFile, destFile)
fixSetGroupID(destFile)
ek(symlink, destFile, srcFile)
except Exception as e:
logger.log(u"Failed to create symlink of %s at %s. Error: %r. Copying instead"
% (srcFile, destFile, ex(e)), logger.WARNING)
copyFile(srcFile, destFile)
def make_dirs(path):
"""
Creates any folders that are missing and assigns them the permissions of their
parents
"""
logger.log(u"Checking if the path %s already exists" % path, logger.DEBUG)
if not ek(os.path.isdir, path):
# Windows, create all missing folders
if os.name == 'nt' or os.name == 'ce':
try:
logger.log(u"Folder %s didn't exist, creating it" % path, logger.DEBUG)
ek(os.makedirs, path)
except (OSError, IOError) as e:
logger.log(u"Failed creating %s : %r" % (path, ex(e)), logger.ERROR)
return False
# not Windows, create all missing folders and set permissions
else:
sofar = ''
folder_list = path.split(os.path.sep)
# look through each subfolder and make sure they all exist
for cur_folder in folder_list:
sofar += cur_folder + os.path.sep
# if it exists then just keep walking down the line
if ek(os.path.isdir, sofar):
continue
try:
logger.log(u"Folder %s didn't exist, creating it" % sofar, logger.DEBUG)
ek(os.mkdir, sofar)
# use normpath to remove end separator, otherwise checks permissions against itself
chmodAsParent(ek(os.path.normpath, sofar))
# do the library update for synoindex
synoindex_notifier.addFolder(sofar)
except (OSError, IOError) as e:
logger.log(u"Failed creating %s : %r" % (sofar, ex(e)), logger.ERROR)
return False
return True
def rename_ep_file(cur_path, new_path, old_path_length=0):
"""
Creates all folders needed to move a file to its new location, renames it, then cleans up any folders
left that are now empty.
:param cur_path: The absolute path to the file you want to move/rename
:param new_path: The absolute path to the destination for the file WITHOUT THE EXTENSION
:param old_path_length: The length of media file path (old name) WITHOUT THE EXTENSION
"""
# new_dest_dir, new_dest_name = ek(os.path.split, new_path) # @UnusedVariable
if old_path_length == 0 or old_path_length > len(cur_path):
# approach from the right
cur_file_name, cur_file_ext = ek(os.path.splitext, cur_path) # @UnusedVariable
else:
# approach from the left
cur_file_ext = cur_path[old_path_length:]
cur_file_name = cur_path[:old_path_length]
if cur_file_ext[1:] in subtitle_extensions:
# Extract subtitle language from filename
sublang = ek(os.path.splitext, cur_file_name)[1][1:]
# Check if the language extracted from filename is a valid language
if sublang in sickbeard.subtitles.subtitle_code_filter():
cur_file_ext = '.' + sublang + cur_file_ext
# put the extension on the incoming file
new_path += cur_file_ext
make_dirs(ek(os.path.dirname, new_path))
# move the file
try:
logger.log(u"Renaming file from %s to %s" % (cur_path, new_path))
ek(shutil.move, cur_path, new_path)
except (OSError, IOError) as e:
logger.log(u"Failed renaming %s to %s : %r" % (cur_path, new_path, ex(e)), logger.ERROR)
return False
# clean up any old folders that are empty
delete_empty_folders(ek(os.path.dirname, cur_path))
return True
def delete_empty_folders(check_empty_dir, keep_dir=None):
"""
Walks backwards up the path and deletes any empty folders found.
:param check_empty_dir: The path to clean (absolute path to a folder)
:param keep_dir: Clean until this path is reached
"""
# treat check_empty_dir as empty when it only contains these items
ignore_items = []
logger.log(u"Trying to clean any empty folders under " + check_empty_dir)
# as long as the folder exists and doesn't contain any files, delete it
while ek(os.path.isdir, check_empty_dir) and check_empty_dir != keep_dir:
check_files = ek(os.listdir, check_empty_dir)
if not check_files or (len(check_files) <= len(ignore_items) and all(
[check_file in ignore_items for check_file in check_files])):
# directory is empty or contains only ignore_items
try:
logger.log(u"Deleting empty folder: " + check_empty_dir)
# need shutil.rmtree when ignore_items is really implemented
ek(os.rmdir, check_empty_dir)
# do the library update for synoindex
synoindex_notifier.deleteFolder(check_empty_dir)
except OSError as e:
logger.log(u"Unable to delete %s. Error: %r" % (check_empty_dir, repr(e)), logger.WARNING)
break
check_empty_dir = ek(os.path.dirname, check_empty_dir)
else:
break
def fileBitFilter(mode):
"""
Strip special filesystem bits from file
:param mode: mode to check and strip
:return: required mode for media file
"""
for bit in [stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH, stat.S_ISUID, stat.S_ISGID]:
if mode & bit:
mode -= bit
return mode
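# Editor's note: hedged illustration, not part of the original module.
def _example_file_bit_filter():
    # A 0o774 (rwxrwxr--) mode loses its execute bits and becomes 0o664 (rw-rw-r--).
    return fileBitFilter(stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH) == 0o664  # expected: True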
def chmodAsParent(childPath):
"""
Make a child path inherit the permissions of its parent directory
(Does not work for Windows hosts)
:param childPath: Child Path to change permissions to sync from parent
"""
if os.name == 'nt' or os.name == 'ce':
return
parentPath = ek(os.path.dirname, childPath)
if not parentPath:
logger.log(u"No parent path provided in " + childPath + ", unable to get permissions from it", logger.DEBUG)
return
childPath = ek(os.path.join, parentPath, ek(os.path.basename, childPath))
parentPathStat = ek(os.stat, parentPath)
parentMode = stat.S_IMODE(parentPathStat[stat.ST_MODE])
childPathStat = ek(os.stat, childPath.encode(sickbeard.SYS_ENCODING))
childPath_mode = stat.S_IMODE(childPathStat[stat.ST_MODE])
if ek(os.path.isfile, childPath):
childMode = fileBitFilter(parentMode)
else:
childMode = parentMode
if childPath_mode == childMode:
return
childPath_owner = childPathStat.st_uid
user_id = os.geteuid() # @UndefinedVariable - only available on UNIX
if user_id != 0 and user_id != childPath_owner:
logger.log(u"Not running as root or owner of " + childPath + ", not trying to set permissions", logger.DEBUG)
return
try:
ek(os.chmod, childPath, childMode)
logger.log(u"Setting permissions for %s to %o as parent directory has %o" % (childPath, childMode, parentMode),
logger.DEBUG)
except OSError:
logger.log(u"Failed to set permission for %s to %o" % (childPath, childMode), logger.DEBUG)
def fixSetGroupID(childPath):
"""
Inherit the set-group-ID bit from the parent directory
(does not work on Windows hosts)
:param childPath: Path to inherit SGID permissions from parent
"""
if os.name == 'nt' or os.name == 'ce':
return
parentPath = ek(os.path.dirname, childPath)
parentStat = ek(os.stat, parentPath)
parentMode = stat.S_IMODE(parentStat[stat.ST_MODE])
childPath = ek(os.path.join, parentPath, ek(os.path.basename, childPath))
if parentMode & stat.S_ISGID:
parentGID = parentStat[stat.ST_GID]
childStat = ek(os.stat, childPath.encode(sickbeard.SYS_ENCODING))
childGID = childStat[stat.ST_GID]
if childGID == parentGID:
return
childPath_owner = childStat.st_uid
user_id = os.geteuid() # @UndefinedVariable - only available on UNIX
if user_id != 0 and user_id != childPath_owner:
logger.log(u"Not running as root or owner of " + childPath + ", not trying to set the set-group-ID",
logger.DEBUG)
return
try:
ek(os.chown, childPath, -1, parentGID) # @UndefinedVariable - only available on UNIX
logger.log(u"Respecting the set-group-ID bit on the parent directory for %s" % childPath, logger.DEBUG)
except OSError:
logger.log(
u"Failed to respect the set-group-ID bit on the parent directory for %s (setting group ID %i)" % (
childPath, parentGID), logger.ERROR)
def is_anime_in_show_list():
"""
Check if any shows in list contain anime
:return: True if global showlist contains Anime, False if not
"""
for show in sickbeard.showList:
if show.is_anime:
return True
return False
def update_anime_support():
"""Check if we need to support anime, and if we do, enable the feature"""
sickbeard.ANIMESUPPORT = is_anime_in_show_list()
def get_absolute_number_from_season_and_episode(show, season, episode):
"""
Find the absolute number for a show episode
:param show: Show object
:param season: Season number
:param episode: Episode number
:return: The absolute number
"""
absolute_number = None
if season and episode:
myDB = db.DBConnection()
sql = "SELECT * FROM tv_episodes WHERE showid = ? and season = ? and episode = ?"
sqlResults = myDB.select(sql, [show.indexerid, season, episode])
if len(sqlResults) == 1:
absolute_number = int(sqlResults[0]["absolute_number"])
logger.log(u"Found absolute number %s for show %s S%02dE%02d" % (absolute_number, show.name, season, episode), logger.DEBUG)
else:
logger.log(u"No entries for absolute number for show %s S%02dE%02d" % (show.name, season, episode), logger.DEBUG)
return absolute_number
def get_all_episodes_from_absolute_number(show, absolute_numbers, indexer_id=None):
episodes = []
season = None
if len(absolute_numbers):
if not show and indexer_id:
show = Show.find(sickbeard.showList, indexer_id)
for absolute_number in absolute_numbers if show else []:
ep = show.getEpisode(None, None, absolute_number=absolute_number)
if ep:
episodes.append(ep.episode)
season = ep.season  # this will always take the last found season, so episodes that cross a season border are not handled well
return season, episodes
def sanitizeSceneName(name, anime=False):
"""
Takes a show name and returns the "scenified" version of it.
:param name: The show name to sanitize.
:param anime: Some shows (e.g. Kuroko's Basketball) keep the apostrophe in their name, which is needed for anime searches.
:return: A string containing the scene version of the show name given.
"""
if not name:
return ''
bad_chars = u',:()!?\u2019'
if not anime:
bad_chars += u"'"
# strip out any bad chars
for x in bad_chars:
name = name.replace(x, "")
# tidy up stuff that doesn't belong in scene names
name = name.replace("- ", ".").replace(" ", ".").replace("&", "and").replace('/', '.')
name = re.sub(r"\.\.*", ".", name)
if name.endswith('.'):
name = name[:-1]
return name
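# Editor's note: hedged usage sketch, not part of the original module; the
# show title below is made up.
def _example_sanitize_scene_name():
    # Punctuation from bad_chars is dropped and spaces collapse to single dots.
    return sanitizeSceneName('Mr. Robot: Part 1!')
    # expected: 'Mr.Robot.Part.1'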
_binOps = {
ast.Add: operator.add,
ast.Sub: operator.sub,
ast.Mult: operator.mul,
ast.Div: operator.div,
ast.Mod: operator.mod
}
def arithmeticEval(s):
"""
A safe eval supporting basic arithmetic operations.
:param s: expression to evaluate
:return: value
"""
node = ast.parse(s, mode='eval')
def _eval(node):
if isinstance(node, ast.Expression):
return _eval(node.body)
elif isinstance(node, ast.Str):
return node.s
elif isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.BinOp):
return _binOps[type(node.op)](_eval(node.left), _eval(node.right))
else:
raise Exception('Unsupported type {}'.format(node))
return _eval(node.body)
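# Editor's note: hedged usage sketch, not part of the original module.
def _example_arithmetic_eval():
    # Only numbers, strings and the operators listed in _binOps are evaluated,
    # so ordinary arithmetic works while arbitrary expressions raise.
    return arithmeticEval('2 + 3 * 4')  # expected: 14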
def create_https_certificates(ssl_cert, ssl_key):
"""
Create self-signed HTTPS certificates and store them at the paths 'ssl_cert' and 'ssl_key'
:param ssl_cert: Path of SSL certificate file to write
:param ssl_key: Path of SSL keyfile to write
:return: True on success, False on failure
"""
# assert isinstance(ssl_key, unicode)
# assert isinstance(ssl_cert, unicode)
try:
from OpenSSL import crypto # @UnresolvedImport
from certgen import createKeyPair, createCertRequest, createCertificate, TYPE_RSA, \
serial # @UnresolvedImport
except Exception:
logger.log(u"pyopenssl module missing, please install for https access", logger.WARNING)
return False
# Create the CA Certificate
cakey = createKeyPair(TYPE_RSA, 1024)
careq = createCertRequest(cakey, CN='Certificate Authority')
cacert = createCertificate(careq, (careq, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years
cname = 'SickRage'
pkey = createKeyPair(TYPE_RSA, 1024)
req = createCertRequest(pkey, CN=cname)
cert = createCertificate(req, (cacert, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years
# Save the key and certificate to disk
try:
# pylint: disable=no-member
# Module has no member
io.open(ssl_key, 'wb').write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
io.open(ssl_cert, 'wb').write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
except Exception:
logger.log(u"Error creating SSL key and certificate", logger.ERROR)
return False
return True
def backupVersionedFile(old_file, version):
"""
Back up an old version of a file
:param old_file: Original file, to take a backup from
:param version: Version of file to store in backup
:return: True if success, False if failure
"""
numTries = 0
new_file = old_file + '.' + 'v' + str(version)
while not ek(os.path.isfile, new_file):
if not ek(os.path.isfile, old_file):
logger.log(u"Not creating backup, %s doesn't exist" % old_file, logger.DEBUG)
break
try:
logger.log(u"Trying to back up %s to %s" % (old_file, new_file), logger.DEBUG)
shutil.copy(old_file, new_file)
logger.log(u"Backup done", logger.DEBUG)
break
except Exception as e:
logger.log(u"Error while trying to back up %s to %s : %r" % (old_file, new_file, ex(e)), logger.WARNING)
numTries += 1
time.sleep(1)
logger.log(u"Trying again.", logger.DEBUG)
if numTries >= 10:
logger.log(u"Unable to back up %s to %s please do it manually." % (old_file, new_file), logger.ERROR)
return False
return True
def restoreVersionedFile(backup_file, version):
"""
Restore a file version to original state
:param backup_file: File to restore
:param version: Version of file to restore
:return: True on success, False on failure
"""
numTries = 0
new_file, _ = ek(os.path.splitext, backup_file)
restore_file = new_file + '.' + 'v' + str(version)
if not ek(os.path.isfile, new_file):
logger.log(u"Not restoring, %s doesn't exist" % new_file, logger.DEBUG)
return False
try:
logger.log(u"Trying to backup %s to %s.r%s before restoring backup"
% (new_file, new_file, version), logger.DEBUG)
shutil.move(new_file, new_file + '.' + 'r' + str(version))
except Exception as e:
logger.log(u"Error while trying to backup DB file %s before proceeding with restore: %r"
% (restore_file, ex(e)), logger.WARNING)
return False
while not ek(os.path.isfile, new_file):
if not ek(os.path.isfile, restore_file):
logger.log(u"Not restoring, %s doesn't exist" % restore_file, logger.DEBUG)
break
try:
logger.log(u"Trying to restore file %s to %s" % (restore_file, new_file), logger.DEBUG)
shutil.copy(restore_file, new_file)
logger.log(u"Restore done", logger.DEBUG)
break
except Exception as e:
logger.log(u"Error while trying to restore file %s. Error: %r" % (restore_file, ex(e)), logger.WARNING)
numTries += 1
time.sleep(1)
logger.log(u"Trying again. Attempt #: %s" % numTries, logger.DEBUG)
if numTries >= 10:
logger.log(u"Unable to restore file %s to %s" % (restore_file, new_file), logger.WARNING)
return False
return True
# generates a md5 hash of a file
def md5_for_file(filename, block_size=2 ** 16):
"""
Generate an md5 hash for a file
:param filename: File to generate md5 hash for
:param block_size: Block size to use (defaults to 2^16)
:return MD5 hexdigest on success, or None on failure
"""
# assert isinstance(filename, unicode)
try:
with io.open(filename, 'rb') as f:
md5 = hashlib.md5()
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
f.close()
return md5.hexdigest()
except Exception:
return None
def get_lan_ip():
"""Returns IP of system"""
try:
return [ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][0]
except Exception:
return socket.gethostname()
def check_url(url):
"""
Check if a URL exists without downloading the whole file.
We only check the URL header.
"""
# see also http://stackoverflow.com/questions/2924422
# http://stackoverflow.com/questions/1140661
good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]
host, path = urlparse.urlparse(url)[1:3] # elems [1] and [2]
try:
conn = httplib.HTTPConnection(host)
conn.request('HEAD', path)
return conn.getresponse().status in good_codes
except StandardError:
return None
def anon_url(*url):
"""
Return a URL string consisting of the Anonymous redirect URL and an arbitrary number of values appended.
"""
return '' if None in url else '%s%s' % (sickbeard.ANON_REDIRECT, ''.join(str(s) for s in url))
"""
Encryption
==========
By Pedro Jose Pereira Vieito <pvieito@gmail.com> (@pvieito)
* If encryption_version==0 then return data without encryption
* The keys should be unique for each device
To add a new encryption_version:
1) Code your new encryption_version
2) Update the last encryption_version available in webserve.py
3) Remember to maintain old encryption versions and key generators for backward compatibility
"""
# Key Generators
unique_key1 = hex(uuid.getnode() ** 2) # Used in encryption v1
# Encryption Functions
def encrypt(data, encryption_version=0, _decrypt=False):
# Version 1: Simple XOR encryption (this is not very secure, but works)
if encryption_version == 1:
if _decrypt:
return ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(base64.decodestring(data), cycle(unique_key1)))
else:
return base64.encodestring(
''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(data, cycle(unique_key1)))).strip()
# Version 2: Simple XOR encryption (this is not very secure, but works)
elif encryption_version == 2:
if _decrypt:
return ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(base64.decodestring(data), cycle(sickbeard.ENCRYPTION_SECRET)))
else:
return base64.encodestring(
''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(data, cycle(sickbeard.ENCRYPTION_SECRET)))).strip()
# Version 0: Plain text
else:
return data
def decrypt(data, encryption_version=0):
return encrypt(data, encryption_version, _decrypt=True)
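# Editor's note: hedged round-trip sketch, not part of the original module;
# the secret below is a made-up value.
def _example_encrypt_roundtrip():
    # Version 1 XORs the bytes with a key derived from the MAC address, so
    # decrypting with the same version on the same machine restores the input.
    secret = 'hunter2'
    return decrypt(encrypt(secret, 1), 1) == secret  # expected: True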
def full_sanitizeSceneName(name):
return re.sub('[. -]', ' ', sanitizeSceneName(name)).lower().lstrip()
def _check_against_names(nameInQuestion, show, season=-1):
showNames = []
if season in [-1, 1]:
showNames = [show.name]
showNames.extend(sickbeard.scene_exceptions.get_scene_exceptions(show.indexerid, season=season))
for showName in showNames:
nameFromList = full_sanitizeSceneName(showName)
if nameFromList == nameInQuestion:
return True
return False
def get_show(name, tryIndexers=False):
if not sickbeard.showList:
return
showObj = None
fromCache = False
if not name:
return showObj
try:
# check cache for show
cache = sickbeard.name_cache.retrieveNameFromCache(name)
if cache:
fromCache = True
showObj = Show.find(sickbeard.showList, int(cache))
# try indexers
if not showObj and tryIndexers:
showObj = Show.find(
sickbeard.showList, searchIndexerForShowID(full_sanitizeSceneName(name), ui=classes.ShowListUI)[2])
# try scene exceptions
if not showObj:
ShowID = sickbeard.scene_exceptions.get_scene_exception_by_name(name)[0]
if ShowID:
showObj = Show.find(sickbeard.showList, int(ShowID))
# add show to cache
if showObj and not fromCache:
sickbeard.name_cache.addNameToCache(name, showObj.indexerid)
except Exception as e:
logger.log(u"Error when attempting to find show: %s in SickRage. Error: %r " % (name, repr(e)), logger.DEBUG)
return showObj
def is_hidden_folder(folder):
"""
Returns True if folder is hidden.
On Linux based systems hidden folders start with . (dot)
:param folder: Full path of folder to check
"""
def is_hidden(filepath):
name = ek(os.path.basename, ek(os.path.abspath, filepath))
return name.startswith('.') or has_hidden_attribute(filepath)
def has_hidden_attribute(filepath):
try:
attrs = ctypes.windll.kernel32.GetFileAttributesW(unicode(filepath))
assert attrs != -1
result = bool(attrs & 2)
except (AttributeError, AssertionError):
result = False
return result
if ek(os.path.isdir, folder):
if is_hidden(folder):
return True
return False
def real_path(path):
"""
Returns: the canonicalized absolute pathname. The resulting path will have no symbolic link, '/./' or '/../' components.
"""
return ek(os.path.normpath, ek(os.path.normcase, ek(os.path.realpath, path)))
def validateShow(show, season=None, episode=None):
indexer_lang = show.lang
try:
lINDEXER_API_PARMS = sickbeard.indexerApi(show.indexer).api_params.copy()
if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
lINDEXER_API_PARMS['language'] = indexer_lang
if show.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
t = sickbeard.indexerApi(show.indexer).indexer(**lINDEXER_API_PARMS)
if season is None and episode is None:
return t
return t[show.indexerid][season][episode]
except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
pass
def set_up_anidb_connection():
"""Connect to anidb"""
if not sickbeard.USE_ANIDB:
logger.log(u"Usage of anidb disabled. Skiping", logger.DEBUG)
return False
if not sickbeard.ANIDB_USERNAME and not sickbeard.ANIDB_PASSWORD:
logger.log(u"anidb username and/or password are not set. Aborting anidb lookup.", logger.DEBUG)
return False
if not sickbeard.ADBA_CONNECTION:
def anidb_logger(msg):
return logger.log(u"anidb: %s " % msg, logger.DEBUG)
try:
sickbeard.ADBA_CONNECTION = adba.Connection(keepAlive=True, log=anidb_logger)
except Exception as e:
logger.log(u"anidb exception msg: %r " % repr(e), logger.WARNING)
return False
try:
if not sickbeard.ADBA_CONNECTION.authed():
sickbeard.ADBA_CONNECTION.auth(sickbeard.ANIDB_USERNAME, sickbeard.ANIDB_PASSWORD)
else:
return True
except Exception as e:
logger.log(u"anidb exception msg: %r " % repr(e), logger.WARNING)
return False
return sickbeard.ADBA_CONNECTION.authed()
def makeZip(fileList, archive):
"""
Create a ZIP of files
:param fileList: A list of file names - full path each name
:param archive: File name for the archive with a full path
"""
try:
a = zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED, allowZip64=True)
for f in fileList:
a.write(f)
a.close()
return True
except Exception as e:
logger.log(u"Zip creation error: %r " % repr(e), logger.ERROR)
return False
def extractZip(archive, targetDir):
"""
Unzip a file to a directory
:param fileList: A list of file names - full path each name
:param archive: The file name for the archive with a full path
"""
try:
if not ek(os.path.exists, targetDir):
ek(os.mkdir, targetDir)
zip_file = zipfile.ZipFile(archive, 'r', allowZip64=True)
for member in zip_file.namelist():
filename = ek(os.path.basename, member)
# skip directories
if not filename:
continue
# copy file (taken from zipfile's extract)
source = zip_file.open(member)
target = file(ek(os.path.join, targetDir, filename), "wb")
shutil.copyfileobj(source, target)
source.close()
target.close()
zip_file.close()
return True
except Exception as e:
logger.log(u"Zip extraction error: %r " % repr(e), logger.ERROR)
return False
def backupConfigZip(fileList, archive, arcname=None):
"""
Store the config file as a ZIP
:param fileList: List of files to store
:param archive: ZIP file name
:param arcname: Archive path
:return: True on success, False on failure
"""
try:
a = zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED, allowZip64=True)
for f in fileList:
a.write(f, ek(os.path.relpath, f, arcname))
a.close()
return True
except Exception as e:
logger.log(u"Zip creation error: %r " % repr(e), logger.ERROR)
return False
def restoreConfigZip(archive, targetDir):
"""
Restores a Config ZIP file back in place
:param archive: ZIP filename
:param targetDir: Directory to restore to
:return: True on success, False on failure
"""
try:
if not ek(os.path.exists, targetDir):
ek(os.mkdir, targetDir)
else:
def path_leaf(path):
head, tail = ek(os.path.split, path)
return tail or ek(os.path.basename, head)
bakFilename = '{0}-{1}'.format(path_leaf(targetDir), datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
shutil.move(targetDir, ek(os.path.join, ek(os.path.dirname, targetDir), bakFilename))
zip_file = zipfile.ZipFile(archive, 'r', allowZip64=True)
for member in zip_file.namelist():
zip_file.extract(member, targetDir)
zip_file.close()
return True
except Exception as e:
logger.log(u"Zip extraction error: %r" % ex(e), logger.ERROR)
shutil.rmtree(targetDir)
return False
def mapIndexersToShow(showObj):
mapped = {}
# init mapped indexers object
for indexer in sickbeard.indexerApi().indexers:
mapped[indexer] = showObj.indexerid if int(indexer) == int(showObj.indexer) else 0
myDB = db.DBConnection()
sqlResults = myDB.select(
"SELECT * FROM indexer_mapping WHERE indexer_id = ? AND indexer = ?",
[showObj.indexerid, showObj.indexer])
# for each mapped entry
for curResult in sqlResults:
nlist = [i for i in curResult if i is not None]
# Check if it's mapped with both tvdb and tvrage.
if len(nlist) >= 4:
logger.log(u"Found indexer mapping in cache for show: " + showObj.name, logger.DEBUG)
mapped[int(curResult['mindexer'])] = int(curResult['mindexer_id'])
break
else:
sql_l = []
for indexer in sickbeard.indexerApi().indexers:
if indexer == showObj.indexer:
mapped[indexer] = showObj.indexerid
continue
lINDEXER_API_PARMS = sickbeard.indexerApi(indexer).api_params.copy()
lINDEXER_API_PARMS['custom_ui'] = classes.ShowListUI
t = sickbeard.indexerApi(indexer).indexer(**lINDEXER_API_PARMS)
try:
mapped_show = t[showObj.name]
except Exception:
logger.log(u"Unable to map " + sickbeard.indexerApi(showObj.indexer).name + "->" + sickbeard.indexerApi(
indexer).name + " for show: " + showObj.name + ", skipping it", logger.DEBUG)
continue
if mapped_show and len(mapped_show) == 1:
logger.log(u"Mapping " + sickbeard.indexerApi(showObj.indexer).name + "->" + sickbeard.indexerApi(
indexer).name + " for show: " + showObj.name, logger.DEBUG)
mapped[indexer] = int(mapped_show[0]['id'])
logger.log(u"Adding indexer mapping to DB for show: " + showObj.name, logger.DEBUG)
sql_l.append([
"INSERT OR IGNORE INTO indexer_mapping (indexer_id, indexer, mindexer_id, mindexer) VALUES (?,?,?,?)",
[showObj.indexerid, showObj.indexer, int(mapped_show[0]['id']), indexer]])
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
return mapped
def touchFile(fname, atime=None):
"""
Touch a file (change modification date)
:param fname: Filename to touch
:param atime: Specific access time (defaults to None)
:return: True on success, False on failure
"""
if atime is not None:
try:
with open(fname, 'a'):
os.utime(fname, (atime, atime))
return True
except Exception as e:
if e.errno == errno.ENOSYS:
logger.log(u"File air date stamping not available on your OS. Please disable setting", logger.DEBUG)
elif e.errno == errno.EACCES:
logger.log(u"File air date stamping failed(Permission denied). Check permissions for file: %s" % fname, logger.ERROR)
else:
logger.log(u"File air date stamping failed. The error is: %r" % ex(e), logger.ERROR)
return False
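# Illustrative usage sketch (hypothetical path and timestamp): stamp a file with an
# episode air date expressed as seconds since the epoch.
#   airdate_stamp = 1451606400  # 2016-01-01 00:00:00 UTC
#   touchFile('/tmp/Show.S01E01.mkv', airdate_stamp)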
def _getTempDir():
"""
Returns the [system temp dir]/sickrage-u501 (or
sickrage-myuser)
"""
import getpass
if hasattr(os, 'getuid'):
uid = "u%d" % (os.getuid())
else:
# For Windows
try:
uid = getpass.getuser()
except ImportError:
return ek(os.path.join, tempfile.gettempdir(), "sickrage")
return ek(os.path.join, tempfile.gettempdir(), "sickrage-%s" % uid)
def _setUpSession(session, headers):
"""
Returns a session initialized with default cache and parameter settings
:param session: session object to (re)use
:param headers: Headers to pass to session
:return: session object
"""
# request session
# Let's try without caching sessions to disk for a while
# cache_dir = sickbeard.CACHE_DIR or _getTempDir()
# session = CacheControl(sess=session, cache=caches.FileCache(ek(os.path.join, cache_dir, 'sessions'), use_dir_lock=True), cache_etags=False)
# request session clear residual referer
# pylint: disable=superfluous-parens
# These extra parens are necessary!
if 'Referer' in session.headers and 'Referer' not in (headers or {}):
session.headers.pop('Referer')
# request session headers
session.headers.update({'User-Agent': USER_AGENT, 'Accept-Encoding': 'gzip,deflate'})
if headers:
session.headers.update(headers)
# request session ssl verify
session.verify = certifi.old_where() if sickbeard.SSL_VERIFY else False
# request session proxies
if 'Referer' not in session.headers and sickbeard.PROXY_SETTING:
logger.log(u"Using global proxy: " + sickbeard.PROXY_SETTING, logger.DEBUG)
scheme, address = urllib2.splittype(sickbeard.PROXY_SETTING)
address = sickbeard.PROXY_SETTING if scheme else 'http://' + sickbeard.PROXY_SETTING
session.proxies = {
"http": address,
"https": address,
}
session.headers.update({'Referer': address})
if 'Content-Type' in session.headers:
session.headers.pop('Content-Type')
return session
def getURL(url, post_data=None, params=None, headers=None, timeout=30, session=None, json=False, need_bytes=False):
"""
Returns the response text from the URL provider (bytes when need_bytes is True, or the parsed JSON body when json is True), or None on any error.
"""
session = _setUpSession(session, headers)
if params and isinstance(params, (list, dict)):
for param in params:
if isinstance(params[param], unicode):
params[param] = params[param].encode('utf-8')
session.params = params
try:
# decide if we get or post data to server
if post_data:
if isinstance(post_data, (list, dict)):
for param in post_data:
if isinstance(post_data[param], unicode):
post_data[param] = post_data[param].encode('utf-8')
session.headers.update({'Content-Type': 'application/x-www-form-urlencoded'})
resp = session.post(url, data=post_data, timeout=timeout, allow_redirects=True, verify=session.verify)
else:
resp = session.get(url, timeout=timeout, allow_redirects=True, verify=session.verify)
if not resp.ok:
logger.log(u"Requested getURL %s returned status code is %s: %s"
% (url, resp.status_code, http_code_description(resp.status_code)), logger.DEBUG)
return None
except (SocketTimeout, TypeError) as e:
logger.log(u"Connection timed out (sockets) accessing getURL %s Error: %r" % (url, ex(e)), logger.WARNING)
return None
except requests.exceptions.HTTPError as e:
logger.log(u"HTTP error in getURL %s Error: %r" % (url, ex(e)), logger.WARNING)
return None
except requests.exceptions.ConnectionError as e:
logger.log(u"Connection error to getURL %s Error: %r" % (url, ex(e)), logger.WARNING)
return None
except requests.exceptions.Timeout as e:
logger.log(u"Connection timed out accessing getURL %s Error: %r" % (url, ex(e)), logger.WARNING)
return None
except requests.exceptions.ContentDecodingError:
logger.log(u"Content-Encoding was gzip, but content was not compressed. getURL: %s" % url, logger.DEBUG)
logger.log(traceback.format_exc(), logger.DEBUG)
return None
except Exception as e:
logger.log(u"Unknown exception in getURL %s Error: %r" % (url, ex(e)), logger.WARNING)
logger.log(traceback.format_exc(), logger.WARNING)
return None
return (resp.text, resp.content)[need_bytes] if not json else resp.json()
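# Illustrative usage sketch (hypothetical URL): fetch a JSON document, falling back to None
# on any connection problem. A session is passed explicitly, as the other callers in this
# module do.
#   result = getURL('http://example.com/api/shows', session=requests.Session(), json=True)
#   if result is None:
#       logger.log(u"Lookup failed", logger.WARNING)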
def download_file(url, filename, session=None, headers=None):
"""
Downloads a file from the specified URL
:param url: Source URL
:param filename: Target file on filesystem
:param session: request session to use
:param headers: override existing headers in request session
:return: True on success, False on failure
"""
session = _setUpSession(session, headers)
session.stream = True
try:
with closing(session.get(url, allow_redirects=True, verify=session.verify)) as resp:
if not resp.ok:
logger.log(u"Requested download url %s returned status code is %s: %s"
% (url, resp.status_code, http_code_description(resp.status_code)), logger.DEBUG)
return False
try:
with io.open(filename, 'wb') as fp:
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
fp.write(chunk)
fp.flush()
chmodAsParent(filename)
except Exception:
logger.log(u"Problem setting permissions or writing file to: %s" % filename, logger.WARNING)
except (SocketTimeout, TypeError) as e:
remove_file_failed(filename)
logger.log(u"Connection timed out (sockets) while loading download URL %s Error: %r" % (url, ex(e)), logger.WARNING)
return False
except requests.exceptions.HTTPError as e:
remove_file_failed(filename)
logger.log(u"HTTP error %r while loading download URL %s " % (ex(e), url), logger.WARNING)
return False
except requests.exceptions.ConnectionError as e:
remove_file_failed(filename)
logger.log(u"Connection error %r while loading download URL %s " % (ex(e), url), logger.WARNING)
return False
except requests.exceptions.Timeout as e:
remove_file_failed(filename)
logger.log(u"Connection timed out %r while loading download URL %s " % (ex(e), url), logger.WARNING)
return False
except EnvironmentError as e:
remove_file_failed(filename)
logger.log(u"Unable to save the file: %r " % ex(e), logger.WARNING)
return False
except Exception:
remove_file_failed(filename)
logger.log(u"Unknown exception while loading download URL %s : %r" % (url, traceback.format_exc()), logger.WARNING)
return False
return True
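# Illustrative usage sketch (hypothetical URL and path), passing a session explicitly:
#   if download_file('http://example.com/banner.jpg', '/tmp/banner.jpg', session=requests.Session()):
#       logger.log(u"Banner saved to /tmp/banner.jpg", logger.DEBUG)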
def get_size(start_path='.'):
"""
Find the total dir and filesize of a path
:param start_path: Path to recursively count size
:return: total filesize
"""
if not ek(os.path.isdir, start_path):
return -1
total_size = 0
for dirpath, _, filenames in ek(os.walk, start_path):
for f in filenames:
fp = ek(os.path.join, dirpath, f)
try:
total_size += ek(os.path.getsize, fp)
except OSError as e:
logger.log(u"Unable to get size for file %s Error: %r" % (fp, ex(e)), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
return total_size
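# Illustrative usage sketch (hypothetical path): get_size returns -1 when the path is not a
# directory, otherwise the recursive size in bytes.
#   total = get_size('/tmp/downloads')
#   if total >= 0:
#       logger.log(u"Downloads use %s" % pretty_file_size(total), logger.DEBUG)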
def generateApiKey():
""" Return a new randomized API_KEY"""
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Create some values to seed md5
t = str(time.time())
r = str(random.random())
# Create the md5 instance and give it the current time
m = md5(t)
# Update the md5 instance with the random variable
m.update(r)
# Return a hex digest of the md5, eg 49f68a5c8493ec2c0bf489821c21fc3b
logger.log(u"New API generated")
return m.hexdigest()
def remove_article(text=''):
"""Remove the english articles from a text string"""
return re.sub(r'(?i)^(?:(?:A(?!\s+to)n?)|The)\s(\w)', r'\1', text)
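# Illustrative examples of the article-stripping regex above; the lookahead keeps titles
# such as "A to Z" intact:
#   remove_article("The Walking Dead")  # -> "Walking Dead"
#   remove_article("An Idiot Abroad")   # -> "Idiot Abroad"
#   remove_article("A to Z")            # -> "A to Z" (unchanged)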
def generateCookieSecret():
"""Generate a new cookie secret"""
return base64.b64encode(uuid.uuid4().bytes + uuid.uuid4().bytes)
def verify_freespace(src, dest, oldfile=None):
"""
Checks if the target system has enough free space to copy or move a file.
:param src: Source filename
:param dest: Destination path
:param oldfile: File to be replaced (defaults to None)
:return: True if there is enough space for the file, False if there isn't. Also returns True if the OS doesn't support this option
"""
if not isinstance(oldfile, list):
oldfile = [oldfile]
logger.log(u"Trying to determine free space on destination drive", logger.DEBUG)
if hasattr(os, 'statvfs'): # POSIX
def disk_usage(path):
st = ek(os.statvfs, path)
free = st.f_bavail * st.f_frsize
return free
elif os.name == 'nt': # Windows
import sys
def disk_usage(path):
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), ctypes.c_ulonglong()
if sys.version_info >= (3,) or isinstance(path, unicode):
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW
else:
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA
ret = fun(path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
logger.log(u"Unable to determine free space, something went wrong", logger.WARNING)
raise ctypes.WinError()
return free.value
else:
logger.log(u"Unable to determine free space on your OS")
return True
if not ek(os.path.isfile, src):
logger.log(u"A path to a file is required for the source. " + src + " is not a file.", logger.WARNING)
return True
try:
diskfree = disk_usage(dest)
except Exception:
logger.log(u"Unable to determine free space, so I will assume there is enough.", logger.WARNING)
return True
neededspace = ek(os.path.getsize, src)
if oldfile:
for f in oldfile:
if ek(os.path.isfile, f.location):
diskfree += ek(os.path.getsize, f.location)
if diskfree > neededspace:
return True
else:
logger.log(u"Not enough free space: Needed: %s bytes ( %s ), found: %s bytes ( %s )"
% (neededspace, pretty_file_size(neededspace), diskfree, pretty_file_size(diskfree)), logger.WARNING)
return False
# https://gist.github.com/thatalextaylor/7408395
def pretty_time_delta(seconds):
sign_string = '-' if seconds < 0 else ''
seconds = abs(int(seconds))
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
time_delta = sign_string
if days > 0:
time_delta += ' %dd' % days
if hours > 0:
time_delta += ' %dh' % hours
if minutes > 0:
time_delta += ' %dm' % minutes
if seconds > 0:
time_delta += ' %ds' % seconds
return time_delta
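# Illustrative examples of the formatting: 93784 seconds is 1 day, 2 hours, 3 minutes and
# 4 seconds, so
#   pretty_time_delta(93784)   # -> ' 1d 2h 3m 4s' (leading space left by the empty sign string)
#   pretty_time_delta(-45)     # -> '- 45s'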
def isFileLocked(checkfile, writeLockCheck=False):
"""
Checks to see if a file is locked. Performs three checks
1. Checks if the file even exists
2. Attempts to open the file for reading. This will determine if the file has a write lock.
Write locks occur when the file is being edited or copied to, e.g. a file copy destination
3. If the writeLockCheck parameter is True, attempts to rename the file. If this fails the
file is open by some other process for reading. The file can be read, but not written to
or deleted.
:param checkfile: the file being checked
:param writeLockCheck: when true will check if the file is locked for writing (prevents move operations)
"""
checkfile = ek(os.path.abspath, checkfile)
if not ek(os.path.exists, checkfile):
return True
try:
f = io.open(checkfile, 'rb')
f.close()
except IOError:
return True
if writeLockCheck:
lockFile = checkfile + ".lckchk"
if ek(os.path.exists, lockFile):
ek(os.remove, lockFile)
try:
ek(os.rename, checkfile, lockFile)
time.sleep(1)
ek(os.rename, lockFile, checkfile)
except (OSError, IOError):
return True
return False
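# Illustrative usage sketch (hypothetical path): check for a write lock before attempting
# to move a freshly downloaded file.
#   if not isFileLocked('/tmp/incoming/episode.mkv', writeLockCheck=True):
#       logger.log(u"File is free to move", logger.DEBUG)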
def getDiskSpaceUsage(diskPath=None):
"""
Returns the free space for a given path in a human readable format, or False if no valid path is given
:param diskPath: the filesystem path being checked
"""
if diskPath and ek(os.path.exists, diskPath):
if platform.system() == 'Windows':
free_bytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(diskPath), None, None, ctypes.pointer(free_bytes))
return pretty_file_size(free_bytes.value)
else:
st = ek(os.statvfs, diskPath)
return pretty_file_size(st.f_bavail * st.f_frsize)
else:
return False
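# Illustrative usage sketch (hypothetical path): returns a human readable string produced by
# pretty_file_size (exact formatting depends on that helper), or False when the path does not exist.
#   free = getDiskSpaceUsage('/home/sickrage/downloads')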
def getTVDBFromID(indexer_id, indexer):
session = requests.Session()
tvdb_id = ''
if indexer == 'IMDB':
url = "http://www.thetvdb.com/api/GetSeriesByRemoteID.php?imdbid=%s" % indexer_id
data = getURL(url, session=session, need_bytes=True)
if data is None:
return tvdb_id
try:
tree = ET.fromstring(data)
for show in tree.getiterator("Series"):
tvdb_id = show.findtext("seriesid")
except SyntaxError:
pass
return tvdb_id
elif indexer == 'ZAP2IT':
url = "http://www.thetvdb.com/api/GetSeriesByRemoteID.php?zap2it=%s" % indexer_id
data = getURL(url, session=session, need_bytes=True)
if data is None:
return tvdb_id
try:
tree = ET.fromstring(data)
for show in tree.getiterator("Series"):
tvdb_id = show.findtext("seriesid")
except SyntaxError:
pass
return tvdb_id
elif indexer == 'TVMAZE':
url = "http://api.tvmaze.com/shows/%s" % indexer_id
data = getURL(url, session=session, json=True)
if data is None:
return tvdb_id
tvdb_id = data['externals']['thetvdb']
return tvdb_id
else:
return tvdb_id
def is_ip_private(ip):
priv_lo = re.compile(r"^127\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
priv_24 = re.compile(r"^10\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
priv_20 = re.compile(r"^192\.168\.\d{1,3}\.\d{1,3}$")
priv_16 = re.compile(r"^172\.(1[6-9]|2[0-9]|3[0-1])\.[0-9]{1,3}\.[0-9]{1,3}$")
return priv_lo.match(ip) or priv_24.match(ip) or priv_20.match(ip) or priv_16.match(ip)
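# Illustrative examples: is_ip_private returns a truthy match object for loopback and
# RFC 1918 addresses, and None otherwise.
#   bool(is_ip_private('192.168.0.10'))  # True
#   bool(is_ip_private('8.8.8.8'))       # False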
|
jbedorf/tensorflow
|
refs/heads/master
|
tensorflow/python/autograph/converters/lists.py
|
30
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter for list operations.
This includes converting Python lists to TensorArray/TensorList.
"""
# TODO(mdan): Elaborate the logic here.
# TODO(mdan): Does it even make sense to attempt to try to use TAs?
# The current rule (always convert to TensorArray) is naive and insufficient.
# In general, a better mechanism could look like:
# * convert to TensorList by default
# * leave as Python list if the user explicitly forbids it
# * convert to TensorArray only when complete write once behavior can be
# guaranteed (e.g. list comprehensions)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.lang import directives
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
# Tags for local state.
POP_USES = 'pop_uses'
class ListTransformer(converter.Base):
"""Converts lists and related operations to their TF counterpart."""
def visit_List(self, node):
node = self.generic_visit(node)
template = """
ag__.new_list(elements)
"""
return templates.replace_as_expression(template, elements=node)
def _replace_append_call(self, node):
assert len(node.args) == 1
assert isinstance(node.func, gast.Attribute)
template = """
target = ag__.list_append(target, element)
"""
return templates.replace(
template,
target=node.func.value,
element=node.args[0])
def _replace_pop_call(self, node):
# Expressions that use pop() are converted to a statement + expression.
#
# For example:
#
# print(target.pop())
#
# ... is converted to:
#
# target, target_pop = ag__.list_pop(target)
# print(target_pop)
#
# Here, we just generate the variable name and swap it in,
# and _generate_pop_operation will handle the rest.
#
# Multiple uses of pop() are allowed:
#
# print(target.pop(), target.pop())
# print(target.pop().pop())
#
assert isinstance(node.func, gast.Attribute)
scope = anno.getanno(node, NodeAnno.ARGS_SCOPE)
target_node = node.func.value
# Attempt to use a related name if one exists. Otherwise use something
# generic.
if anno.hasanno(target_node, anno.Basic.QN):
target_name = anno.getanno(target_node, anno.Basic.QN).ssf()
else:
target_name = 'list_'
pop_var_name = self.ctx.namer.new_symbol(target_name, scope.referenced)
pop_uses = self.get_local(POP_USES, [])
pop_uses.append((node, pop_var_name))
self.set_local(POP_USES, pop_uses)
return templates.replace_as_expression('var_name', var_name=pop_var_name)
def _replace_stack_call(self, node):
assert len(node.args) == 1
dtype = self.get_definition_directive(
node.args[0],
directives.set_element_type,
'dtype',
default=templates.replace_as_expression('None'))
template = """
ag__.list_stack(
target,
opts=ag__.ListStackOpts(
element_dtype=dtype,
original_call=orig_call))
"""
return templates.replace_as_expression(
template,
dtype=dtype,
target=node.args[0],
orig_call=node.func)
def visit_Call(self, node):
node = self.generic_visit(node)
# TODO(mdan): This is insufficient if target is a function argument.
# In the case of function arguments, we need to add the list to the
# function's return value, because it is being modified.
# TODO(mdan): Checking just the name is brittle, can it be improved?
if isinstance(node.func, gast.Attribute):
func_name = node.func.attr
if func_name == 'append' and (len(node.args) == 1):
node = self._replace_append_call(node)
elif func_name == 'pop' and (len(node.args) <= 1):
node = self._replace_pop_call(node)
elif (func_name == 'stack' and (len(node.args) == 1) and
(not node.keywords or node.keywords[0].arg == 'strict')):
# This avoids false positives with keyword args.
# TODO(mdan): handle kwargs properly.
node = self._replace_stack_call(node)
return node
def _generate_pop_operation(self, original_call_node, pop_var_name):
assert isinstance(original_call_node.func, gast.Attribute)
if original_call_node.args:
pop_element = original_call_node.args[0]
else:
pop_element = parser.parse_expression('None')
# The call will be something like "target.pop()", and the dtype is hooked to
# target, hence the func.value.
# TODO(mdan): For lists of lists, this won't work.
# The reason why it won't work is because it's unclear how to annotate
# the list as a "list of lists with a certain element type" when using
# operations like `l.pop().pop()`.
dtype = self.get_definition_directive(
original_call_node.func.value,
directives.set_element_type,
'dtype',
default=templates.replace_as_expression('None'))
shape = self.get_definition_directive(
original_call_node.func.value,
directives.set_element_type,
'shape',
default=templates.replace_as_expression('None'))
template = """
target, pop_var_name = ag__.list_pop(
target, element,
opts=ag__.ListPopOpts(element_dtype=dtype, element_shape=shape))
"""
return templates.replace(
template,
target=original_call_node.func.value,
pop_var_name=pop_var_name,
element=pop_element,
dtype=dtype,
shape=shape)
def _postprocess_statement(self, node):
"""Inserts any separate pop() calls that node may use."""
pop_uses = self.get_local(POP_USES, None)
if pop_uses:
replacements = []
for original_call_node, pop_var_name in pop_uses:
replacements.extend(
self._generate_pop_operation(original_call_node, pop_var_name))
replacements.append(node)
node = replacements
self.exit_local_scope()
return node, None
# TODO(mdan): Should we have a generic visit_block instead?
# Right now it feels that a visit_block would add too much magic that's
# hard to follow.
def _visit_and_process_block(self, block):
return self.visit_block(
block,
before_visit=self.enter_local_scope,
after_visit=self._postprocess_statement)
def visit_FunctionDef(self, node):
node.args = self.generic_visit(node.args)
node.decorator_list = self.visit_block(node.decorator_list)
node.body = self._visit_and_process_block(node.body)
return node
def visit_For(self, node):
node.target = self.visit(node.target)
node.body = self._visit_and_process_block(node.body)
node.orelse = self._visit_and_process_block(node.orelse)
return node
def visit_While(self, node):
node.test = self.visit(node.test)
node.body = self._visit_and_process_block(node.body)
node.orelse = self._visit_and_process_block(node.orelse)
return node
def visit_If(self, node):
node.test = self.visit(node.test)
node.body = self._visit_and_process_block(node.body)
node.orelse = self._visit_and_process_block(node.orelse)
return node
def visit_With(self, node):
node.items = self.visit_block(node.items)
node.body = self._visit_and_process_block(node.body)
return node
def transform(node, ctx):
return ListTransformer(ctx).visit(node)
|
2014c2g2/teamwork
|
refs/heads/master
|
wsgi/programs/c2g15/__init__.py
|
5
|
import cherrypy
# This is the definition of the C2G15 class
class C2G15(object):
# Each group uses index to direct the execution of the handlers that follow
@cherrypy.expose
def index(self, *args, **kwargs):
outstring = '''
This is the c2g15 group's program development page under the 2014C2 collaborative project; below are the W12 task items.<br />
<!-- Relative links are used here rather than absolute URLs (this block is an HTML comment) -->
<a href="fillpoly">c2g15 fillpoly drawing</a><br />
<a href="drawline">c2g15 drawline drawing</a><br />
<a href="drawsquare">c2g15 drawsquare drawing</a><br />
<a href="drawstar">c2g15 drawstar drawing</a><br />
<a href="triangle">c2g15 triangle drawing</a><br />
<a href="triangle2">c2g15 triangle2 drawing</a><br />
'''
return outstring
# Below are the CherryPy handler methods created by the c2g15 group; fillpoly here uses Brython for in-browser drawing
'''
If the following setup is used
import programs.c2g15 as c2g15
root.c2g15 = c2g15.C2G15()
then after the program starts, the handler can be invoked via /c2g15/fillpoly
'''
@cherrypy.expose
def fillpoly(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# Import every method from the math module
from math import *
# Import the time module
import time
# Import doc
from browser import doc
# Prepare the drawing canvas
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# Define the coordinate transform from (0, 0) to (75, 20)
def change_ref_system(x, y):
return (20 + x * 8, 420 - y * 20)
# Define the line drawing function
def draw_line(x1, y1, x2, y2, linethick = 3, color = "black"):
ctx.beginPath()
ctx.lineWidth = linethick
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = color
ctx.stroke()
def fill():
ctx.beginPath()
ctx.moveTo(75,50)
ctx.lineTo(100,75)
ctx.lineTo(100,25)
ctx.fill()
def star():
ctx.beginPath()
ctx.moveTo(0,50)
ctx.lineTo(11,16)
ctx.lineTo(48,16)
ctx.fill()
ctx.fillStyle = "blue"
fill()
star()
x1, y1 = change_ref_system(0, 0)
for 索引 in range(0, 70, 4):
x2, y2 = change_ref_system(索引, 20)
draw_line(x1, y1, x2, y2, linethick=3, color="blue")
x1, y1 = change_ref_system(70, 0)
for 索引 in range(0, 70, 4):
x2, y2 = change_ref_system(索引, 20)
draw_line(x1, y1, x2, y2, linethick=3, color="red")
</script>
</body>
</html>
'''
return outstring
'''
If the following setup is used
import programs.c2g15 as c2g15
root.c2g15 = c2g15.C2G15()
then after the program starts, the handler can be invoked via /c2g15/drawline
'''
@cherrypy.expose
def drawsquare(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# Import doc
from browser import doc
# Prepare the drawing canvas
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# Define the line drawing function
def draw_line(x1, y1, x2, y2, linethick = 3, color = "black"):
ctx.beginPath()
ctx.lineWidth = linethick
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = color
ctx.stroke()
draw_line(0, 0, 0, 100)
draw_line(0, 100, 100, 100)
draw_line(100, 100, 100, 0)
draw_line(100, 0, 0 , 0)
</script>
</body>
</html>
'''
return outstring
@cherrypy.expose
def drawstar(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# Import doc
from browser import doc
# Prepare the drawing canvas
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# Apply a coordinate transform: keep the x axis, flip the y axis and shift it by 800 pixels
ctx.setTransform(1, 0, 0, -1, 0, 800)
# Define the line drawing function
def draw_line(x1, y1, x2, y2, linethick = 3, color = "black"):
ctx.beginPath()
ctx.lineWidth = linethick
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = color
ctx.stroke()
draw_line(300, 300, 358.78, 342.71)
draw_line(358.78, 342.71, 417.56, 300)
draw_line(417.56, 300, 395.11, 369.1)
draw_line(395.11, 369.1, 453.88, 411.8)
draw_line(453.88, 411.8, 381.23, 411.8)
draw_line(381.23, 411.8, 358.78, 480.9)
draw_line(358.78, 480.9, 336.33, 411.8)
draw_line(336.33, 411.8, 263.67, 411.8)
draw_line(263.67, 411.8, 322.45, 369.1)
draw_line(322.45, 369.1, 300, 300)
</script>
</body>
</html>
'''
return outstring
@cherrypy.expose
def triangle(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# Import doc
from browser import doc
# Prepare the drawing canvas
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# Apply a coordinate transform: keep the x axis, flip the y axis and shift it by 800 pixels
ctx.setTransform(1, 0, 0, -1, 0, 800)
# Define the line drawing function
def draw_line(x1, y1, x2, y2, linethick = 3, color = "black"):
ctx.beginPath()
ctx.lineWidth = linethick
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = color
ctx.stroke()
draw_line(100, 100, 150 , 250, linethick = 3, color="blue")
draw_line(150, 250 ,400 , 400, linethick = 3, color="blue")
draw_line(400, 400, 100 , 100, linethick = 3, color="blue" )
</script>
</body>
</html>
'''
return outstring
@cherrypy.expose
def triangle2(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# Import doc
from browser import doc
# Prepare the drawing canvas
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# Apply a coordinate transform: keep the x axis, flip the y axis and shift it by 800 pixels
ctx.setTransform(1, 0, 0, -1, 0, 800)
# Define the line drawing function
def draw_line(x1, y1, x2, y2, linethick = 3, color = "black"):
ctx.beginPath()
ctx.lineWidth = linethick
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = color
ctx.stroke()
def fill():
ctx.beginPath()
ctx.moveTo(100,100)
ctx.lineTo(150,250)
ctx.lineTo(400,400)
ctx.fill()
ctx.fillStyle = "red"
fill()
draw_line(100, 100, 150 , 250, linethick = 3, color="blue")
draw_line(150, 250 ,400 , 400, linethick = 3, color="blue")
draw_line(400, 400, 100 , 100, linethick = 3, color="blue" )
</script>
</body>
</html>
'''
return outstring
|
wdv4758h/ZipPy
|
refs/heads/master
|
lib-python/3/test/crashers/nasty_eq_vs_dict.py
|
63
|
# from http://mail.python.org/pipermail/python-dev/2001-June/015239.html
# if you keep changing a dictionary while looking up a key, you can
# provoke an infinite recursion in C
# At the time neither Tim nor Michael could be bothered to think of a
# way to fix it.
class Yuck:
def __init__(self):
self.i = 0
def make_dangerous(self):
self.i = 1
def __hash__(self):
# direct to slot 4 in table of size 8; slot 12 when size 16
return 4 + 8
def __eq__(self, other):
if self.i == 0:
# leave dict alone
pass
elif self.i == 1:
# fiddle to 16 slots
self.__fill_dict(6)
self.i = 2
else:
# fiddle to 8 slots
self.__fill_dict(4)
self.i = 1
return 1
def __fill_dict(self, n):
self.i = 0
dict.clear()
for i in range(n):
dict[i] = i
dict[self] = "OK!"
y = Yuck()
dict = {y: "OK!"}
z = Yuck()
y.make_dangerous()
print(dict[z])
|
edry/edx-platform
|
refs/heads/master
|
common/djangoapps/student/migrations/0019_create_approved_demographic_fields_fall_2012.py
|
188
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'UserProfile.occupation'
db.delete_column('auth_userprofile', 'occupation')
# Deleting field 'UserProfile.telephone_number'
db.delete_column('auth_userprofile', 'telephone_number')
# Deleting field 'UserProfile.date_of_birth'
db.delete_column('auth_userprofile', 'date_of_birth')
# Deleting field 'UserProfile.country'
db.delete_column('auth_userprofile', 'country')
# Adding field 'UserProfile.year_of_birth'
db.add_column('auth_userprofile', 'year_of_birth',
self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.level_of_education'
db.add_column('auth_userprofile', 'level_of_education',
self.gf('django.db.models.fields.CharField')(db_index=True, max_length=6, null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.goals'
db.add_column('auth_userprofile', 'goals',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding index on 'UserProfile', fields ['gender']
db.create_index('auth_userprofile', ['gender'])
def backwards(self, orm):
# Removing index on 'UserProfile', fields ['gender']
db.delete_index('auth_userprofile', ['gender'])
# Adding field 'UserProfile.occupation'
db.add_column('auth_userprofile', 'occupation',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.telephone_number'
db.add_column('auth_userprofile', 'telephone_number',
self.gf('django.db.models.fields.CharField')(max_length=25, null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.date_of_birth'
db.add_column('auth_userprofile', 'date_of_birth',
self.gf('django.db.models.fields.DateField')(null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.country'
db.add_column('auth_userprofile', 'country',
self.gf('django_countries.fields.CountryField')(max_length=2, null=True, blank=True),
keep_default=False)
# Deleting field 'UserProfile.year_of_birth'
db.delete_column('auth_userprofile', 'year_of_birth')
# Deleting field 'UserProfile.level_of_education'
db.delete_column('auth_userprofile', 'level_of_education')
# Deleting field 'UserProfile.goals'
db.delete_column('auth_userprofile', 'goals')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.courseenrollment': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
|
JioCloud/contrail-controller
|
refs/heads/master
|
src/config/common/vnc_type_conv.py
|
22
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import re
from types import InstanceType
import vnc_api_service
# dictionary representation to XML representation
def dict_to_elem(wrap_tag, obj_dict):
xml_str = "<%s>" % (wrap_tag)
for fname in obj_dict:
if isinstance(obj_dict[fname], dict):
# call recursively with dict
xml_str += dict_to_elem(fname, obj_dict[fname])
else:
xml_str += "<%s>%s</%s>" % (fname, obj_dict[fname], fname)
xml_str += "</%s>" % (wrap_tag)
return xml_str
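# Illustrative example (not part of the original module): dict_to_elem nests dictionaries
# into wrapped XML, with element order following dict iteration order, e.g.
#   dict_to_elem('subnet', {'prefix': '10.1.1.0', 'prefix_len': 24})
#   could yield '<subnet><prefix>10.1.1.0</prefix><prefix_len>24</prefix_len></subnet>'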
# XML representation to dictionary representation
def elem_to_dict(obj_elem):
obj_dict = {}
for field in obj_elem.getchildren():
fname = re.sub("{.*}", "", field.tag)
if not field.text and not field.getchildren():
return {}
if not field.text:
val = elem_to_dict(field)
elif (field.text.isdigit()):
val = int(field.text)
else:
val = unicode(field.text)
obj_dict[fname] = val
return obj_dict
# convert object to dictionary representation
def obj_to_dict(obj):
obj_dict = {}
for fname in obj.__dict__:
if obj.__dict__[fname] is None:
continue
val = obj.__dict__[fname]
if isinstance(val, str):
# unicode it if field is string
val = unicode(val)
elif isinstance(obj.__dict__[fname], InstanceType):
# call recursively if field is object
val = obj_to_dict(val)
obj_dict[unicode(fname)] = val
return obj_dict
def subnet_elem_to_dict(sn_elem):
# TODO replace with autogenerated code
sn_d = {}
for field in sn_elem.getchildren():
fname = re.sub("{.*}", "", field.tag)
if (field.text.isdigit()):
sn_d[fname] = int(field.text)
else:
sn_d[fname] = unicode(field.text)
return sn_d
def subnet_obj_to_dict(sn_obj):
return obj_to_dict(sn_obj)
def subnet_dict_to_obj(sn_dict):
# construct object from dict contents
# will fail if server's obj defn has more fields than client's defn :(
#kwargs = {}
# for fname in sn_dict:
# kwargs[fname] = sn_dict[fname]
#sn_obj = vnc_api_service.subnet_s(**kwargs)
sn_obj = vnc_api_service.subnet_s()
sn_obj.__dict__.update(sn_dict)
return sn_obj
def vn_dict_to_obj(vn_dict):
# construct object from dict contents
# will fail if server's obj defn has more fields than client's defn :(
#kwargs = {}
# for fname in vn_dict:
# kwargs[fname] = vn_dict[fname]
#vn_obj = vnc_api_service.vn_s(**kwargs)
vn_obj = vnc_api_service.vn_s()
vn_obj.__dict__.update(vn_dict)
if vn_obj.vn_subnets:
# construct list of sn objs
vn_obj.vn_subnets = []
for sn_dict in vn_dict['vn_subnets']:
vn_obj.vn_subnets.append(subnet_dict_to_obj(sn_dict))
return vn_obj
def policy_dict_to_obj(policy_dict):
# construct object from dict contents
# will fail if server's obj defn has more fields than client's defn :(
#kwargs = {}
# for fname in vn_dict:
# kwargs[fname] = vn_dict[fname]
#vn_obj = vnc_api_service.vn_s(**kwargs)
policy_obj = vnc_api_service.policy_s()
policy_obj.__dict__.update(policy_dict)
return policy_obj
def sg_rule_elem_to_dict(sg_rule_elem):
return elem_to_dict(sg_rule_elem)
def sg_rule_dict_to_elem(sg_rule_d):
return dict_to_elem("sg_rule", sg_rule_d)
def sg_rule_obj_to_dict(sg_rule_obj):
return obj_to_dict(sg_rule_obj)
def policy_entry_elem_to_dict(policy_entry_elem):
return elem_to_dict(policy_entry_elem)
def policy_entry_dict_to_elem(policy_entry_d):
return dict_to_elem("policy_entry", policy_entry_d)
def policy_entry_obj_to_dict(policy_entry_obj):
return obj_to_dict(policy_entry_obj)
|
Timmenem/micropython
|
refs/heads/master
|
tests/unicode/unicode_pos.py
|
116
|
# str methods with explicit start/end pos
print("Привет".startswith("П"))
print("Привет".startswith("р", 1))
print("абвба".find("а", 1))
print("абвба".find("а", 1, -1))
|
vsol75/suricata
|
refs/heads/master
|
scripts/suricatasc/setup.py
|
18
|
#!/usr/bin/env python
from distutils.core import setup
SURICATASC_VERSION = "0.9"
setup(name='suricatasc',
version=SURICATASC_VERSION,
description='Suricata unix socket client',
author='Eric Leblond',
author_email='eric@regit.org',
url='https://www.suricata-ids.org/',
scripts=['suricatasc'],
packages=['suricatasc'],
package_dir={'suricatasc':'src'},
provides=['suricatasc'],
requires=['argparse','simplejson'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: System :: Systems Administration',
],
)
|
meefik/tinykernel-flo
|
refs/heads/tiny-jb-mr2
|
tools/perf/python/twatch.py
|
7370
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
fchu/hadoop-0.20.205
|
refs/heads/master
|
contrib/hod/hodlib/Hod/nodePool.py
|
182
|
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""defines nodepool and nodeset as abstract interface for batch system"""
# -*- python -*-
from hodlib.GridServices.service import *
class NodeSet:
"""a set of nodes as one allocation unit"""
PENDING, COMMITTED, COMPLETE = range(3)
def __init__(self, id, numNodes, preferredList, isPreemptee):
self.id = id
self.numNodes = numNodes
self.isPreemptee = isPreemptee
self.preferredList = preferredList
self.cmdDescSet = []
def getId(self):
"""returns a unique id of the nodeset"""
return self.id
def registerCommand(self, cmdDesc):
"""register a command to the nodeset"""
self.cmdDescSet.append(cmdDesc)
def getAddrList(self):
"""get list of node host names
May return empty list if node set is not allocated yet"""
raise NotImplementedError
def _getNumNodes(self):
return self.numNodes
def _isPreemptee(self):
return self.isPreemptee
def _getPreferredList(self):
return self.preferredList
def _getCmdSet(self):
return self.cmdDescSet
class NodePool:
"""maintains a collection of node sets as they get allocated.
Also the base class for all kinds of nodepools. """
def __init__(self, nodePoolDesc, cfg, log):
self.nodePoolDesc = nodePoolDesc
self.nodeSetDict = {}
self._cfg = cfg
self.nextNodeSetId = 0
self._log = log
def newNodeSet(self, numNodes, preferred=[], isPreemptee=True, id=None):
"""create a nodeset possibly with asked properties"""
raise NotImplementedError
def submitNodeSet(self, nodeSet, walltime = None, qosLevel = None,
account = None, resourcelist = None):
"""submit the nodeset request to nodepool
return False if error happened"""
raise NotImplementedError
def pollNodeSet(self, nodeSet):
"""return status of node set"""
raise NotImplementedError
def getWorkers(self):
"""return the hosts that comprise this nodepool"""
raise NotImplementedError
def runWorkers(self, nodeSet = None, args = []):
"""Run node set workers."""
raise NotImplementedError
def freeNodeSet(self, nodeset):
"""free a node set"""
raise NotImplementedError
def finalize(self):
"""cleans up all nodesets"""
raise NotImplementedError
def getServiceId(self):
raise NotImplementedError
def getJobInfo(self, jobId=None):
raise NotImplementedError
def deleteJob(self, jobId):
"""Delete a job, given it's id"""
raise NotImplementedError
def isJobFeasible(self):
"""Check if job can run by looking at any user/job limits"""
raise NotImplementedError
def updateWorkerInfo(self, workerInfoMap, jobId):
"""Update information about the workers started by this NodePool."""
raise NotImplementedError
def getAccountString(self):
"""Return the account string for this job"""
raise NotImplementedError
def getNextNodeSetId(self):
id = self.nextNodeSetId
self.nextNodeSetId += 1
return id
|
meduz/NeuroTools
|
refs/heads/master
|
src/plotting.py
|
1
|
"""
NeuroTools.plotting
===================
This module contains a collection of tools for plotting and image processing that
shall facilitate the generation and handling of NeuroTools data visualizations.
It utilizes the Matplotlib and the Python Imaging Library (PIL) packages.
Classes
-------
SimpleMultiplot - object that creates and handles a figure consisting of multiple panels, all with the same datatype and the same x-range.
Functions
---------
get_display - returns a pylab object with a plot() function to draw the plots.
progress_bar - prints a progress bar to stdout, filled to the given ratio.
pylab_params - returns a dictionary with a set of parameters that help to nicely format figures by updating the pylab run command parameters dictionary 'pylab.rcParams'.
set_axis_limits - defines the axis limits in a plot.
set_labels - defines the axis labels of a plot.
set_pylab_params - updates a set of parameters within the pylab run command parameters dictionary 'pylab.rcParams' in order to achieve nicely formatted figures.
save_2D_image - saves a 2D numpy array of gray shades between 0 and 1 to a PNG file.
save_2D_movie - saves a list of 2D numpy arrays of gray shades between 0 and 1 to a zipped tree of PNG files.
"""
import sys, numpy
from NeuroTools import check_dependency
# Check availability of pylab (essential!)
if check_dependency('matplotlib'):
from matplotlib import use
use('Agg')
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
if check_dependency('pylab'):
import pylab
# Check availability of PIL
PILIMAGEUSE = check_dependency('PIL')
if PILIMAGEUSE:
import PIL.Image as Image
########################################################
# UNIVERSAL FUNCTIONS AND CLASSES FOR NORMAL PYLAB USE #
########################################################
def get_display(display):
"""
Returns a pylab object with a plot() function to draw the plots.
Inputs:
display - if True, a new figure is created. Otherwise, if display is a
subplot object, this object is returned.
"""
if display is False:
return None
elif display is True:
pylab.figure()
return pylab
else:
return display
def progress_bar(progress):
"""
Prints a progress bar to stdout.
Inputs:
progress - a float between 0. and 1.
Example:
>> progress_bar(0.7)
|=================================== |
"""
progressConditionStr = "ERROR: The argument of function NeuroTools.plotting.progress_bar(...) must be a float between 0. and 1.!"
assert (type(progress) == float) and (progress >= 0.) and (progress <= 1.), progressConditionStr
length = 50
filled = int(round(length*progress))
print "|" + "=" * filled + " " * (length-filled) + "|\r",
sys.stdout.flush()
def pylab_params(fig_width_pt=246.0,
ratio=(numpy.sqrt(5)-1.0)/2.0,# Aesthetic golden mean ratio by default
text_fontsize=10, tick_labelsize=8, useTex=False):
"""
Returns a dictionary with a set of parameters that help to nicely format figures.
The return object can be used to update the pylab run command parameters dictionary 'pylab.rcParams'.
Inputs:
fig_width_pt - figure width in points. If you want to use your figure inside LaTeX,
get this value from LaTeX using '\\showthe\\columnwidth'.
ratio - ratio between the height and the width of the figure.
text_fontsize - size of axes and in-pic text fonts.
tick_labelsize - size of tick label font.
useTex - enables or disables the use of LaTeX for all labels and texts
(for details on how to do that, see http://www.scipy.org/Cookbook/Matplotlib/UsingTex).
"""
inches_per_pt = 1.0/72.27 # Convert pt to inch
fig_width = fig_width_pt*inches_per_pt # width in inches
fig_height = fig_width*ratio # height in inches
fig_size = [fig_width,fig_height]
params = {
'axes.labelsize' : text_fontsize,
'font.size' : text_fontsize,
'xtick.labelsize' : tick_labelsize,
'ytick.labelsize' : tick_labelsize,
'text.usetex' : useTex,
'figure.figsize' : fig_size}
return params
def set_axis_limits(subplot, xmin, xmax, ymin, ymax):
"""
Defines the axis limits of a plot.
Inputs:
subplot - the targeted plot
xmin, xmax - the limits of the x axis
ymin, ymax - the limits of the y axis
Example:
>> x = range(10)
>> y = []
>> for i in x: y.append(i*i)
>> pylab.plot(x,y)
>> plotting.set_axis_limits(pylab, 0., 10., 0., 100.)
"""
if hasattr(subplot, 'xlim'):
subplot.xlim(xmin, xmax)
subplot.ylim(ymin, ymax)
elif hasattr(subplot, 'set_xlim'):
subplot.set_xlim(xmin, xmax)
subplot.set_ylim(ymin, ymax)
else:
raise Exception('ERROR: The plot passed to function NeuroTools.plotting.set_axis_limits(...) does not provide limit defining functions.')
def set_labels(subplot, xlabel, ylabel):
"""
Defines the axis labels of a plot.
Inputs:
subplot - the targeted plot
xlabel - a string for the x label
ylabel - a string for the y label
Example:
>> x = range(10)
>> y = []
>> for i in x: y.append(i*i)
>> pylab.plot(x,y)
>> plotting.set_labels(pylab, 'x', 'y=x^2')
"""
if hasattr(subplot, 'xlabel'):
subplot.xlabel(xlabel)
subplot.ylabel(ylabel)
elif hasattr(subplot, 'set_xlabel'):
subplot.set_xlabel(xlabel)
subplot.set_ylabel(ylabel)
else:
raise Exception('ERROR: The plot passed to function NeuroTools.plotting.set_label(...) does not provide labelling functions.')
def set_pylab_params(fig_width_pt=246.0,
ratio=(numpy.sqrt(5)-1.0)/2.0,# Aesthetic golden mean ratio by default
text_fontsize=10, tick_labelsize=8, useTex=False):
"""
    Updates a set of parameters within the pylab run command parameters dictionary 'pylab.rcParams'
in order to achieve nicely formatted figures.
Inputs:
fig_width_pt - figure width in points. If you want to use your figure inside LaTeX,
                       get this value from LaTeX using '\\showthe\\columnwidth'.
ratio - ratio between the height and the width of the figure
text_fontsize - size of axes and in-pic text fonts
tick_labelsize - size of tick label font
useTex - enables or disables the use of LaTeX for all labels and texts
(for details on how to do that, see http://www.scipy.org/Cookbook/Matplotlib/UsingTex)
"""
pylab.rcParams.update(pylab_params(fig_width_pt=fig_width_pt, ratio=ratio, text_fontsize=text_fontsize, \
tick_labelsize=tick_labelsize, useTex=useTex))
####################################################################
# SPECIAL PLOTTING FUNCTIONS AND CLASSES FOR SPECIFIC REQUIREMENTS #
####################################################################
def save_2D_image(mat, filename):
"""
Saves a 2D numpy array of gray shades between 0 and 1 to a PNG file.
Inputs:
mat - a 2D numpy array of floats between 0 and 1
filename - string specifying the filename where to save the data, has to end on '.png'
Example:
>> import numpy
>> a = numpy.random.random([100,100]) # creates a 2D numpy array with random values between 0. and 1.
>> save_2D_image(a,'randomarray100x100.png')
"""
assert PILIMAGEUSE, "ERROR: Since PIL has not been detected, the function NeuroTools.plotting.save_2D_image(...) is not supported!"
    matConditionStr = "ERROR: First argument of function NeuroTools.plotting.save_2D_image(...) must be a 2D numpy array of floats between 0. and 1.!"
    filenameConditionStr = "ERROR: Second argument of function NeuroTools.plotting.save_2D_image(...) must be a string ending on \".png\"!"
assert (type(mat) == numpy.ndarray) and (mat.ndim == 2) and (mat.min() >= 0.) and (mat.max() <= 1.), matConditionStr
assert (type(filename) == str) and (len(filename) > 4) and (filename[-4:].lower() == '.png'), filenameConditionStr
mode = 'L'
    # PIL asks for a permuted (col,line) shape corresponding to the natural (x,y) space
pilImage = Image.new(mode, (mat.shape[1], mat.shape[0]))
data = numpy.floor(numpy.ravel(mat) * 256.)
pilImage.putdata(data)
pilImage.save(filename)
def save_2D_movie(frame_list, filename, frame_duration):
"""
Saves a list of 2D numpy arrays of gray shades between 0 and 1 to a zipped tree of PNG files.
Inputs:
frame_list - a list of 2D numpy arrays of floats between 0 and 1
filename - string specifying the filename where to save the data, has to end on '.zip'
frame_duration - specifier for the duration per frame, will be stored as additional meta-data
Example:
>> import numpy
>> framelist = []
>> for i in range(100): framelist.append(numpy.random.random([100,100])) # creates a list of 2D numpy arrays with random values between 0. and 1.
>> save_2D_movie(framelist, 'randommovie100x100x100.zip', 0.1)
"""
try:
import zipfile
except ImportError:
raise ImportError("ERROR: Python module zipfile not found! Needed by NeuroTools.plotting.save_2D_movie(...)!")
try:
import StringIO
except ImportError:
raise ImportError("ERROR: Python module StringIO not found! Needed by NeuroTools.plotting.save_2D_movie(...)!")
assert PILIMAGEUSE, "ERROR: Since PIL has not been detected, the function NeuroTools.plotting.save_2D_movie(...) is not supported!"
filenameConditionStr = "ERROR: Second argument of function NeuroTools.plotting.save_2D_movie(...) must be a string ending on \".zip\"!"
assert (type(filename) == str) and (len(filename) > 4) and (filename[-4:].lower() == '.zip'), filenameConditionStr
zf = zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED)
container = filename[:-4] # remove .zip
frame_name_format = "frame%s.%dd.png" % ("%", pylab.ceil(pylab.log10(len(frame_list))))
for frame_num, frame in enumerate(frame_list):
        # scale the [0, 1] gray values to 0-255 and build (R, G, B) tuples
        frame_data = [(int(p * 255), int(p * 255), int(p * 255)) for p in frame.flat]
        # PIL expects a (width, height) size, i.e. the transposed array shape
        im = Image.new('RGB', (frame.shape[1], frame.shape[0]), 'white')
im.putdata(frame_data)
io = StringIO.StringIO()
im.save(io, format='png')
pngname = frame_name_format % frame_num
arcname = "%s/%s" % (container, pngname)
io.seek(0)
zf.writestr(arcname, io.read())
progress_bar(float(frame_num)/len(frame_list))
# add 'parameters' and 'frames' files to the zip archive
zf.writestr("%s/parameters" % container,
'frame_duration = %s' % frame_duration)
zf.writestr("%s/frames" % container,
'\n'.join(["frame%.3d.png" % i for i in range(len(frame_list))]))
zf.close()
class SimpleMultiplot(object):
"""
A figure consisting of multiple panels, all with the same datatype and
the same x-range.
"""
def __init__(self, nrows, ncolumns, title="", xlabel=None, ylabel=None,
scaling=('linear','linear')):
self.fig = Figure()
self.canvas = FigureCanvas(self.fig)
self.axes = []
self.all_panels = self.axes
self.nrows = nrows
self.ncolumns = ncolumns
self.n = nrows*ncolumns
self._curr_panel = 0
self.title = title
topmargin = 0.06
rightmargin = 0.02
bottommargin = 0.1
leftmargin=0.1
v_panelsep = 0.1*(1 - topmargin - bottommargin)/nrows #0.05
h_panelsep = 0.1*(1 - leftmargin - rightmargin)/ncolumns
panelheight = (1 - topmargin - bottommargin - (nrows-1)*v_panelsep)/nrows
panelwidth = (1 - leftmargin - rightmargin - (ncolumns-1)*h_panelsep)/ncolumns
assert panelheight > 0
bottomlist = [bottommargin + i*v_panelsep + i*panelheight for i in range(nrows)]
leftlist = [leftmargin + j*h_panelsep + j*panelwidth for j in range(ncolumns)]
bottomlist.reverse()
for j in range(ncolumns):
for i in range(nrows):
ax = self.fig.add_axes([leftlist[j],bottomlist[i],panelwidth,panelheight])
self.set_frame(ax,[True,True,False,False])
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
self.axes.append(ax)
if xlabel:
self.axes[self.nrows-1].set_xlabel(xlabel)
if ylabel:
self.fig.text(0.5*leftmargin,0.5,ylabel,
rotation='vertical',
horizontalalignment='center',
verticalalignment='center')
if scaling == ("linear","linear"):
self.plot_function = "plot"
elif scaling == ("log", "log"):
self.plot_function = "loglog"
elif scaling == ("log", "linear"):
self.plot_function = "semilogx"
elif scaling == ("linear", "log"):
self.plot_function = "semilogy"
else:
raise Exception("Invalid value for scaling parameter")
def finalise(self):
"""Adjustments to be made after all panels have been plotted."""
# Turn off tick labels for all x-axes except the bottom one
self.fig.text(0.5, 0.99, self.title, horizontalalignment='center',
verticalalignment='top')
for ax in self.axes[0:self.nrows-1]+self.axes[self.nrows:]:
ax.xaxis.set_ticklabels([])
def save(self, filename):
"""Saves/prints the figure to file.
Inputs:
filename - string specifying the filename where to save the data
"""
self.finalise()
self.canvas.print_figure(filename)
def next_panel(self):
"""Changes to next panel within figure."""
ax = self.axes[self._curr_panel]
self._curr_panel += 1
if self._curr_panel >= self.n:
self._curr_panel = 0
ax.plot1 = getattr(ax, self.plot_function)
return ax
def panel(self, i):
"""Returns panel i."""
ax = self.axes[i]
ax.plot1 = getattr(ax, self.plot_function)
return ax
def set_frame(self, ax, boollist, linewidth=2):
"""
Defines frames for the chosen axis.
Inputs:
            ax - the targeted axis
            boollist - a list of four booleans selecting which frame sides to
                       draw, in the order [left, bottom, right, top]
            linewidth - the width of the frame lines
"""
assert type(boollist) in [list, numpy.ndarray]
assert len(boollist) == 4
if boollist != [True,True,True,True]:
bottom = Line2D([0, 1], [0, 0], transform=ax.transAxes, linewidth=linewidth, color='k')
left = Line2D([0, 0], [0, 1], transform=ax.transAxes, linewidth=linewidth, color='k')
top = Line2D([0, 1], [1, 1], transform=ax.transAxes, linewidth=linewidth, color='k')
            right = Line2D([1, 1], [0, 1], transform=ax.transAxes, linewidth=linewidth, color='k')
ax.set_frame_on(False)
for side,draw in zip([left,bottom,right,top],boollist):
if draw:
ax.add_line(side)
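# Minimal usage sketch for SimpleMultiplot (illustrative; the panel layout,
# data and filename below are assumptions, not taken from this module):
#
#     smp = SimpleMultiplot(nrows=2, ncolumns=1, title="demo",
#                           xlabel="time", ylabel="value")
#     for k in range(smp.n):
#         panel = smp.next_panel()
#         panel.plot1(range(10), [k * x for x in range(10)])
#     smp.save("demo.png")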
|
svanschalkwyk/datafari
|
refs/heads/master
|
windows/python/Lib/test/test_future.py
|
137
|
# Test various flavors of legal and illegal future statements
import unittest
from test import test_support
import re
rx = re.compile('\((\S+).py, line (\d+)')
def get_error_location(msg):
mo = rx.search(str(msg))
return mo.group(1, 2)
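# Illustrative example (the message text is an assumption): for a SyntaxError
# whose message ends in "(badsyntax_future3.py, line 3)", get_error_location()
# returns the tuple ('badsyntax_future3', '3').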
class FutureTest(unittest.TestCase):
def test_future1(self):
test_support.unload('test_future1')
from test import test_future1
self.assertEqual(test_future1.result, 6)
def test_future2(self):
test_support.unload('test_future2')
from test import test_future2
self.assertEqual(test_future2.result, 6)
def test_future3(self):
test_support.unload('test_future3')
from test import test_future3
def test_badfuture3(self):
try:
from test import badsyntax_future3
except SyntaxError, msg:
self.assertEqual(get_error_location(msg), ("badsyntax_future3", '3'))
else:
self.fail("expected exception didn't occur")
def test_badfuture4(self):
try:
from test import badsyntax_future4
except SyntaxError, msg:
self.assertEqual(get_error_location(msg), ("badsyntax_future4", '3'))
else:
self.fail("expected exception didn't occur")
def test_badfuture5(self):
try:
from test import badsyntax_future5
except SyntaxError, msg:
self.assertEqual(get_error_location(msg), ("badsyntax_future5", '4'))
else:
self.fail("expected exception didn't occur")
def test_badfuture6(self):
try:
from test import badsyntax_future6
except SyntaxError, msg:
self.assertEqual(get_error_location(msg), ("badsyntax_future6", '3'))
else:
self.fail("expected exception didn't occur")
def test_badfuture7(self):
try:
from test import badsyntax_future7
except SyntaxError, msg:
self.assertEqual(get_error_location(msg), ("badsyntax_future7", '3'))
else:
self.fail("expected exception didn't occur")
def test_badfuture8(self):
try:
from test import badsyntax_future8
except SyntaxError, msg:
self.assertEqual(get_error_location(msg), ("badsyntax_future8", '3'))
else:
self.fail("expected exception didn't occur")
def test_badfuture9(self):
try:
from test import badsyntax_future9
except SyntaxError, msg:
self.assertEqual(get_error_location(msg), ("badsyntax_future9", '3'))
else:
self.fail("expected exception didn't occur")
def test_parserhack(self):
# test that the parser.c::future_hack function works as expected
# Note: although this test must pass, it's not testing the original
# bug as of 2.6 since the with statement is not optional and
# the parser hack disabled. If a new keyword is introduced in
# 2.6, change this to refer to the new future import.
try:
exec "from __future__ import print_function; print 0"
except SyntaxError:
pass
else:
self.fail("syntax error didn't occur")
try:
exec "from __future__ import (print_function); print 0"
except SyntaxError:
pass
else:
self.fail("syntax error didn't occur")
def test_multiple_features(self):
test_support.unload("test.test_future5")
from test import test_future5
def test_unicode_literals_exec(self):
scope = {}
exec "from __future__ import unicode_literals; x = ''" in scope
self.assertIsInstance(scope["x"], unicode)
def test_main():
test_support.run_unittest(FutureTest)
if __name__ == "__main__":
test_main()
|
Android-AOSP/external_skia
|
refs/heads/master
|
tools/roll_deps.py
|
68
|
#!/usr/bin/python2
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Skia's Chromium DEPS roll script.
This script:
- searches through the last N Skia git commits to find out the hash that is
associated with the SVN revision number.
- creates a new branch in the Chromium tree, modifies the DEPS file to
point at the given Skia commit, commits, uploads to Rietveld, and
deletes the local copy of the branch.
- creates a whitespace-only commit and uploads that to Rietveld.
- returns the Chromium tree to its previous state.
To specify the location of the git executable, set the GIT_EXECUTABLE
environment variable.
Usage:
%prog -c CHROMIUM_PATH -r REVISION [OPTIONAL_OPTIONS]
"""
import optparse
import os
import re
import shutil
import subprocess
import sys
import tempfile
import git_utils
import misc_utils
DEFAULT_BOTS_LIST = [
'android_clang_dbg',
'android_dbg',
'android_rel',
'cros_daisy',
'linux',
'linux_asan',
'linux_chromeos',
'linux_chromeos_asan',
'linux_chromium_gn_dbg',
'linux_gpu',
'linux_layout',
'linux_layout_rel',
'mac',
'mac_asan',
'mac_gpu',
'mac_layout',
'mac_layout_rel',
'win',
'win_gpu',
'win_layout',
'win_layout_rel',
]
class DepsRollConfig(object):
"""Contains configuration options for this module.
Attributes:
git: (string) The git executable.
chromium_path: (string) path to a local chromium git repository.
save_branches: (boolean) iff false, delete temporary branches.
verbose: (boolean) iff false, suppress the output from git-cl.
search_depth: (int) how far back to look for the revision.
skia_url: (string) Skia's git repository.
        skip_cl_upload: (boolean) iff true, skip the `git cl upload` step.
        cl_bot_list: (list of strings) bots to pass to `git cl try`.
"""
# pylint: disable=I0011,R0903,R0902
def __init__(self, options=None):
self.skia_url = 'https://skia.googlesource.com/skia.git'
self.revision_format = (
'git-svn-id: http://skia.googlecode.com/svn/trunk@%d ')
self.git = git_utils.git_executable()
if not options:
options = DepsRollConfig.GetOptionParser()
# pylint: disable=I0011,E1103
self.verbose = options.verbose
self.vsp = misc_utils.VerboseSubprocess(self.verbose)
self.save_branches = not options.delete_branches
self.search_depth = options.search_depth
self.chromium_path = options.chromium_path
self.skip_cl_upload = options.skip_cl_upload
        # Split and remove empty strings from the bot list.
self.cl_bot_list = [bot for bot in options.bots.split(',') if bot]
self.skia_git_checkout_path = options.skia_git_path
self.default_branch_name = 'autogenerated_deps_roll_branch'
self.reviewers_list = ','.join([
# 'rmistry@google.com',
# 'reed@google.com',
# 'bsalomon@google.com',
# 'robertphillips@google.com',
])
self.cc_list = ','.join([
# 'skia-team@google.com',
])
@staticmethod
def GetOptionParser():
# pylint: disable=I0011,C0103
"""Returns an optparse.OptionParser object.
Returns:
An optparse.OptionParser object.
Called by the main() function.
"""
option_parser = optparse.OptionParser(usage=__doc__)
# Anyone using this script on a regular basis should set the
# CHROMIUM_CHECKOUT_PATH environment variable.
option_parser.add_option(
'-c', '--chromium_path', help='Path to local Chromium Git'
' repository checkout, defaults to CHROMIUM_CHECKOUT_PATH'
' if that environment variable is set.',
default=os.environ.get('CHROMIUM_CHECKOUT_PATH'))
option_parser.add_option(
'-r', '--revision', type='int', default=None,
help='The Skia SVN revision number, defaults to top of tree.')
option_parser.add_option(
'-g', '--git_hash', default=None,
help='A partial Skia Git hash. Do not set this and revision.')
# Anyone using this script on a regular basis should set the
# SKIA_GIT_CHECKOUT_PATH environment variable.
option_parser.add_option(
'', '--skia_git_path',
help='Path of a pure-git Skia repository checkout. If empty,'
' a temporary will be cloned. Defaults to SKIA_GIT_CHECKOUT'
'_PATH, if that environment variable is set.',
default=os.environ.get('SKIA_GIT_CHECKOUT_PATH'))
option_parser.add_option(
'', '--search_depth', type='int', default=100,
help='How far back to look for the revision.')
option_parser.add_option(
'', '--delete_branches', help='Delete the temporary branches',
action='store_true', dest='delete_branches', default=False)
option_parser.add_option(
'', '--verbose', help='Do not suppress the output from `git cl`.',
action='store_true', dest='verbose', default=False)
option_parser.add_option(
'', '--skip_cl_upload', help='Skip the cl upload step; useful'
' for testing.',
action='store_true', default=False)
default_bots_help = (
'Comma-separated list of bots, defaults to a list of %d bots.'
' To skip `git cl try`, set this to an empty string.'
% len(DEFAULT_BOTS_LIST))
default_bots = ','.join(DEFAULT_BOTS_LIST)
option_parser.add_option(
'', '--bots', help=default_bots_help, default=default_bots)
return option_parser
class DepsRollError(Exception):
"""Exceptions specific to this module."""
pass
def get_svn_revision(config, commit):
"""Works in both git and git-svn. returns a string."""
svn_format = (
'(git-svn-id: [^@ ]+@|SVN changes up to revision |'
'LKGR w/ DEPS up to revision )(?P<return>[0-9]+)')
svn_revision = misc_utils.ReSearch.search_within_output(
config.verbose, svn_format, None,
[config.git, 'log', '-n', '1', '--format=format:%B', commit])
if not svn_revision:
raise DepsRollError(
'Revision number missing from Chromium origin/master.')
return int(svn_revision)
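# Illustrative example (the commit text is an assumption): a commit message
# containing "SVN changes up to revision 12345", or a git-svn footer such as
# "git-svn-id: http://skia.googlecode.com/svn/trunk@12345 <uuid>", both make
# get_svn_revision() return 12345.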
class SkiaGitCheckout(object):
"""Class to create a temporary skia git checkout, if necessary.
"""
# pylint: disable=I0011,R0903
def __init__(self, config, depth):
self._config = config
self._depth = depth
self._use_temp = None
self._original_cwd = None
def __enter__(self):
config = self._config
git = config.git
skia_dir = None
self._original_cwd = os.getcwd()
if config.skia_git_checkout_path:
if config.skia_git_checkout_path != os.curdir:
skia_dir = config.skia_git_checkout_path
## Update origin/master if needed.
if self._config.verbose:
print '~~$', 'cd', skia_dir
os.chdir(skia_dir)
config.vsp.check_call([git, 'fetch', '-q', 'origin'])
self._use_temp = None
else:
skia_dir = tempfile.mkdtemp(prefix='git_skia_tmp_')
self._use_temp = skia_dir
try:
os.chdir(skia_dir)
config.vsp.check_call(
[git, 'clone', '-q', '--depth=%d' % self._depth,
'--single-branch', config.skia_url, '.'])
except (OSError, subprocess.CalledProcessError) as error:
shutil.rmtree(skia_dir)
raise error
def __exit__(self, etype, value, traceback):
if self._config.skia_git_checkout_path != os.curdir:
if self._config.verbose:
print '~~$', 'cd', self._original_cwd
os.chdir(self._original_cwd)
if self._use_temp:
shutil.rmtree(self._use_temp)
def revision_and_hash(config):
"""Finds revision number and git hash of origin/master in the Skia tree.
Args:
config: (roll_deps.DepsRollConfig) object containing options.
Returns:
A tuple (revision, hash)
revision: (int) SVN revision number.
git_hash: (string) full Git commit hash.
Raises:
roll_deps.DepsRollError: if the revision can't be found.
OSError: failed to execute git or git-cl.
subprocess.CalledProcessError: git returned unexpected status.
"""
with SkiaGitCheckout(config, 1):
revision = get_svn_revision(config, 'origin/master')
git_hash = config.vsp.strip_output(
[config.git, 'show-ref', 'origin/master', '--hash'])
if not git_hash:
raise DepsRollError('Git hash can not be found.')
return revision, git_hash
def revision_and_hash_from_revision(config, revision):
"""Finds revision number and git hash of a commit in the Skia tree.
Args:
config: (roll_deps.DepsRollConfig) object containing options.
revision: (int) SVN revision number.
Returns:
A tuple (revision, hash)
revision: (int) SVN revision number.
git_hash: (string) full Git commit hash.
Raises:
roll_deps.DepsRollError: if the revision can't be found.
OSError: failed to execute git or git-cl.
subprocess.CalledProcessError: git returned unexpected status.
"""
with SkiaGitCheckout(config, config.search_depth):
revision_regex = config.revision_format % revision
git_hash = config.vsp.strip_output(
[config.git, 'log', '--grep', revision_regex,
'--format=format:%H', 'origin/master'])
if not git_hash:
raise DepsRollError('Git hash can not be found.')
return revision, git_hash
def revision_and_hash_from_partial(config, partial_hash):
"""Returns the SVN revision number and full git hash.
Args:
config: (roll_deps.DepsRollConfig) object containing options.
partial_hash: (string) Partial git commit hash.
Returns:
A tuple (revision, hash)
revision: (int) SVN revision number.
git_hash: (string) full Git commit hash.
Raises:
roll_deps.DepsRollError: if the revision can't be found.
OSError: failed to execute git or git-cl.
subprocess.CalledProcessError: git returned unexpected status.
"""
with SkiaGitCheckout(config, config.search_depth):
git_hash = config.vsp.strip_output(
['git', 'log', '-n', '1', '--format=format:%H', partial_hash])
if not git_hash:
raise DepsRollError('Partial Git hash can not be found.')
revision = get_svn_revision(config, git_hash)
return revision, git_hash
def change_skia_deps(revision, git_hash, depspath):
"""Update the DEPS file.
Modify the skia_revision and skia_hash entries in the given DEPS file.
Args:
revision: (int) Skia SVN revision.
git_hash: (string) Skia Git hash.
depspath: (string) path to DEPS file.
"""
temp_file = tempfile.NamedTemporaryFile(delete=False,
prefix='skia_DEPS_ROLL_tmp_')
try:
deps_regex_rev = re.compile('"skia_revision": "[0-9]*",')
deps_regex_hash = re.compile('"skia_hash": "[0-9a-f]*",')
deps_regex_rev_repl = '"skia_revision": "%d",' % revision
deps_regex_hash_repl = '"skia_hash": "%s",' % git_hash
with open(depspath, 'r') as input_stream:
for line in input_stream:
line = deps_regex_rev.sub(deps_regex_rev_repl, line)
line = deps_regex_hash.sub(deps_regex_hash_repl, line)
temp_file.write(line)
finally:
temp_file.close()
shutil.move(temp_file.name, depspath)
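# Sketch of the substitution change_skia_deps() performs on the DEPS file
# (the values below are made up):
#
#     '  "skia_revision": "13000",'   becomes   '  "skia_revision": "13125",'
#     '  "skia_hash": "<old hash>",'  becomes   '  "skia_hash": "<git_hash>",'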
def git_cl_uploader(config, message, file_list):
"""Create a commit in the current git branch; upload via git-cl.
Assumes that you are already on the branch you want to be on.
Args:
config: (roll_deps.DepsRollConfig) object containing options.
message: (string) the commit message, can be multiline.
file_list: (list of strings) list of filenames to pass to `git add`.
Returns:
The output of `git cl issue`, if not config.skip_cl_upload, else ''.
"""
git, vsp = config.git, config.vsp
svn_info = str(get_svn_revision(config, 'HEAD'))
for filename in file_list:
assert os.path.exists(filename)
vsp.check_call([git, 'add', filename])
vsp.check_call([git, 'commit', '-q', '-m', message])
git_cl = [git, 'cl', 'upload', '-f',
'--bypass-hooks', '--bypass-watchlists']
if config.cc_list:
git_cl.append('--cc=%s' % config.cc_list)
if config.reviewers_list:
git_cl.append('--reviewers=%s' % config.reviewers_list)
git_try = [
git, 'cl', 'try', '-m', 'tryserver.chromium', '--revision', svn_info]
git_try.extend([arg for bot in config.cl_bot_list for arg in ('-b', bot)])
branch_name = git_utils.git_branch_name(vsp.verbose)
if config.skip_cl_upload:
space = ' '
print 'You should call:'
print '%scd %s' % (space, os.getcwd())
misc_utils.print_subprocess_args(space, [git, 'checkout', branch_name])
misc_utils.print_subprocess_args(space, git_cl)
if config.cl_bot_list:
misc_utils.print_subprocess_args(space, git_try)
print
return ''
else:
vsp.check_call(git_cl)
issue = vsp.strip_output([git, 'cl', 'issue'])
if config.cl_bot_list:
vsp.check_call(git_try)
return issue
def roll_deps(config, revision, git_hash):
"""Upload changed DEPS and a whitespace change.
    Given the correct git_hash, create two Rietveld issues.
Args:
config: (roll_deps.DepsRollConfig) object containing options.
revision: (int) Skia SVN revision.
git_hash: (string) Skia Git hash.
Returns:
a tuple containing textual description of the two issues.
Raises:
OSError: failed to execute git or git-cl.
subprocess.CalledProcessError: git returned unexpected status.
"""
git = config.git
with misc_utils.ChangeDir(config.chromium_path, config.verbose):
config.vsp.check_call([git, 'fetch', '-q', 'origin'])
old_revision = misc_utils.ReSearch.search_within_output(
config.verbose, '"skia_revision": "(?P<return>[0-9]+)",', None,
[git, 'show', 'origin/master:DEPS'])
assert old_revision
if revision == int(old_revision):
print 'DEPS is up to date!'
return (None, None)
master_hash = config.vsp.strip_output(
[git, 'show-ref', 'origin/master', '--hash'])
master_revision = get_svn_revision(config, 'origin/master')
        # master_hash[:8] gives each whitespace CL a unique name.
if config.save_branches:
branch = 'control_%s' % master_hash[:8]
else:
branch = None
message = ('whitespace change %s\n\n'
'Chromium base revision: %d / %s\n\n'
'This CL was created by Skia\'s roll_deps.py script.\n'
) % (master_hash[:8], master_revision, master_hash[:8])
with git_utils.ChangeGitBranch(branch, 'origin/master',
config.verbose):
branch = git_utils.git_branch_name(config.vsp.verbose)
with open('build/whitespace_file.txt', 'a') as output_stream:
output_stream.write('\nCONTROL\n')
whitespace_cl = git_cl_uploader(
config, message, ['build/whitespace_file.txt'])
control_url = misc_utils.ReSearch.search_within_string(
whitespace_cl, '(?P<return>https?://[^) ]+)', '?')
if config.save_branches:
whitespace_cl = '%s\n branch: %s' % (whitespace_cl, branch)
if config.save_branches:
branch = 'roll_%d_%s' % (revision, master_hash[:8])
else:
branch = None
message = (
'roll skia DEPS to %d\n\n'
'Chromium base revision: %d / %s\n'
'Old Skia revision: %s\n'
'New Skia revision: %d\n'
'Control CL: %s\n\n'
'This CL was created by Skia\'s roll_deps.py script.\n\n'
'Bypassing commit queue trybots:\n'
'NOTRY=true\n'
% (revision, master_revision, master_hash[:8],
old_revision, revision, control_url))
with git_utils.ChangeGitBranch(branch, 'origin/master',
config.verbose):
branch = git_utils.git_branch_name(config.vsp.verbose)
change_skia_deps(revision, git_hash, 'DEPS')
deps_cl = git_cl_uploader(config, message, ['DEPS'])
if config.save_branches:
deps_cl = '%s\n branch: %s' % (deps_cl, branch)
return deps_cl, whitespace_cl
def find_hash_and_roll_deps(config, revision=None, partial_hash=None):
"""Call find_hash_from_revision() and roll_deps().
The calls to git will be verbose on standard output. After a
successful upload of both issues, print links to the new
codereview issues.
Args:
config: (roll_deps.DepsRollConfig) object containing options.
revision: (int or None) the Skia SVN revision number or None
to use the tip of the tree.
partial_hash: (string or None) a partial pure-git Skia commit
hash. Don't pass both partial_hash and revision.
Raises:
roll_deps.DepsRollError: if the revision can't be found.
OSError: failed to execute git or git-cl.
subprocess.CalledProcessError: git returned unexpected status.
"""
if revision and partial_hash:
raise DepsRollError('Pass revision or partial_hash, not both.')
if partial_hash:
revision, git_hash = revision_and_hash_from_partial(
config, partial_hash)
elif revision:
revision, git_hash = revision_and_hash_from_revision(config, revision)
else:
revision, git_hash = revision_and_hash(config)
print 'revision=%r\nhash=%r\n' % (revision, git_hash)
deps_issue, whitespace_issue = roll_deps(config, revision, git_hash)
if deps_issue and whitespace_issue:
print 'DEPS roll:\n %s\n' % deps_issue
print 'Whitespace change:\n %s\n' % whitespace_issue
else:
print >> sys.stderr, 'No issues created.'
def main(args):
"""main function; see module-level docstring and GetOptionParser help.
Args:
args: sys.argv[1:]-type argument list.
"""
option_parser = DepsRollConfig.GetOptionParser()
options = option_parser.parse_args(args)[0]
if not options.chromium_path:
option_parser.error('Must specify chromium_path.')
if not os.path.isdir(options.chromium_path):
option_parser.error('chromium_path must be a directory.')
if not git_utils.git_executable():
option_parser.error('Invalid git executable.')
config = DepsRollConfig(options)
find_hash_and_roll_deps(config, options.revision, options.git_hash)
if __name__ == '__main__':
main(sys.argv[1:])
|
TakeshiTseng/ryu
|
refs/heads/master
|
ryu/ofproto/ofproto_v1_5_parser.py
|
5
|
# Copyright (C) 2012, 2013, 2014 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012, 2013 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Decoder/Encoder implementations of OpenFlow 1.5.
"""
import struct
import base64
import six
from ryu.lib import addrconv
from ryu.lib.pack_utils import msg_pack_into
from ryu.lib.packet import packet
from ryu import exception
from ryu import utils
from ryu.ofproto.ofproto_parser import StringifyMixin, MsgBase, MsgInMsgBase
from ryu.ofproto import ether
from ryu.ofproto import nx_actions
from ryu.ofproto import ofproto_parser
from ryu.ofproto import ofproto_common
from ryu.ofproto import ofproto_v1_5 as ofproto
_MSG_PARSERS = {}
def _set_msg_type(msg_type):
def _set_cls_msg_type(cls):
cls.cls_msg_type = msg_type
return cls
return _set_cls_msg_type
def _register_parser(cls):
'''class decorator to register msg parser'''
assert cls.cls_msg_type is not None
assert cls.cls_msg_type not in _MSG_PARSERS
_MSG_PARSERS[cls.cls_msg_type] = cls.parser
return cls
@ofproto_parser.register_msg_parser(ofproto.OFP_VERSION)
def msg_parser(datapath, version, msg_type, msg_len, xid, buf):
parser = _MSG_PARSERS.get(msg_type)
return parser(datapath, version, msg_type, msg_len, xid, buf)
@_register_parser
@_set_msg_type(ofproto.OFPT_HELLO)
class OFPHello(MsgBase):
"""
Hello message
When connection is started, the hello message is exchanged between a
switch and a controller.
    This message is handled by the Ryu framework, so the Ryu application
    does not typically need to process this.
========== =========================================================
Attribute Description
========== =========================================================
elements list of ``OFPHelloElemVersionBitmap`` instance
========== =========================================================
"""
def __init__(self, datapath, elements=None):
elements = elements if elements else []
super(OFPHello, self).__init__(datapath)
self.elements = elements
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPHello, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
offset = ofproto.OFP_HELLO_HEADER_SIZE
elems = []
while offset < msg.msg_len:
type_, length = struct.unpack_from(
ofproto.OFP_HELLO_ELEM_HEADER_PACK_STR, msg.buf, offset)
            # It would be better to register Hello Element classes, but
            # currently only VersionBitmap is supported, so let's be simple.
if type_ == ofproto.OFPHET_VERSIONBITMAP:
elem = OFPHelloElemVersionBitmap.parser(msg.buf, offset)
elems.append(elem)
offset += length
msg.elements = elems
return msg
class OFPHelloElemVersionBitmap(StringifyMixin):
"""
Version bitmap Hello Element
========== =========================================================
Attribute Description
========== =========================================================
versions list of versions of OpenFlow protocol a device supports
========== =========================================================
"""
def __init__(self, versions, type_=None, length=None):
super(OFPHelloElemVersionBitmap, self).__init__()
self.type = ofproto.OFPHET_VERSIONBITMAP
self.length = None
self._bitmaps = None
self.versions = versions
@classmethod
def parser(cls, buf, offset):
type_, length = struct.unpack_from(
ofproto.OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_PACK_STR,
buf, offset)
assert type_ == ofproto.OFPHET_VERSIONBITMAP
bitmaps_len = (length -
ofproto.OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_SIZE)
offset += ofproto.OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_SIZE
bitmaps = []
while bitmaps_len >= 4:
bitmap = struct.unpack_from('!I', buf, offset)
bitmaps.append(bitmap[0])
offset += 4
bitmaps_len -= 4
versions = [i * 32 + shift
for i, bitmap in enumerate(bitmaps)
for shift in range(31) if bitmap & (1 << shift)]
elem = cls(versions)
elem.length = length
elem._bitmaps = bitmaps
return elem
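# Decoding sketch (not part of the parser): bit positions in each 32-bit bitmap
# word map directly to wire protocol version numbers, so a single word of
# 0x00000010 (bit 4 set) advertises version 0x04 (OpenFlow 1.3), while
# 0x00000030 advertises versions 0x04 and 0x05 (OpenFlow 1.3 and 1.4).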
@_register_parser
@_set_msg_type(ofproto.OFPT_ECHO_REQUEST)
class OFPEchoRequest(MsgBase):
"""
Echo request message
    This message is handled by the Ryu framework, so the Ryu application
    does not typically need to process this.
========== =========================================================
Attribute Description
========== =========================================================
data An arbitrary length data
========== =========================================================
Example::
def send_echo_request(self, datapath, data):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPEchoRequest(datapath, data)
datapath.send_msg(req)
@set_ev_cls(ofp_event.EventOFPEchoRequest,
[HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
def echo_request_handler(self, ev):
self.logger.debug('OFPEchoRequest received: data=%s',
utils.hex_array(ev.msg.data))
"""
def __init__(self, datapath, data=None):
super(OFPEchoRequest, self).__init__(datapath)
self.data = data
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPEchoRequest, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
msg.data = msg.buf[ofproto.OFP_HEADER_SIZE:]
return msg
def _serialize_body(self):
if self.data is not None:
self.buf += self.data
@_register_parser
@_set_msg_type(ofproto.OFPT_ERROR)
class OFPErrorMsg(MsgBase):
"""
Error message
The switch notifies controller of problems by this message.
========== =========================================================
Attribute Description
========== =========================================================
type High level type of error
code Details depending on the type
data Variable length data depending on the type and code
========== =========================================================
``type`` attribute corresponds to ``type_`` parameter of __init__.
Types and codes are defined in ``ryu.ofproto.ofproto``.
============================= ===========
Type Code
============================= ===========
OFPET_HELLO_FAILED OFPHFC_*
OFPET_BAD_REQUEST OFPBRC_*
OFPET_BAD_ACTION OFPBAC_*
OFPET_BAD_INSTRUCTION OFPBIC_*
OFPET_BAD_MATCH OFPBMC_*
OFPET_FLOW_MOD_FAILED OFPFMFC_*
OFPET_GROUP_MOD_FAILED OFPGMFC_*
OFPET_PORT_MOD_FAILED OFPPMFC_*
OFPET_TABLE_MOD_FAILED OFPTMFC_*
OFPET_QUEUE_OP_FAILED OFPQOFC_*
OFPET_SWITCH_CONFIG_FAILED OFPSCFC_*
OFPET_ROLE_REQUEST_FAILED OFPRRFC_*
OFPET_METER_MOD_FAILED OFPMMFC_*
OFPET_TABLE_FEATURES_FAILED OFPTFFC_*
OFPET_EXPERIMENTER N/A
============================= ===========
Example::
@set_ev_cls(ofp_event.EventOFPErrorMsg,
[HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
def error_msg_handler(self, ev):
msg = ev.msg
self.logger.debug('OFPErrorMsg received: type=0x%02x code=0x%02x '
'message=%s',
msg.type, msg.code, utils.hex_array(msg.data))
"""
def __init__(self, datapath, type_=None, code=None, data=None):
super(OFPErrorMsg, self).__init__(datapath)
self.type = type_
self.code = code
self.data = data
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
type_, = struct.unpack_from('!H', six.binary_type(buf),
ofproto.OFP_HEADER_SIZE)
if type_ == ofproto.OFPET_EXPERIMENTER:
return OFPErrorExperimenterMsg.parser(datapath, version, msg_type,
msg_len, xid, buf)
msg = super(OFPErrorMsg, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
msg.type, msg.code = struct.unpack_from(
ofproto.OFP_ERROR_MSG_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)
msg.data = msg.buf[ofproto.OFP_ERROR_MSG_SIZE:]
return msg
def _serialize_body(self):
assert self.data is not None
msg_pack_into(ofproto.OFP_ERROR_MSG_PACK_STR, self.buf,
ofproto.OFP_HEADER_SIZE, self.type, self.code)
self.buf += self.data
class OFPErrorExperimenterMsg(MsgBase):
def __init__(self, datapath, type_=None, exp_type=None, experimenter=None,
data=None):
super(OFPErrorExperimenterMsg, self).__init__(datapath)
self.type = ofproto.OFPET_EXPERIMENTER
self.exp_type = exp_type
self.experimenter = experimenter
self.data = data
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
cls.cls_msg_type = msg_type
msg = super(OFPErrorExperimenterMsg, cls).parser(
datapath, version, msg_type, msg_len, xid, buf)
msg.type, msg.exp_type, msg.experimenter = struct.unpack_from(
ofproto.OFP_ERROR_EXPERIMENTER_MSG_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)
msg.data = msg.buf[ofproto.OFP_ERROR_EXPERIMENTER_MSG_SIZE:]
return msg
def _serialize_body(self):
assert self.data is not None
msg_pack_into(ofproto.OFP_ERROR_EXPERIMENTER_MSG_PACK_STR,
self.buf, ofproto.OFP_HEADER_SIZE,
self.type, self.exp_type, self.experimenter)
self.buf += self.data
@_register_parser
@_set_msg_type(ofproto.OFPT_ECHO_REPLY)
class OFPEchoReply(MsgBase):
"""
Echo reply message
    This message is handled by the Ryu framework, so the Ryu application
    does not typically need to process this.
========== =========================================================
Attribute Description
========== =========================================================
data An arbitrary length data
========== =========================================================
Example::
def send_echo_reply(self, datapath, data):
ofp_parser = datapath.ofproto_parser
reply = ofp_parser.OFPEchoReply(datapath, data)
datapath.send_msg(reply)
@set_ev_cls(ofp_event.EventOFPEchoReply,
[HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
def echo_reply_handler(self, ev):
self.logger.debug('OFPEchoReply received: data=%s',
utils.hex_array(ev.msg.data))
"""
def __init__(self, datapath, data=None):
super(OFPEchoReply, self).__init__(datapath)
self.data = data
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPEchoReply, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
msg.data = msg.buf[ofproto.OFP_HEADER_SIZE:]
return msg
def _serialize_body(self):
assert self.data is not None
self.buf += self.data
@_set_msg_type(ofproto.OFPT_FEATURES_REQUEST)
class OFPFeaturesRequest(MsgBase):
"""
Features request message
The controller sends a feature request to the switch upon session
establishment.
    This message is handled by the Ryu framework, so the Ryu application
    does not typically need to process this.
Example::
def send_features_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPFeaturesRequest(datapath)
datapath.send_msg(req)
"""
def __init__(self, datapath):
super(OFPFeaturesRequest, self).__init__(datapath)
@_register_parser
@_set_msg_type(ofproto.OFPT_EXPERIMENTER)
class OFPExperimenter(MsgBase):
"""
Experimenter extension message
============= =========================================================
Attribute Description
============= =========================================================
experimenter Experimenter ID
exp_type Experimenter defined
data Experimenter defined arbitrary additional data
============= =========================================================
"""
def __init__(self, datapath, experimenter=None, exp_type=None, data=None):
super(OFPExperimenter, self).__init__(datapath)
self.experimenter = experimenter
self.exp_type = exp_type
self.data = data
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPExperimenter, cls).parser(datapath, version,
msg_type, msg_len,
xid, buf)
(msg.experimenter, msg.exp_type) = struct.unpack_from(
ofproto.OFP_EXPERIMENTER_HEADER_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)
msg.data = msg.buf[ofproto.OFP_EXPERIMENTER_HEADER_SIZE:]
return msg
def _serialize_body(self):
assert self.data is not None
msg_pack_into(ofproto.OFP_EXPERIMENTER_HEADER_PACK_STR,
self.buf, ofproto.OFP_HEADER_SIZE,
self.experimenter, self.exp_type)
self.buf += self.data
@_register_parser
@_set_msg_type(ofproto.OFPT_FEATURES_REPLY)
class OFPSwitchFeatures(MsgBase):
"""
Features reply message
The switch responds with a features reply message to a features
request.
    This message is handled by the Ryu framework, so the Ryu application
    does not typically need to process this.
Example::
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
msg = ev.msg
self.logger.debug('OFPSwitchFeatures received: '
'datapath_id=0x%016x n_buffers=%d '
'n_tables=%d auxiliary_id=%d '
'capabilities=0x%08x',
msg.datapath_id, msg.n_buffers, msg.n_tables,
msg.auxiliary_id, msg.capabilities)
"""
def __init__(self, datapath, datapath_id=None, n_buffers=None,
n_tables=None, auxiliary_id=None, capabilities=None):
super(OFPSwitchFeatures, self).__init__(datapath)
self.datapath_id = datapath_id
self.n_buffers = n_buffers
self.n_tables = n_tables
self.auxiliary_id = auxiliary_id
self.capabilities = capabilities
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPSwitchFeatures, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
(msg.datapath_id,
msg.n_buffers,
msg.n_tables,
msg.auxiliary_id,
msg.capabilities,
msg._reserved) = struct.unpack_from(
ofproto.OFP_SWITCH_FEATURES_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)
return msg
@_set_msg_type(ofproto.OFPT_GET_CONFIG_REQUEST)
class OFPGetConfigRequest(MsgBase):
"""
Get config request message
The controller sends a get config request to query configuration
parameters in the switch.
Example::
def send_get_config_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPGetConfigRequest(datapath)
datapath.send_msg(req)
"""
def __init__(self, datapath):
super(OFPGetConfigRequest, self).__init__(datapath)
@_register_parser
@_set_msg_type(ofproto.OFPT_GET_CONFIG_REPLY)
class OFPGetConfigReply(MsgBase):
"""
Get config reply message
The switch responds to a configuration request with a get config reply
message.
============= =========================================================
Attribute Description
============= =========================================================
flags Bitmap of the following flags.
| OFPC_FRAG_NORMAL
| OFPC_FRAG_DROP
| OFPC_FRAG_REASM
miss_send_len Max bytes of new flow that datapath should send to the
controller
============= =========================================================
Example::
@set_ev_cls(ofp_event.EventOFPGetConfigReply, MAIN_DISPATCHER)
def get_config_reply_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
flags = []
if msg.flags & ofp.OFPC_FRAG_NORMAL:
flags.append('NORMAL')
if msg.flags & ofp.OFPC_FRAG_DROP:
flags.append('DROP')
if msg.flags & ofp.OFPC_FRAG_REASM:
flags.append('REASM')
self.logger.debug('OFPGetConfigReply received: '
'flags=%s miss_send_len=%d',
','.join(flags), msg.miss_send_len)
"""
def __init__(self, datapath, flags=None, miss_send_len=None):
super(OFPGetConfigReply, self).__init__(datapath)
self.flags = flags
self.miss_send_len = miss_send_len
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPGetConfigReply, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
msg.flags, msg.miss_send_len = struct.unpack_from(
ofproto.OFP_SWITCH_CONFIG_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)
return msg
@_set_msg_type(ofproto.OFPT_SET_CONFIG)
class OFPSetConfig(MsgBase):
"""
Set config request message
    The controller sends a set config request message to set configuration
parameters.
============= =========================================================
Attribute Description
============= =========================================================
flags Bitmap of the following flags.
| OFPC_FRAG_NORMAL
| OFPC_FRAG_DROP
| OFPC_FRAG_REASM
miss_send_len Max bytes of new flow that datapath should send to the
controller
============= =========================================================
Example::
def send_set_config(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPSetConfig(datapath, ofp.OFPC_FRAG_NORMAL, 256)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, miss_send_len=0):
super(OFPSetConfig, self).__init__(datapath)
self.flags = flags
self.miss_send_len = miss_send_len
def _serialize_body(self):
assert self.flags is not None
assert self.miss_send_len is not None
msg_pack_into(ofproto.OFP_SWITCH_CONFIG_PACK_STR,
self.buf, ofproto.OFP_HEADER_SIZE,
self.flags, self.miss_send_len)
class OFPMatch(StringifyMixin):
"""
Flow Match Structure
This class is implementation of the flow match structure having
compose/query API.
You can define the flow match by the keyword arguments.
The following arguments are available.
================ =============== ==================================
Argument Value Description
================ =============== ==================================
in_port Integer 32bit Switch input port
in_phy_port Integer 32bit Switch physical input port
metadata Integer 64bit Metadata passed between tables
eth_dst MAC address Ethernet destination address
eth_src MAC address Ethernet source address
eth_type Integer 16bit Ethernet frame type
vlan_vid Integer 16bit VLAN id
vlan_pcp Integer 8bit VLAN priority
ip_dscp Integer 8bit IP DSCP (6 bits in ToS field)
ip_ecn Integer 8bit IP ECN (2 bits in ToS field)
ip_proto Integer 8bit IP protocol
ipv4_src IPv4 address IPv4 source address
ipv4_dst IPv4 address IPv4 destination address
tcp_src Integer 16bit TCP source port
tcp_dst Integer 16bit TCP destination port
udp_src Integer 16bit UDP source port
udp_dst Integer 16bit UDP destination port
sctp_src Integer 16bit SCTP source port
sctp_dst Integer 16bit SCTP destination port
icmpv4_type Integer 8bit ICMP type
icmpv4_code Integer 8bit ICMP code
arp_op Integer 16bit ARP opcode
arp_spa IPv4 address ARP source IPv4 address
arp_tpa IPv4 address ARP target IPv4 address
arp_sha MAC address ARP source hardware address
arp_tha MAC address ARP target hardware address
ipv6_src IPv6 address IPv6 source address
ipv6_dst IPv6 address IPv6 destination address
ipv6_flabel Integer 32bit IPv6 Flow Label
icmpv6_type Integer 8bit ICMPv6 type
icmpv6_code Integer 8bit ICMPv6 code
ipv6_nd_target IPv6 address Target address for ND
ipv6_nd_sll MAC address Source link-layer for ND
ipv6_nd_tll MAC address Target link-layer for ND
mpls_label Integer 32bit MPLS label
mpls_tc Integer 8bit MPLS TC
mpls_bos Integer 8bit MPLS BoS bit
pbb_isid Integer 24bit PBB I-SID
tunnel_id Integer 64bit Logical Port Metadata
ipv6_exthdr Integer 16bit IPv6 Extension Header pseudo-field
pbb_uca Integer 8bit PBB UCA header field
tcp_flags Integer 16bit TCP flags
actset_output Integer 32bit Output port from action set metadata
packet_type Integer 32bit Packet type value
================ =============== ==================================
Example::
>>> # compose
>>> match = parser.OFPMatch(
... in_port=1,
... eth_type=0x86dd,
... ipv6_src=('2001:db8:bd05:1d2:288a:1fc0:1:10ee',
... 'ffff:ffff:ffff:ffff::'),
... ipv6_dst='2001:db8:bd05:1d2:288a:1fc0:1:10ee')
>>> # query
>>> if 'ipv6_src' in match:
... print match['ipv6_src']
...
('2001:db8:bd05:1d2:288a:1fc0:1:10ee', 'ffff:ffff:ffff:ffff::')
.. Note::
For the list of the supported Nicira experimenter matches,
please refer to :ref:`ryu.ofproto.nx_match <nx_match_structures>`.
.. Note::
For VLAN id match field, special values are defined in OpenFlow Spec.
1) Packets with and without a VLAN tag
- Example::
match = parser.OFPMatch()
- Packet Matching
====================== =====
non-VLAN-tagged MATCH
VLAN-tagged(vlan_id=3) MATCH
VLAN-tagged(vlan_id=5) MATCH
====================== =====
2) Only packets without a VLAN tag
- Example::
match = parser.OFPMatch(vlan_vid=0x0000)
- Packet Matching
====================== =====
non-VLAN-tagged MATCH
VLAN-tagged(vlan_id=3) x
VLAN-tagged(vlan_id=5) x
====================== =====
3) Only packets with a VLAN tag regardless of its value
- Example::
match = parser.OFPMatch(vlan_vid=(0x1000, 0x1000))
- Packet Matching
====================== =====
non-VLAN-tagged x
VLAN-tagged(vlan_id=3) MATCH
VLAN-tagged(vlan_id=5) MATCH
====================== =====
4) Only packets with VLAN tag and VID equal
- Example::
match = parser.OFPMatch(vlan_vid=(0x1000 | 3))
- Packet Matching
====================== =====
non-VLAN-tagged x
VLAN-tagged(vlan_id=3) MATCH
VLAN-tagged(vlan_id=5) x
====================== =====
"""
def __init__(self, type_=None, length=None, _ordered_fields=None,
**kwargs):
super(OFPMatch, self).__init__()
self.type = ofproto.OFPMT_OXM
self.length = length
if _ordered_fields is not None:
assert not kwargs
self._fields2 = _ordered_fields
else:
kwargs = dict(ofproto.oxm_normalize_user(k, v) for
(k, v) in kwargs.items())
fields = [ofproto.oxm_from_user(k, v) for (k, v)
in kwargs.items()]
# assumption: sorting by OXM type values makes fields
# meet ordering requirements (eg. eth_type before ipv4_src)
fields.sort(
key=lambda x: x[0][0] if isinstance(x[0], tuple) else x[0])
self._fields2 = [ofproto.oxm_to_user(n, v, m) for (n, v, m)
in fields]
@classmethod
def parser(cls, buf, offset):
"""
Returns an object which is generated from a buffer including the
expression of the wire protocol of the flow match.
"""
match = OFPMatch()
type_, length = struct.unpack_from('!HH', buf, offset)
match.type = type_
match.length = length
# ofp_match adjustment
offset += 4
length -= 4
fields = []
while length > 0:
n, value, mask, field_len = ofproto.oxm_parse(buf, offset)
k, uv = ofproto.oxm_to_user(n, value, mask)
fields.append((k, uv))
offset += field_len
length -= field_len
match._fields2 = fields
return match
def serialize(self, buf, offset):
"""
Outputs the expression of the wire protocol of the flow match into
the buf.
Returns the output length.
"""
fields = [ofproto.oxm_from_user(k, uv) for (k, uv)
in self._fields2]
hdr_pack_str = '!HH'
field_offset = offset + struct.calcsize(hdr_pack_str)
for (n, value, mask) in fields:
field_offset += ofproto.oxm_serialize(n, value, mask, buf,
field_offset)
length = field_offset - offset
msg_pack_into(hdr_pack_str, buf, offset, ofproto.OFPMT_OXM, length)
self.length = length
pad_len = utils.round_up(length, 8) - length
msg_pack_into("%dx" % pad_len, buf, field_offset)
return length + pad_len
def __getitem__(self, key):
return dict(self._fields2)[key]
def __contains__(self, key):
return key in dict(self._fields2)
def iteritems(self):
return iter(dict(self._fields2).items())
def items(self):
return self._fields2
def get(self, key, default=None):
return dict(self._fields2).get(key, default)
def stringify_attrs(self):
yield "oxm_fields", dict(self._fields2)
def to_jsondict(self):
"""
Returns a dict expressing the flow match.
"""
body = {"oxm_fields": [ofproto.oxm_to_jsondict(k, uv) for k, uv
in self._fields2],
"length": self.length,
"type": self.type}
return {self.__class__.__name__: body}
@classmethod
def from_jsondict(cls, dict_):
"""
Returns an object which is generated from a dict.
Exception raises:
KeyError -- Unknown match field is defined in dict
"""
fields = [ofproto.oxm_from_jsondict(f) for f
in dict_['oxm_fields']]
return OFPMatch(_ordered_fields=fields)
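# Round-trip sketch (illustrative; the field values are assumptions): the dict
# produced by to_jsondict() can be fed back into from_jsondict(), which is what
# the JSON/REST layers rely on.
#
#     match = OFPMatch(in_port=1, eth_type=0x0800)
#     d = match.to_jsondict()                      # {'OFPMatch': {...}}
#     restored = OFPMatch.from_jsondict(d['OFPMatch'])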
class OFPStats(StringifyMixin):
"""
Flow Stats Structure
This class is implementation of the flow stats structure having
compose/query API.
You can define the flow stats by the keyword arguments.
The following arguments are available.
============= ================ ============================================
Argument Value Description
============= ================ ============================================
duration Integer 32bit*2 Time flow entry has been alive. This field
is a tuple of two Integer 32bit. The first
value is duration_sec and the second is
duration_nsec.
idle_time Integer 32bit*2 Time flow entry has been idle.
flow_count Integer 32bit Number of aggregated flow entries.
packet_count Integer 64bit Number of packets matched by a flow entry.
byte_count Integer 64bit Number of bytes matched by a flow entry.
============= ================ ============================================
Example::
>>> # compose
>>> stats = parser.OFPStats(
... packet_count=100,
        ...     duration=(100, 200))
>>> # query
>>> if 'duration' in stats:
... print stats['duration']
...
(100, 200)
"""
def __init__(self, length=None, _ordered_fields=None, **kwargs):
super(OFPStats, self).__init__()
self.length = length
if _ordered_fields is not None:
assert not kwargs
self.fields = _ordered_fields
else:
fields = [ofproto.oxs_from_user(k, v) for (k, v)
in kwargs.items()]
# sort by OXS type values
fields.sort(
key=lambda x: x[0][0] if isinstance(x[0], tuple) else x[0])
# No mask
self.fields = [ofproto.oxs_to_user(n, v, None) for (n, v, _)
in fields]
@classmethod
def parser(cls, buf, offset):
"""
Returns an object which is generated from a buffer including the
expression of the wire protocol of the flow stats.
"""
stats = OFPStats()
reserved, length = struct.unpack_from('!HH', buf, offset)
stats.length = length
# ofp_stats adjustment
offset += 4
length -= 4
fields = []
while length > 0:
n, value, _, field_len = ofproto.oxs_parse(buf, offset)
k, uv = ofproto.oxs_to_user(n, value, None) # No mask
fields.append((k, uv))
offset += field_len
length -= field_len
stats.fields = fields
return stats
def serialize(self, buf, offset):
"""
Outputs the expression of the wire protocol of the flow stats into
the buf.
Returns the output length.
"""
fields = [ofproto.oxs_from_user(k, uv) for (k, uv)
in self.fields]
hdr_pack_str = '!HH'
field_offset = offset + struct.calcsize(hdr_pack_str)
for (n, value, _) in fields:
# No mask
field_offset += ofproto.oxs_serialize(n, value, None, buf,
field_offset)
reserved = 0
length = field_offset - offset
msg_pack_into(hdr_pack_str, buf, offset, reserved, length)
self.length = length
pad_len = utils.round_up(length, 8) - length
msg_pack_into("%dx" % pad_len, buf, field_offset)
return length + pad_len
def __getitem__(self, key):
return dict(self.fields)[key]
def __contains__(self, key):
return key in dict(self.fields)
def iteritems(self):
return dict(self.fields).items()
def items(self):
return self.fields
def get(self, key, default=None):
return dict(self.fields).get(key, default)
def stringify_attrs(self):
yield "oxs_fields", dict(self.fields)
def to_jsondict(self):
"""
Returns a dict expressing the flow stats.
"""
body = {"oxs_fields": [ofproto.oxs_to_jsondict(k, uv) for k, uv
in self.fields],
"length": self.length}
return {self.__class__.__name__: body}
@classmethod
def from_jsondict(cls, dict_):
"""
Returns an object which is generated from a dict.
Exception raises:
KeyError -- Unknown stats field is defined in dict
"""
fields = [ofproto.oxs_from_jsondict(f) for f
in dict_['oxs_fields']]
return OFPStats(_ordered_fields=fields)
class OFPPropUnknown(StringifyMixin):
def __init__(self, type_=None, length=None, buf=None):
self.buf = buf
@classmethod
def parser(cls, buf):
return cls(buf=buf)
class OFPPropBase(StringifyMixin):
_PACK_STR = '!HH'
# _TYPES = {} must be an attribute of subclass
def __init__(self, type_, length=None):
self.type = type_
self.length = length
@classmethod
def register_type(cls, type_):
def _register_type(subcls):
cls._TYPES[type_] = subcls
return subcls
return _register_type
@classmethod
def parse(cls, buf):
(type_, length) = struct.unpack_from(cls._PACK_STR, buf, 0)
rest = buf[utils.round_up(length, 8):]
try:
subcls = cls._TYPES[type_]
except KeyError:
subcls = OFPPropUnknown
prop = subcls.parser(buf)
prop.type = type_
prop.length = length
return prop, rest
@classmethod
def get_rest(cls, buf):
(type_, length) = struct.unpack_from(cls._PACK_STR, buf, 0)
offset = struct.calcsize(cls._PACK_STR)
return buf[offset:length]
def serialize(self):
# Body
# serialize_body should be implemented by subclass
body = bytearray()
body += self.serialize_body()
# fixup
self.length = len(body) + struct.calcsize(self._PACK_STR)
# Header
buf = bytearray()
msg_pack_into(self._PACK_STR, buf, 0, self.type, self.length)
buf += body
# Pad
pad_len = utils.round_up(self.length, 8) - self.length
msg_pack_into("%dx" % pad_len, buf, len(buf))
return buf
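# Registration/dispatch sketch (the property classes below are hypothetical,
# for illustration only): concrete property families subclass OFPPropBase with
# their own _TYPES table; register_type() fills that table so parse() can
# dispatch on the 'type' field, falling back to OFPPropUnknown.
#
#     class OFPExampleProp(OFPPropBase):
#         _TYPES = {}
#
#     @OFPExampleProp.register_type(0)
#     class OFPExamplePropFoo(OFPExampleProp):
#         @classmethod
#         def parser(cls, buf):
#             return cls(type_=0)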
class OFPPropCommonExperimenter4ByteData(StringifyMixin):
_PACK_STR = '!HHII'
_EXPERIMENTER_DATA_PACK_STR = '!I'
_EXPERIMENTER_DATA_SIZE = 4
def __init__(self, type_=None, length=None, experimenter=None,
exp_type=None, data=bytearray()):
self.type = type_
self.length = length
self.experimenter = experimenter
self.exp_type = exp_type
self.data = data
@classmethod
def parser(cls, buf):
(type_, length, experimenter, exp_type) = struct.unpack_from(
ofproto.OFP_PROP_EXPERIMENTER_PACK_STR, buf, 0)
rest = buf[ofproto.OFP_PROP_EXPERIMENTER_SIZE:length]
data = []
while rest:
(d,) = struct.unpack_from(
cls._EXPERIMENTER_DATA_PACK_STR, rest, 0)
data.append(d)
rest = rest[cls._EXPERIMENTER_DATA_SIZE:]
return cls(type_, length, experimenter, exp_type, data)
def serialize(self):
offset = 0
bin_data = bytearray()
for d in self.data:
msg_pack_into(self._EXPERIMENTER_DATA_PACK_STR,
bin_data, offset, d)
offset += self._EXPERIMENTER_DATA_SIZE
# fixup
self.length = struct.calcsize(self._PACK_STR)
self.length += len(bin_data)
buf = bytearray()
msg_pack_into(self._PACK_STR, buf,
0, self.type, self.length, self.experimenter,
self.exp_type)
buf += bin_data
# Pad
pad_len = utils.round_up(self.length, 8) - self.length
msg_pack_into("%dx" % pad_len, buf, len(buf))
return buf
class OFPPortDescProp(OFPPropBase):
_TYPES = {}
@OFPPortDescProp.register_type(ofproto.OFPPDPT_ETHERNET)
class OFPPortDescPropEthernet(OFPPortDescProp):
def __init__(self, type_=None, length=None, curr=None, advertised=None,
supported=None, peer=None, curr_speed=None, max_speed=None):
self.type = type_
self.length = length
self.curr = curr
self.advertised = advertised
self.supported = supported
self.peer = peer
self.curr_speed = curr_speed
self.max_speed = max_speed
@classmethod
def parser(cls, buf):
ether = cls()
(ether.type, ether.length, ether.curr,
ether.advertised, ether.supported,
ether.peer, ether.curr_speed, ether.max_speed) = struct.unpack_from(
ofproto.OFP_PORT_DESC_PROP_ETHERNET_PACK_STR, buf, 0)
return ether
@OFPPortDescProp.register_type(ofproto.OFPPDPT_OPTICAL)
class OFPPortDescPropOptical(OFPPortDescProp):
def __init__(self, type_=None, length=None, supported=None,
tx_min_freq_lmda=None, tx_max_freq_lmda=None,
tx_grid_freq_lmda=None, rx_min_freq_lmda=None,
rx_max_freq_lmda=None, rx_grid_freq_lmda=None,
tx_pwr_min=None, tx_pwr_max=None):
self.type = type_
self.length = length
self.supported = supported
self.tx_min_freq_lmda = tx_min_freq_lmda
self.tx_max_freq_lmda = tx_max_freq_lmda
self.tx_grid_freq_lmda = tx_grid_freq_lmda
self.rx_min_freq_lmda = rx_min_freq_lmda
self.rx_max_freq_lmda = rx_max_freq_lmda
self.rx_grid_freq_lmda = rx_grid_freq_lmda
self.tx_pwr_min = tx_pwr_min
self.tx_pwr_max = tx_pwr_max
@classmethod
def parser(cls, buf):
optical = cls()
(optical.type, optical.length, optical.supported,
optical.tx_min_freq_lmda, optical.tx_max_freq_lmda,
optical.tx_grid_freq_lmda, optical.rx_min_freq_lmda,
optical.rx_max_freq_lmda, optical.rx_grid_freq_lmda,
optical.tx_pwr_min, optical.tx_pwr_max) = struct.unpack_from(
ofproto.OFP_PORT_DESC_PROP_OPTICAL_PACK_STR, buf, 0)
return optical
@OFPPortDescProp.register_type(ofproto.OFPPDPT_PIPELINE_INPUT)
@OFPPortDescProp.register_type(ofproto.OFPPDPT_PIPELINE_OUTPUT)
class OFPPortDescPropOxm(OFPPortDescProp):
def __init__(self, type_=None, length=None, oxm_ids=None):
oxm_ids = oxm_ids if oxm_ids else []
super(OFPPortDescPropOxm, self).__init__(type_, length)
self.oxm_ids = oxm_ids
@classmethod
def parser(cls, buf):
rest = cls.get_rest(buf)
ids = []
while rest:
i, rest = OFPOxmId.parse(rest)
ids.append(i)
return cls(oxm_ids=ids)
def serialize_body(self):
bin_ids = bytearray()
for i in self.oxm_ids:
bin_ids += i.serialize()
return bin_ids
@OFPPortDescProp.register_type(ofproto.OFPPDPT_RECIRCULATE)
class OFPPortDescPropRecirculate(OFPPortDescProp):
_PORT_NO_PACK_STR = '!I'
def __init__(self, type_=None, length=None, port_nos=None):
port_nos = port_nos if port_nos else []
super(OFPPortDescPropRecirculate, self).__init__(type_, length)
self.port_nos = port_nos
@classmethod
def parser(cls, buf):
rest = cls.get_rest(buf)
nos = []
while rest:
(n,) = struct.unpack_from(cls._PORT_NO_PACK_STR,
six.binary_type(rest), 0)
rest = rest[struct.calcsize(cls._PORT_NO_PACK_STR):]
nos.append(n)
return cls(port_nos=nos)
def serialize_body(self):
bin_nos = bytearray()
for n in self.port_nos:
bin_no = bytearray()
msg_pack_into(self._PORT_NO_PACK_STR, bin_no, 0, n)
bin_nos += bin_no
return bin_nos
@OFPPortDescProp.register_type(ofproto.OFPPDPT_EXPERIMENTER)
class OFPPortDescPropExperimenter(OFPPropCommonExperimenter4ByteData):
pass
class OFPTableModProp(OFPPropBase):
_TYPES = {}
@OFPTableModProp.register_type(ofproto.OFPTMPT_EVICTION)
class OFPTableModPropEviction(OFPTableModProp):
def __init__(self, type_=None, length=None, flags=None):
self.type = type_
self.length = length
self.flags = flags
@classmethod
def parser(cls, buf):
eviction = cls()
(eviction.type, eviction.length, eviction.flags) = struct.unpack_from(
ofproto.OFP_TABLE_MOD_PROP_EVICTION_PACK_STR, buf, 0)
return eviction
def serialize(self):
# fixup
self.length = ofproto.OFP_TABLE_MOD_PROP_EVICTION_SIZE
buf = bytearray()
msg_pack_into(ofproto.OFP_TABLE_MOD_PROP_EVICTION_PACK_STR, buf, 0,
self.type, self.length, self.flags)
return buf
@OFPTableModProp.register_type(ofproto.OFPTMPT_VACANCY)
class OFPTableModPropVacancy(OFPTableModProp):
def __init__(self, type_=None, length=None, vacancy_down=None,
vacancy_up=None, vacancy=None):
self.type = type_
self.length = length
self.vacancy_down = vacancy_down
self.vacancy_up = vacancy_up
self.vacancy = vacancy
@classmethod
def parser(cls, buf):
vacancy = cls()
(vacancy.type, vacancy.length, vacancy.vacancy_down,
vacancy.vacancy_up, vacancy.vacancy) = struct.unpack_from(
ofproto.OFP_TABLE_MOD_PROP_VACANCY_PACK_STR, buf, 0)
return vacancy
def serialize(self):
# fixup
self.length = ofproto.OFP_TABLE_MOD_PROP_VACANCY_SIZE
buf = bytearray()
msg_pack_into(ofproto.OFP_TABLE_MOD_PROP_VACANCY_PACK_STR, buf, 0,
self.type, self.length, self.vacancy_down,
self.vacancy_up, self.vacancy)
return buf
@OFPTableModProp.register_type(ofproto.OFPTMPT_EXPERIMENTER)
class OFPTableModPropExperimenter(OFPPropCommonExperimenter4ByteData):
pass
class OFPQueueDescProp(OFPPropBase):
_TYPES = {}
@OFPQueueDescProp.register_type(ofproto.OFPQDPT_MIN_RATE)
class OFPQueueDescPropMinRate(OFPQueueDescProp):
def __init__(self, type_=None, length=None, rate=None):
self.type = type_
self.length = length
self.rate = rate
@classmethod
def parser(cls, buf):
minrate = cls()
(minrate.type, minrate.length, minrate.rate) = struct.unpack_from(
ofproto.OFP_QUEUE_DESC_PROP_MIN_RATE_PACK_STR, buf, 0)
return minrate
@OFPQueueDescProp.register_type(ofproto.OFPQDPT_MAX_RATE)
class OFPQueueDescPropMaxRate(OFPQueueDescProp):
def __init__(self, type_=None, length=None, rate=None):
self.type = type_
self.length = length
self.rate = rate
@classmethod
def parser(cls, buf):
maxrate = cls()
(maxrate.type, maxrate.length, maxrate.rate) = struct.unpack_from(
ofproto.OFP_QUEUE_DESC_PROP_MAX_RATE_PACK_STR, buf, 0)
return maxrate
@OFPQueueDescProp.register_type(ofproto.OFPQDPT_EXPERIMENTER)
class OFPQueueDescPropExperimenter(OFPPropCommonExperimenter4ByteData):
pass
class OFPRoleProp(OFPPropBase):
_TYPES = {}
@OFPRoleProp.register_type(ofproto.OFPRPT_EXPERIMENTER)
class OFPRolePropExperimenter(OFPPropCommonExperimenter4ByteData):
pass
class OFPTime(StringifyMixin):
def __init__(self, seconds=None, nanoseconds=None):
self.seconds = seconds
self.nanoseconds = nanoseconds
@classmethod
def parser(cls, buf, offset):
cls_ = cls()
(cls_.seconds, cls_.nanoseconds) = struct.unpack_from(
ofproto.OFP_TIME_PACK_STR, buf, offset)
return cls_
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_TIME_PACK_STR, buf, offset,
self.seconds, self.nanoseconds)
return ofproto.OFP_TIME_SIZE
class OFPBundleProp(OFPPropBase):
_TYPES = {}
@OFPBundleProp.register_type(ofproto.OFPBPT_TIME)
class OFPBundlePropTime(OFPBundleProp):
def __init__(self, type_=None, length=None, scheduled_time=None):
super(OFPBundlePropTime, self).__init__(type_, length)
self.scheduled_time = scheduled_time
@classmethod
def parser(cls, buf):
prop = cls()
offset = ofproto.OFP_BUNDLE_PROP_TIME_PACK_STR0_SIZE
prop.scheduled_time = OFPTime.parser(buf, offset)
return prop
def serialize(self):
# fixup
self.length = ofproto.OFP_BUNDLE_PROP_TIME_PACK_STR_SIZE
buf = bytearray()
msg_pack_into(ofproto.OFP_BUNDLE_PROP_TIME_PACK_STR0, buf, 0,
self.type, self.length)
offset = ofproto.OFP_BUNDLE_PROP_TIME_PACK_STR0_SIZE
self.scheduled_time.serialize(buf, offset)
return buf
@OFPBundleProp.register_type(ofproto.OFPRPT_EXPERIMENTER)
class OFPBundlePropExperimenter(OFPPropCommonExperimenter4ByteData):
pass
@_register_parser
@_set_msg_type(ofproto.OFPT_PACKET_IN)
class OFPPacketIn(MsgBase):
"""
Packet-In message
The switch sends a packet that it has received to the controller with
this message.
============= =========================================================
Attribute Description
============= =========================================================
buffer_id ID assigned by datapath
total_len Full length of frame
reason Reason the packet is being sent. One of the following values.
| OFPR_TABLE_MISS
| OFPR_APPLY_ACTION
| OFPR_INVALID_TTL
| OFPR_ACTION_SET
| OFPR_GROUP
| OFPR_PACKET_OUT
table_id ID of the table that was looked up
cookie Cookie of the flow entry that was looked up
match Instance of ``OFPMatch``
data Ethernet frame
============= =========================================================
Example::
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
if msg.reason == ofp.OFPR_TABLE_MISS:
reason = 'TABLE MISS'
elif msg.reason == ofp.OFPR_APPLY_ACTION:
reason = 'APPLY ACTION'
elif msg.reason == ofp.OFPR_INVALID_TTL:
reason = 'INVALID TTL'
elif msg.reason == ofp.OFPR_ACTION_SET:
reason = 'ACTION SET'
elif msg.reason == ofp.OFPR_GROUP:
reason = 'GROUP'
elif msg.reason == ofp.OFPR_PACKET_OUT:
reason = 'PACKET OUT'
else:
reason = 'unknown'
self.logger.debug('OFPPacketIn received: '
'buffer_id=%x total_len=%d reason=%s '
'table_id=%d cookie=%d match=%s data=%s',
msg.buffer_id, msg.total_len, reason,
msg.table_id, msg.cookie, msg.match,
utils.hex_array(msg.data))
"""
def __init__(self, datapath, buffer_id=None, total_len=None, reason=None,
table_id=None, cookie=None, match=None, data=None):
super(OFPPacketIn, self).__init__(datapath)
self.buffer_id = buffer_id
self.total_len = total_len
self.reason = reason
self.table_id = table_id
self.cookie = cookie
self.match = match
self.data = data
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPPacketIn, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
(msg.buffer_id, msg.total_len, msg.reason,
msg.table_id, msg.cookie) = struct.unpack_from(
ofproto.OFP_PACKET_IN_PACK_STR,
msg.buf, ofproto.OFP_HEADER_SIZE)
msg.match = OFPMatch.parser(msg.buf, ofproto.OFP_PACKET_IN_SIZE -
ofproto.OFP_MATCH_SIZE)
match_len = utils.round_up(msg.match.length, 8)
msg.data = msg.buf[(ofproto.OFP_PACKET_IN_SIZE -
ofproto.OFP_MATCH_SIZE + match_len + 2):]
if msg.total_len < len(msg.data):
# discard padding for 8-byte alignment of OFP packet
msg.data = msg.data[:msg.total_len]
return msg
@_register_parser
@_set_msg_type(ofproto.OFPT_FLOW_REMOVED)
class OFPFlowRemoved(MsgBase):
"""
Flow removed message
When flow entries time out or are deleted, the switch notifies controller
with this message.
================ ======================================================
Attribute Description
================ ======================================================
table_id ID of the table
reason One of the following values.
| OFPRR_IDLE_TIMEOUT
| OFPRR_HARD_TIMEOUT
| OFPRR_DELETE
| OFPRR_GROUP_DELETE
| OFPRR_METER_DELETE
| OFPRR_EVICTION
priority Priority level of flow entry
idle_timeout Idle timeout from original flow mod
hard_timeout Hard timeout from original flow mod
cookie Opaque controller-issued identifier
match Instance of ``OFPMatch``
stats Instance of ``OFPStats``
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPFlowRemoved, MAIN_DISPATCHER)
def flow_removed_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
if msg.reason == ofp.OFPRR_IDLE_TIMEOUT:
reason = 'IDLE TIMEOUT'
elif msg.reason == ofp.OFPRR_HARD_TIMEOUT:
reason = 'HARD TIMEOUT'
elif msg.reason == ofp.OFPRR_DELETE:
reason = 'DELETE'
elif msg.reason == ofp.OFPRR_GROUP_DELETE:
reason = 'GROUP DELETE'
elif msg.reason == ofp.OFPRR_METER_DELETE:
reason = 'METER DELETE'
elif msg.reason == ofp.OFPRR_EVICTION:
reason = 'EVICTION'
else:
reason = 'unknown'
self.logger.debug('OFPFlowRemoved received: '
'table_id=%d reason=%s priority=%d '
'idle_timeout=%d hard_timeout=%d cookie=%d '
'match=%s stats=%s',
msg.table_id, reason, msg.priority,
msg.idle_timeout, msg.hard_timeout, msg.cookie,
msg.match, msg.stats)
"""
def __init__(self, datapath, table_id=None, reason=None, priority=None,
idle_timeout=None, hard_timeout=None, cookie=None,
match=None, stats=None):
super(OFPFlowRemoved, self).__init__(datapath)
self.table_id = table_id
self.reason = reason
self.priority = priority
self.idle_timeout = idle_timeout
self.hard_timeout = hard_timeout
self.cookie = cookie
self.match = match
self.stats = stats
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPFlowRemoved, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
(msg.table_id, msg.reason, msg.priority, msg.idle_timeout,
msg.hard_timeout, msg.cookie) = struct.unpack_from(
ofproto.OFP_FLOW_REMOVED_PACK_STR0,
msg.buf, ofproto.OFP_HEADER_SIZE)
offset = (ofproto.OFP_FLOW_REMOVED_SIZE - ofproto.OFP_MATCH_SIZE)
msg.match = OFPMatch.parser(msg.buf, offset)
offset += utils.round_up(msg.match.length, 8)
stats_length = msg.msg_len - offset
if stats_length > 0:
msg.stats = OFPStats.parser(buf, offset)
return msg
class OFPPort(StringifyMixin):
"""
Description of a port
========== =========================================================
Attribute Description
========== =========================================================
port_no Port number; it uniquely identifies a port within
a switch.
length Length of ofp_port (excluding padding).
hw_addr MAC address for the port.
name Null-terminated string containing a human-readable name
for the interface.
config Bitmap of port configuration flags.
| OFPPC_PORT_DOWN
| OFPPC_NO_RECV
| OFPPC_NO_FWD
| OFPPC_NO_PACKET_IN
state Bitmap of port state flags.
| OFPPS_LINK_DOWN
| OFPPS_BLOCKED
| OFPPS_LIVE
properties List of ``OFPPortDescProp`` subclass instance
========== =========================================================
"""
_TYPE = {
'ascii': [
'hw_addr',
],
'utf-8': [
# OF spec is unclear about the encoding of name.
# We assume UTF-8, which is what OVS uses.
'name',
]
}
def __init__(self, port_no=None, length=None, hw_addr=None, name=None,
config=None, state=None, properties=None):
super(OFPPort, self).__init__()
self.port_no = port_no
self.length = length
self.hw_addr = hw_addr
self.name = name
self.config = config
self.state = state
self.properties = properties
@classmethod
def parser(cls, buf, offset):
(port_no, length, hw_addr, name, config, state) = struct.unpack_from(
ofproto.OFP_PORT_PACK_STR, buf, offset)
hw_addr = addrconv.mac.bin_to_text(hw_addr)
name = name.rstrip(b'\0')
props = []
rest = buf[offset + ofproto.OFP_PORT_SIZE:offset + length]
while rest:
p, rest = OFPPortDescProp.parse(rest)
props.append(p)
ofpport = cls(port_no, length, hw_addr, name, config, state, props)
return ofpport
class OFPTableDesc(StringifyMixin):
def __init__(self, length=None, table_id=None, config=None,
properties=None):
super(OFPTableDesc, self).__init__()
self.table_id = table_id
self.length = length
self.config = config
self.properties = properties
@classmethod
def parser(cls, buf, offset):
(length, table_id, config) = struct.unpack_from(
ofproto.OFP_TABLE_DESC_PACK_STR, buf, offset)
props = []
rest = buf[offset + ofproto.OFP_TABLE_DESC_SIZE:offset + length]
while rest:
p, rest = OFPTableModProp.parse(rest)
props.append(p)
ofptabledesc = cls(length, table_id, config, props)
return ofptabledesc
class OFPQueueDesc(StringifyMixin):
def __init__(self, port_no=None, queue_id=None, len_=None,
properties=None):
super(OFPQueueDesc, self).__init__()
self.port_no = port_no
self.queue_id = queue_id
self.len = len_
self.properties = properties
@classmethod
def parser(cls, buf, offset):
(port_no, queue_id, len_) = struct.unpack_from(
ofproto.OFP_QUEUE_DESC_PACK_STR, buf, offset)
props = []
rest = buf[offset + ofproto.OFP_QUEUE_DESC_SIZE:offset + len_]
while rest:
p, rest = OFPQueueDescProp.parse(rest)
props.append(p)
ofpqueuedesc = cls(port_no, queue_id, len_, props)
return ofpqueuedesc
def _set_stats_type(stats_type, stats_body_cls):
def _set_cls_stats_type(cls):
cls.cls_stats_type = stats_type
cls.cls_stats_body_cls = stats_body_cls
return cls
return _set_cls_stats_type
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPMultipartRequest(MsgBase):
def __init__(self, datapath, flags):
super(OFPMultipartRequest, self).__init__(datapath)
self.type = self.__class__.cls_stats_type
self.flags = flags
def _serialize_stats_body(self):
pass
def _serialize_body(self):
msg_pack_into(ofproto.OFP_MULTIPART_REQUEST_PACK_STR,
self.buf, ofproto.OFP_HEADER_SIZE,
self.type, self.flags)
self._serialize_stats_body()
@_register_parser
@_set_msg_type(ofproto.OFPT_METER_MOD)
class OFPMeterMod(MsgBase):
"""
Meter modification message
The controller sends this message to modify the meter.
================ ======================================================
Attribute Description
================ ======================================================
command One of the following values.
| OFPMC_ADD
| OFPMC_MODIFY
| OFPMC_DELETE
flags Bitmap of the following flags.
| OFPMF_KBPS
| OFPMF_PKTPS
| OFPMF_BURST
| OFPMF_STATS
meter_id Meter instance
bands List of the following class instances.
| OFPMeterBandDrop
| OFPMeterBandDscpRemark
| OFPMeterBandExperimenter
================ ======================================================
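Example (a minimal sketch; the meter_id, rate and burst_size values
below are illustrative only)::
def send_meter_mod(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
bands = [ofp_parser.OFPMeterBandDrop(rate=1000, burst_size=10)]
req = ofp_parser.OFPMeterMod(datapath, ofp.OFPMC_ADD,
ofp.OFPMF_KBPS, 1, bands)
datapath.send_msg(req)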
"""
def __init__(self, datapath, command=ofproto.OFPMC_ADD,
flags=ofproto.OFPMF_KBPS, meter_id=1, bands=None):
bands = bands if bands else []
super(OFPMeterMod, self).__init__(datapath)
self.command = command
self.flags = flags
self.meter_id = meter_id
self.bands = bands
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPMeterMod, cls).parser(
datapath, version, msg_type, msg_len, xid, buf)
(msg.command, msg.flags, msg.meter_id) = struct.unpack_from(
ofproto.OFP_METER_MOD_PACK_STR, buf, ofproto.OFP_HEADER_SIZE)
offset = ofproto.OFP_METER_MOD_SIZE
msg.bands = []
while offset < msg.msg_len:
band = OFPMeterBandHeader.parser(buf, offset)
msg.bands.append(band)
offset += band.len
return msg
def _serialize_body(self):
msg_pack_into(ofproto.OFP_METER_MOD_PACK_STR, self.buf,
ofproto.OFP_HEADER_SIZE,
self.command, self.flags, self.meter_id)
offset = ofproto.OFP_METER_MOD_SIZE
for b in self.bands:
b.serialize(self.buf, offset)
offset += b.len
@_set_msg_type(ofproto.OFPT_TABLE_MOD)
class OFPTableMod(MsgBase):
"""
Flow table configuration message
The controller sends this message to configure table state.
================ ======================================================
Attribute Description
================ ======================================================
table_id ID of the table (OFPTT_ALL indicates all tables)
config Bitmap of configuration flags.
| OFPTC_EVICTION
| OFPTC_VACANCY_EVENTS
properties List of ``OFPTableModProp`` subclass instance
================ ======================================================
Example::
def send_table_mod(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
flags = ofp.OFPTC_VACANCY_EVENTS
properties = [ofp_parser.OFPTableModPropEviction(flags=flags)]
req = ofp_parser.OFPTableMod(datapath, 1, 3, properties)
datapath.send_msg(req)
"""
def __init__(self, datapath, table_id, config, properties):
super(OFPTableMod, self).__init__(datapath)
self.table_id = table_id
self.config = config
self.properties = properties
def _serialize_body(self):
props_buf = bytearray()
for p in self.properties:
props_buf += p.serialize()
msg_pack_into(ofproto.OFP_TABLE_MOD_PACK_STR, self.buf,
ofproto.OFP_HEADER_SIZE,
self.table_id, self.config)
self.buf += props_buf
@_register_parser
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPMultipartReply(MsgBase):
_STATS_MSG_TYPES = {}
@staticmethod
def register_stats_type(body_single_struct=False):
def _register_stats_type(cls):
assert cls.cls_stats_type is not None
assert cls.cls_stats_type not in OFPMultipartReply._STATS_MSG_TYPES
assert cls.cls_stats_body_cls is not None
cls.cls_body_single_struct = body_single_struct
OFPMultipartReply._STATS_MSG_TYPES[cls.cls_stats_type] = cls
return cls
return _register_stats_type
def __init__(self, datapath, body=None, flags=None):
super(OFPMultipartReply, self).__init__(datapath)
self.body = body
self.flags = flags
@classmethod
def parser_stats_body(cls, buf, msg_len, offset):
body_cls = cls.cls_stats_body_cls
body = []
while offset < msg_len:
entry = body_cls.parser(buf, offset)
body.append(entry)
offset += entry.length
if cls.cls_body_single_struct:
return body[0]
return body
@classmethod
def parser_stats(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = MsgBase.parser.__func__(
cls, datapath, version, msg_type, msg_len, xid, buf)
msg.body = msg.parser_stats_body(msg.buf, msg.msg_len,
ofproto.OFP_MULTIPART_REPLY_SIZE)
return msg
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
type_, flags = struct.unpack_from(
ofproto.OFP_MULTIPART_REPLY_PACK_STR, six.binary_type(buf),
ofproto.OFP_HEADER_SIZE)
stats_type_cls = cls._STATS_MSG_TYPES.get(type_)
msg = super(OFPMultipartReply, stats_type_cls).parser(
datapath, version, msg_type, msg_len, xid, buf)
msg.type = type_
msg.flags = flags
offset = ofproto.OFP_MULTIPART_REPLY_SIZE
body = []
while offset < msg_len:
b = stats_type_cls.cls_stats_body_cls.parser(msg.buf, offset)
offset_step = b.length if hasattr(b, 'length') else b.len
if offset_step < 1:
raise exception.OFPMalformedMessage()
body.append(b)
offset += offset_step
if stats_type_cls.cls_body_single_struct:
msg.body = body[0]
else:
msg.body = body
return msg
class OFPDescStats(ofproto_parser.namedtuple('OFPDescStats', (
'mfr_desc', 'hw_desc', 'sw_desc', 'serial_num', 'dp_desc'))):
_TYPE = {
'ascii': [
'mfr_desc',
'hw_desc',
'sw_desc',
'serial_num',
'dp_desc',
]
}
@classmethod
def parser(cls, buf, offset):
desc = struct.unpack_from(ofproto.OFP_DESC_PACK_STR,
buf, offset)
desc = list(desc)
desc = [x.rstrip(b'\0') for x in desc]
stats = cls(*desc)
stats.length = ofproto.OFP_DESC_SIZE
return stats
@_set_stats_type(ofproto.OFPMP_DESC, OFPDescStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPDescStatsRequest(OFPMultipartRequest):
"""
Description statistics request message
The controller uses this message to query the description of the switch.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
================ ======================================================
Example::
def send_desc_stats_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPDescStatsRequest(datapath, 0)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, type_=None):
super(OFPDescStatsRequest, self).__init__(datapath, flags)
@OFPMultipartReply.register_stats_type(body_single_struct=True)
@_set_stats_type(ofproto.OFPMP_DESC, OFPDescStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPDescStatsReply(OFPMultipartReply):
"""
Description statistics reply message
The switch responds with this message to a description statistics
request.
================ ======================================================
Attribute Description
================ ======================================================
body Instance of ``OFPDescStats``
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPDescStatsReply, MAIN_DISPATCHER)
def desc_stats_reply_handler(self, ev):
body = ev.msg.body
self.logger.debug('DescStats: mfr_desc=%s hw_desc=%s sw_desc=%s '
'serial_num=%s dp_desc=%s',
body.mfr_desc, body.hw_desc, body.sw_desc,
body.serial_num, body.dp_desc)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPDescStatsReply, self).__init__(datapath, **kwargs)
class OFPTableFeaturesStats(StringifyMixin):
_TYPE = {
'utf-8': [
# OF spec is unclear about the encoding of name.
# We assume UTF-8.
'name',
]
}
def __init__(self, table_id=None, command=None, features=None, name=None,
metadata_match=None, metadata_write=None, capabilities=None,
max_entries=None, properties=None, length=None):
super(OFPTableFeaturesStats, self).__init__()
self.length = length
self.table_id = table_id
self.command = command
self.features = features
self.name = name
self.metadata_match = metadata_match
self.metadata_write = metadata_write
self.capabilities = capabilities
self.max_entries = max_entries
self.properties = properties
@classmethod
def parser(cls, buf, offset):
tbl = cls()
(tbl.length, tbl.table_id, tbl.command, tbl.features,
name, tbl.metadata_match, tbl.metadata_write,
tbl.capabilities, tbl.max_entries
) = struct.unpack_from(ofproto.OFP_TABLE_FEATURES_PACK_STR,
buf, offset)
tbl.name = name.rstrip(b'\0')
props = []
rest = buf[offset + ofproto.OFP_TABLE_FEATURES_SIZE:
offset + tbl.length]
while rest:
p, rest = OFPTableFeatureProp.parse(rest)
props.append(p)
tbl.properties = props
return tbl
def serialize(self):
# fixup
bin_props = bytearray()
for p in self.properties:
bin_props += p.serialize()
self.length = ofproto.OFP_TABLE_FEATURES_SIZE + len(bin_props)
buf = bytearray()
msg_pack_into(ofproto.OFP_TABLE_FEATURES_PACK_STR, buf, 0,
self.length, self.table_id, self.command, self.features,
self.name, self.metadata_match, self.metadata_write,
self.capabilities, self.max_entries)
return buf + bin_props
class OFPTableFeatureProp(OFPPropBase):
_TYPES = {}
class OFPInstructionId(StringifyMixin):
_PACK_STR = '!HH' # type, len
def __init__(self, type_, len_=None):
self.type = type_
self.len = len_
# XXX experimenter
@classmethod
def parse(cls, buf):
(type_, len_,) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf), 0)
rest = buf[len_:]
return cls(type_=type_, len_=len_), rest
def serialize(self):
# fixup
self.len = struct.calcsize(self._PACK_STR)
buf = bytearray()
msg_pack_into(self._PACK_STR, buf, 0, self.type, self.len)
return buf
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_INSTRUCTIONS)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_INSTRUCTIONS_MISS)
class OFPTableFeaturePropInstructions(OFPTableFeatureProp):
def __init__(self, type_=None, length=None, instruction_ids=None):
instruction_ids = instruction_ids if instruction_ids else []
super(OFPTableFeaturePropInstructions, self).__init__(type_, length)
self.instruction_ids = instruction_ids
@classmethod
def parser(cls, buf):
rest = cls.get_rest(buf)
ids = []
while rest:
i, rest = OFPInstructionId.parse(rest)
ids.append(i)
return cls(instruction_ids=ids)
def serialize_body(self):
bin_ids = bytearray()
for i in self.instruction_ids:
bin_ids += i.serialize()
return bin_ids
# Implementation note: While OpenFlow 1.3.2 shares the same ofp_action_header
# for flow_mod and table_features, we have separate classes. We named this
# class to match OpenFlow 1.4's name (ofp_action_id).
class OFPActionId(StringifyMixin):
_PACK_STR = '!HH' # type, len
def __init__(self, type_, len_=None):
self.type = type_
self.len = len_
# XXX experimenter
@classmethod
def parse(cls, buf):
(type_, len_,) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf), 0)
rest = buf[len_:]
return cls(type_=type_, len_=len_), rest
def serialize(self):
# fixup
self.len = struct.calcsize(self._PACK_STR)
buf = bytearray()
msg_pack_into(self._PACK_STR, buf, 0, self.type, self.len)
return buf
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_WRITE_ACTIONS)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_WRITE_ACTIONS_MISS)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_APPLY_ACTIONS)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_APPLY_ACTIONS_MISS)
class OFPTableFeaturePropActions(OFPTableFeatureProp):
def __init__(self, type_=None, length=None, action_ids=None):
action_ids = action_ids if action_ids else []
super(OFPTableFeaturePropActions, self).__init__(type_, length)
self.action_ids = action_ids
@classmethod
def parser(cls, buf):
rest = cls.get_rest(buf)
ids = []
while rest:
i, rest = OFPActionId.parse(rest)
ids.append(i)
return cls(action_ids=ids)
def serialize_body(self):
bin_ids = bytearray()
for i in self.action_ids:
bin_ids += i.serialize()
return bin_ids
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_NEXT_TABLES)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_NEXT_TABLES_MISS)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_TABLE_SYNC_FROM)
class OFPTableFeaturePropNextTables(OFPTableFeatureProp):
_TABLE_ID_PACK_STR = '!B'
def __init__(self, type_=None, length=None, table_ids=None):
table_ids = table_ids if table_ids else []
super(OFPTableFeaturePropNextTables, self).__init__(type_, length)
self.table_ids = table_ids
@classmethod
def parser(cls, buf):
rest = cls.get_rest(buf)
ids = []
while rest:
(i,) = struct.unpack_from(cls._TABLE_ID_PACK_STR, six.binary_type(rest), 0)
rest = rest[struct.calcsize(cls._TABLE_ID_PACK_STR):]
ids.append(i)
return cls(table_ids=ids)
def serialize_body(self):
bin_ids = bytearray()
for i in self.table_ids:
bin_id = bytearray()
msg_pack_into(self._TABLE_ID_PACK_STR, bin_id, 0, i)
bin_ids += bin_id
return bin_ids
# Implementation note: OFPOxmId is specific to this implementation.
# It does not have a corresponding structure in the specification.
# (the specification uses plain uint32_t for non-experimenter OXMs
# and uint64_t for experimenter OXMs.)
#
# I have taken a look at some software switch implementations,
# but they all look broken or incomplete. According to the spec,
# oxm_hasmask should be 1 if a switch supports masking for the type.
# The right value for oxm_length is not clear from the spec.
# Update: OpenFlow 1.3.3 "clarified" that oxm_length here is the payload
# length. It is still unclear whether it should be doubled for hasmask,
# though.
# ofsoftswitch13
# oxm_hasmask always 0
# oxm_length same as ofp_match etc (as without mask)
# linc/of_protocol
# oxm_hasmask always 0
# oxm_length always 0
# ovs:
# seems in flux as of writing this [20141003]
class OFPOxmId(StringifyMixin):
_PACK_STR = '!I' # oxm header
_EXPERIMENTER_ID_PACK_STR = '!I'
_TYPE = {
'ascii': [
'type',
],
}
def __init__(self, type_, hasmask=False, length=None):
self.type = type_
self.hasmask = hasmask
self.length = length
@classmethod
def parse(cls, buf):
(oxm,) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf), 0)
# oxm (32 bit) == class (16) | field (7) | hasmask (1) | length (8)
# in case of experimenter OXMs, another 32 bit value
# (experimenter id) follows.
(type_, _v) = ofproto.oxm_to_user(oxm >> (1 + 8), None, None)
rest = buf[struct.calcsize(cls._PACK_STR):]
hasmask = ofproto.oxm_tlv_header_extract_hasmask(oxm)
length = oxm & 0xff # XXX see the comment on OFPOxmId
class_ = oxm >> (7 + 1 + 8)
if class_ == ofproto.OFPXMC_EXPERIMENTER:
(exp_id,) = struct.unpack_from(cls._EXPERIMENTER_ID_PACK_STR,
six.binary_type(rest), 0)
rest = rest[struct.calcsize(cls._EXPERIMENTER_ID_PACK_STR):]
subcls = OFPExperimenterOxmId
return subcls(type_=type_, exp_id=exp_id, hasmask=hasmask,
length=length), rest
else:
return cls(type_=type_, hasmask=hasmask, length=length), rest
def serialize(self):
# fixup
self.length = 0 # XXX see the comment on OFPOxmId
(n, _v, _m) = ofproto.oxm_from_user(self.type, None)
oxm = (n << (1 + 8)) | (self.hasmask << 8) | self.length
buf = bytearray()
msg_pack_into(self._PACK_STR, buf, 0, oxm)
assert n >> 7 != ofproto.OFPXMC_EXPERIMENTER
return buf
class OFPExperimenterOxmId(OFPOxmId):
def __init__(self, type_, exp_id, hasmask=False, length=None):
super(OFPExperimenterOxmId, self).__init__(type_=type_,
hasmask=hasmask,
length=length)
self.exp_id = exp_id
def serialize(self):
buf = super(OFPExperimenterOxmId, self).serialize()
msg_pack_into(self._EXPERIMENTER_ID_PACK_STR, buf,
struct.calcsize(self._PACK_STR), self.exp_id)
return buf
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_MATCH)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_WILDCARDS)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_WRITE_SETFIELD)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_WRITE_SETFIELD_MISS)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_APPLY_SETFIELD)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_APPLY_SETFIELD_MISS)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_WRITE_COPYFIELD)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_WRITE_COPYFIELD_MISS)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_APPLY_COPYFIELD)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_APPLY_COPYFIELD_MISS)
class OFPTableFeaturePropOxm(OFPTableFeatureProp):
def __init__(self, type_=None, length=None, oxm_ids=None):
oxm_ids = oxm_ids if oxm_ids else []
super(OFPTableFeaturePropOxm, self).__init__(type_, length)
self.oxm_ids = oxm_ids
@classmethod
def parser(cls, buf):
rest = cls.get_rest(buf)
ids = []
while rest:
i, rest = OFPOxmId.parse(rest)
ids.append(i)
return cls(oxm_ids=ids)
def serialize_body(self):
bin_ids = bytearray()
for i in self.oxm_ids:
bin_ids += i.serialize()
return bin_ids
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_PACKET_TYPES)
class OFPTableFeaturePropOxmValues(OFPTableFeatureProp):
def __init__(self, type_=None, length=None, _ordered_values=None,
**kwargs):
super(OFPTableFeaturePropOxmValues, self).__init__(type_, length)
if _ordered_values is not None:
assert not kwargs
self.oxm_values = _ordered_values
else:
kwargs = dict(ofproto.oxm_normalize_user(k, v) for
(k, v) in kwargs.items())
values = [ofproto.oxm_from_user(k, v) for (k, v)
in kwargs.items()]
# assumption: sorting by OXM type values makes fields
# meet ordering requirements (e.g. eth_type before ipv4_src)
values.sort(
key=lambda x: x[0][0] if isinstance(x[0], tuple) else x[0])
self.oxm_values = [ofproto.oxm_to_user(n, v, m) for (n, v, m)
in values]
@classmethod
def parser(cls, buf):
rest = cls.get_rest(buf)
values = []
while rest:
n, value, mask, field_len = ofproto.oxm_parse(rest, 0)
k, uv = ofproto.oxm_to_user(n, value, mask)
values.append((k, uv))
rest = rest[field_len:]
return cls(_ordered_values=values)
def serialize_body(self):
values = [ofproto.oxm_from_user(k, uv) for (k, uv)
in self.oxm_values]
offset = 0
buf = bytearray()
for (n, value, mask) in values:
offset += ofproto.oxm_serialize(n, value, mask, buf, offset)
return buf
def __getitem__(self, key):
return dict(self.oxm_values)[key]
def __contains__(self, key):
return key in dict(self.oxm_values)
def iteritems(self):
return iter(dict(self.oxm_values).items())
def items(self):
return self.oxm_values
def get(self, key, default=None):
return dict(self.oxm_values).get(key, default)
def stringify_attrs(self):
yield "oxm_values", dict(self.oxm_values)
def to_jsondict(self):
"""
Returns a dict expressing the OXM values.
"""
body = {"oxm_values": [ofproto.oxm_to_jsondict(k, uv) for k, uv
in self.oxm_values],
"length": self.length,
"type": self.type}
return {self.__class__.__name__: body}
@classmethod
def from_jsondict(cls, dict_):
"""
Returns an object which is generated from a dict.
Exception raises:
KeyError -- Unknown OXM value is defined in dict
"""
type_ = dict_['type']
values = [ofproto.oxm_from_jsondict(f) for f
in dict_['oxm_values']]
return cls(type_=type_, _ordered_values=values)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_EXPERIMENTER)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_EXPERIMENTER_MISS)
class OFPTableFeaturePropExperimenter(OFPPropCommonExperimenter4ByteData):
pass
@_set_stats_type(ofproto.OFPMP_TABLE_FEATURES, OFPTableFeaturesStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPTableFeaturesStatsRequest(OFPMultipartRequest):
"""
Table features statistics request message
The controller uses this message to query table features.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPTableFeaturesStats`` instances.
The default is [].
================ ======================================================
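Example (a minimal sketch; passing an empty body only queries the
switch and does not modify any table features)::
def send_table_features_stats_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPTableFeaturesStatsRequest(datapath, 0, [])
datapath.send_msg(req)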
"""
def __init__(self, datapath, flags=0, body=None, type_=None):
body = body if body else []
super(OFPTableFeaturesStatsRequest, self).__init__(datapath, flags)
self.body = body
def _serialize_stats_body(self):
bin_body = bytearray()
for p in self.body:
bin_body += p.serialize()
self.buf += bin_body
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_TABLE_FEATURES, OFPTableFeaturesStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPTableFeaturesStatsReply(OFPMultipartReply):
"""
Table features statistics reply message
The switch responds with this message to a table features statistics
request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPTableFeaturesStats`` instance
================ ======================================================
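Example (a sketch of a handler following the pattern of the other
statistics reply examples in this module; the logged fields are
illustrative only)::
@set_ev_cls(ofp_event.EventOFPTableFeaturesStatsReply, MAIN_DISPATCHER)
def table_features_stats_reply_handler(self, ev):
tables = []
for stat in ev.msg.body:
tables.append('table_id=%d name=%s max_entries=%d' %
(stat.table_id, stat.name, stat.max_entries))
self.logger.debug('TableFeaturesStats: %s', tables)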
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPTableFeaturesStatsReply, self).__init__(datapath, **kwargs)
@_set_stats_type(ofproto.OFPMP_PORT_DESC, OFPPort)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPPortDescStatsRequest(OFPMultipartRequest):
"""
Port description request message
The controller uses this message to query the description of one or all ports.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
port_no Port number to read (OFPP_ANY for all ports)
================ ======================================================
Example::
def send_port_desc_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPPortDescStatsRequest(datapath, 0, ofp.OFPP_ANY)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, port_no=ofproto.OFPP_ANY, type_=None):
super(OFPPortDescStatsRequest, self).__init__(datapath, flags)
self.port_no = port_no
def _serialize_stats_body(self):
msg_pack_into(ofproto.OFP_PORT_MULTIPART_REQUEST_PACK_STR,
self.buf,
ofproto.OFP_MULTIPART_REQUEST_SIZE,
self.port_no)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_PORT_DESC, OFPPort)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPPortDescStatsReply(OFPMultipartReply):
"""
Port description reply message
The switch responds with this message to a port description request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPPort`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)
def port_desc_stats_reply_handler(self, ev):
ports = []
for p in ev.msg.body:
ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '
'state=0x%08x properties=%s' %
(p.port_no, p.hw_addr,
p.name, p.config, p.state, repr(p.properties)))
self.logger.debug('OFPPortDescStatsReply received: %s', ports)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPPortDescStatsReply, self).__init__(datapath, **kwargs)
@_set_stats_type(ofproto.OFPMP_TABLE_DESC, OFPTableDesc)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPTableDescStatsRequest(OFPMultipartRequest):
"""
Table description request message
The controller uses this message to query the description of all the tables.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
================ ======================================================
Example::
def send_table_desc_stats_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPTableDescStatsRequest(datapath, 0)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, type_=None):
super(OFPTableDescStatsRequest, self).__init__(datapath, flags)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_TABLE_DESC, OFPTableDesc)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPTableDescStatsReply(OFPMultipartReply):
"""
Table description reply message
The switch responds with this message to a table description request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPTableDesc`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPTableDescStatsReply, MAIN_DISPATCHER)
def table_desc_stats_reply_handler(self, ev):
tables = []
for p in ev.msg.body:
tables.append('table_id=%d config=0x%08x properties=%s' %
(p.table_id, p.config, repr(p.properties)))
self.logger.debug('OFPTableDescStatsReply received: %s', tables)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPTableDescStatsReply, self).__init__(datapath, **kwargs)
@_set_stats_type(ofproto.OFPMP_QUEUE_DESC, OFPQueueDesc)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPQueueDescStatsRequest(OFPMultipartRequest):
"""
Queue description request message
The controller uses this message to query the description of all the queues.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
port_no Port number to read (OFPP_ANY for all ports)
queue_id ID of queue to read (OFPQ_ALL for all queues)
================ ======================================================
Example::
def send_queue_desc_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPQueueDescStatsRequest(datapath, 0,
ofp.OFPP_ANY,
ofp.OFPQ_ALL)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, port_no=ofproto.OFPP_ANY,
queue_id=ofproto.OFPQ_ALL, type_=None):
super(OFPQueueDescStatsRequest, self).__init__(datapath, flags)
self.port_no = port_no
self.queue_id = queue_id
def _serialize_stats_body(self):
msg_pack_into(ofproto.OFP_QUEUE_MULTIPART_REQUEST_PACK_STR,
self.buf,
ofproto.OFP_MULTIPART_REQUEST_SIZE,
self.port_no, self.queue_id)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_QUEUE_DESC, OFPQueueDesc)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPQueueDescStatsReply(OFPMultipartReply):
"""
Queue description reply message
The switch responds with this message to a queue description request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPQueueDesc`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPQueueDescStatsReply, MAIN_DISPATCHER)
def queue_desc_stats_reply_handler(self, ev):
queues = []
for q in ev.msg.body:
queues.append('port_no=%d queue_id=0x%08x properties=%s' %
(q.port_no, q.queue_id, repr(q.properties)))
self.logger.debug('OFPQueueDescStatsReply received: %s', queues)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPQueueDescStatsReply, self).__init__(datapath, **kwargs)
class OFPQueueStatsProp(OFPPropBase):
_TYPES = {}
@OFPQueueStatsProp.register_type(ofproto.OFPQSPT_EXPERIMENTER)
class OFPQueueStatsPropExperimenter(OFPPropCommonExperimenter4ByteData):
pass
class OFPQueueStats(StringifyMixin):
def __init__(self, length=None, port_no=None, queue_id=None,
tx_bytes=None, tx_packets=None, tx_errors=None,
duration_sec=None, duration_nsec=None, properties=None):
super(OFPQueueStats, self).__init__()
self.length = length
self.port_no = port_no
self.queue_id = queue_id
self.tx_bytes = tx_bytes
self.tx_packets = tx_packets
self.tx_errors = tx_errors
self.duration_sec = duration_sec
self.duration_nsec = duration_nsec
self.properties = properties
@classmethod
def parser(cls, buf, offset):
(length, port_no, queue_id, tx_bytes, tx_packets, tx_errors,
duration_sec, duration_nsec) = struct.unpack_from(
ofproto.OFP_QUEUE_STATS_PACK_STR, buf, offset)
props = []
rest = buf[offset + ofproto.OFP_QUEUE_STATS_SIZE:offset + length]
while rest:
p, rest = OFPQueueStatsProp.parse(rest)
props.append(p)
stats = cls(length, port_no, queue_id, tx_bytes, tx_packets, tx_errors,
duration_sec, duration_nsec, props)
return stats
@_set_stats_type(ofproto.OFPMP_QUEUE_STATS, OFPQueueStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPQueueStatsRequest(OFPMultipartRequest):
"""
Queue statistics request message
The controller uses this message to query queue statistics.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
port_no Port number to read
queue_id ID of queue to read
================ ======================================================
Example::
def send_queue_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPQueueStatsRequest(datapath, 0, ofp.OFPP_ANY,
ofp.OFPQ_ALL)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, port_no=ofproto.OFPP_ANY,
queue_id=ofproto.OFPQ_ALL, type_=None):
super(OFPQueueStatsRequest, self).__init__(datapath, flags)
self.port_no = port_no
self.queue_id = queue_id
def _serialize_stats_body(self):
msg_pack_into(ofproto.OFP_QUEUE_MULTIPART_REQUEST_PACK_STR,
self.buf,
ofproto.OFP_MULTIPART_REQUEST_SIZE,
self.port_no, self.queue_id)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_QUEUE_STATS, OFPQueueStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPQueueStatsReply(OFPMultipartReply):
"""
Queue statistics reply message
The switch responds with this message to a queue statistics request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPQueueStats`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPQueueStatsReply, MAIN_DISPATCHER)
def queue_stats_reply_handler(self, ev):
queues = []
for stat in ev.msg.body:
queues.append('port_no=%d queue_id=%d '
'tx_bytes=%d tx_packets=%d tx_errors=%d '
'duration_sec=%d duration_nsec=%d '
'properties=%s' %
(stat.port_no, stat.queue_id,
stat.tx_bytes, stat.tx_packets, stat.tx_errors,
stat.duration_sec, stat.duration_nsec,
repr(stat.properties)))
self.logger.debug('QueueStats: %s', queues)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPQueueStatsReply, self).__init__(datapath, **kwargs)
class OFPBucketCounter(StringifyMixin):
def __init__(self, packet_count, byte_count):
super(OFPBucketCounter, self).__init__()
self.packet_count = packet_count
self.byte_count = byte_count
@classmethod
def parser(cls, buf, offset):
packet_count, byte_count = struct.unpack_from(
ofproto.OFP_BUCKET_COUNTER_PACK_STR, buf, offset)
return cls(packet_count, byte_count)
class OFPGroupStats(StringifyMixin):
def __init__(self, length=None, group_id=None, ref_count=None,
packet_count=None, byte_count=None, duration_sec=None,
duration_nsec=None, bucket_stats=None):
super(OFPGroupStats, self).__init__()
self.length = length
self.group_id = group_id
self.ref_count = ref_count
self.packet_count = packet_count
self.byte_count = byte_count
self.duration_sec = duration_sec
self.duration_nsec = duration_nsec
self.bucket_stats = bucket_stats
@classmethod
def parser(cls, buf, offset):
group = struct.unpack_from(ofproto.OFP_GROUP_STATS_PACK_STR,
buf, offset)
group_stats = cls(*group)
group_stats.bucket_stats = []
total_len = group_stats.length + offset
offset += ofproto.OFP_GROUP_STATS_SIZE
while total_len > offset:
b = OFPBucketCounter.parser(buf, offset)
group_stats.bucket_stats.append(b)
offset += ofproto.OFP_BUCKET_COUNTER_SIZE
return group_stats
@_set_stats_type(ofproto.OFPMP_GROUP_STATS, OFPGroupStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPGroupStatsRequest(OFPMultipartRequest):
"""
Group statistics request message
The controller uses this message to query statistics of one or more
groups.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
group_id ID of group to read (OFPG_ALL for all groups)
================ ======================================================
Example::
def send_group_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPGroupStatsRequest(datapath, 0, ofp.OFPG_ALL)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, group_id=ofproto.OFPG_ALL,
type_=None):
super(OFPGroupStatsRequest, self).__init__(datapath, flags)
self.group_id = group_id
def _serialize_stats_body(self):
msg_pack_into(ofproto.OFP_GROUP_MULTIPART_REQUEST_PACK_STR,
self.buf,
ofproto.OFP_MULTIPART_REQUEST_SIZE,
self.group_id)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_GROUP_STATS, OFPGroupStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPGroupStatsReply(OFPMultipartReply):
"""
Group statistics reply message
The switch responds with this message to a group statistics request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPGroupStats`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPGroupStatsReply, MAIN_DISPATCHER)
def group_stats_reply_handler(self, ev):
groups = []
for stat in ev.msg.body:
groups.append('length=%d group_id=%d '
'ref_count=%d packet_count=%d byte_count=%d '
'duration_sec=%d duration_nsec=%d' %
(stat.length, stat.group_id,
stat.ref_count, stat.packet_count,
stat.byte_count, stat.duration_sec,
stat.duration_nsec))
self.logger.debug('GroupStats: %s', groups)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPGroupStatsReply, self).__init__(datapath, **kwargs)
class OFPGroupDescStats(StringifyMixin):
def __init__(self, type_=None, group_id=None, buckets=None, properties=None,
length=None, bucket_array_len=None):
buckets = buckets if buckets else []
properties = properties if properties else []
super(OFPGroupDescStats, self).__init__()
self.length = length
self.type = type_
self.group_id = group_id
self.buckets = buckets
self.properties = properties
@classmethod
def parser(cls, buf, offset):
stats = cls()
(stats.length, stats.type, stats.group_id,
stats.bucket_array_len) = struct.unpack_from(
ofproto.OFP_GROUP_DESC_STATS_PACK_STR, buf, offset)
offset += ofproto.OFP_GROUP_DESC_STATS_SIZE
bucket_buf = buf[offset:offset + stats.bucket_array_len]
stats.buckets = []
while bucket_buf:
bucket = OFPBucket.parser(bucket_buf, 0)
stats.buckets.append(bucket)
bucket_buf = bucket_buf[bucket.len:]
offset += stats.bucket_array_len
rest = buf[offset:offset + stats.length]
while rest:
p, rest = OFPGroupProp.parse(rest)
stats.properties.append(p)
return stats
@_set_stats_type(ofproto.OFPMP_GROUP_DESC, OFPGroupDescStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPGroupDescStatsRequest(OFPMultipartRequest):
"""
Group description request message
The controller uses this message to list the set of groups on a switch.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
group_id ID of group to read (OFPG_ALL for all groups)
================ ======================================================
Example::
def send_group_desc_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPGroupDescStatsRequest(datapath, 0, ofp.OFPG_ALL)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, group_id=ofproto.OFPG_ALL,
type_=None):
super(OFPGroupDescStatsRequest, self).__init__(datapath, flags)
self.group_id = group_id
def _serialize_stats_body(self):
msg_pack_into(ofproto.OFP_GROUP_MULTIPART_REQUEST_PACK_STR,
self.buf,
ofproto.OFP_MULTIPART_REQUEST_SIZE,
self.group_id)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_GROUP_DESC, OFPGroupDescStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPGroupDescStatsReply(OFPMultipartReply):
"""
Group description reply message
The switch responds with this message to a group description request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPGroupDescStats`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPGroupDescStatsReply, MAIN_DISPATCHER)
def group_desc_stats_reply_handler(self, ev):
descs = []
for stat in ev.msg.body:
descs.append('length=%d type=%d group_id=%d '
'buckets=%s properties=%s' %
(stat.length, stat.type, stat.group_id,
stat.buckets, repr(stat.properties)))
self.logger.debug('GroupDescStats: %s', descs)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPGroupDescStatsReply, self).__init__(datapath, **kwargs)
class OFPGroupFeaturesStats(ofproto_parser.namedtuple('OFPGroupFeaturesStats',
('types', 'capabilities', 'max_groups',
'actions'))):
@classmethod
def parser(cls, buf, offset):
group_features = struct.unpack_from(
ofproto.OFP_GROUP_FEATURES_PACK_STR, buf, offset)
types = group_features[0]
capabilities = group_features[1]
max_groups = list(group_features[2:6])
actions = list(group_features[6:10])
stats = cls(types, capabilities, max_groups, actions)
stats.length = ofproto.OFP_GROUP_FEATURES_SIZE
return stats
@_set_stats_type(ofproto.OFPMP_GROUP_FEATURES, OFPGroupFeaturesStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPGroupFeaturesStatsRequest(OFPMultipartRequest):
"""
Group features request message
The controller uses this message to list the capabilities of groups on
a switch.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
================ ======================================================
Example::
def send_group_features_stats_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPGroupFeaturesStatsRequest(datapath, 0)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, type_=None):
super(OFPGroupFeaturesStatsRequest, self).__init__(datapath, flags)
@OFPMultipartReply.register_stats_type(body_single_struct=True)
@_set_stats_type(ofproto.OFPMP_GROUP_FEATURES, OFPGroupFeaturesStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPGroupFeaturesStatsReply(OFPMultipartReply):
"""
Group features reply message
The switch responds with this message to a group features request.
================ ======================================================
Attribute Description
================ ======================================================
body Instance of ``OFPGroupFeaturesStats``
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPGroupFeaturesStatsReply, MAIN_DISPATCHER)
def group_features_stats_reply_handler(self, ev):
body = ev.msg.body
self.logger.debug('GroupFeaturesStats: types=%d '
'capabilities=0x%08x max_groups=%s '
'actions=%s',
body.types, body.capabilities,
body.max_groups, body.actions)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPGroupFeaturesStatsReply, self).__init__(datapath, **kwargs)
class OFPMeterBandStats(StringifyMixin):
def __init__(self, packet_band_count, byte_band_count):
super(OFPMeterBandStats, self).__init__()
self.packet_band_count = packet_band_count
self.byte_band_count = byte_band_count
@classmethod
def parser(cls, buf, offset):
band_stats = struct.unpack_from(
ofproto.OFP_METER_BAND_STATS_PACK_STR, buf, offset)
return cls(*band_stats)
class OFPMeterStats(StringifyMixin):
def __init__(self, meter_id=None, ref_count=None, packet_in_count=None,
byte_in_count=None, duration_sec=None, duration_nsec=None,
band_stats=None, len_=None):
super(OFPMeterStats, self).__init__()
self.meter_id = meter_id
self.len = 0
self.ref_count = ref_count
self.packet_in_count = packet_in_count
self.byte_in_count = byte_in_count
self.duration_sec = duration_sec
self.duration_nsec = duration_nsec
self.band_stats = band_stats
@classmethod
def parser(cls, buf, offset):
meter_stats = cls()
(meter_stats.meter_id, meter_stats.len,
meter_stats.ref_count, meter_stats.packet_in_count,
meter_stats.byte_in_count, meter_stats.duration_sec,
meter_stats.duration_nsec) = struct.unpack_from(
ofproto.OFP_METER_STATS_PACK_STR, buf, offset)
offset += ofproto.OFP_METER_STATS_SIZE
meter_stats.band_stats = []
length = ofproto.OFP_METER_STATS_SIZE
while length < meter_stats.len:
band_stats = OFPMeterBandStats.parser(buf, offset)
meter_stats.band_stats.append(band_stats)
offset += ofproto.OFP_METER_BAND_STATS_SIZE
length += ofproto.OFP_METER_BAND_STATS_SIZE
return meter_stats
@_set_stats_type(ofproto.OFPMP_METER_STATS, OFPMeterStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPMeterStatsRequest(OFPMultipartRequest):
"""
Meter statistics request message
The controller uses this message to query statistics for one or more
meters.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
    meter_id         ID of meter to read (OFPM_ALL for all meters)
================ ======================================================
Example::
def send_meter_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPMeterStatsRequest(datapath, 0, ofp.OFPM_ALL)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, meter_id=ofproto.OFPM_ALL,
type_=None):
super(OFPMeterStatsRequest, self).__init__(datapath, flags)
self.meter_id = meter_id
def _serialize_stats_body(self):
msg_pack_into(ofproto.OFP_METER_MULTIPART_REQUEST_PACK_STR,
self.buf,
ofproto.OFP_MULTIPART_REQUEST_SIZE,
self.meter_id)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_METER_STATS, OFPMeterStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPMeterStatsReply(OFPMultipartReply):
"""
Meter statistics reply message
The switch responds with this message to a meter statistics request.
================ ======================================================
Attribute Description
================ ======================================================
    body             List of ``OFPMeterStats`` instances
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPMeterStatsReply, MAIN_DISPATCHER)
def meter_stats_reply_handler(self, ev):
meters = []
for stat in ev.msg.body:
meters.append('meter_id=0x%08x len=%d ref_count=%d '
'packet_in_count=%d byte_in_count=%d '
'duration_sec=%d duration_nsec=%d '
'band_stats=%s' %
(stat.meter_id, stat.len, stat.ref_count,
stat.packet_in_count, stat.byte_in_count,
stat.duration_sec, stat.duration_nsec,
stat.band_stats))
self.logger.debug('MeterStats: %s', meters)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPMeterStatsReply, self).__init__(datapath, **kwargs)
class OFPMeterBand(StringifyMixin):
def __init__(self, type_, len_):
super(OFPMeterBand, self).__init__()
self.type = type_
self.len = len_
class OFPMeterBandHeader(OFPMeterBand):
_METER_BAND = {}
@staticmethod
def register_meter_band_type(type_, len_):
def _register_meter_band_type(cls):
OFPMeterBandHeader._METER_BAND[type_] = cls
cls.cls_meter_band_type = type_
cls.cls_meter_band_len = len_
return cls
return _register_meter_band_type
def __init__(self):
cls = self.__class__
super(OFPMeterBandHeader, self).__init__(cls.cls_meter_band_type,
cls.cls_meter_band_len)
@classmethod
def parser(cls, buf, offset):
type_, len_, _rate, _burst_size = struct.unpack_from(
ofproto.OFP_METER_BAND_HEADER_PACK_STR, buf, offset)
cls_ = cls._METER_BAND[type_]
assert cls_.cls_meter_band_len == len_
return cls_.parser(buf, offset)
@OFPMeterBandHeader.register_meter_band_type(
ofproto.OFPMBT_DROP, ofproto.OFP_METER_BAND_DROP_SIZE)
class OFPMeterBandDrop(OFPMeterBandHeader):
def __init__(self, rate=0, burst_size=0, type_=None, len_=None):
super(OFPMeterBandDrop, self).__init__()
self.rate = rate
self.burst_size = burst_size
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_METER_BAND_DROP_PACK_STR, buf, offset,
self.type, self.len, self.rate, self.burst_size)
@classmethod
def parser(cls, buf, offset):
type_, len_, rate, burst_size = struct.unpack_from(
ofproto.OFP_METER_BAND_DROP_PACK_STR, buf, offset)
assert cls.cls_meter_band_type == type_
assert cls.cls_meter_band_len == len_
return cls(rate, burst_size)
@OFPMeterBandHeader.register_meter_band_type(
ofproto.OFPMBT_DSCP_REMARK,
ofproto.OFP_METER_BAND_DSCP_REMARK_SIZE)
class OFPMeterBandDscpRemark(OFPMeterBandHeader):
def __init__(self, rate=0, burst_size=0, prec_level=0,
type_=None, len_=None):
super(OFPMeterBandDscpRemark, self).__init__()
self.rate = rate
self.burst_size = burst_size
self.prec_level = prec_level
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_METER_BAND_DSCP_REMARK_PACK_STR, buf,
offset, self.type, self.len, self.rate,
self.burst_size, self.prec_level)
@classmethod
def parser(cls, buf, offset):
type_, len_, rate, burst_size, prec_level = struct.unpack_from(
ofproto.OFP_METER_BAND_DSCP_REMARK_PACK_STR, buf, offset)
assert cls.cls_meter_band_type == type_
assert cls.cls_meter_band_len == len_
return cls(rate, burst_size, prec_level)
@OFPMeterBandHeader.register_meter_band_type(
ofproto.OFPMBT_EXPERIMENTER,
ofproto.OFP_METER_BAND_EXPERIMENTER_SIZE)
class OFPMeterBandExperimenter(OFPMeterBandHeader):
def __init__(self, rate=0, burst_size=0, experimenter=None,
type_=None, len_=None):
super(OFPMeterBandExperimenter, self).__init__()
self.rate = rate
self.burst_size = burst_size
self.experimenter = experimenter
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_METER_BAND_EXPERIMENTER_PACK_STR, buf,
offset, self.type, self.len, self.rate,
self.burst_size, self.experimenter)
@classmethod
def parser(cls, buf, offset):
type_, len_, rate, burst_size, experimenter = struct.unpack_from(
ofproto.OFP_METER_BAND_EXPERIMENTER_PACK_STR, buf, offset)
assert cls.cls_meter_band_type == type_
assert cls.cls_meter_band_len == len_
return cls(rate, burst_size, experimenter)
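# The meter band classes above are also what a controller passes to
# OFPMeterMod when installing a meter.  A rough sketch (the OFPMeterMod
# argument order of command, flags, meter_id, bands is assumed here):
#
#     bands = [ofp_parser.OFPMeterBandDrop(rate=1000, burst_size=100)]
#     req = ofp_parser.OFPMeterMod(datapath, ofp.OFPMC_ADD, ofp.OFPMF_KBPS,
#                                  meter_id=1, bands=bands)
#     datapath.send_msg(req)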
class OFPMeterDescStats(StringifyMixin):
def __init__(self, flags=None, meter_id=None, bands=None, length=None):
super(OFPMeterDescStats, self).__init__()
self.length = None
self.flags = flags
self.meter_id = meter_id
self.bands = bands
@classmethod
def parser(cls, buf, offset):
meter_config = cls()
(meter_config.length, meter_config.flags,
meter_config.meter_id) = struct.unpack_from(
ofproto.OFP_METER_DESC_PACK_STR, buf, offset)
offset += ofproto.OFP_METER_DESC_SIZE
meter_config.bands = []
length = ofproto.OFP_METER_DESC_SIZE
while length < meter_config.length:
band = OFPMeterBandHeader.parser(buf, offset)
meter_config.bands.append(band)
offset += band.len
length += band.len
return meter_config
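# OFPMeterDescStats mirrors the configuration a meter was installed with:
# its flags, meter_id and the list of meter band instances defined above,
# reconstructed from an OFPMeterDescStatsReply body.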
@_set_stats_type(ofproto.OFPMP_METER_DESC, OFPMeterDescStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPMeterDescStatsRequest(OFPMultipartRequest):
"""
Meter description statistics request message
The controller uses this message to query configuration for one or more
meters.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
    meter_id         ID of meter to read (OFPM_ALL for all meters)
================ ======================================================
Example::
def send_meter_desc_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPMeterDescStatsRequest(datapath, 0,
ofp.OFPM_ALL)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, meter_id=ofproto.OFPM_ALL,
type_=None):
super(OFPMeterDescStatsRequest, self).__init__(datapath, flags)
self.meter_id = meter_id
def _serialize_stats_body(self):
msg_pack_into(ofproto.OFP_METER_MULTIPART_REQUEST_PACK_STR,
self.buf,
ofproto.OFP_MULTIPART_REQUEST_SIZE,
self.meter_id)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_METER_DESC, OFPMeterDescStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPMeterDescStatsReply(OFPMultipartReply):
"""
Meter description statistics reply message
The switch responds with this message to a meter description
statistics request.
================ ======================================================
Attribute Description
================ ======================================================
    body             List of ``OFPMeterDescStats`` instances
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPMeterDescStatsReply, MAIN_DISPATCHER)
def meter_desc_stats_reply_handler(self, ev):
configs = []
for stat in ev.msg.body:
configs.append('length=%d flags=0x%04x meter_id=0x%08x '
'bands=%s' %
(stat.length, stat.flags, stat.meter_id,
stat.bands))
self.logger.debug('MeterDescStats: %s', configs)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPMeterDescStatsReply, self).__init__(datapath, **kwargs)
class OFPMeterFeaturesStats(ofproto_parser.namedtuple('OFPMeterFeaturesStats',
('max_meter', 'band_types', 'capabilities',
'max_bands', 'max_color', 'features'))):
@classmethod
def parser(cls, buf, offset):
meter_features = struct.unpack_from(
ofproto.OFP_METER_FEATURES_PACK_STR, buf, offset)
stats = cls(*meter_features)
stats.length = ofproto.OFP_METER_FEATURES_SIZE
return stats
@_set_stats_type(ofproto.OFPMP_METER_FEATURES, OFPMeterFeaturesStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPMeterFeaturesStatsRequest(OFPMultipartRequest):
"""
Meter features statistics request message
The controller uses this message to query the set of features of the
metering subsystem.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
================ ======================================================
Example::
def send_meter_features_stats_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPMeterFeaturesStatsRequest(datapath, 0)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, type_=None):
super(OFPMeterFeaturesStatsRequest, self).__init__(datapath, flags)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_METER_FEATURES, OFPMeterFeaturesStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPMeterFeaturesStatsReply(OFPMultipartReply):
"""
Meter features statistics reply message
The switch responds with this message to a meter features statistics
request.
================ ======================================================
Attribute Description
================ ======================================================
    body             List of ``OFPMeterFeaturesStats`` instances
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPMeterFeaturesStatsReply, MAIN_DISPATCHER)
def meter_features_stats_reply_handler(self, ev):
features = []
for stat in ev.msg.body:
features.append('max_meter=%d band_types=0x%08x '
'capabilities=0x%08x max_bands=%d '
'max_color=%d' %
(stat.max_meter, stat.band_types,
stat.capabilities, stat.max_bands,
stat.max_color))
self.logger.debug('MeterFeaturesStats: %s', features)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPMeterFeaturesStatsReply, self).__init__(datapath, **kwargs)
class OFPFlowUpdate(StringifyMixin):
def __init__(self, length, event):
super(OFPFlowUpdate, self).__init__()
self.length = length
self.event = event
class OFPFlowUpdateHeader(OFPFlowUpdate):
_EVENT = {}
@staticmethod
def register_flow_update_event(event, length):
def _register_flow_update_event(cls):
OFPFlowUpdateHeader._EVENT[event] = cls
cls.cls_flow_update_event = event
cls.cls_flow_update_length = length
return cls
return _register_flow_update_event
def __init__(self, length=None, event=None):
cls = self.__class__
super(OFPFlowUpdateHeader, self).__init__(length,
cls.cls_flow_update_event)
self.length = length
@classmethod
def parser(cls, buf, offset):
length, event = struct.unpack_from(
ofproto.OFP_FLOW_UPDATE_HEADER_PACK_STR, buf, offset)
cls_ = cls._EVENT[event]
return cls_.parser(buf, offset)
@OFPFlowUpdateHeader.register_flow_update_event(
ofproto.OFPFME_INITIAL, ofproto.OFP_FLOW_UPDATE_FULL_SIZE)
@OFPFlowUpdateHeader.register_flow_update_event(
ofproto.OFPFME_ADDED, ofproto.OFP_FLOW_UPDATE_FULL_SIZE)
@OFPFlowUpdateHeader.register_flow_update_event(
ofproto.OFPFME_REMOVED, ofproto.OFP_FLOW_UPDATE_FULL_SIZE)
@OFPFlowUpdateHeader.register_flow_update_event(
ofproto.OFPFME_MODIFIED, ofproto.OFP_FLOW_UPDATE_FULL_SIZE)
class OFPFlowUpdateFull(OFPFlowUpdateHeader):
def __init__(self, length=None, event=None, table_id=None, reason=None,
idle_timeout=None, hard_timeout=None, priority=None,
cookie=None, match=None, instructions=None):
instructions = instructions if instructions else []
super(OFPFlowUpdateFull, self).__init__(length, event)
self.table_id = table_id
self.reason = reason
self.idle_timeout = idle_timeout
self.hard_timeout = hard_timeout
self.priority = priority
self.cookie = cookie
self.match = match
assert (event != ofproto.OFPFME_REMOVED or len(instructions) == 0)
for i in instructions:
assert isinstance(i, OFPInstruction)
self.instructions = instructions
@classmethod
def parser(cls, buf, offset):
(length, event, table_id, reason, idle_timeout, hard_timeout, priority,
cookie) = struct.unpack_from(ofproto.OFP_FLOW_UPDATE_FULL_0_PACK_STR,
buf, offset)
offset += ofproto.OFP_FLOW_UPDATE_FULL_0_SIZE
assert cls.cls_flow_update_length <= length
assert cls.cls_flow_update_event == event
match = OFPMatch.parser(buf, offset)
match_length = utils.round_up(match.length, 8)
offset += match_length
inst_length = (length - ofproto.OFP_FLOW_UPDATE_FULL_0_SIZE -
match_length)
instructions = []
while inst_length > 0:
inst = OFPInstruction.parser(buf, offset)
instructions.append(inst)
offset += inst.len
inst_length -= inst.len
return cls(length, event, table_id, reason, idle_timeout,
hard_timeout, priority, cookie, match, instructions)
@OFPFlowUpdateHeader.register_flow_update_event(
ofproto.OFPFME_ABBREV, ofproto.OFP_FLOW_UPDATE_ABBREV_SIZE)
class OFPFlowUpdateAbbrev(OFPFlowUpdateHeader):
def __init__(self, length=None, event=None, xid=None):
super(OFPFlowUpdateAbbrev, self).__init__(length, event)
self.xid = xid
@classmethod
def parser(cls, buf, offset):
length, event, xid = struct.unpack_from(
ofproto.OFP_FLOW_UPDATE_ABBREV_PACK_STR, buf, offset)
assert cls.cls_flow_update_length == length
assert cls.cls_flow_update_event == event
return cls(length, event, xid)
@OFPFlowUpdateHeader.register_flow_update_event(
ofproto.OFPFME_PAUSED, ofproto.OFP_FLOW_UPDATE_PAUSED_SIZE)
@OFPFlowUpdateHeader.register_flow_update_event(
ofproto.OFPFME_RESUMED, ofproto.OFP_FLOW_UPDATE_PAUSED_SIZE)
class OFPFlowUpdatePaused(OFPFlowUpdateHeader):
@classmethod
def parser(cls, buf, offset):
length, event = struct.unpack_from(
ofproto.OFP_FLOW_UPDATE_PAUSED_PACK_STR, buf, offset)
assert cls.cls_flow_update_length == length
assert cls.cls_flow_update_event == event
return cls(length, event)
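# OFPFlowUpdateFull, OFPFlowUpdateAbbrev and OFPFlowUpdatePaused are decoded
# from the body of an OFPFlowMonitorReply; the "event" field of the common
# header selects which subclass OFPFlowUpdateHeader.parser dispatches to.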
class OFPFlowMonitorRequestBase(OFPMultipartRequest):
def __init__(self, datapath, flags, monitor_id, out_port, out_group,
monitor_flags, table_id, command, match):
super(OFPFlowMonitorRequestBase, self).__init__(datapath, flags)
self.monitor_id = monitor_id
self.out_port = out_port
self.out_group = out_group
self.monitor_flags = monitor_flags
self.table_id = table_id
self.command = command
self.match = match
def _serialize_stats_body(self):
offset = ofproto.OFP_MULTIPART_REQUEST_SIZE
msg_pack_into(ofproto.OFP_FLOW_MONITOR_REQUEST_0_PACK_STR, self.buf,
offset, self.monitor_id, self.out_port, self.out_group,
self.monitor_flags, self.table_id, self.command)
offset += ofproto.OFP_FLOW_MONITOR_REQUEST_0_SIZE
self.match.serialize(self.buf, offset)
@_set_stats_type(ofproto.OFPMP_FLOW_MONITOR, OFPFlowUpdateHeader)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPFlowMonitorRequest(OFPFlowMonitorRequestBase):
"""
Flow monitor request message
The controller uses this message to query flow monitors.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
monitor_id Controller-assigned ID for this monitor
out_port Require matching entries to include this as an output
port
out_group Require matching entries to include this as an output
group
monitor_flags Bitmap of the following flags.
| OFPFMF_INITIAL
| OFPFMF_ADD
| OFPFMF_REMOVED
| OFPFMF_MODIFY
| OFPFMF_INSTRUCTIONS
| OFPFMF_NO_ABBREV
| OFPFMF_ONLY_OWN
table_id ID of table to monitor
command One of the following values.
| OFPFMC_ADD
| OFPFMC_MODIFY
| OFPFMC_DELETE
match Instance of ``OFPMatch``
================ ======================================================
Example::
def send_flow_monitor_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
monitor_flags = [ofp.OFPFMF_INITIAL, ofp.OFPFMF_ONLY_OWN]
match = ofp_parser.OFPMatch(in_port=1)
req = ofp_parser.OFPFlowMonitorRequest(datapath, 0, 10000,
ofp.OFPP_ANY, ofp.OFPG_ANY,
monitor_flags,
ofp.OFPTT_ALL,
ofp.OFPFMC_ADD, match)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, monitor_id=0,
out_port=ofproto.OFPP_ANY, out_group=ofproto.OFPG_ANY,
monitor_flags=0, table_id=ofproto.OFPTT_ALL,
command=ofproto.OFPFMC_ADD, match=None, type_=None):
if match is None:
match = OFPMatch()
super(OFPFlowMonitorRequest, self).__init__(datapath, flags,
monitor_id, out_port,
out_group, monitor_flags,
table_id, command, match)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_FLOW_MONITOR, OFPFlowUpdateHeader)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPFlowMonitorReply(OFPMultipartReply):
"""
Flow monitor reply message
The switch responds with this message to a flow monitor request.
================ ======================================================
Attribute Description
================ ======================================================
    body             List of the following class instances.
                     | OFPFlowUpdateFull
                     | OFPFlowUpdateAbbrev
                     | OFPFlowUpdatePaused
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPFlowMonitorReply, MAIN_DISPATCHER)
def flow_monitor_reply_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
flow_updates = []
for update in msg.body:
                update_str = ('length=%d event=%d ' %
                              (update.length, update.event))
if (update.event == ofp.OFPFME_INITIAL or
update.event == ofp.OFPFME_ADDED or
update.event == ofp.OFPFME_REMOVED or
update.event == ofp.OFPFME_MODIFIED):
                    update_str += ('table_id=%d reason=%d idle_timeout=%d '
                                   'hard_timeout=%d priority=%d cookie=%d '
                                   'match=%s instructions=%s' %
                                   (update.table_id, update.reason,
                                    update.idle_timeout, update.hard_timeout,
                                    update.priority, update.cookie,
                                    update.match, update.instructions))
elif update.event == ofp.OFPFME_ABBREV:
update_str += 'xid=%d' % (update.xid)
flow_updates.append(update_str)
self.logger.debug('FlowUpdates: %s', flow_updates)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPFlowMonitorReply, self).__init__(datapath, **kwargs)
class OFPBundleFeaturesProp(OFPPropBase):
_TYPES = {}
@OFPBundleFeaturesProp.register_type(ofproto.OFPTMPBF_TIME_CAPABILITY)
class OFPBundleFeaturesPropTime(OFPBundleFeaturesProp):
def __init__(self, type_=None, length=None, sched_accuracy=None,
sched_max_future=None, sched_max_past=None, timestamp=None):
super(OFPBundleFeaturesPropTime, self).__init__(type_, length)
self.sched_accuracy = sched_accuracy
self.sched_max_future = sched_max_future
self.sched_max_past = sched_max_past
self.timestamp = timestamp
@classmethod
def parser(cls, buf):
prop = cls()
(prop.type, prop.length) = struct.unpack_from(
ofproto.OFP_BUNDLE_FEATURES_PROP_TIME_0_PACK_STR, buf)
offset = ofproto.OFP_BUNDLE_FEATURES_PROP_TIME_0_SIZE
for f in ['sched_accuracy', 'sched_max_future', 'sched_max_past',
'timestamp']:
t = OFPTime.parser(buf, offset)
setattr(prop, f, t)
offset += ofproto.OFP_TIME_SIZE
return prop
def serialize(self):
# fixup
self.length = ofproto.OFP_BUNDLE_FEATURES_PROP_TIME_SIZE
buf = bytearray()
msg_pack_into(ofproto.OFP_BUNDLE_FEATURES_PROP_TIME_0_PACK_STR, buf, 0,
self.type, self.length)
offset = ofproto.OFP_BUNDLE_FEATURES_PROP_TIME_0_SIZE
for f in [self.sched_accuracy, self.sched_max_future,
self.sched_max_past, self.timestamp]:
f.serialize(buf, offset)
offset += ofproto.OFP_TIME_SIZE
return buf
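# A controller may attach an OFPBundleFeaturesPropTime to an
# OFPBundleFeaturesStatsRequest to describe its scheduling bounds.  A rough
# sketch (the OFPTime constructor arguments are assumed; OFPTime is defined
# elsewhere in this module):
#
#     t = ofp_parser.OFPTime(seconds=0, nanoseconds=0)
#     prop = ofp_parser.OFPBundleFeaturesPropTime(sched_accuracy=t,
#                                                 sched_max_future=t,
#                                                 sched_max_past=t,
#                                                 timestamp=t)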
@OFPBundleFeaturesProp.register_type(ofproto.OFPTMPBF_EXPERIMENTER)
class OFPBundleFeaturesPropExperimenter(OFPPropCommonExperimenter4ByteData):
pass
class OFPBundleFeaturesStats(ofproto_parser.namedtuple(
'OFPBundleFeaturesStats', ('capabilities', 'properties'))):
@classmethod
def parser(cls, buf, offset):
(capabilities, ) = struct.unpack_from(
ofproto.OFP_BUNDLE_FEATURES_PACK_STR, buf, offset)
properties = []
length = ofproto.OFP_BUNDLE_FEATURES_SIZE
rest = buf[offset + length:]
while rest:
p, rest = OFPBundleFeaturesProp.parse(rest)
properties.append(p)
length += p.length
bndl = cls(capabilities, properties)
# Note: length field is not defined in the specification and
# is specific to this implementation.
bndl.length = length
return bndl
@_set_stats_type(ofproto.OFPMP_BUNDLE_FEATURES, OFPBundleFeaturesStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPBundleFeaturesStatsRequest(OFPMultipartRequest):
"""
Bundle features request message
The controller uses this message to query a switch about its bundle
capabilities, including whether it supports atomic bundles, ordered
bundles, and scheduled bundles.
====================== ====================================================
Attribute Description
====================== ====================================================
flags Zero or ``OFPMPF_REQ_MORE``
feature_request_flags Bitmap of the following flags.
| OFPBF_TIMESTAMP
| OFPBF_TIME_SET_SCHED
    properties             List of ``OFPBundleFeaturesProp`` subclass instances
====================== ====================================================
Example::
def send_bundle_features_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPBundleFeaturesStatsRequest(datapath, 0)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, feature_request_flags=0,
properties=None, type_=None):
properties = properties if properties else []
super(OFPBundleFeaturesStatsRequest, self).__init__(datapath, flags)
self.feature_request_flags = feature_request_flags
self.properties = properties
def _serialize_stats_body(self):
bin_props = bytearray()
for p in self.properties:
bin_props += p.serialize()
msg_pack_into(ofproto.OFP_BUNDLE_FEATURES_REQUEST_PACK_STR,
self.buf, ofproto.OFP_MULTIPART_REQUEST_SIZE,
self.feature_request_flags)
self.buf += bin_props
@OFPMultipartReply.register_stats_type(body_single_struct=True)
@_set_stats_type(ofproto.OFPMP_BUNDLE_FEATURES, OFPBundleFeaturesStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPBundleFeaturesStatsReply(OFPMultipartReply):
"""
Bundle features reply message
The switch responds with this message to a bundle features request.
================ ======================================================
Attribute Description
================ ======================================================
body Instance of ``OFPBundleFeaturesStats``
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPBundleFeaturesStatsReply, MAIN_DISPATCHER)
def bundle_features_stats_reply_handler(self, ev):
body = ev.msg.body
            self.logger.debug('OFPBundleFeaturesStats: capabilities=0x%08x '
'properties=%s',
body.capabilities, repr(body.properties))
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPBundleFeaturesStatsReply, self).__init__(datapath, **kwargs)
class OFPExperimenterMultipart(ofproto_parser.namedtuple(
'OFPExperimenterMultipart',
('experimenter', 'exp_type', 'data'))):
"""
The body of OFPExperimenterStatsReply multipart messages.
================ ======================================================
Attribute Description
================ ======================================================
experimenter Experimenter ID
exp_type Experimenter defined
data Experimenter defined additional data
================ ======================================================
"""
@classmethod
def parser(cls, buf, offset):
args = struct.unpack_from(
ofproto.OFP_EXPERIMENTER_MULTIPART_HEADER_PACK_STR, buf,
offset)
args = list(args)
args.append(buf[offset +
ofproto.OFP_EXPERIMENTER_MULTIPART_HEADER_SIZE:])
stats = cls(*args)
stats.length = ofproto.OFP_METER_FEATURES_SIZE
return stats
def serialize(self):
buf = bytearray()
msg_pack_into(ofproto.OFP_EXPERIMENTER_MULTIPART_HEADER_PACK_STR,
buf, 0,
self.experimenter, self.exp_type)
return buf + self.data
class OFPExperimenterStatsRequestBase(OFPMultipartRequest):
def __init__(self, datapath, flags,
experimenter, exp_type,
type_=None):
super(OFPExperimenterStatsRequestBase, self).__init__(datapath, flags)
self.experimenter = experimenter
self.exp_type = exp_type
@_set_stats_type(ofproto.OFPMP_EXPERIMENTER, OFPExperimenterMultipart)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPExperimenterStatsRequest(OFPExperimenterStatsRequestBase):
"""
Experimenter multipart request message
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
experimenter Experimenter ID
exp_type Experimenter defined
data Experimenter defined additional data
================ ======================================================
"""
def __init__(self, datapath, flags,
experimenter, exp_type, data,
type_=None):
super(OFPExperimenterStatsRequest, self).__init__(datapath, flags,
experimenter,
exp_type, type_)
self.data = data
def _serialize_stats_body(self):
body = OFPExperimenterMultipart(experimenter=self.experimenter,
exp_type=self.exp_type,
data=self.data)
self.buf += body.serialize()
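# A rough sketch of issuing an experimenter multipart request (the
# experimenter ID, exp_type and payload below are placeholders):
#
#     req = ofp_parser.OFPExperimenterStatsRequest(datapath, 0,
#                                                  experimenter=0xff000001,
#                                                  exp_type=1, data=b'')
#     datapath.send_msg(req)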
@OFPMultipartReply.register_stats_type(body_single_struct=True)
@_set_stats_type(ofproto.OFPMP_EXPERIMENTER, OFPExperimenterMultipart)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPExperimenterStatsReply(OFPMultipartReply):
"""
Experimenter multipart reply message
================ ======================================================
Attribute Description
================ ======================================================
body An ``OFPExperimenterMultipart`` instance
================ ======================================================
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPExperimenterStatsReply, self).__init__(datapath, **kwargs)
class OFPFlowDesc(StringifyMixin):
def __init__(self, table_id=None, priority=None,
idle_timeout=None, hard_timeout=None, flags=None,
importance=None, cookie=None, match=None, stats=None,
instructions=None, length=None):
super(OFPFlowDesc, self).__init__()
self.length = length
self.table_id = table_id
self.priority = priority
self.idle_timeout = idle_timeout
self.hard_timeout = hard_timeout
self.flags = flags
self.importance = importance
self.cookie = cookie
self.match = match
self.stats = stats
self.instructions = instructions
@classmethod
def parser(cls, buf, offset):
flow_desc = cls()
(flow_desc.length, flow_desc.table_id,
flow_desc.priority, flow_desc.idle_timeout,
flow_desc.hard_timeout, flow_desc.flags,
flow_desc.importance,
flow_desc.cookie) = struct.unpack_from(
ofproto.OFP_FLOW_DESC_0_PACK_STR, buf, offset)
offset += ofproto.OFP_FLOW_DESC_0_SIZE
flow_desc.match = OFPMatch.parser(buf, offset)
match_length = utils.round_up(flow_desc.match.length, 8)
offset += match_length
flow_desc.stats = OFPStats.parser(buf, offset)
stats_length = utils.round_up(flow_desc.stats.length, 8)
offset += stats_length
instructions = []
inst_length = (flow_desc.length - (ofproto.OFP_FLOW_DESC_0_SIZE +
match_length + stats_length))
while inst_length > 0:
inst = OFPInstruction.parser(buf, offset)
instructions.append(inst)
offset += inst.len
inst_length -= inst.len
flow_desc.instructions = instructions
return flow_desc
class OFPFlowStats(StringifyMixin):
def __init__(self, table_id=None, reason=None, priority=None,
match=None, stats=None, length=None):
super(OFPFlowStats, self).__init__()
self.table_id = table_id
self.reason = reason
self.priority = priority
self.match = match
self.stats = stats
self.length = length
@classmethod
def parser(cls, buf, offset):
flow_stats = cls()
(flow_stats.length, flow_stats.table_id, flow_stats.reason,
flow_stats.priority) = struct.unpack_from(
ofproto.OFP_FLOW_STATS_0_PACK_STR, buf, offset)
offset += ofproto.OFP_FLOW_STATS_0_SIZE
flow_stats.match = OFPMatch.parser(buf, offset)
match_length = utils.round_up(flow_stats.match.length, 8)
offset += match_length
stats_length = (flow_stats.length - (ofproto.OFP_FLOW_STATS_0_SIZE +
match_length))
if stats_length > 0:
flow_stats.stats = OFPStats.parser(buf, offset)
return flow_stats
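# In OpenFlow 1.5 the per-flow counters are no longer fixed struct fields;
# OFPFlowDesc and OFPFlowStats expose them through the "stats" attribute,
# an OXS container parsed by OFPStats.parser above.  A rough sketch of
# reading one counter in a reply handler (assuming OFPStats supports
# OFPMatch-style indexing and that the switch reported the field):
#
#     packet_count = stat.stats['packet_count']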
class OFPFlowStatsRequestBase(OFPMultipartRequest):
def __init__(self, datapath, flags, table_id, out_port, out_group,
cookie, cookie_mask, match):
super(OFPFlowStatsRequestBase, self).__init__(datapath, flags)
self.table_id = table_id
self.out_port = out_port
self.out_group = out_group
self.cookie = cookie
self.cookie_mask = cookie_mask
self.match = match
def _serialize_stats_body(self):
offset = ofproto.OFP_MULTIPART_REQUEST_SIZE
msg_pack_into(ofproto.OFP_FLOW_STATS_REQUEST_0_PACK_STR,
self.buf, offset, self.table_id, self.out_port,
self.out_group, self.cookie, self.cookie_mask)
offset += ofproto.OFP_FLOW_STATS_REQUEST_0_SIZE
self.match.serialize(self.buf, offset)
@_set_stats_type(ofproto.OFPMP_FLOW_DESC, OFPFlowDesc)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPFlowDescStatsRequest(OFPFlowStatsRequestBase):
"""
Individual flow descriptions request message
The controller uses this message to query individual flow descriptions.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
table_id ID of table to read
out_port Require matching entries to include this as an output
port
out_group Require matching entries to include this as an output
group
cookie Require matching entries to contain this cookie value
cookie_mask Mask used to restrict the cookie bits that must match
match Instance of ``OFPMatch``
================ ======================================================
Example::
def send_flow_desc_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
cookie = cookie_mask = 0
match = ofp_parser.OFPMatch(in_port=1)
req = ofp_parser.OFPFlowDescStatsRequest(datapath, 0,
ofp.OFPTT_ALL,
ofp.OFPP_ANY,
ofp.OFPG_ANY,
cookie, cookie_mask,
match)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, table_id=ofproto.OFPTT_ALL,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY,
cookie=0, cookie_mask=0, match=None, type_=None):
if match is None:
match = OFPMatch()
super(OFPFlowDescStatsRequest, self).__init__(
datapath, flags, table_id, out_port, out_group, cookie,
cookie_mask, match)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_FLOW_DESC, OFPFlowDesc)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPFlowDescStatsReply(OFPMultipartReply):
"""
Individual flow descriptions reply message
The switch responds with this message to an individual flow descriptions
request.
================ ======================================================
Attribute Description
================ ======================================================
    body             List of ``OFPFlowDesc`` instances
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPFlowDescStatsReply, MAIN_DISPATCHER)
def flow_desc_reply_handler(self, ev):
flows = []
for stat in ev.msg.body:
flows.append('table_id=%s priority=%d '
'idle_timeout=%d hard_timeout=%d flags=0x%04x '
'importance=%d cookie=%d match=%s '
'stats=%s instructions=%s' %
(stat.table_id, stat.priority,
stat.idle_timeout, stat.hard_timeout,
stat.flags, stat.importance,
stat.cookie, stat.match,
stat.stats, stat.instructions))
self.logger.debug('FlowDesc: %s', flows)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPFlowDescStatsReply, self).__init__(datapath, **kwargs)
@_set_stats_type(ofproto.OFPMP_FLOW_STATS, OFPFlowStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPFlowStatsRequest(OFPFlowStatsRequestBase):
"""
Individual flow statistics request message
The controller uses this message to query individual flow statistics.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
table_id ID of table to read
out_port Require matching entries to include this as an output
port
out_group Require matching entries to include this as an output
group
cookie Require matching entries to contain this cookie value
cookie_mask Mask used to restrict the cookie bits that must match
match Instance of ``OFPMatch``
================ ======================================================
Example::
def send_flow_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
cookie = cookie_mask = 0
match = ofp_parser.OFPMatch(in_port=1)
req = ofp_parser.OFPFlowStatsRequest(datapath, 0,
ofp.OFPTT_ALL,
ofp.OFPP_ANY, ofp.OFPG_ANY,
cookie, cookie_mask,
match)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, table_id=ofproto.OFPTT_ALL,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY,
cookie=0, cookie_mask=0, match=None, type_=None):
if match is None:
match = OFPMatch()
super(OFPFlowStatsRequest, self).__init__(datapath, flags, table_id,
out_port, out_group,
cookie, cookie_mask, match)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_FLOW_STATS, OFPFlowStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPFlowStatsReply(OFPMultipartReply):
"""
Individual flow statistics reply message
The switch responds with this message to an individual flow statistics
request.
================ ======================================================
Attribute Description
================ ======================================================
    body             List of ``OFPFlowStats`` instances
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def flow_stats_reply_handler(self, ev):
flows = []
for stat in ev.msg.body:
flows.append('table_id=%s reason=%d priority=%d '
'match=%s stats=%s' %
(stat.table_id, stat.reason, stat.priority,
stat.match, stat.stats))
self.logger.debug('FlowStats: %s', flows)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPFlowStatsReply, self).__init__(datapath, **kwargs)
class OFPAggregateStats(StringifyMixin):
def __init__(self, stats=None, length=None):
super(OFPAggregateStats, self).__init__()
self.stats = stats
# Note: length field is specific to this implementation.
# It does not have a corresponding field in the specification.
self.length = length
@classmethod
def parser(cls, buf, offset):
stats = OFPStats.parser(buf, offset)
agg = cls(stats)
agg.length = utils.round_up(stats.length, 8)
return agg
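# OFPAggregateStats likewise wraps a single OXS-encoded OFPStats container
# (its "stats" attribute) rather than fixed packet/byte/flow count fields.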
@_set_stats_type(ofproto.OFPMP_AGGREGATE_STATS, OFPAggregateStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPAggregateStatsRequest(OFPFlowStatsRequestBase):
"""
Aggregate flow statistics request message
    The controller uses this message to query aggregate flow statistics.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
table_id ID of table to read
out_port Require matching entries to include this as an output
port
out_group Require matching entries to include this as an output
group
cookie Require matching entries to contain this cookie value
cookie_mask Mask used to restrict the cookie bits that must match
match Instance of ``OFPMatch``
================ ======================================================
Example::
def send_aggregate_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
cookie = cookie_mask = 0
match = ofp_parser.OFPMatch(in_port=1)
req = ofp_parser.OFPAggregateStatsRequest(datapath, 0,
ofp.OFPTT_ALL,
ofp.OFPP_ANY,
ofp.OFPG_ANY,
cookie, cookie_mask,
match)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags, table_id, out_port, out_group,
cookie, cookie_mask, match, type_=None):
super(OFPAggregateStatsRequest, self).__init__(datapath,
flags,
table_id,
out_port,
out_group,
cookie,
cookie_mask,
match)
@OFPMultipartReply.register_stats_type(body_single_struct=True)
@_set_stats_type(ofproto.OFPMP_AGGREGATE_STATS, OFPAggregateStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPAggregateStatsReply(OFPMultipartReply):
"""
Aggregate flow statistics reply message
The switch responds with this message to an aggregate flow statistics
request.
================ ======================================================
Attribute Description
================ ======================================================
body Instance of ``OFPAggregateStats``
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPAggregateStatsReply, MAIN_DISPATCHER)
def aggregate_stats_reply_handler(self, ev):
body = ev.msg.body
self.logger.debug('AggregateStats: stats=%s', body.stats)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPAggregateStatsReply, self).__init__(datapath, **kwargs)
class OFPTableStats(ofproto_parser.namedtuple('OFPTableStats', (
'table_id', 'active_count', 'lookup_count',
'matched_count'))):
@classmethod
def parser(cls, buf, offset):
tbl = struct.unpack_from(ofproto.OFP_TABLE_STATS_PACK_STR,
buf, offset)
stats = cls(*tbl)
stats.length = ofproto.OFP_TABLE_STATS_SIZE
return stats
@_set_stats_type(ofproto.OFPMP_TABLE_STATS, OFPTableStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPTableStatsRequest(OFPMultipartRequest):
"""
Table statistics request message
    The controller uses this message to query flow table statistics.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
================ ======================================================
Example::
def send_table_stats_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPTableStatsRequest(datapath, 0)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags, type_=None):
super(OFPTableStatsRequest, self).__init__(datapath, flags)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_TABLE_STATS, OFPTableStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPTableStatsReply(OFPMultipartReply):
"""
Table statistics reply message
The switch responds with this message to a table statistics request.
================ ======================================================
Attribute Description
================ ======================================================
    body             List of ``OFPTableStats`` instances
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPTableStatsReply, MAIN_DISPATCHER)
def table_stats_reply_handler(self, ev):
tables = []
for stat in ev.msg.body:
tables.append('table_id=%d active_count=%d lookup_count=%d '
' matched_count=%d' %
(stat.table_id, stat.active_count,
stat.lookup_count, stat.matched_count))
self.logger.debug('TableStats: %s', tables)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPTableStatsReply, self).__init__(datapath, **kwargs)
class OFPPortStatsProp(OFPPropBase):
_TYPES = {}
@OFPPortStatsProp.register_type(ofproto.OFPPSPT_ETHERNET)
class OFPPortStatsPropEthernet(OFPPortStatsProp):
def __init__(self, type_=None, length=None, rx_frame_err=None,
rx_over_err=None, rx_crc_err=None, collisions=None):
self.type = type_
self.length = length
self.rx_frame_err = rx_frame_err
self.rx_over_err = rx_over_err
self.rx_crc_err = rx_crc_err
self.collisions = collisions
@classmethod
def parser(cls, buf):
ether = cls()
(ether.type, ether.length, ether.rx_frame_err, ether.rx_over_err,
ether.rx_crc_err, ether.collisions) = struct.unpack_from(
ofproto.OFP_PORT_STATS_PROP_ETHERNET_PACK_STR, buf, 0)
return ether
@OFPPortStatsProp.register_type(ofproto.OFPPSPT_OPTICAL)
class OFPPortStatsPropOptical(OFPPortStatsProp):
def __init__(self, type_=None, length=None, flags=None,
tx_freq_lmda=None, tx_offset=None, tx_grid_span=None,
rx_freq_lmda=None, rx_offset=None, rx_grid_span=None,
tx_pwr=None, rx_pwr=None, bias_current=None,
temperature=None):
self.type = type_
self.length = length
self.flags = flags
self.tx_freq_lmda = tx_freq_lmda
self.tx_offset = tx_offset
self.tx_grid_span = tx_grid_span
self.rx_freq_lmda = rx_freq_lmda
self.rx_offset = rx_offset
self.rx_grid_span = rx_grid_span
self.tx_pwr = tx_pwr
self.rx_pwr = rx_pwr
self.bias_current = bias_current
self.temperature = temperature
@classmethod
def parser(cls, buf):
optical = cls()
(optical.type, optical.length, optical.flags,
optical.tx_freq_lmda, optical.tx_offset, optical.tx_grid_span,
optical.rx_freq_lmda, optical.rx_offset, optical.rx_grid_span,
optical.tx_pwr, optical.rx_pwr, optical.bias_current,
optical.temperature) = struct.unpack_from(
ofproto.OFP_PORT_STATS_PROP_OPTICAL_PACK_STR, buf, 0)
return optical
@OFPPortStatsProp.register_type(ofproto.OFPPSPT_EXPERIMENTER)
class OFPPortStatsPropExperimenter(OFPPropCommonExperimenter4ByteData):
pass
class OFPPortStats(StringifyMixin):
def __init__(self, length=None, port_no=None, duration_sec=None,
duration_nsec=None, rx_packets=None, tx_packets=None,
rx_bytes=None, tx_bytes=None, rx_dropped=None,
tx_dropped=None, rx_errors=None, tx_errors=None,
properties=None):
super(OFPPortStats, self).__init__()
self.length = length
self.port_no = port_no
self.duration_sec = duration_sec
self.duration_nsec = duration_nsec
self.rx_packets = rx_packets
self.tx_packets = tx_packets
self.rx_bytes = rx_bytes
self.tx_bytes = tx_bytes
self.rx_dropped = rx_dropped
self.tx_dropped = tx_dropped
self.rx_errors = rx_errors
self.tx_errors = tx_errors
self.properties = properties
@classmethod
def parser(cls, buf, offset):
(length, port_no, duration_sec, duration_nsec, rx_packets,
tx_packets, rx_bytes, tx_bytes, rx_dropped, tx_dropped,
rx_errors, tx_errors) = struct.unpack_from(
ofproto.OFP_PORT_STATS_PACK_STR, buf, offset)
props = []
rest = buf[offset + ofproto.OFP_PORT_STATS_SIZE:offset + length]
while rest:
p, rest = OFPPortStatsProp.parse(rest)
props.append(p)
stats = cls(length, port_no, duration_sec, duration_nsec, rx_packets,
tx_packets, rx_bytes, tx_bytes, rx_dropped, tx_dropped,
rx_errors, tx_errors, props)
return stats
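# Ethernet- and optical-specific counters (e.g. rx_crc_err, tx_pwr) are not
# attributes of OFPPortStats itself; they arrive in the parsed properties
# list.  A rough sketch of pulling them out in a reply handler:
#
#     for prop in stat.properties:
#         if isinstance(prop, ofp_parser.OFPPortStatsPropEthernet):
#             self.logger.debug('rx_crc_err=%d', prop.rx_crc_err)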
@_set_stats_type(ofproto.OFPMP_PORT_STATS, OFPPortStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPPortStatsRequest(OFPMultipartRequest):
"""
Port statistics request message
    The controller uses this message to query port statistics.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
    port_no          Port number to read (OFPP_ANY for all ports)
================ ======================================================
Example::
def send_port_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPPortStatsRequest(datapath, 0, ofp.OFPP_ANY)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags, port_no, type_=None):
super(OFPPortStatsRequest, self).__init__(datapath, flags)
self.port_no = port_no
def _serialize_stats_body(self):
msg_pack_into(ofproto.OFP_PORT_MULTIPART_REQUEST_PACK_STR,
self.buf,
ofproto.OFP_MULTIPART_REQUEST_SIZE,
self.port_no)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_PORT_STATS, OFPPortStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPPortStatsReply(OFPMultipartReply):
"""
Port statistics reply message
The switch responds with this message to a port statistics request.
================ ======================================================
Attribute Description
================ ======================================================
    body             List of ``OFPPortStats`` instances
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def port_stats_reply_handler(self, ev):
ports = []
for stat in ev.msg.body:
                ports.append((stat.length, stat.port_no,
                              stat.duration_sec, stat.duration_nsec,
                              stat.rx_packets, stat.tx_packets,
                              stat.rx_bytes, stat.tx_bytes,
                              stat.rx_dropped, stat.tx_dropped,
                              stat.rx_errors, stat.tx_errors,
                              repr(stat.properties)))
self.logger.debug('PortStats: %s', ports)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPPortStatsReply, self).__init__(datapath, **kwargs)
@_set_msg_type(ofproto.OFPT_BARRIER_REQUEST)
class OFPBarrierRequest(MsgBase):
"""
Barrier request message
The controller sends this message to ensure message dependencies have
    been met or to receive notifications for completed operations.
Example::
def send_barrier_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPBarrierRequest(datapath)
datapath.send_msg(req)
"""
def __init__(self, datapath):
super(OFPBarrierRequest, self).__init__(datapath)
@_register_parser
@_set_msg_type(ofproto.OFPT_BARRIER_REPLY)
class OFPBarrierReply(MsgBase):
"""
Barrier reply message
The switch responds with this message to a barrier request.
Example::
@set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
def barrier_reply_handler(self, ev):
self.logger.debug('OFPBarrierReply received')
"""
def __init__(self, datapath):
super(OFPBarrierReply, self).__init__(datapath)
@_register_parser
@_set_msg_type(ofproto.OFPT_PORT_STATUS)
class OFPPortStatus(MsgBase):
"""
Port status message
    The switch notifies the controller of a port change.
================ ======================================================
Attribute Description
================ ======================================================
reason One of the following values.
| OFPPR_ADD
| OFPPR_DELETE
| OFPPR_MODIFY
    desc             Instance of ``OFPPort``
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def port_status_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
if msg.reason == ofp.OFPPR_ADD:
reason = 'ADD'
elif msg.reason == ofp.OFPPR_DELETE:
reason = 'DELETE'
elif msg.reason == ofp.OFPPR_MODIFY:
reason = 'MODIFY'
else:
reason = 'unknown'
self.logger.debug('OFPPortStatus received: reason=%s desc=%s',
reason, msg.desc)
"""
def __init__(self, datapath, reason=None, desc=None):
super(OFPPortStatus, self).__init__(datapath)
self.reason = reason
self.desc = desc
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPPortStatus, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
msg.reason = struct.unpack_from(
ofproto.OFP_PORT_STATUS_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)[0]
msg.desc = OFPPort.parser(msg.buf, ofproto.OFP_PORT_STATUS_DESC_OFFSET)
return msg
@_register_parser
@_set_msg_type(ofproto.OFPT_ROLE_STATUS)
class OFPRoleStatus(MsgBase):
"""
Role status message
    The switch notifies the controller of a role change.
================ ======================================================
Attribute Description
================ ======================================================
role One of the following values.
| OFPCR_ROLE_NOCHANGE
| OFPCR_ROLE_EQUAL
| OFPCR_ROLE_MASTER
reason One of the following values.
| OFPCRR_MASTER_REQUEST
| OFPCRR_CONFIG
| OFPCRR_EXPERIMENTER
generation_id Master Election Generation ID
    properties       List of ``OFPRoleProp`` subclass instances
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPRoleStatus, MAIN_DISPATCHER)
def role_status_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
if msg.role == ofp.OFPCR_ROLE_NOCHANGE:
role = 'ROLE NOCHANGE'
elif msg.role == ofp.OFPCR_ROLE_EQUAL:
role = 'ROLE EQUAL'
elif msg.role == ofp.OFPCR_ROLE_MASTER:
role = 'ROLE MASTER'
else:
role = 'unknown'
if msg.reason == ofp.OFPCRR_MASTER_REQUEST:
reason = 'MASTER REQUEST'
elif msg.reason == ofp.OFPCRR_CONFIG:
reason = 'CONFIG'
elif msg.reason == ofp.OFPCRR_EXPERIMENTER:
reason = 'EXPERIMENTER'
else:
reason = 'unknown'
self.logger.debug('OFPRoleStatus received: role=%s reason=%s '
'generation_id=%d properties=%s', role, reason,
msg.generation_id, repr(msg.properties))
"""
def __init__(self, datapath, role=None, reason=None,
generation_id=None, properties=None):
super(OFPRoleStatus, self).__init__(datapath)
self.role = role
self.reason = reason
self.generation_id = generation_id
self.properties = properties
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPRoleStatus, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
(msg.role, msg.reason, msg.generation_id) = struct.unpack_from(
ofproto.OFP_ROLE_STATUS_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)
msg.properties = []
rest = msg.buf[ofproto.OFP_ROLE_STATUS_SIZE:]
while rest:
p, rest = OFPRoleProp.parse(rest)
msg.properties.append(p)
return msg
@_register_parser
@_set_msg_type(ofproto.OFPT_TABLE_STATUS)
class OFPTableStatus(MsgBase):
"""
Table status message
    The switch notifies the controller of a change in table status.
================ ======================================================
Attribute Description
================ ======================================================
reason One of the following values.
| OFPTR_VACANCY_DOWN
| OFPTR_VACANCY_UP
table ``OFPTableDesc`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPTableStatus, MAIN_DISPATCHER)
        def table_status_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
if msg.reason == ofp.OFPTR_VACANCY_DOWN:
reason = 'VACANCY_DOWN'
elif msg.reason == ofp.OFPTR_VACANCY_UP:
reason = 'VACANCY_UP'
else:
reason = 'unknown'
self.logger.debug('OFPTableStatus received: reason=%s '
'table_id=%d config=0x%08x properties=%s',
reason, msg.table.table_id, msg.table.config,
repr(msg.table.properties))
"""
def __init__(self, datapath, reason=None, table=None):
super(OFPTableStatus, self).__init__(datapath)
self.reason = reason
self.table = table
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPTableStatus, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
(msg.reason,) = struct.unpack_from(ofproto.OFP_TABLE_STATUS_0_PACK_STR,
msg.buf, ofproto.OFP_HEADER_SIZE)
msg.table = OFPTableDesc.parser(msg.buf,
ofproto.OFP_TABLE_STATUS_0_SIZE)
return msg
@_register_parser
@_set_msg_type(ofproto.OFPT_REQUESTFORWARD)
class OFPRequestForward(MsgInMsgBase):
"""
Forwarded request message
    The switch forwards request messages from one controller to other
controllers.
================ ======================================================
Attribute Description
================ ======================================================
request ``OFPGroupMod`` or ``OFPMeterMod`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPRequestForward, MAIN_DISPATCHER)
def request_forward_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
if msg.request.msg_type == ofp.OFPT_GROUP_MOD:
self.logger.debug(
'OFPRequestForward received: request=OFPGroupMod('
'command=%d, type=%d, group_id=%d, command_bucket_id=%d, '
'buckets=%s, properties=%s)',
msg.request.command, msg.request.type,
msg.request.group_id, msg.request.command_bucket_id,
msg.request.buckets, repr(msg.request.properties))
elif msg.request.msg_type == ofp.OFPT_METER_MOD:
self.logger.debug(
'OFPRequestForward received: request=OFPMeterMod('
'command=%d, flags=%d, meter_id=%d, bands=%s)',
msg.request.command, msg.request.flags,
msg.request.meter_id, msg.request.bands)
else:
self.logger.debug(
'OFPRequestForward received: request=Unknown')
"""
def __init__(self, datapath, request=None):
super(OFPRequestForward, self).__init__(datapath)
self.request = request
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPRequestForward, cls).parser(
datapath, version, msg_type, msg_len, xid, buf)
req_buf = buf[ofproto.OFP_HEADER_SIZE:]
(_ver, _type, _len, _xid) = ofproto_parser.header(req_buf)
msg.request = ofproto_parser.msg(
datapath, _ver, _type, _len, _xid, req_buf)
return msg
def _serialize_body(self):
assert isinstance(self.request, (OFPGroupMod, OFPMeterMod))
self.request.serialize()
self.buf += self.request.buf
class OFPControllerStatusProp(OFPPropBase):
_TYPES = {}
@OFPControllerStatusProp.register_type(ofproto.OFPCSPT_URI)
class OFPControllerStatusPropUri(OFPControllerStatusProp):
_TYPE = {
'ascii': [
'uri',
]
}
def __init__(self, type_=None, length=None, uri=None):
super(OFPControllerStatusPropUri, self).__init__(type_, length)
self.uri = uri
@classmethod
def parser(cls, buf):
rest = cls.get_rest(buf)
pack_str = '!%ds' % len(rest)
(uri, ) = struct.unpack_from(pack_str, rest, 0)
return cls(uri=uri)
@OFPControllerStatusProp.register_type(ofproto.OFPCSPT_EXPERIMENTER)
class OFPControllerStatusPropExperimenter(OFPPropCommonExperimenter4ByteData):
pass
class OFPControllerStatusStats(StringifyMixin):
"""
Controller status structure
============== =========================================================
Attribute Description
============== =========================================================
length Length of this entry.
short_id ID number which identifies the controller.
role Bitmap of controller's role flags.
| OFPCR_ROLE_NOCHANGE
| OFPCR_ROLE_EQUAL
| OFPCR_ROLE_MASTER
| OFPCR_ROLE_SLAVE
reason Bitmap of controller status reason flags.
| OFPCSR_REQUEST
| OFPCSR_CHANNEL_STATUS
| OFPCSR_ROLE
| OFPCSR_CONTROLLER_ADDED
| OFPCSR_CONTROLLER_REMOVED
| OFPCSR_SHORT_ID
| OFPCSR_EXPERIMENTER
channel_status Bitmap of control channel status flags.
| OFPCT_STATUS_UP
| OFPCT_STATUS_DOWN
    properties     List of ``OFPControllerStatusProp`` subclass instances
============== =========================================================
"""
def __init__(self, short_id=None, role=None, reason=None,
channel_status=None, properties=None, length=None):
super(OFPControllerStatusStats, self).__init__()
self.length = length
self.short_id = short_id
self.role = role
self.reason = reason
self.channel_status = channel_status
self.properties = properties
@classmethod
def parser(cls, buf, offset):
status = cls()
(status.length, status.short_id,
status.role, status.reason,
status.channel_status) = struct.unpack_from(
ofproto.OFP_CONTROLLER_STATUS_PACK_STR, buf, offset)
offset += ofproto.OFP_CONTROLLER_STATUS_SIZE
status.properties = []
rest = buf[offset:offset + status.length]
while rest:
p, rest = OFPControllerStatusProp.parse(rest)
status.properties.append(p)
return status
@_set_stats_type(ofproto.OFPMP_CONTROLLER_STATUS, OFPControllerStatusStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPControllerStatusStatsRequest(OFPMultipartRequest):
"""
Controller status multipart request message
The controller uses this message to request the status, the roles
and the control channels of other controllers configured on the switch.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
================ ======================================================
Example::
def send_controller_status_multipart_request(self, datapath):
ofp_parser = datapath.ofproto_parser
            req = ofp_parser.OFPControllerStatusStatsRequest(datapath, 0)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, type_=None):
super(OFPControllerStatusStatsRequest,
self).__init__(datapath, flags)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_CONTROLLER_STATUS, OFPControllerStatusStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPControllerStatusStatsReply(OFPMultipartReply):
"""
Controller status multipart reply message
The switch responds with this message to a controller status
multipart request.
================ ======================================================
Attribute Description
================ ======================================================
    body             List of ``OFPControllerStatusStats`` instances
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPControllerStatusStatsReply,
MAIN_DISPATCHER)
def controller_status_multipart_reply_handler(self, ev):
status = []
for s in ev.msg.body:
status.append('short_id=%d role=%d reason=%d '
'channel_status=%d properties=%s' %
(s.short_id, s.role, s.reason,
s.channel_status, repr(s.properties)))
self.logger.debug('OFPControllerStatusStatsReply received: %s',
status)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPControllerStatusStatsReply, self).__init__(datapath,
**kwargs)
@_register_parser
@_set_msg_type(ofproto.OFPT_CONTROLLER_STATUS)
class OFPControllerStatus(MsgBase):
"""
Controller status message
The switch informs the controller about the status of the control
channel it maintains with each controller.
================ ======================================================
Attribute Description
================ ======================================================
status ``OFPControllerStatusStats`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPControllerStatus, MAIN_DISPATCHER)
def table(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
status = msg.status
if status.role == ofp.OFPCR_ROLE_NOCHANGE:
role = 'NOCHANGE'
elif status.role == ofp.OFPCR_ROLE_EQUAL:
role = 'EQUAL'
elif status.role == ofp.OFPCR_ROLE_MASTER:
role = 'MASTER'
elif status.role == ofp.OFPCR_ROLE_SLAVE:
role = 'SLAVE'
else:
role = 'unknown'
if status.reason == ofp.OFPCSR_REQUEST:
reason = 'REQUEST'
elif status.reason == ofp.OFPCSR_CHANNEL_STATUS:
reason = 'CHANNEL_STATUS'
elif status.reason == ofp.OFPCSR_ROLE:
reason = 'ROLE'
elif status.reason == ofp.OFPCSR_CONTROLLER_ADDED:
reason = 'CONTROLLER_ADDED'
elif status.reason == ofp.OFPCSR_CONTROLLER_REMOVED:
reason = 'CONTROLLER_REMOVED'
elif status.reason == ofp.OFPCSR_SHORT_ID:
reason = 'SHORT_ID'
elif status.reason == ofp.OFPCSR_EXPERIMENTER:
reason = 'EXPERIMENTER'
else:
reason = 'unknown'
            if status.channel_status == ofp.OFPCT_STATUS_UP:
                channel_status = 'UP'
            elif status.channel_status == ofp.OFPCT_STATUS_DOWN:
                channel_status = 'DOWN'
            else:
                channel_status = 'unknown'
self.logger.debug('OFPControllerStatus received: short_id=%d'
'role=%s reason=%s channel_status=%s '
'properties=%s',
status.short_id, role, reason, channel_status,
repr(status.properties))
"""
def __init__(self, datapath, status=None):
super(OFPControllerStatus, self).__init__(datapath)
self.status = status
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPControllerStatus, cls).parser(datapath, version,
msg_type, msg_len,
xid, buf)
msg.status = OFPControllerStatusStats.parser(msg.buf,
ofproto.OFP_HEADER_SIZE)
return msg
@_set_msg_type(ofproto.OFPT_PACKET_OUT)
class OFPPacketOut(MsgBase):
"""
Packet-Out message
    The controller uses this message to send a packet out through the
switch.
================ ======================================================
Attribute Description
================ ======================================================
buffer_id ID assigned by datapath (OFP_NO_BUFFER if none)
match Instance of ``OFPMatch``
(``in_port`` is mandatory in the match field)
actions list of OpenFlow action class
data Packet data of a binary type value or
                     an instance of packet.Packet.
================ ======================================================
Example::
def send_packet_out(self, datapath, buffer_id, in_port):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
            match = ofp_parser.OFPMatch(in_port=in_port)
actions = [ofp_parser.OFPActionOutput(ofp.OFPP_FLOOD, 0)]
req = ofp_parser.OFPPacketOut(datapath, buffer_id,
match, actions)
datapath.send_msg(req)
"""
def __init__(self, datapath, buffer_id=None, match=None, actions=None,
data=None, actions_len=None):
super(OFPPacketOut, self).__init__(datapath)
if buffer_id is None:
self.buffer_id = ofproto.OFP_NO_BUFFER
else:
self.buffer_id = buffer_id
self.actions_len = 0
assert 'in_port' in match
self.match = match
self.actions = actions
self.data = data
def _serialize_body(self):
# adjustment
offset = ofproto.OFP_PACKET_OUT_0_SIZE
match_len = self.match.serialize(self.buf, offset)
offset += match_len
self.actions_len = 0
for a in self.actions:
a.serialize(self.buf, offset)
offset += a.len
self.actions_len += a.len
if self.buffer_id == ofproto.OFP_NO_BUFFER:
assert self.data is not None
if isinstance(self.data, packet.Packet):
self.data.serialize()
self.buf += self.data.data
else:
self.buf += self.data
else:
assert self.data is None
msg_pack_into(ofproto.OFP_PACKET_OUT_0_PACK_STR,
self.buf, ofproto.OFP_HEADER_SIZE,
self.buffer_id, self.actions_len)
@classmethod
def from_jsondict(cls, dict_, decode_string=base64.b64decode,
**additional_args):
if isinstance(dict_['data'], dict):
data = dict_.pop('data')
ins = super(OFPPacketOut, cls).from_jsondict(dict_,
decode_string,
**additional_args)
ins.data = packet.Packet.from_jsondict(data['Packet'])
dict_['data'] = data
else:
ins = super(OFPPacketOut, cls).from_jsondict(dict_,
decode_string,
**additional_args)
return ins
@_register_parser
@_set_msg_type(ofproto.OFPT_FLOW_MOD)
class OFPFlowMod(MsgBase):
"""
Modify Flow entry message
The controller sends this message to modify the flow table.
================ ======================================================
Attribute Description
================ ======================================================
cookie Opaque controller-issued identifier
cookie_mask Mask used to restrict the cookie bits that must match
when the command is ``OPFFC_MODIFY*`` or
``OFPFC_DELETE*``
table_id ID of the table to put the flow in
command One of the following values.
| OFPFC_ADD
| OFPFC_MODIFY
| OFPFC_MODIFY_STRICT
| OFPFC_DELETE
| OFPFC_DELETE_STRICT
idle_timeout Idle time before discarding (seconds)
hard_timeout Max time before discarding (seconds)
priority Priority level of flow entry
buffer_id Buffered packet to apply to (or OFP_NO_BUFFER)
out_port For ``OFPFC_DELETE*`` commands, require matching
entries to include this as an output port
out_group For ``OFPFC_DELETE*`` commands, require matching
entries to include this as an output group
flags Bitmap of the following flags.
| OFPFF_SEND_FLOW_REM
| OFPFF_CHECK_OVERLAP
| OFPFF_RESET_COUNTS
| OFPFF_NO_PKT_COUNTS
| OFPFF_NO_BYT_COUNTS
importance Eviction precedence
match Instance of ``OFPMatch``
instructions list of ``OFPInstruction*`` instance
================ ======================================================
Example::
def send_flow_mod(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
cookie = cookie_mask = 0
table_id = 0
idle_timeout = hard_timeout = 0
priority = 32768
buffer_id = ofp.OFP_NO_BUFFER
importance = 0
match = ofp_parser.OFPMatch(in_port=1, eth_dst='ff:ff:ff:ff:ff:ff')
actions = [ofp_parser.OFPActionOutput(ofp.OFPP_NORMAL, 0)]
inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
actions)]
req = ofp_parser.OFPFlowMod(datapath, cookie, cookie_mask,
table_id, ofp.OFPFC_ADD,
idle_timeout, hard_timeout,
priority, buffer_id,
ofp.OFPP_ANY, ofp.OFPG_ANY,
ofp.OFPFF_SEND_FLOW_REM,
importance,
match, inst)
datapath.send_msg(req)
"""
def __init__(self, datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofproto.OFPFC_ADD,
idle_timeout=0, hard_timeout=0,
priority=ofproto.OFP_DEFAULT_PRIORITY,
buffer_id=ofproto.OFP_NO_BUFFER,
out_port=0, out_group=0, flags=0, importance=0,
match=None,
instructions=None):
instructions = instructions if instructions else []
super(OFPFlowMod, self).__init__(datapath)
self.cookie = cookie
self.cookie_mask = cookie_mask
self.table_id = table_id
self.command = command
self.idle_timeout = idle_timeout
self.hard_timeout = hard_timeout
self.priority = priority
self.buffer_id = buffer_id
self.out_port = out_port
self.out_group = out_group
self.flags = flags
self.importance = importance
if match is None:
match = OFPMatch()
assert isinstance(match, OFPMatch)
self.match = match
for i in instructions:
assert isinstance(i, OFPInstruction)
self.instructions = instructions
def _serialize_body(self):
msg_pack_into(ofproto.OFP_FLOW_MOD_PACK_STR0, self.buf,
ofproto.OFP_HEADER_SIZE,
self.cookie, self.cookie_mask, self.table_id,
self.command, self.idle_timeout, self.hard_timeout,
self.priority, self.buffer_id, self.out_port,
self.out_group, self.flags, self.importance)
offset = (ofproto.OFP_FLOW_MOD_SIZE -
ofproto.OFP_MATCH_SIZE)
match_len = self.match.serialize(self.buf, offset)
offset += match_len
for inst in self.instructions:
inst.serialize(self.buf, offset)
offset += inst.len
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPFlowMod, cls).parser(
datapath, version, msg_type, msg_len, xid, buf)
(msg.cookie, msg.cookie_mask, msg.table_id,
msg.command, msg.idle_timeout, msg.hard_timeout,
msg.priority, msg.buffer_id, msg.out_port,
msg.out_group, msg.flags, msg.importance) = struct.unpack_from(
ofproto.OFP_FLOW_MOD_PACK_STR0, msg.buf,
ofproto.OFP_HEADER_SIZE)
offset = ofproto.OFP_FLOW_MOD_SIZE - ofproto.OFP_HEADER_SIZE
msg.match = OFPMatch.parser(buf, offset)
offset += utils.round_up(msg.match.length, 8)
instructions = []
while offset < msg_len:
i = OFPInstruction.parser(buf, offset)
instructions.append(i)
offset += i.len
msg.instructions = instructions
return msg
class OFPInstruction(StringifyMixin):
_INSTRUCTION_TYPES = {}
@staticmethod
def register_instruction_type(types):
def _register_instruction_type(cls):
for type_ in types:
OFPInstruction._INSTRUCTION_TYPES[type_] = cls
return cls
return _register_instruction_type
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from('!HH', buf, offset)
cls_ = cls._INSTRUCTION_TYPES.get(type_)
return cls_.parser(buf, offset)
@OFPInstruction.register_instruction_type([ofproto.OFPIT_GOTO_TABLE])
class OFPInstructionGotoTable(OFPInstruction):
"""
Goto table instruction
This instruction indicates the next table in the processing pipeline.
================ ======================================================
Attribute Description
================ ======================================================
table_id Next table
================ ======================================================
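    Example (an illustrative sketch; the table id is arbitrary and
    ``ofp_parser`` is assumed to be ``datapath.ofproto_parser``)::
        inst = [ofp_parser.OFPInstructionGotoTable(table_id=1)]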
"""
def __init__(self, table_id, type_=None, len_=None):
super(OFPInstructionGotoTable, self).__init__()
self.type = ofproto.OFPIT_GOTO_TABLE
self.len = ofproto.OFP_INSTRUCTION_GOTO_TABLE_SIZE
self.table_id = table_id
@classmethod
def parser(cls, buf, offset):
(type_, len_, table_id) = struct.unpack_from(
ofproto.OFP_INSTRUCTION_GOTO_TABLE_PACK_STR,
buf, offset)
return cls(table_id)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_INSTRUCTION_GOTO_TABLE_PACK_STR,
buf, offset, self.type, self.len, self.table_id)
@OFPInstruction.register_instruction_type([ofproto.OFPIT_WRITE_METADATA])
class OFPInstructionWriteMetadata(OFPInstruction):
"""
Write metadata instruction
This instruction writes the masked metadata value into the metadata field.
================ ======================================================
Attribute Description
================ ======================================================
metadata Metadata value to write
metadata_mask Metadata write bitmask
================ ======================================================
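    Example (an illustrative sketch; the metadata value and mask are
    arbitrary and ``ofp_parser`` is assumed to be
    ``datapath.ofproto_parser``)::
        inst = [ofp_parser.OFPInstructionWriteMetadata(0x1, 0xff)]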
"""
def __init__(self, metadata, metadata_mask, type_=None, len_=None):
super(OFPInstructionWriteMetadata, self).__init__()
self.type = ofproto.OFPIT_WRITE_METADATA
self.len = ofproto.OFP_INSTRUCTION_WRITE_METADATA_SIZE
self.metadata = metadata
self.metadata_mask = metadata_mask
@classmethod
def parser(cls, buf, offset):
(type_, len_, metadata, metadata_mask) = struct.unpack_from(
ofproto.OFP_INSTRUCTION_WRITE_METADATA_PACK_STR,
buf, offset)
return cls(metadata, metadata_mask)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_INSTRUCTION_WRITE_METADATA_PACK_STR,
buf, offset, self.type, self.len, self.metadata,
self.metadata_mask)
@OFPInstruction.register_instruction_type([ofproto.OFPIT_WRITE_ACTIONS,
ofproto.OFPIT_APPLY_ACTIONS,
ofproto.OFPIT_CLEAR_ACTIONS])
class OFPInstructionActions(OFPInstruction):
"""
Actions instruction
This instruction writes/applies/clears the actions.
================ ======================================================
Attribute Description
================ ======================================================
type One of following values.
| OFPIT_WRITE_ACTIONS
| OFPIT_APPLY_ACTIONS
| OFPIT_CLEAR_ACTIONS
actions list of OpenFlow action class
================ ======================================================
``type`` attribute corresponds to ``type_`` parameter of __init__.
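    Example (an illustrative sketch; ``ofp`` and ``ofp_parser`` are assumed
    to be ``datapath.ofproto`` and ``datapath.ofproto_parser``)::
        actions = [ofp_parser.OFPActionOutput(ofp.OFPP_NORMAL, 0)]
        inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
                                                 actions)]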
"""
def __init__(self, type_, actions=None, len_=None):
super(OFPInstructionActions, self).__init__()
self.type = type_
for a in actions:
assert isinstance(a, OFPAction)
self.actions = actions
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_INSTRUCTION_ACTIONS_PACK_STR,
buf, offset)
offset += ofproto.OFP_INSTRUCTION_ACTIONS_SIZE
actions = []
actions_len = len_ - ofproto.OFP_INSTRUCTION_ACTIONS_SIZE
while actions_len > 0:
a = OFPAction.parser(buf, offset)
actions.append(a)
actions_len -= a.len
offset += a.len
inst = cls(type_, actions)
inst.len = len_
return inst
def serialize(self, buf, offset):
action_offset = offset + ofproto.OFP_INSTRUCTION_ACTIONS_SIZE
if self.actions:
for a in self.actions:
a.serialize(buf, action_offset)
action_offset += a.len
self.len = action_offset - offset
pad_len = utils.round_up(self.len, 8) - self.len
msg_pack_into("%dx" % pad_len, buf, action_offset)
self.len += pad_len
msg_pack_into(ofproto.OFP_INSTRUCTION_ACTIONS_PACK_STR,
buf, offset, self.type, self.len)
@OFPInstruction.register_instruction_type([ofproto.OFPIT_STAT_TRIGGER])
class OFPInstructionStatTrigger(OFPInstruction):
"""
Statistics triggers instruction
This instruction defines a set of statistics thresholds using OXS.
================ ======================================================
Attribute Description
================ ======================================================
flags Bitmap of the following flags.
| OFPSTF_PERIODIC
| OFPSTF_ONLY_FIRST
thresholds Instance of ``OFPStats``
================ ======================================================
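    Example (an illustrative sketch; the flow-count threshold is arbitrary
    and ``ofp``/``ofp_parser`` are assumed to come from the datapath)::
        thresholds = ofp_parser.OFPStats(flow_count=1000)
        inst = [ofp_parser.OFPInstructionStatTrigger(ofp.OFPSTF_PERIODIC,
                                                     thresholds)]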
"""
def __init__(self, flags, thresholds, type_=None, len_=None):
super(OFPInstructionStatTrigger, self).__init__()
self.type = ofproto.OFPIT_STAT_TRIGGER
self.len = len_
self.flags = flags
self.thresholds = thresholds
@classmethod
def parser(cls, buf, offset):
(type_, len_, flags) = struct.unpack_from(
ofproto.OFP_INSTRUCTION_STAT_TRIGGER_PACK_STR0, buf, offset)
# adjustment
offset += 8
thresholds = OFPStats.parser(buf, offset)
inst = cls(flags, thresholds)
inst.len = len_
return inst
def serialize(self, buf, offset):
stats_len = self.thresholds.serialize(buf, offset + 8)
self.len = 8 + stats_len
msg_pack_into(ofproto.OFP_INSTRUCTION_STAT_TRIGGER_PACK_STR0,
buf, offset, self.type, self.len, self.flags)
class OFPActionHeader(StringifyMixin):
def __init__(self, type_, len_):
self.type = type_
self.len = len_
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_HEADER_PACK_STR,
buf, offset, self.type, self.len)
class OFPAction(OFPActionHeader):
_ACTION_TYPES = {}
@staticmethod
def register_action_type(type_, len_):
def _register_action_type(cls):
cls.cls_action_type = type_
cls.cls_action_len = len_
OFPAction._ACTION_TYPES[cls.cls_action_type] = cls
return cls
return _register_action_type
def __init__(self):
cls = self.__class__
super(OFPAction, self).__init__(cls.cls_action_type,
cls.cls_action_len)
@classmethod
def parser(cls, buf, offset):
type_, len_ = struct.unpack_from(
ofproto.OFP_ACTION_HEADER_PACK_STR, buf, offset)
cls_ = cls._ACTION_TYPES.get(type_)
assert cls_ is not None
return cls_.parser(buf, offset)
@OFPAction.register_action_type(ofproto.OFPAT_OUTPUT,
ofproto.OFP_ACTION_OUTPUT_SIZE)
class OFPActionOutput(OFPAction):
"""
Output action
    This action outputs a packet to the specified switch port.
================ ======================================================
Attribute Description
================ ======================================================
port Output port
max_len Max length to send to controller
================ ======================================================
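    Example (an illustrative sketch; the port number is arbitrary)::
        actions = [ofp_parser.OFPActionOutput(port=1)]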
"""
def __init__(self, port, max_len=ofproto.OFPCML_MAX,
type_=None, len_=None):
super(OFPActionOutput, self).__init__()
self.port = port
self.max_len = max_len
@classmethod
def parser(cls, buf, offset):
type_, len_, port, max_len = struct.unpack_from(
ofproto.OFP_ACTION_OUTPUT_PACK_STR, buf, offset)
return cls(port, max_len)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_OUTPUT_PACK_STR, buf,
offset, self.type, self.len, self.port, self.max_len)
@OFPAction.register_action_type(ofproto.OFPAT_GROUP,
ofproto.OFP_ACTION_GROUP_SIZE)
class OFPActionGroup(OFPAction):
"""
Group action
This action indicates the group used to process the packet.
================ ======================================================
Attribute Description
================ ======================================================
group_id Group identifier
================ ======================================================
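    Example (an illustrative sketch; the group id is arbitrary and is
    expected to refer to a group already installed with ``OFPGroupMod``)::
        actions = [ofp_parser.OFPActionGroup(group_id=1)]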
"""
def __init__(self, group_id=0, type_=None, len_=None):
super(OFPActionGroup, self).__init__()
self.group_id = group_id
@classmethod
def parser(cls, buf, offset):
(type_, len_, group_id) = struct.unpack_from(
ofproto.OFP_ACTION_GROUP_PACK_STR, buf, offset)
return cls(group_id)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_GROUP_PACK_STR, buf,
offset, self.type, self.len, self.group_id)
@OFPAction.register_action_type(ofproto.OFPAT_SET_QUEUE,
ofproto.OFP_ACTION_SET_QUEUE_SIZE)
class OFPActionSetQueue(OFPAction):
"""
Set queue action
This action sets the queue id that will be used to map a flow to an
already-configured queue on a port.
================ ======================================================
Attribute Description
================ ======================================================
queue_id Queue ID for the packets
================ ======================================================
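    Example (an illustrative sketch; the queue id is arbitrary)::
        actions = [ofp_parser.OFPActionSetQueue(queue_id=1)]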
"""
def __init__(self, queue_id, type_=None, len_=None):
super(OFPActionSetQueue, self).__init__()
self.queue_id = queue_id
@classmethod
def parser(cls, buf, offset):
(type_, len_, queue_id) = struct.unpack_from(
ofproto.OFP_ACTION_SET_QUEUE_PACK_STR, buf, offset)
return cls(queue_id)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_SET_QUEUE_PACK_STR, buf,
offset, self.type, self.len, self.queue_id)
@OFPAction.register_action_type(ofproto.OFPAT_SET_MPLS_TTL,
ofproto.OFP_ACTION_MPLS_TTL_SIZE)
class OFPActionSetMplsTtl(OFPAction):
"""
Set MPLS TTL action
This action sets the MPLS TTL.
================ ======================================================
Attribute Description
================ ======================================================
mpls_ttl MPLS TTL
================ ======================================================
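    Example (an illustrative sketch; the TTL value is arbitrary)::
        actions = [ofp_parser.OFPActionSetMplsTtl(mpls_ttl=64)]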
"""
def __init__(self, mpls_ttl, type_=None, len_=None):
super(OFPActionSetMplsTtl, self).__init__()
self.mpls_ttl = mpls_ttl
@classmethod
def parser(cls, buf, offset):
(type_, len_, mpls_ttl) = struct.unpack_from(
ofproto.OFP_ACTION_MPLS_TTL_PACK_STR, buf, offset)
return cls(mpls_ttl)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_MPLS_TTL_PACK_STR, buf,
offset, self.type, self.len, self.mpls_ttl)
@OFPAction.register_action_type(ofproto.OFPAT_DEC_MPLS_TTL,
ofproto.OFP_ACTION_HEADER_SIZE)
class OFPActionDecMplsTtl(OFPAction):
"""
Decrement MPLS TTL action
This action decrements the MPLS TTL.
"""
def __init__(self, type_=None, len_=None):
super(OFPActionDecMplsTtl, self).__init__()
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_ACTION_HEADER_PACK_STR, buf, offset)
return cls()
@OFPAction.register_action_type(ofproto.OFPAT_SET_NW_TTL,
ofproto.OFP_ACTION_NW_TTL_SIZE)
class OFPActionSetNwTtl(OFPAction):
"""
Set IP TTL action
This action sets the IP TTL.
================ ======================================================
Attribute Description
================ ======================================================
nw_ttl IP TTL
================ ======================================================
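    Example (an illustrative sketch; the TTL value is arbitrary)::
        actions = [ofp_parser.OFPActionSetNwTtl(nw_ttl=64)]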
"""
def __init__(self, nw_ttl, type_=None, len_=None):
super(OFPActionSetNwTtl, self).__init__()
self.nw_ttl = nw_ttl
@classmethod
def parser(cls, buf, offset):
(type_, len_, nw_ttl) = struct.unpack_from(
ofproto.OFP_ACTION_NW_TTL_PACK_STR, buf, offset)
return cls(nw_ttl)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_NW_TTL_PACK_STR, buf, offset,
self.type, self.len, self.nw_ttl)
@OFPAction.register_action_type(ofproto.OFPAT_DEC_NW_TTL,
ofproto.OFP_ACTION_HEADER_SIZE)
class OFPActionDecNwTtl(OFPAction):
"""
Decrement IP TTL action
This action decrements the IP TTL.
"""
def __init__(self, type_=None, len_=None):
super(OFPActionDecNwTtl, self).__init__()
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_ACTION_HEADER_PACK_STR, buf, offset)
return cls()
@OFPAction.register_action_type(ofproto.OFPAT_COPY_TTL_OUT,
ofproto.OFP_ACTION_HEADER_SIZE)
class OFPActionCopyTtlOut(OFPAction):
"""
Copy TTL Out action
This action copies the TTL from the next-to-outermost header with TTL to
the outermost header with TTL.
"""
def __init__(self, type_=None, len_=None):
super(OFPActionCopyTtlOut, self).__init__()
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_ACTION_HEADER_PACK_STR, buf, offset)
return cls()
@OFPAction.register_action_type(ofproto.OFPAT_COPY_TTL_IN,
ofproto.OFP_ACTION_HEADER_SIZE)
class OFPActionCopyTtlIn(OFPAction):
"""
Copy TTL In action
This action copies the TTL from the outermost header with TTL to the
next-to-outermost header with TTL.
"""
def __init__(self, type_=None, len_=None):
super(OFPActionCopyTtlIn, self).__init__()
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_ACTION_HEADER_PACK_STR, buf, offset)
return cls()
@OFPAction.register_action_type(ofproto.OFPAT_PUSH_VLAN,
ofproto.OFP_ACTION_PUSH_SIZE)
class OFPActionPushVlan(OFPAction):
"""
Push VLAN action
This action pushes a new VLAN tag to the packet.
================ ======================================================
Attribute Description
================ ======================================================
    ethertype        Ether type. The default is 802.1Q (0x8100).
================ ======================================================
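    Example (an illustrative sketch; the VLAN ID of 10 is arbitrary and is
    set by a following set-field action)::
        actions = [ofp_parser.OFPActionPushVlan(),
                   ofp_parser.OFPActionSetField(
                       vlan_vid=(ofp.OFPVID_PRESENT | 10))]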
"""
def __init__(self, ethertype=ether.ETH_TYPE_8021Q, type_=None, len_=None):
super(OFPActionPushVlan, self).__init__()
self.ethertype = ethertype
@classmethod
def parser(cls, buf, offset):
(type_, len_, ethertype) = struct.unpack_from(
ofproto.OFP_ACTION_PUSH_PACK_STR, buf, offset)
return cls(ethertype)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_PUSH_PACK_STR, buf, offset,
self.type, self.len, self.ethertype)
@OFPAction.register_action_type(ofproto.OFPAT_PUSH_MPLS,
ofproto.OFP_ACTION_PUSH_SIZE)
class OFPActionPushMpls(OFPAction):
"""
Push MPLS action
This action pushes a new MPLS header to the packet.
================ ======================================================
Attribute Description
================ ======================================================
ethertype Ether type
================ ======================================================
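    Example (an illustrative sketch; the MPLS label of 100 is arbitrary and
    is set by a following set-field action)::
        actions = [ofp_parser.OFPActionPushMpls(),
                   ofp_parser.OFPActionSetField(mpls_label=100)]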
"""
def __init__(self, ethertype=ether.ETH_TYPE_MPLS, type_=None, len_=None):
super(OFPActionPushMpls, self).__init__()
self.ethertype = ethertype
@classmethod
def parser(cls, buf, offset):
(type_, len_, ethertype) = struct.unpack_from(
ofproto.OFP_ACTION_PUSH_PACK_STR, buf, offset)
return cls(ethertype)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_PUSH_PACK_STR, buf, offset,
self.type, self.len, self.ethertype)
@OFPAction.register_action_type(ofproto.OFPAT_POP_VLAN,
ofproto.OFP_ACTION_HEADER_SIZE)
class OFPActionPopVlan(OFPAction):
"""
Pop VLAN action
This action pops the outermost VLAN tag from the packet.
"""
def __init__(self, type_=None, len_=None):
super(OFPActionPopVlan, self).__init__()
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_ACTION_HEADER_PACK_STR, buf, offset)
return cls()
@OFPAction.register_action_type(ofproto.OFPAT_POP_MPLS,
ofproto.OFP_ACTION_POP_MPLS_SIZE)
class OFPActionPopMpls(OFPAction):
"""
Pop MPLS action
This action pops the MPLS header from the packet.
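    Example (an illustrative sketch; ethertype 0x0800 assumes the payload
    under the popped label is IPv4)::
        actions = [ofp_parser.OFPActionPopMpls(ethertype=0x0800)]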
"""
def __init__(self, ethertype=ether.ETH_TYPE_IP, type_=None, len_=None):
super(OFPActionPopMpls, self).__init__()
self.ethertype = ethertype
@classmethod
def parser(cls, buf, offset):
(type_, len_, ethertype) = struct.unpack_from(
ofproto.OFP_ACTION_POP_MPLS_PACK_STR, buf, offset)
return cls(ethertype)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_POP_MPLS_PACK_STR, buf, offset,
self.type, self.len, self.ethertype)
@OFPAction.register_action_type(ofproto.OFPAT_SET_FIELD,
ofproto.OFP_ACTION_SET_FIELD_SIZE)
class OFPActionSetField(OFPAction):
"""
Set field action
This action modifies a header field in the packet.
    The set of keywords available for this action is the same as for
    ``OFPMatch``, with or without a mask.
Example::
set_field = OFPActionSetField(eth_src="00:00:00:00:00:00")
set_field = OFPActionSetField(ipv4_src=("192.168.100.0",
"255.255.255.0"))
"""
def __init__(self, field=None, **kwargs):
super(OFPActionSetField, self).__init__()
assert len(kwargs) == 1
key = list(kwargs.keys())[0]
value = kwargs[key]
assert isinstance(key, (str, six.text_type))
self.key = key
self.value = value
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_ACTION_SET_FIELD_PACK_STR, buf, offset)
(n, value, mask, _len) = ofproto.oxm_parse(buf, offset + 4)
k, uv = ofproto.oxm_to_user(n, value, mask)
action = cls(**{k: uv})
action.len = len_
return action
def serialize(self, buf, offset):
n, value, mask = ofproto.oxm_from_user(self.key, self.value)
len_ = ofproto.oxm_serialize(n, value, mask, buf, offset + 4)
self.len = utils.round_up(4 + len_, 8)
msg_pack_into('!HH', buf, offset, self.type, self.len)
pad_len = self.len - (4 + len_)
msg_pack_into("%dx" % pad_len, buf, offset + 4 + len_)
def to_jsondict(self):
return {
self.__class__.__name__: {
'field': ofproto.oxm_to_jsondict(self.key, self.value),
"len": self.len,
"type": self.type
}
}
@classmethod
def from_jsondict(cls, dict_):
k, v = ofproto.oxm_from_jsondict(dict_['field'])
return OFPActionSetField(**{k: v})
def stringify_attrs(self):
yield (self.key, self.value)
@OFPAction.register_action_type(ofproto.OFPAT_PUSH_PBB,
ofproto.OFP_ACTION_PUSH_SIZE)
class OFPActionPushPbb(OFPAction):
"""
Push PBB action
This action pushes a new PBB header to the packet.
================ ======================================================
Attribute Description
================ ======================================================
ethertype Ether type
================ ======================================================
"""
def __init__(self, ethertype, type_=None, len_=None):
super(OFPActionPushPbb, self).__init__()
self.ethertype = ethertype
@classmethod
def parser(cls, buf, offset):
(type_, len_, ethertype) = struct.unpack_from(
ofproto.OFP_ACTION_PUSH_PACK_STR, buf, offset)
return cls(ethertype)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_PUSH_PACK_STR, buf, offset,
self.type, self.len, self.ethertype)
@OFPAction.register_action_type(ofproto.OFPAT_POP_PBB,
ofproto.OFP_ACTION_HEADER_SIZE)
class OFPActionPopPbb(OFPAction):
"""
Pop PBB action
This action pops the outermost PBB service instance header from
the packet.
"""
def __init__(self, type_=None, len_=None):
super(OFPActionPopPbb, self).__init__()
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_ACTION_HEADER_PACK_STR, buf, offset)
return cls()
@OFPAction.register_action_type(ofproto.OFPAT_COPY_FIELD,
ofproto.OFP_ACTION_COPY_FIELD_SIZE)
class OFPActionCopyField(OFPAction):
"""
Copy Field action
    This action copies a value from one OXM field (a packet header field
    or a packet register) to another.
================ ======================================================
Attribute Description
================ ======================================================
n_bits Number of bits to copy.
src_offset Starting bit offset in source.
dst_offset Starting bit offset in destination.
oxm_ids List of ``OFPOxmId`` instances.
The first element of this list, src_oxm_id,
identifies the field where the value is copied from.
The second element of this list, dst_oxm_id,
identifies the field where the value is copied to.
The default is [].
================ ======================================================
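    Example (an illustrative sketch; the field names, offsets and bit count
    are arbitrary)::
        action = ofp_parser.OFPActionCopyField(
            n_bits=48, src_offset=0, dst_offset=0,
            oxm_ids=[ofp_parser.OFPOxmId('eth_src'),
                     ofp_parser.OFPOxmId('eth_dst')])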
"""
def __init__(self, n_bits=0, src_offset=0, dst_offset=0, oxm_ids=None,
type_=None, len_=None):
oxm_ids = oxm_ids if oxm_ids else []
super(OFPActionCopyField, self).__init__()
self.n_bits = n_bits
self.src_offset = src_offset
self.dst_offset = dst_offset
self.oxm_ids = oxm_ids
@classmethod
def parser(cls, buf, offset):
(type_, len_, n_bits, src_offset, dst_offset) = struct.unpack_from(
ofproto.OFP_ACTION_COPY_FIELD_PACK_STR, buf, offset)
offset += ofproto.OFP_ACTION_COPY_FIELD_SIZE
rest = buf[offset:offset + len_]
oxm_ids = []
while rest:
i, rest = OFPOxmId.parse(rest)
oxm_ids.append(i)
return cls(n_bits, src_offset, dst_offset, oxm_ids, type_, len_)
def serialize(self, buf, offset):
oxm_ids_buf = bytearray()
for i in self.oxm_ids:
oxm_ids_buf += i.serialize()
self.len += len(oxm_ids_buf)
msg_pack_into(ofproto.OFP_ACTION_COPY_FIELD_PACK_STR, buf,
offset, self.type, self.len,
self.n_bits, self.src_offset, self.dst_offset)
buf += oxm_ids_buf
@OFPAction.register_action_type(ofproto.OFPAT_METER,
ofproto.OFP_ACTION_METER_SIZE)
class OFPActionMeter(OFPAction):
"""
Meter action
    This action applies a meter (rate limiter) to the packet.
================ ======================================================
Attribute Description
================ ======================================================
meter_id Meter instance
================ ======================================================
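    Example (an illustrative sketch; the meter id is arbitrary and is
    expected to refer to a meter already installed with ``OFPMeterMod``)::
        actions = [ofp_parser.OFPActionMeter(meter_id=1)]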
"""
def __init__(self, meter_id,
type_=None, len_=None):
super(OFPActionMeter, self).__init__()
self.meter_id = meter_id
@classmethod
def parser(cls, buf, offset):
type_, len_, meter_id = struct.unpack_from(
ofproto.OFP_ACTION_METER_PACK_STR, buf, offset)
return cls(meter_id)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_METER_PACK_STR, buf,
offset, self.type, self.len, self.meter_id)
@OFPAction.register_action_type(
ofproto.OFPAT_EXPERIMENTER,
ofproto.OFP_ACTION_EXPERIMENTER_HEADER_SIZE)
class OFPActionExperimenter(OFPAction):
"""
Experimenter action
This action is an extensible action for the experimenter.
================ ======================================================
Attribute Description
================ ======================================================
experimenter Experimenter ID
================ ======================================================
.. Note::
For the list of the supported Nicira experimenter actions,
please refer to :ref:`ryu.ofproto.nx_actions <nx_actions_structures>`.
"""
def __init__(self, experimenter):
super(OFPActionExperimenter, self).__init__()
self.type = ofproto.OFPAT_EXPERIMENTER
self.experimenter = experimenter
self.len = None
@classmethod
def parser(cls, buf, offset):
(type_, len_, experimenter) = struct.unpack_from(
ofproto.OFP_ACTION_EXPERIMENTER_HEADER_PACK_STR, buf, offset)
data = buf[(offset + ofproto.OFP_ACTION_EXPERIMENTER_HEADER_SIZE
): offset + len_]
if experimenter == ofproto_common.NX_EXPERIMENTER_ID:
obj = NXAction.parse(data)
else:
obj = OFPActionExperimenterUnknown(experimenter, data)
obj.len = len_
return obj
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_EXPERIMENTER_HEADER_PACK_STR,
buf, offset, self.type, self.len, self.experimenter)
class OFPActionExperimenterUnknown(OFPActionExperimenter):
def __init__(self, experimenter, data=None, type_=None, len_=None):
super(OFPActionExperimenterUnknown,
self).__init__(experimenter=experimenter)
self.data = data
def serialize(self, buf, offset):
# fixup
data = self.data
if data is None:
data = bytearray()
self.len = (utils.round_up(len(data), 8) +
ofproto.OFP_ACTION_EXPERIMENTER_HEADER_SIZE)
super(OFPActionExperimenterUnknown, self).serialize(buf, offset)
msg_pack_into('!%ds' % len(self.data),
buf,
offset + ofproto.OFP_ACTION_EXPERIMENTER_HEADER_SIZE,
self.data)
class OFPGroupProp(OFPPropBase):
_TYPES = {}
@OFPGroupProp.register_type(ofproto.OFPGPT_EXPERIMENTER)
class OFPGroupPropExperimenter(OFPPropCommonExperimenter4ByteData):
pass
@_register_parser
@_set_msg_type(ofproto.OFPT_GROUP_MOD)
class OFPGroupMod(MsgBase):
"""
Modify group entry message
The controller sends this message to modify the group table.
================== ======================================================
Attribute Description
================== ======================================================
command One of the following values.
| OFPGC_ADD
| OFPGC_MODIFY
| OFPGC_DELETE
| OFPGC_INSERT_BUCKET
| OFPGC_REMOVE_BUCKET
type One of the following values.
| OFPGT_ALL
| OFPGT_SELECT
| OFPGT_INDIRECT
| OFPGT_FF
group_id Group identifier.
    command_bucket_id  Bucket ID used by the OFPGC_INSERT_BUCKET and
                       OFPGC_REMOVE_BUCKET commands.
buckets List of ``OFPBucket`` instance
properties List of ``OFPGroupProp`` instance
================== ======================================================
``type`` attribute corresponds to ``type_`` parameter of __init__.
Example::
def send_group_mod(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
port = 1
max_len = 2000
actions = [ofp_parser.OFPActionOutput(port, max_len)]
weight = 100
watch_port = 0
watch_group = 0
buckets = [ofp_parser.OFPBucket(weight, watch_port, watch_group,
actions)]
group_id = 1
            command_bucket_id = 1
req = ofp_parser.OFPGroupMod(datapath, ofp.OFPGC_ADD,
ofp.OFPGT_SELECT, group_id,
command_bucket_id, buckets)
datapath.send_msg(req)
"""
def __init__(self, datapath, command=ofproto.OFPGC_ADD,
type_=ofproto.OFPGT_ALL, group_id=0, command_bucket_id=0,
buckets=None, properties=None, bucket_array_len=None):
buckets = buckets if buckets else []
properties = properties if properties else []
super(OFPGroupMod, self).__init__(datapath)
self.command = command
self.type = type_
self.group_id = group_id
self.command_bucket_id = command_bucket_id
self.buckets = buckets
self.properties = properties
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPGroupMod, cls).parser(
datapath, version, msg_type, msg_len, xid, buf)
(msg.command, msg.type, msg.group_id, msg.bucket_array_len,
msg.command_bucket_id) = struct.unpack_from(
ofproto.OFP_GROUP_MOD_PACK_STR, buf, ofproto.OFP_HEADER_SIZE)
offset = ofproto.OFP_GROUP_MOD_SIZE
bucket_buf = buf[offset:offset + msg.bucket_array_len]
msg.buckets = []
while bucket_buf:
bucket = OFPBucket.parser(bucket_buf, 0)
msg.buckets.append(bucket)
bucket_buf = bucket_buf[bucket.len:]
offset += msg.bucket_array_len
rest = buf[offset:offset + msg.msg_len]
while rest:
p, rest = OFPGroupProp.parse(rest)
msg.properties.append(p)
return msg
def _serialize_body(self):
offset = ofproto.OFP_GROUP_MOD_SIZE
self.bucket_array_len = 0
for b in self.buckets:
b.serialize(self.buf, offset)
offset += b.len
self.bucket_array_len += b.len
msg_pack_into(ofproto.OFP_GROUP_MOD_PACK_STR, self.buf,
ofproto.OFP_HEADER_SIZE,
self.command, self.type, self.group_id,
self.bucket_array_len, self.command_bucket_id)
bin_props = bytearray()
for p in self.properties:
bin_props += p.serialize()
self.buf += bin_props
class OFPPortModProp(OFPPropBase):
_TYPES = {}
class OFPPortModPropEthernet(OFPPortModProp):
def __init__(self, type_=None, length=None, advertise=None):
self.type = type_
self.advertise = advertise
def serialize(self):
# fixup
self.length = struct.calcsize(
ofproto.OFP_PORT_MOD_PROP_ETHERNET_PACK_STR)
buf = bytearray()
msg_pack_into(ofproto.OFP_PORT_MOD_PROP_ETHERNET_PACK_STR,
buf, 0, self.type, self.length, self.advertise)
return buf
class OFPPortModPropOptical(OFPPortModProp):
def __init__(self, type_=None, length=None, configure=None,
freq_lmda=None, fl_offset=None, grid_span=None,
tx_pwr=None):
self.type = type_
self.length = length
self.configure = configure
self.freq_lmda = freq_lmda
self.fl_offset = fl_offset
self.grid_span = grid_span
self.tx_pwr = tx_pwr
def serialize(self):
# fixup
self.length = struct.calcsize(
ofproto.OFP_PORT_MOD_PROP_OPTICAL_PACK_STR)
buf = bytearray()
msg_pack_into(ofproto.OFP_PORT_MOD_PROP_OPTICAL_PACK_STR, buf, 0,
self.type, self.length, self.configure, self.freq_lmda,
self.fl_offset, self.grid_span, self.tx_pwr)
return buf
class OFPPortModPropExperimenter(OFPPropCommonExperimenter4ByteData):
pass
@_set_msg_type(ofproto.OFPT_PORT_MOD)
class OFPPortMod(MsgBase):
"""
Port modification message
    The controller sends this message to modify the behavior of the port.
================ ======================================================
Attribute Description
================ ======================================================
port_no Port number to modify
hw_addr The hardware address that must be the same as hw_addr
of ``OFPPort`` of ``OFPSwitchFeatures``
config Bitmap of configuration flags.
| OFPPC_PORT_DOWN
| OFPPC_NO_RECV
| OFPPC_NO_FWD
| OFPPC_NO_PACKET_IN
mask Bitmap of configuration flags above to be changed
properties List of ``OFPPortModProp`` subclass instance
================ ======================================================
Example::
def send_port_mod(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
port_no = 3
hw_addr = 'fa:c8:e8:76:1d:7e'
config = 0
mask = (ofp.OFPPC_PORT_DOWN | ofp.OFPPC_NO_RECV |
ofp.OFPPC_NO_FWD | ofp.OFPPC_NO_PACKET_IN)
advertise = (ofp.OFPPF_10MB_HD | ofp.OFPPF_100MB_FD |
ofp.OFPPF_1GB_FD | ofp.OFPPF_COPPER |
ofp.OFPPF_AUTONEG | ofp.OFPPF_PAUSE |
ofp.OFPPF_PAUSE_ASYM)
properties = [ofp_parser.OFPPortModPropEthernet(advertise)]
req = ofp_parser.OFPPortMod(datapath, port_no, hw_addr, config,
mask, properties)
datapath.send_msg(req)
"""
_TYPE = {
'ascii': [
'hw_addr',
]
}
def __init__(self, datapath, port_no=0, hw_addr='00:00:00:00:00:00',
config=0, mask=0, properties=None):
super(OFPPortMod, self).__init__(datapath)
self.port_no = port_no
self.hw_addr = hw_addr
self.config = config
self.mask = mask
self.properties = properties or []
def _serialize_body(self):
bin_props = bytearray()
for p in self.properties:
bin_props += p.serialize()
msg_pack_into(ofproto.OFP_PORT_MOD_PACK_STR, self.buf,
ofproto.OFP_HEADER_SIZE,
self.port_no, addrconv.mac.text_to_bin(self.hw_addr),
self.config,
self.mask)
self.buf += bin_props
class OFPGroupBucketProp(OFPPropBase):
_TYPES = {}
@OFPGroupBucketProp.register_type(ofproto.OFPGBPT_WEIGHT)
class OFPGroupBucketPropWeight(OFPGroupBucketProp):
def __init__(self, type_=None, length=None, weight=None):
super(OFPGroupBucketPropWeight, self).__init__(type_, length)
self.weight = weight
@classmethod
def parser(cls, buf):
prop = cls()
(prop.type, prop.length, prop.weight) = struct.unpack_from(
ofproto.OFP_GROUP_BUCKET_PROP_WEIGHT_PACK_STR, buf, 0)
return prop
def serialize(self):
# fixup
self.length = ofproto.OFP_GROUP_BUCKET_PROP_WEIGHT_SIZE
buf = bytearray()
msg_pack_into(ofproto.OFP_GROUP_BUCKET_PROP_WEIGHT_PACK_STR, buf, 0,
self.type, self.length, self.weight)
return buf
@OFPGroupBucketProp.register_type(ofproto.OFPGBPT_WATCH_PORT)
@OFPGroupBucketProp.register_type(ofproto.OFPGBPT_WATCH_GROUP)
class OFPGroupBucketPropWatch(OFPGroupBucketProp):
def __init__(self, type_=None, length=None, watch=None):
super(OFPGroupBucketPropWatch, self).__init__(type_, length)
self.watch = watch
@classmethod
def parser(cls, buf):
prop = cls()
(prop.type, prop.length, prop.watch) = struct.unpack_from(
ofproto.OFP_GROUP_BUCKET_PROP_WATCH_PACK_STR, buf, 0)
return prop
def serialize(self):
# fixup
self.length = ofproto.OFP_GROUP_BUCKET_PROP_WATCH_SIZE
buf = bytearray()
msg_pack_into(ofproto.OFP_GROUP_BUCKET_PROP_WATCH_PACK_STR, buf, 0,
self.type, self.length, self.watch)
return buf
@OFPGroupBucketProp.register_type(ofproto.OFPGBPT_EXPERIMENTER)
class OFPGroupBucketPropExperimenter(OFPPropCommonExperimenter4ByteData):
pass
class OFPBucket(StringifyMixin):
def __init__(self, bucket_id=0, actions=None, properties=None,
len_=None, action_array_len=None):
actions = actions if actions else []
properties = properties if properties else []
super(OFPBucket, self).__init__()
self.bucket_id = bucket_id
self.actions = actions
self.properties = properties
@classmethod
def parser(cls, buf, offset):
msg = cls()
(msg.len, msg.action_array_len,
msg.bucket_id) = struct.unpack_from(
ofproto.OFP_BUCKET_PACK_STR, buf, offset)
offset += ofproto.OFP_BUCKET_SIZE
action_buf = buf[offset:offset + msg.action_array_len]
msg.actions = []
while action_buf:
action = OFPAction.parser(action_buf, 0)
msg.actions.append(action)
action_buf = action_buf[action.len:]
offset += msg.action_array_len
rest = buf[offset:offset + msg.len]
msg.properties = []
while rest:
p, rest = OFPGroupBucketProp.parse(rest)
msg.properties.append(p)
return msg
def serialize(self, buf, offset):
action_offset = offset + ofproto.OFP_BUCKET_SIZE
self.action_array_len = 0
for a in self.actions:
a.serialize(buf, action_offset)
action_offset += a.len
self.action_array_len += a.len
bin_props = bytearray()
for p in self.properties:
bin_props += p.serialize()
props_len = len(bin_props)
self.len = utils.round_up(ofproto.OFP_BUCKET_SIZE +
self.action_array_len + props_len, 8)
msg_pack_into(ofproto.OFP_BUCKET_PACK_STR, buf, offset,
self.len, self.action_array_len, self.bucket_id)
buf += bin_props
@_set_msg_type(ofproto.OFPT_ROLE_REQUEST)
class OFPRoleRequest(MsgBase):
"""
Role request message
The controller uses this message to change its role.
================ ======================================================
Attribute Description
================ ======================================================
role One of the following values.
| OFPCR_ROLE_NOCHANGE
| OFPCR_ROLE_EQUAL
| OFPCR_ROLE_MASTER
| OFPCR_ROLE_SLAVE
short_id ID number for the controller.
The default is OFPCID_UNDEFINED.
generation_id Master Election Generation ID
================ ======================================================
Example::
def send_role_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPRoleRequest(datapath, ofp.OFPCR_ROLE_EQUAL,
ofp.OFPCID_UNDEFINED, 0)
datapath.send_msg(req)
"""
def __init__(self, datapath, role=None, short_id=None,
generation_id=None):
super(OFPRoleRequest, self).__init__(datapath)
self.role = role
self.short_id = short_id
self.generation_id = generation_id
def _serialize_body(self):
assert self.role is not None
assert self.generation_id is not None
if self.short_id is None:
self.short_id = ofproto.OFPCID_UNDEFINED
msg_pack_into(ofproto.OFP_ROLE_REQUEST_PACK_STR,
self.buf, ofproto.OFP_HEADER_SIZE,
self.role, self.short_id, self.generation_id)
@_register_parser
@_set_msg_type(ofproto.OFPT_ROLE_REPLY)
class OFPRoleReply(MsgBase):
"""
Role reply message
The switch responds with this message to a role request.
================ ======================================================
Attribute Description
================ ======================================================
role One of the following values.
| OFPCR_ROLE_NOCHANGE
| OFPCR_ROLE_EQUAL
| OFPCR_ROLE_MASTER
| OFPCR_ROLE_SLAVE
short_id ID number for the controller.
The default is OFPCID_UNDEFINED.
generation_id Master Election Generation ID
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPRoleReply, MAIN_DISPATCHER)
def role_reply_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
if msg.role == ofp.OFPCR_ROLE_NOCHANGE:
role = 'NOCHANGE'
elif msg.role == ofp.OFPCR_ROLE_EQUAL:
role = 'EQUAL'
elif msg.role == ofp.OFPCR_ROLE_MASTER:
role = 'MASTER'
elif msg.role == ofp.OFPCR_ROLE_SLAVE:
role = 'SLAVE'
else:
role = 'unknown'
self.logger.debug('OFPRoleReply received: '
'role=%s short_id=%d, generation_id=%d',
role, msg.short_id, msg.generation_id)
"""
def __init__(self, datapath, role=None, short_id=None,
generation_id=None):
super(OFPRoleReply, self).__init__(datapath)
self.role = role
self.short_id = short_id
self.generation_id = generation_id
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPRoleReply, cls).parser(datapath, version,
msg_type, msg_len, xid,
buf)
(msg.role, msg.short_id, msg.generation_id) = struct.unpack_from(
ofproto.OFP_ROLE_REQUEST_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)
return msg
class OFPAsyncConfigProp(OFPPropBase):
_TYPES = {}
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_PACKET_IN_SLAVE)
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_PACKET_IN_MASTER)
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_PORT_STATUS_SLAVE)
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_PORT_STATUS_MASTER)
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_FLOW_REMOVED_SLAVE)
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_FLOW_REMOVED_MASTER)
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_ROLE_STATUS_SLAVE)
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_ROLE_STATUS_MASTER)
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_TABLE_STATUS_SLAVE)
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_TABLE_STATUS_MASTER)
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_REQUESTFORWARD_SLAVE)
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_REQUESTFORWARD_MASTER)
class OFPAsyncConfigPropReasons(OFPAsyncConfigProp):
def __init__(self, type_=None, length=None, mask=None):
self.type = type_
self.length = length
self.mask = mask
@classmethod
def parser(cls, buf):
reasons = cls()
(reasons.type, reasons.length, reasons.mask) = struct.unpack_from(
ofproto.OFP_ASYNC_CONFIG_PROP_REASONS_PACK_STR, buf, 0)
return reasons
def serialize(self):
# fixup
self.length = ofproto.OFP_ASYNC_CONFIG_PROP_REASONS_SIZE
buf = bytearray()
msg_pack_into(ofproto.OFP_ASYNC_CONFIG_PROP_REASONS_PACK_STR, buf, 0,
self.type, self.length, self.mask)
return buf
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_EXPERIMENTER_SLAVE)
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_EXPERIMENTER_MASTER)
class OFPAsyncConfigPropExperimenter(OFPPropCommonExperimenter4ByteData):
pass
@_set_msg_type(ofproto.OFPT_GET_ASYNC_REQUEST)
class OFPGetAsyncRequest(MsgBase):
"""
Get asynchronous configuration request message
    The controller uses this message to query the asynchronous message
    configuration.
Example::
def send_get_async_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPGetAsyncRequest(datapath)
datapath.send_msg(req)
"""
def __init__(self, datapath):
super(OFPGetAsyncRequest, self).__init__(datapath)
@_register_parser
@_set_msg_type(ofproto.OFPT_GET_ASYNC_REPLY)
class OFPGetAsyncReply(MsgBase):
"""
Get asynchronous configuration reply message
The switch responds with this message to a get asynchronous configuration
request.
================== ====================================================
Attribute Description
================== ====================================================
properties List of ``OFPAsyncConfigProp`` subclass instances
================== ====================================================
Example::
@set_ev_cls(ofp_event.EventOFPGetAsyncReply, MAIN_DISPATCHER)
def get_async_reply_handler(self, ev):
msg = ev.msg
self.logger.debug('OFPGetAsyncReply received: '
'properties=%s', repr(msg.properties))
"""
def __init__(self, datapath, properties=None):
super(OFPGetAsyncReply, self).__init__(datapath)
self.properties = properties
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPGetAsyncReply, cls).parser(datapath, version,
msg_type, msg_len,
xid, buf)
msg.properties = []
rest = msg.buf[ofproto.OFP_HEADER_SIZE:]
while rest:
p, rest = OFPAsyncConfigProp.parse(rest)
msg.properties.append(p)
return msg
@_set_msg_type(ofproto.OFPT_SET_ASYNC)
class OFPSetAsync(MsgBase):
"""
Set asynchronous configuration message
The controller sends this message to set the asynchronous messages that
    it wants to receive on a given OpenFlow channel.
================== ====================================================
Attribute Description
================== ====================================================
properties List of ``OFPAsyncConfigProp`` subclass instances
================== ====================================================
Example::
def send_set_async(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
properties = [
ofp_parser.OFPAsyncConfigPropReasons(
ofp.OFPACPT_PACKET_IN_SLAVE, 8,
(1 << ofp.OFPR_APPLY_ACTION
| 1 << ofp.OFPR_INVALID_TTL))]
req = ofp_parser.OFPSetAsync(datapath, properties)
datapath.send_msg(req)
"""
def __init__(self, datapath, properties=None):
super(OFPSetAsync, self).__init__(datapath)
self.properties = properties
def _serialize_body(self):
bin_props = bytearray()
for p in self.properties:
bin_props += p.serialize()
self.buf += bin_props
@_register_parser
@_set_msg_type(ofproto.OFPT_BUNDLE_CONTROL)
class OFPBundleCtrlMsg(MsgBase):
"""
Bundle control message
The controller uses this message to create, destroy and commit bundles
================ ======================================================
Attribute Description
================ ======================================================
bundle_id Id of the bundle
type One of the following values.
| OFPBCT_OPEN_REQUEST
| OFPBCT_OPEN_REPLY
| OFPBCT_CLOSE_REQUEST
| OFPBCT_CLOSE_REPLY
| OFPBCT_COMMIT_REQUEST
| OFPBCT_COMMIT_REPLY
| OFPBCT_DISCARD_REQUEST
| OFPBCT_DISCARD_REPLY
flags Bitmap of the following flags.
| OFPBF_ATOMIC
| OFPBF_ORDERED
properties List of ``OFPBundleProp`` subclass instance
================ ======================================================
Example::
def send_bundle_control(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPBundleCtrlMsg(datapath, 7,
ofp.OFPBCT_OPEN_REQUEST,
[ofp.OFPBF_ATOMIC], [])
datapath.send_msg(req)
"""
def __init__(self, datapath, bundle_id=None, type_=None, flags=None,
properties=None):
super(OFPBundleCtrlMsg, self).__init__(datapath)
self.bundle_id = bundle_id
self.type = type_
self.flags = flags
self.properties = properties
def _serialize_body(self):
bin_props = bytearray()
for p in self.properties:
bin_props += p.serialize()
msg_pack_into(ofproto.OFP_BUNDLE_CTRL_MSG_PACK_STR,
self.buf, ofproto.OFP_HEADER_SIZE, self.bundle_id,
self.type, self.flags)
self.buf += bin_props
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPBundleCtrlMsg, cls).parser(datapath, version,
msg_type, msg_len,
xid, buf)
(bundle_id, type_, flags) = struct.unpack_from(
ofproto.OFP_BUNDLE_CTRL_MSG_PACK_STR, buf,
ofproto.OFP_HEADER_SIZE)
msg.bundle_id = bundle_id
msg.type = type_
msg.flags = flags
msg.properties = []
rest = msg.buf[ofproto.OFP_BUNDLE_CTRL_MSG_SIZE:]
while rest:
p, rest = OFPBundleProp.parse(rest)
msg.properties.append(p)
return msg
@_set_msg_type(ofproto.OFPT_BUNDLE_ADD_MESSAGE)
class OFPBundleAddMsg(MsgInMsgBase):
"""
    Bundle add message
    The controller uses this message to add a message to a bundle.
================ ======================================================
Attribute Description
================ ======================================================
bundle_id Id of the bundle
flags Bitmap of the following flags.
| OFPBF_ATOMIC
| OFPBF_ORDERED
message ``MsgBase`` subclass instance
properties List of ``OFPBundleProp`` subclass instance
================ ======================================================
Example::
def send_bundle_add_message(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
            msg = ofp_parser.OFPRoleRequest(datapath, ofp.OFPCR_ROLE_EQUAL,
                                            ofp.OFPCID_UNDEFINED, 0)
req = ofp_parser.OFPBundleAddMsg(datapath, 7, [ofp.OFPBF_ATOMIC],
msg, [])
datapath.send_msg(req)
"""
def __init__(self, datapath, bundle_id, flags, message, properties):
super(OFPBundleAddMsg, self).__init__(datapath)
self.bundle_id = bundle_id
self.flags = flags
self.message = message
self.properties = properties
def _serialize_body(self):
# The xid of the inner message must be the same as
# that of the outer message (OF1.4.0 7.3.9.2)
if self.message.xid != self.xid:
self.message.set_xid(self.xid)
# Message
self.message.serialize()
tail_buf = self.message.buf
# Pad
if len(self.properties) > 0:
message_len = len(tail_buf)
pad_len = utils.round_up(message_len, 8) - message_len
msg_pack_into("%dx" % pad_len, tail_buf, message_len)
# Properties
for p in self.properties:
tail_buf += p.serialize()
# Head
msg_pack_into(ofproto.OFP_BUNDLE_ADD_MSG_0_PACK_STR,
self.buf, ofproto.OFP_HEADER_SIZE, self.bundle_id,
self.flags)
# Finish
self.buf += tail_buf
nx_actions.generate(
'ryu.ofproto.ofproto_v1_5',
'ryu.ofproto.ofproto_v1_5_parser'
)
|
pratikmallya/pyrax
|
refs/heads/master
|
tests/unit/test_manager.py
|
12
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import unittest
from mock import MagicMock as Mock
import pyrax.exceptions as exc
from pyrax import manager
import pyrax.utils as utils
from pyrax import fakes
fake_url = "http://example.com"
class ManagerTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(ManagerTest, self).__init__(*args, **kwargs)
def setUp(self):
self.fake_api = fakes.FakeClient()
self.manager = manager.BaseManager(self.fake_api)
def tearDown(self):
self.manager = None
self.fake_api = None
def test_list(self):
mgr = self.manager
limit = utils.random_unicode()
marker = utils.random_unicode()
return_raw = utils.random_unicode()
other_keys = utils.random_unicode()
mgr._list = Mock()
mgr.uri_base = "test"
mgr.list(limit=limit, marker=marker, return_raw=return_raw,
other_keys=other_keys)
exp_uri = "/test?limit=%s&marker=%s" % (limit, marker)
mgr._list.assert_called_once_with(exp_uri, return_raw=return_raw,
other_keys=other_keys)
def test_under_list_return_raw(self):
mgr = self.manager
uri = utils.random_unicode()
resp = utils.random_unicode()
resp_body = utils.random_unicode()
mgr.api.method_get = Mock(return_value=(resp, resp_body))
ret = mgr._list(uri, return_raw=True)
mgr.api.method_get.assert_called_once_with(uri)
self.assertEqual(ret, (resp, resp_body))
def test_list_paged(self):
mgr = self.manager
mgr._list = Mock()
mgr.uri_base = "test"
limit = utils.random_unicode()
marker = utils.random_unicode()
return_raw = utils.random_unicode()
other_keys = utils.random_unicode()
mgr.list(limit=limit, marker=marker, return_raw=return_raw,
other_keys=other_keys)
expected_uri = "/test?limit=%s&marker=%s" % (limit, marker)
mgr._list.assert_called_once_with(expected_uri, return_raw=return_raw,
other_keys=other_keys)
def test_head(self):
mgr = self.manager
mgr._head = Mock()
mgr.uri_base = "test"
x = fakes.FakeException()
x.id = "fakeid"
mgr.head(x)
expected = "/%s/%s" % ("test", x.id)
mgr._head.assert_called_once_with(expected)
def test_under_head(self):
mgr = self.manager
uri = utils.random_unicode()
resp = utils.random_unicode()
resp_body = utils.random_unicode()
mgr.api.method_head = Mock(return_value=(resp, resp_body))
ret = mgr._head(uri)
mgr.api.method_head.assert_called_once_with(uri)
self.assertEqual(ret, resp)
def test_get(self):
mgr = self.manager
mgr._get = Mock()
mgr.uri_base = "test"
x = fakes.FakeException()
x.id = "fakeid"
mgr.get(x)
expected = "/%s/%s" % ("test", x.id)
mgr._get.assert_called_once_with(expected)
def test_api_get(self):
mgr = self.manager
mgr.resource_class = fakes.FakeEntity
mgr.response_key = "fake"
mgr.api.method_get = Mock(return_value=(None, {"fake": ""}))
resp = mgr._get(fake_url)
        self.assertTrue(isinstance(resp, fakes.FakeEntity))
def test_create(self):
mgr = self.manager
mgr._create = Mock()
mgr.uri_base = "test"
mgr._create_body = Mock(return_value="body")
nm = utils.random_unicode()
mgr.create(nm)
mgr._create.assert_called_once_with("/test", "body", return_none=False,
return_raw=False, return_response=False)
def test_delete(self):
mgr = self.manager
mgr._delete = Mock()
mgr.uri_base = "test"
x = fakes.FakeException()
x.id = "fakeid"
mgr.delete(x)
expected = "/%s/%s" % ("test", x.id)
mgr._delete.assert_called_once_with(expected)
def test_under_list_post(self):
mgr = self.manager
resp = fakes.FakeResponse()
body = {"fakes": {"foo": "bar"}}
mgr.api.method_post = Mock(return_value=(resp, body))
mgr.plural_response_key = "fakes"
mgr.resource_class = fakes.FakeEntity
ret = mgr._list(fake_url, body="test")
mgr.api.method_post.assert_called_once_with(fake_url, body="test")
self.assertTrue(isinstance(ret, list))
self.assertEqual(len(ret), 1)
self.assertTrue(isinstance(ret[0], fakes.FakeEntity))
def test_under_list_get(self):
mgr = self.manager
resp = object()
body = {"fakes": {"foo": "bar"}}
mgr.api.method_get = Mock(return_value=(resp, body))
mgr.plural_response_key = "fakes"
mgr.resource_class = fakes.FakeEntity
ret = mgr._list(fake_url)
mgr.api.method_get.assert_called_once_with(fake_url)
self.assertTrue(isinstance(ret, list))
self.assertEqual(len(ret), 1)
self.assertTrue(isinstance(ret[0], fakes.FakeEntity))
def test_under_create_return_none(self):
mgr = self.manager
mgr.run_hooks = Mock()
mgr.api.method_post = Mock()
resp = fakes.FakeResponse()
body = None
mgr.api.method_post = Mock(return_value=(resp, body))
ret = mgr._create(fake_url, "", return_none=True, return_raw=False)
self.assertIsNone(ret)
mgr.api.method_post.assert_called_once_with(fake_url, body="")
def test_under_create_return_raw(self):
mgr = self.manager
mgr.run_hooks = Mock()
mgr.api.method_post = Mock()
resp = object()
body = {"fakes": {"foo": "bar"}}
mgr.api.method_post = Mock(return_value=(resp, body))
mgr.response_key = "fakes"
ret = mgr._create(fake_url, "", return_none=False, return_raw=True)
self.assertEqual(ret, body["fakes"])
mgr.api.method_post.assert_called_once_with(fake_url, body="")
def test_under_create_return_resource(self):
mgr = self.manager
mgr.run_hooks = Mock()
mgr.api.method_post = Mock()
resp = fakes.FakeResponse()
body = {"fakes": {"foo": "bar"}}
mgr.api.method_post = Mock(return_value=(resp, body))
mgr.resource_class = fakes.FakeEntity
mgr.response_key = "fakes"
ret = mgr._create(fake_url, "", return_none=False, return_raw=False)
self.assertTrue(isinstance(ret, fakes.FakeEntity))
mgr.api.method_post.assert_called_once_with(fake_url, body="")
def test_under_delete(self):
mgr = self.manager
mgr.api.method_delete = Mock(return_value=("resp", "body"))
mgr._delete(fake_url)
mgr.api.method_delete.assert_called_once_with(fake_url)
def test_under_update(self):
mgr = self.manager
mgr.run_hooks = Mock()
mgr.api.method_put = Mock()
resp = fakes.FakeResponse()
body = {"fakes": {"foo": "bar"}}
mgr.api.method_put = Mock(return_value=(resp, body))
mgr.resource_class = fakes.FakeEntity
mgr.response_key = "fakes"
ret = mgr._update(fake_url, "")
mgr.api.method_put.assert_called_once_with(fake_url, body="")
self.assertEqual(ret, body)
def test_action(self):
mgr = self.manager
mgr.uri_base = "testing"
mgr.api.method_post = Mock()
item = fakes.FakeEntity()
mgr.action(item, "fake")
mgr.api.method_post.assert_called_once_with("/testing/%s/action" %
item.id, body={"fake": {}})
def test_find_no_match(self):
mgr = self.manager
mgr.findall = Mock(return_value=[])
mgr.resource_class = fakes.FakeEntity
self.assertRaises(exc.NotFound, mgr.find)
def test_find_mult_match(self):
mgr = self.manager
mtch = fakes.FakeEntity()
mgr.resource_class = fakes.FakeEntity
mgr.findall = Mock(return_value=[mtch, mtch])
self.assertRaises(exc.NoUniqueMatch, mgr.find)
def test_find_single_match(self):
mgr = self.manager
mtch = fakes.FakeEntity()
mgr.resource_class = fakes.FakeEntity
mgr.findall = Mock(return_value=[mtch])
ret = mgr.find()
self.assertEqual(ret, mtch)
def test_findall(self):
mgr = self.manager
o1 = fakes.FakeEntity()
o1.some_att = "ok"
o2 = fakes.FakeEntity()
o2.some_att = "bad"
o3 = fakes.FakeEntity()
o3.some_att = "ok"
mgr.list = Mock(return_value=[o1, o2, o3])
ret = mgr.findall(some_att="ok")
self.assertTrue(o1 in ret)
self.assertFalse(o2 in ret)
self.assertTrue(o3 in ret)
def test_findall_bad_att(self):
mgr = self.manager
o1 = fakes.FakeEntity()
o1.some_att = "ok"
o2 = fakes.FakeEntity()
o2.some_att = "bad"
o3 = fakes.FakeEntity()
o3.some_att = "ok"
mgr.list = Mock(return_value=[o1, o2, o3])
ret = mgr.findall(some_att="ok", bad_att="oops")
self.assertFalse(o1 in ret)
self.assertFalse(o2 in ret)
self.assertFalse(o3 in ret)
def test_add_hook(self):
mgr = self.manager
tfunc = Mock()
mgr.add_hook("test", tfunc)
self.assertTrue("test" in mgr._hooks_map)
self.assertTrue(tfunc in mgr._hooks_map["test"])
def test_run_hooks(self):
mgr = self.manager
tfunc = Mock()
mgr.add_hook("test", tfunc)
mgr.run_hooks("test", "dummy_arg")
tfunc.assert_called_once_with("dummy_arg")
if __name__ == "__main__":
unittest.main()
|
brijeshkesariya/odoo
|
refs/heads/8.0
|
addons/point_of_sale/wizard/pos_discount.py
|
382
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class pos_discount(osv.osv_memory):
_name = 'pos.discount'
_description = 'Add a Global Discount'
_columns = {
'discount': fields.float('Discount (%)', required=True, digits=(16,2)),
}
_defaults = {
'discount': 5,
}
def apply_discount(self, cr, uid, ids, context=None):
"""
Apply the wizard's discount percentage to all lines of the selected POS order(s).
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return : nothing
"""
order_ref = self.pool.get('pos.order')
order_line_ref = self.pool.get('pos.order.line')
if context is None:
context = {}
this = self.browse(cr, uid, ids[0], context=context)
record_id = context and context.get('active_id', False)
if isinstance(record_id, (int, long)):
record_id = [record_id]
for order in order_ref.browse(cr, uid, record_id, context=context):
order_line_ref.write(cr, uid, [x.id for x in order.lines], {'discount':this.discount}, context=context)
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
bgris/ODL_bgris
|
refs/heads/master
|
lib/python3.5/site-packages/mpl_toolkits/mplot3d/__init__.py
|
21
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from .axes3d import Axes3D
|
hfegetude/PresentacionEquiposElectronicos
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/simple_copy.py
|
1869
|
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies such large structures that even small copy overhead ends up
taking seconds in a project the size of Chromium."""
class Error(Exception):
pass
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
"""Deep copy operation on gyp objects such as strings, ints, dicts
and lists. More than twice as fast as copy.deepcopy but much less
generic."""
try:
return _deepcopy_dispatch[type(x)](x)
except KeyError:
raise Error(('Unsupported type %s for deepcopy. Use copy.deepcopy '
'or expand simple_copy support.') % type(x))
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x):
return x
for x in (type(None), int, long, float,
bool, str, unicode, type):
d[x] = _deepcopy_atomic
def _deepcopy_list(x):
return [deepcopy(a) for a in x]
d[list] = _deepcopy_list
def _deepcopy_dict(x):
y = {}
for key, value in x.iteritems():
y[deepcopy(key)] = deepcopy(value)
return y
d[dict] = _deepcopy_dict
del d
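# Hypothetical usage sketch (not in the original gyp source): deepcopy() only
# understands the atomic types registered above plus lists and dicts.
#   nested = {'defines': ['FOO=1'], 'sources': ['a.cc', 'b.cc']}
#   copied = deepcopy(nested)
#   copied['sources'].append('c.cc')  # nested['sources'] is left untouched
#   deepcopy(set())                   # raises Error: unsupported type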
|
mkheirkhah/mptcp
|
refs/heads/development
|
src/visualizer/visualizer/hud.py
|
189
|
import goocanvas
import core
import math
import pango
import gtk
class Axes(object):
def __init__(self, viz):
self.viz = viz
self.color = 0x8080C0FF
self.hlines = goocanvas.Path(parent=viz.canvas.get_root_item(), stroke_color_rgba=self.color)
self.hlines.lower(None)
self.vlines = goocanvas.Path(parent=viz.canvas.get_root_item(), stroke_color_rgba=self.color)
self.vlines.lower(None)
self.labels = []
hadj = self.viz.get_hadjustment()
vadj = self.viz.get_vadjustment()
def update(adj):
if self.visible:
self.update_view()
hadj.connect("value-changed", update)
vadj.connect("value-changed", update)
hadj.connect("changed", update)
vadj.connect("changed", update)
self.visible = True
self.update_view()
def set_visible(self, visible):
self.visible = visible
if self.visible:
self.hlines.props.visibility = goocanvas.ITEM_VISIBLE
self.vlines.props.visibility = goocanvas.ITEM_VISIBLE
else:
self.hlines.props.visibility = goocanvas.ITEM_HIDDEN
self.vlines.props.visibility = goocanvas.ITEM_HIDDEN
for label in self.labels:
label.props.visibility = goocanvas.ITEM_HIDDEN
def _compute_divisions(self, xi, xf):
assert xf > xi
dx = xf - xi
size = dx
ndiv = 5
text_width = dx/ndiv/2
def rint(x):
return math.floor(x+0.5)
dx_over_ndiv = dx / ndiv
for n in range(5): # iterate 5 times to find the optimum division size
# div: length of each division
tbe = math.log10(dx_over_ndiv) # looking for approx. 'ndiv' divisions in a length 'dx'
div = pow(10, rint(tbe)) # div: power of 10 closest to dx/ndiv
if math.fabs(div/2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv): # test if div/2 is closer to dx/ndiv
div /= 2
elif math.fabs(div*2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):
div *= 2 # test if div*2 is closer to dx/ndiv
x0 = div*math.ceil(xi / div) - div
if n > 1:
ndiv = rint(size / text_width)
return x0, div
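# Illustrative trace (added for clarity, not part of the original source):
# for a viewport spanning xi=0.0 to xf=7.0, dx/ndiv is 1.4, the closest
# power of ten is div=1.0, and neither div/2 nor div*2 is closer to 1.4,
# so the division size stays 1.0 and x0 = 1.0*ceil(0/1.0) - 1.0 = -1.0,
# i.e. the method returns approximately (-1.0, 1.0).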
def update_view(self):
if self.viz.zoom is None:
return
unused_labels = self.labels
self.labels = []
for label in unused_labels:
label.set_property("visibility", goocanvas.ITEM_HIDDEN)
def get_label():
try:
label = unused_labels.pop(0)
except IndexError:
label = goocanvas.Text(parent=self.viz.canvas.get_root_item(), stroke_color_rgba=self.color)
else:
label.set_property("visibility", goocanvas.ITEM_VISIBLE)
label.lower(None)
self.labels.append(label)
return label
hadj = self.viz.get_hadjustment()
vadj = self.viz.get_vadjustment()
zoom = self.viz.zoom.value
offset = 10/zoom
x1, y1 = self.viz.canvas.convert_from_pixels(hadj.value, vadj.value)
x2, y2 = self.viz.canvas.convert_from_pixels(hadj.value + hadj.page_size, vadj.value + vadj.page_size)
line_width = 5.0/self.viz.zoom.value
# draw the horizontal axis
self.hlines.set_property("line-width", line_width)
yc = y2 - line_width/2
sim_x1 = x1/core.PIXELS_PER_METER
sim_x2 = x2/core.PIXELS_PER_METER
x0, xdiv = self._compute_divisions(sim_x1, sim_x2)
path = ["M %r %r L %r %r" % (x1, yc, x2, yc)]
x = x0
while x < sim_x2:
path.append("M %r %r L %r %r" % (core.PIXELS_PER_METER*x, yc - offset, core.PIXELS_PER_METER*x, yc))
label = get_label()
label.set_properties(font=("Sans Serif %f" % int(12/zoom)),
text=("%G" % x),
fill_color_rgba=self.color,
alignment=pango.ALIGN_CENTER,
anchor=gtk.ANCHOR_S,
x=core.PIXELS_PER_METER*x,
y=(yc - offset))
x += xdiv
del x
self.hlines.set_property("data", " ".join(path))
# draw the vertical axis
self.vlines.set_property("line-width", line_width)
xc = x1 + line_width/2
sim_y1 = y1/core.PIXELS_PER_METER
sim_y2 = y2/core.PIXELS_PER_METER
y0, ydiv = self._compute_divisions(sim_y1, sim_y2)
path = ["M %r %r L %r %r" % (xc, y1, xc, y2)]
y = y0
while y < sim_y2:
path.append("M %r %r L %r %r" % (xc, core.PIXELS_PER_METER*y, xc + offset, core.PIXELS_PER_METER*y))
label = get_label()
label.set_properties(font=("Sans Serif %f" % int(12/zoom)),
text=("%G" % y),
fill_color_rgba=self.color,
alignment=pango.ALIGN_LEFT,
anchor=gtk.ANCHOR_W,
x=xc + offset,
y=core.PIXELS_PER_METER*y)
y += ydiv
self.vlines.set_property("data", " ".join(path))
self.labels.extend(unused_labels)
|
zero-rp/miniblink49
|
refs/heads/master
|
third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/__init__.py
|
6014
|
# Required for Python to search this directory for module files
|
faust64/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/avi/avi_pkiprofile.py
|
8
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_pkiprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of PKIProfile Avi RESTful Object
description:
- This module is used to configure PKIProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
ca_certs:
description:
- List of trusted certificate authorities (root and intermediate) used for certificate validation.
created_by:
description:
- Creator name.
crl_check:
description:
- When enabled, avi will verify via crl checks that certificates in the trust chain have not been revoked.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
crls:
description:
- Certificate revocation lists.
ignore_peer_chain:
description:
- When enabled, avi will not trust intermediate and root certs presented by a client.
- Instead, only the chain certs configured in the certificate authority section will be used to verify trust of the client's cert.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
name:
description:
- Name of the pki profile.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
validate_only_leaf_crl:
description:
- When enabled, avi will only validate the revocation status of the leaf certificate using crl.
- To enable validation for the entire chain, disable this option and provide all the relevant crls.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create PKIProfile object
avi_pkiprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_pkiprofile
"""
RETURN = '''
obj:
description: PKIProfile (api/pkiprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
ca_certs=dict(type='list',),
created_by=dict(type='str',),
crl_check=dict(type='bool',),
crls=dict(type='list',),
ignore_peer_chain=dict(type='bool',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
validate_only_leaf_crl=dict(type='bool',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'pkiprofile',
set([]))
if __name__ == '__main__':
main()
|
thiagopnts/servo
|
refs/heads/master
|
components/script/dom/bindings/codegen/pythonpath.py
|
131
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Run a python script, adding extra directories to the python path.
"""
def main(args):
def usage():
print >>sys.stderr, "pythonpath.py -I directory script.py [args...]"
sys.exit(150)
paths = []
while True:
try:
arg = args[0]
except IndexError:
usage()
if arg == '-I':
args.pop(0)
try:
path = args.pop(0)
except IndexError:
usage()
paths.append(os.path.abspath(path))
continue
if arg.startswith('-I'):
paths.append(os.path.abspath(args.pop(0)[2:]))
continue
if arg.startswith('-D'):
os.chdir(args.pop(0)[2:])
continue
break
script = args[0]
sys.path[0:0] = [os.path.abspath(os.path.dirname(script))] + paths
sys.argv = args
sys.argc = len(args)
frozenglobals['__name__'] = '__main__'
frozenglobals['__file__'] = script
execfile(script, frozenglobals)
# Freeze scope here ... why this makes things work I have no idea ...
frozenglobals = globals()
import sys
import os
if __name__ == '__main__':
main(sys.argv[1:])
|
tcchenbtx/project-zeta-J
|
refs/heads/master
|
code/tsa_s4.py
|
3
|
from __future__ import print_function, division
import numpy as np
import numpy.linalg as npl
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib import gridspec
import os
import re
import json
import nibabel as nib
from utils import subject_class as sc
from utils import outlier
from utils import diagnostics as diagnos
from utils import get_object_neural as neural
from utils import stimuli
from utils import convolution as convol
from utils import linear_model as lm
from utils import maskfunc as msk
import copy
import statsmodels.api as sm
from operator import add
# important path:
base_path = os.path.abspath(os.path.dirname(__file__))
base_path = os.path.join(base_path, "..")
figure_path = os.path.join(base_path, "code", "images", "")
file_path = os.path.join(base_path, "code", "txt", "")
# help to make directory to save figure and txt
if not os.path.exists(figure_path):
os.makedirs(figure_path)
if not os.path.exists(file_path):
os.makedirs(file_path)
# separator:
separator = "-" * 80
# which subject to work on?
subid = "sub004"
# work on results from this subject:
########################################
print (separator)
print ("Project-Zeta: use times series to study ds105 dataset")
print (separator)
print ("Focus on %s for the analysis" % subid)
print (separator)
sub = sc.subject(subid)
# get image files of this subject:
sub_img = sub.run_img_result
# get data for those figures
print ("Get data from images...")
sub_data = {}
for key, img in sub_img.iteritems():
sub_data[key] = img.get_data()
print ("Complete!")
print (separator)
brain1 = sub_data["sub004_run001"]
x1 = copy.deepcopy(sub_data)
maskedDict, volDict = msk.generateMaskedBrain(x1)
s4r1Masked = maskedDict["sub004_run001"]
s4r1Vol = volDict["sub004_run001"]
brain2 = sub_data["sub004_run002"]
s4r2Masked = maskedDict["sub004_run002"]
s4r2Vol = volDict["sub004_run002"]
brain3 = sub_data["sub004_run003"]
s4r3Masked = maskedDict["sub004_run003"]
s4r3Vol = volDict["sub004_run003"]
brain4 = sub_data["sub004_run004"]
s4r4Masked = maskedDict["sub004_run004"]
s4r4Vol = volDict["sub004_run004"]
brain5 = sub_data["sub004_run005"]
s4r5Masked = maskedDict["sub004_run005"]
s4r5Vol = volDict["sub004_run005"]
brain6 = sub_data["sub004_run006"]
s4r6Masked = maskedDict["sub004_run006"]
s4r6Vol = volDict["sub004_run006"]
brain7 = sub_data["sub004_run007"]
s4r7Masked = maskedDict["sub004_run007"]
s4r7Vol = volDict["sub004_run007"]
brain8 = sub_data["sub004_run008"]
s4r8Masked = maskedDict["sub004_run008"]
s4r8Vol = volDict["sub004_run008"]
brain9 = sub_data["sub004_run009"]
s4r9Masked = maskedDict["sub004_run009"]
s4r9Vol = volDict["sub004_run009"]
brain10 = sub_data["sub004_run010"]
s4r10Masked = maskedDict["sub004_run010"]
s4r10Vol = volDict["sub004_run010"]
brain11 = sub_data["sub004_run011"]
s4r11Masked = maskedDict["sub004_run011"]
s4r11Vol = volDict["sub004_run011"]
brain12 = sub_data["sub004_run012"]
s4r12Masked = maskedDict["sub004_run012"]
s4r12Vol = volDict["sub004_run012"]
# Focus on z = 37:42, y = 25 to 50, all of x
# brain = brain[:, 25:50, 32, :]
# s1r1Masked = s1r1Masked[:, 25:50, 32]
# brain = brain[s1r1Masked, :]
brains4r1 = brain1[:, 25:50, 37:42, :]
s4r1Masked = s4r1Masked[:, 25:50, 37:42]
brains4r1 = brains4r1[s4r1Masked, :]
brains4r2 = brain2[:, 25:50, 37:42, :]
s4r2Masked = s4r2Masked[:, 25:50, 37:42]
brains4r2 = brains4r2[s4r2Masked, :]
brains4r3 = brain3[:, 25:50, 37:42, :]
s4r3Masked = s4r3Masked[:, 25:50, 37:42]
brains4r3 = brains4r3[s4r3Masked, :]
brains4r4 = brain4[:, 25:50, 37:42, :]
s4r4Masked = s4r4Masked[:, 25:50, 37:42]
brains4r4 = brains4r4[s4r4Masked, :]
brains4r5 = brain5[:, 25:50, 37:42, :]
s4r5Masked = s4r5Masked[:, 25:50, 37:42]
brains4r5 = brains4r5[s4r5Masked, :]
brains4r6 = brain6[:, 25:50, 37:42, :]
s4r6Masked = s4r6Masked[:, 25:50, 37:42]
brains4r6 = brains4r6[s4r6Masked, :]
brains4r7 = brain7[:, 25:50, 37:42, :]
s4r7Masked = s4r7Masked[:, 25:50, 37:42]
brains4r7 = brains4r7[s4r7Masked, :]
brains4r8 = brain8[:, 25:50, 37:42, :]
s4r8Masked = s4r8Masked[:, 25:50, 37:42]
brains4r8 = brains4r8[s4r8Masked, :]
brains4r9 = brain9[:, 25:50, 37:42, :]
s4r9Masked = s4r9Masked[:, 25:50, 37:42]
brains4r9 = brains4r9[s4r9Masked, :]
brains4r10 = brain10[:, 25:50, 37:42, :]
s4r10Masked = s4r10Masked[:, 25:50, 37:42]
brains4r10 = brains4r10[s4r10Masked, :]
brains4r11 = brain11[:, 25:50, 37:42, :]
s4r11Masked = s4r11Masked[:, 25:50, 37:42]
brains4r11 = brains4r11[s4r11Masked, :]
brains4r12 = brain12[:, 25:50, 37:42, :]
s4r12Masked = s4r12Masked[:, 25:50, 37:42]
brains4r12 = brains4r12[s4r12Masked, :]
arr1 = [0.0] * 121
for i in range(121):
arr1[i] = np.mean(brains4r1[:, i])
r1house = arr1[78:88]
r1scram = arr1[64:74]
r1cat = arr1[93:103]
r1shoe = arr1[50:60]
r1bottle = arr1[107:117]
r1scissor = arr1[35:45]
r1chair = arr1[21:31]
r1face = arr1[6:16]
arr2 = [0.0] * 121
for i in range(121):
arr2[i] = np.mean(brains4r2[:, i])
r2house = arr2[107:117]
r2scram = arr2[21:31]
r2cat = arr2[6:16]
r2shoe = arr2[78:88]
r2bottle = arr2[64:74]
r2scissor = arr2[35:45]
r2chair = arr2[50:60]
r2face = arr2[93:103]
arr3 = [0.0] * 121
for i in range(121):
arr3[i] = np.mean(brains4r3[:, i])
r3house = arr3[107:117]
r3scram = arr3[21:31]
r3cat = arr3[78:88]
r3shoe = arr3[50:60]
r3bottle = arr3[64:74]
r3scissor = arr3[35:45]
r3chair = arr3[93:103]
r3face = arr3[6:16]
arr4 = [0.0] * 121
for i in range(121):
arr4[i] = np.mean(brains4r4[:, i])
r4house = arr4[50:60]
r4scram = arr4[35:45]
r4cat = arr4[21:31]
r4shoe = arr4[93:103]
r4bottle = arr4[107:117]
r4scissor = arr4[64:74]
r4chair = arr4[78:88]
r4face = arr4[6:16]
arr5 = [0.0] * 121
for i in range(121):
arr5[i] = np.mean(brains4r5[:, i])
r5house = arr5[78:88]
r5scram = arr5[21:31]
r5cat = arr5[6:16]
r5shoe = arr5[64:74]
r5bottle = arr5[50:60]
r5scissor = arr5[107:117]
r5chair = arr5[35:45]
r5face = arr5[93:103]
arr6 = [0.0] * 121
for i in range(121):
arr6[i] = np.mean(brains4r6[:, i])
r6house = arr6[93:103]
r6scram = arr6[107:117]
r6cat = arr6[21:31]
r6shoe = arr6[35:45]
r6bottle = arr6[78:88]
r6scissor = arr6[64:74]
r6chair = arr6[50:60]
r6face = arr6[6:16]
arr7 = [0.0] * 121
for i in range(121):
arr7[i] = np.mean(brains4r7[:, i])
r7house = arr7[6:16]
r7scram = arr7[21:31]
r7cat = arr7[78:88]
r7shoe = arr7[50:60]
r7bottle = arr7[93:103]
r7scissor = arr7[107:117]
r7chair = arr7[64:74]
r7face = arr7[35:45]
arr8 = [0.0] * 121
for i in range(121):
arr8[i] = np.mean(brains4r8[:, i])
r8house = arr8[21:31]
r8scram = arr8[78:88]
r8cat = arr8[50:60]
r8shoe = arr8[6:16]
r8bottle = arr8[93:103]
r8scissor = arr8[107:117]
r8chair = arr8[35:45]
r8face = arr8[64:74]
arr9 = [0.0] * 121
for i in range(121):
arr9[i] = np.mean(brains4r9[:, i])
r9house = arr9[64:74]
r9scram = arr9[78:88]
r9cat = arr9[35:45]
r9shoe = arr9[50:60]
r9bottle = arr9[93:103]
r9scissor = arr9[6:16]
r9chair = arr9[107:117]
r9face = arr9[21:31]
arr10 = [0.0] * 121
for i in range(121):
arr10[i] = np.mean(brains4r10[:, i])
r10house = arr10[6:16]
r10scram = arr10[107:117]
r10cat = arr10[93:103]
r10shoe = arr10[78:88]
r10bottle = arr10[35:45]
r10scissor = arr10[21:31]
r10chair = arr10[64:74]
r10face = arr10[50:60]
arr11 = [0.0] * 121
for i in range(121):
arr11[i] = np.mean(brains4r11[:, i])
r11house = arr11[21:31]
r11scram = arr11[50:60]
r11cat = arr11[93:103]
r11shoe = arr11[78:88]
r11bottle = arr11[6:16]
r11scissor = arr11[107:117]
r11chair = arr11[35:45]
r11face = arr11[64:74]
arr12 = [0.0] * 121
for i in range(121):
arr12[i] = np.mean(brains4r12[:, i])
r12house = arr12[50:60]
r12scram = arr12[64:74]
r12cat = arr12[35:45]
r12shoe = arr12[78:88]
r12bottle = arr12[93:103]
r12scissor = arr12[107:117]
r12chair = arr12[21:31]
r12face = arr12[6:16]
evenHouse = (np.array(r2house) + np.array(r4house) +
np.array(r6house) + np.array(r8house) +
np.array(r10house) + np.array(r12house)) / 6
oddHouse = (np.array(r1house) + np.array(r3house) +
np.array(r5house) + np.array(r7house) +
np.array(r9house) + np.array(r11house)) / 6
evenScram = (np.array(r2scram) + np.array(r4scram) +
np.array(r6scram) + np.array(r8scram) +
np.array(r10scram) + np.array(r12scram)) / 6
oddScram = (np.array(r1scram) + np.array(r3scram) +
np.array(r5scram) + np.array(r7scram) +
np.array(r9scram) + np.array(r11scram)) / 6
evenCat = (np.array(r2cat) + np.array(r4cat) +
np.array(r6cat) + np.array(r8cat) +
np.array(r10cat) + np.array(r12cat)) / 6
oddCat = (np.array(r1cat) + np.array(r3cat) +
np.array(r5cat) + np.array(r7cat) +
np.array(r9cat) + np.array(r11cat)) / 6
evenShoe = (np.array(r2shoe) + np.array(r4shoe) +
np.array(r6shoe) + np.array(r8shoe) +
np.array(r10shoe) + np.array(r12shoe)) / 6
oddShoe = (np.array(r1shoe) + np.array(r3shoe) +
np.array(r5shoe) + np.array(r7shoe) +
np.array(r9shoe) + np.array(r11shoe)) / 6
evenBottle = (np.array(r2bottle) + np.array(r4bottle) +
np.array(r6bottle) + np.array(r8bottle) +
np.array(r10bottle) + np.array(r12bottle)) / 6
oddBottle = (np.array(r1bottle) + np.array(r3bottle) +
np.array(r5bottle) + np.array(r7bottle) +
np.array(r9bottle) + np.array(r11bottle)) / 6
evenScissor = (np.array(r2scissor) + np.array(r4scissor) +
np.array(r6scissor) + np.array(r8scissor) +
np.array(r10scissor) + np.array(r12scissor)) / 6
oddScissor = (np.array(r1scissor) + np.array(r3scissor) +
np.array(r5scissor) + np.array(r7scissor) +
np.array(r9scissor) + np.array(r11scissor)) / 6
evenChair = (np.array(r2chair) + np.array(r4chair) +
np.array(r6chair) + np.array(r8chair) +
np.array(r10chair) + np.array(r12chair)) / 6
oddChair = (np.array(r1chair) + np.array(r3chair) +
np.array(r5chair) + np.array(r7chair) +
np.array(r9chair) + np.array(r11chair)) / 6
evenFace = (np.array(r2face) + np.array(r4face) +
np.array(r6face) + np.array(r8face) +
np.array(r10face) + np.array(r12face)) / 6
oddFace = (np.array(r1face) + np.array(r3face) +
np.array(r5face) + np.array(r7face) +
np.array(r9face) + np.array(r11face)) / 6
evenRun = [evenBottle, evenCat, evenChair, evenFace,
evenHouse, evenScissor, evenScram, evenShoe]
oddRun = [oddBottle, oddCat, oddChair, oddFace,
oddHouse, oddScissor, oddScram, oddShoe]
all_results = [0.0] * 64
all_results = np.reshape(all_results, (8, 8))
for i in range(8):
for j in range(8):
all_results[i, j] = np.corrcoef(evenRun[i], oddRun[j])[0, 1]
object_list = ["bottle", "cat", "chair", "face",
"house", "scissor", "scram", "shoe"]
fig = plt.figure(figsize=(8, 4))
plt.subplot(111, frameon=False, xticks=[], yticks=[])
table = plt.table(cellText=all_results.round(4), colLabels=object_list,
rowLabels=object_list, loc='center', cellLoc='center')
plt.subplots_adjust(left=0.3, bottom=0, top=0.95)
fig.text(0.55, 0.75, 'Odd runs', ha='left', fontsize=12)
fig.text(0.05, 0.52, 'Even runs', ha='left', rotation=90, fontsize=12)
fig.text(0.3, 0.85, "Correlation of TSA brain images of %s" % subid,
weight='bold')
table.scale(1.2, 1.2)
plt.savefig(figure_path + "subtracted_correlation_table_%s.png" % subid)
plt.close()
|
vitiral/micropython
|
refs/heads/master
|
tests/basics/builtin_override.py
|
70
|
# test overriding builtins
import builtins
# override generic builtin
builtins.abs = lambda x: x + 1
print(abs(1))
# __build_class__ is handled in a special way
builtins.__build_class__ = lambda x, y: ('class', y)
class A:
pass
print(A)
|
bmaluenda/SWITCH-Pyomo-Chile
|
refs/heads/Chile
|
switch_mod/project/unitcommit/fuel_use.py
|
1
|
# Copyright 2015 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
"""
This module describes fuel use with considerations of unit commitment
and incremental heat rates using piecewise linear expressions. If you
want to use this module directly in a list of switch modules (instead of
including the package project.unitcommit), you will also need to include
the module project.unitcommit.commit
If you haven't worked with incremental heat rates before, you may want
to start by reading a background document on incremental heat rates such
as: http://www.energy.ca.gov/papers/98-04-07_HEATRATE.PDF
Incremental heat rates are a way of approximating an "input-output
curve" (heat input vs electricity output) with a series of line
segments. These curves are typically drawn with electricity output on
the x-axis (Power, MW) and fuel use rates on the y-axis (MMBTU/h). These
curves are drawn from the minimum to maximum power output levels for a
given generator, and most generators cannot run at 0 output. The slope
of each line segment is the incremental heat rate at that point in units
of MMBTU/MWh.
Data for incremental heat rates is typically formatted in a heterogeneous
manner. The first data point is the first point on the curve - the
minimum loading level (MW) and its corresponding fuel use rate
(MMBTU/h). Subsequent data points provide subsequent loading levels in MW
and slopes, or incremental heat rates in MMBTU/MWh. This format was
designed to make certain economic calculations easy, not to draw input-
output curves, but you can calculate subsequent points on the curve from
this information.
Fuel requirements for most generators can be approximated very well with
simple models of a single line segment, but the gold standard is to use
several line segments that have increasing slopes. In the future, we may
include a simpler model that uses a single line segment, but we are just
implementing the complex piecewise linear form initially to satisfy key
stakeholders.
There are two basic ways to model a piecewise linear relationship like
this in linear programming. The first approach (which we don't use in
this module) is to divide the energy production variable into several
subvariables (one for each line segment), and put an upper bound on each
subvariable so that it can't exceed the width of the segment. The total
energy production is the sum of the sub-variables, and the total fuel
consumption is: Fuel = line0_intercept + E0*incremental_heat_rate0 +
E1*incremental_heat_rate1 + ... As long as each incremental_heat_rate is
larger than the one before it, then the optimization will ensure that E1
remains at 0 until E0 is at its upper limit, which ensures consistent
results. This tiered decision method is used in the fuel_markets module,
but is not used here.
This module uses the second approach which is to make FuelUse into a
decision variable that must be greater than or equal to each of the
lines. As long as fuel has a cost associated with it, a cost-minimizing
optimization will push the fuel use down until it touches one of the line segments.
This method also requires that incremental heat rates increase with
energy production so that the lines collectively form a convex boundary
for fuel use.
SYNOPSIS
>>> from switch_mod.utilities import define_AbstractModel
>>> model = define_AbstractModel(
... 'timescales', 'financials', 'load_zones', 'fuels', 'gen_tech',
... 'project.build', 'project.dispatch', 'project.unitcommit')
>>> instance = model.load_inputs(inputs_dir='test_dat')
"""
import os
from pyomo.environ import *
import csv
from switch_mod.utilities import approx_equal
def define_components(mod):
"""
This function adds components to a Pyomo abstract model object to
describe fuel consumption in the context of unit commitment. Unless
otherwise stated, all power capacity is specified in units of MW and
all sets and parameters are mandatory.
Typically incremental heat rates tables specify "blocks" where each
block includes power output in MW and heat requirements in MMBTU/hr
to move from the prior block to the current block. If you plot these
points and connect the dots, you have a piecewise linear function
that goes from at least minimum loading level to maximum loading
level. Data is read in that format, then processed to describe
the individual line segments.
GEN_FUEL_USE_SEGMENTS[g in GEN_TECH_WITH_FUEL] is a set of line segments
that collectively describe fuel requirements for a given generation
technology. Each element of this set is a tuple of (y-intercept,
slope) where the y-intercept is in units of MMBTU/(hr * MW-capacity)
and slope is incremental heat rate in units of MMBTU / MWh-energy.
We normalize the y-intercept by capacity so that we can scale it to
arbitrary sizes of generation, or stacks of individual generation
units. This code can be used in conjunction with discrete unit sizes
but is not dependent on that. This set is optional.
PROJ_FUEL_USE_SEGMENTS[proj in FUEL_BASED_PROJECTS] is the same as
GEN_FUEL_USE_SEGMENTS but scoped to projects. This set is optional
and will default to GEN_FUEL_USE_SEGMENTS if that is available;
otherwise it will default to an intercept of 0 and a slope of its
full load heat rate.
"""
# Pyomo doesn't allow default for sets, so I need to specify default
# data in the data load function.
mod.GEN_FUEL_USE_SEGMENTS = Set(
mod.GEN_TECH_WITH_FUEL,
dimen=2)
mod.PROJ_FUEL_USE_SEGMENTS = Set(
mod.FUEL_BASED_PROJECTS,
dimen=2)
# Use BuildAction to populate a set's default values.
def PROJ_FUEL_USE_SEGMENTS_default_rule(m, pr):
if pr not in m.PROJ_FUEL_USE_SEGMENTS:
g = m.proj_gen_tech[pr]
if g in m.GEN_FUEL_USE_SEGMENTS:
m.PROJ_FUEL_USE_SEGMENTS[pr] = m.GEN_FUEL_USE_SEGMENTS[g]
else:
heat_rate = m.proj_full_load_heat_rate[pr]
m.PROJ_FUEL_USE_SEGMENTS[pr] = [(0, heat_rate)]
mod.PROJ_FUEL_USE_SEGMENTS_default = BuildAction(
mod.FUEL_BASED_PROJECTS,
rule=PROJ_FUEL_USE_SEGMENTS_default_rule)
mod.PROJ_DISP_FUEL_PIECEWISE_CONS_SET = Set(
dimen=4,
initialize=lambda m: set(
(proj, t, intercept, slope)
for (proj, t) in m.PROJ_WITH_FUEL_DISPATCH_POINTS
for (intercept, slope) in m.PROJ_FUEL_USE_SEGMENTS[proj]))
mod.ProjFuelUseRate_Calculate = Constraint(
mod.PROJ_DISP_FUEL_PIECEWISE_CONS_SET,
rule=lambda m, pr, t, intercept, incremental_heat_rate: (
sum(m.ProjFuelUseRate[pr, t, f] for f in m.G_FUELS[m.proj_gen_tech[pr]]) >=
# Amortize any startup fuel over the duration of the timepoint
m.Startup[pr, t] * m.proj_startup_fuel[pr] / m.tp_duration_hrs[t] +
intercept * m.CommitProject[pr, t] +
incremental_heat_rate * m.DispatchProj[pr, t]))
def load_inputs(mod, switch_data, inputs_dir):
"""
Import data to support modeling fuel use under partial loading
conditions with piecewise linear incremental heat rates.
These files are formatted differently than most to match the
standard format of incremental heat rates. This format is peculiar
because it contains data records that describe a fuel use curve in
two distinct ways. The first record is the first point on the curve,
but all subsequent records are slopes and x-domain for each line
segment. For a given generation technology or project, the relevant
data should be formatted like so:
power_start_mw power_end_mw ihr fuel_use_rate
min_load . . value
min_load mid_load1 value .
mid_load1 max_load value .
The first row provides the first point on the input/output curve.
Literal dots should be included to indicate blanks.
The column fuel_use_rate is in units of MMBTU/h.
Subsequent rows provide the domain and slope of each line segment.
The column ihr indicates incremental heat rate in MMBTU/MWh.
Any number of line segments will be accepted.
All text should be replaced with actual numerical values.
I chose this format to a) be relatively consistent with standard
data that is easiest to find, b) make it difficult to misinterpret
the meaning of the data, and c) allow all of the standard data to be
included in a single file.
The following files are optional. If no representative data is
provided for a generation technology, it will default to a single
line segment with an intercept of 0 and a slope equal to the full
load heat rate. If no specific data is provided for a project, it
will default to its generation technology.
gen_inc_heat_rates.tab
generation_technology, power_start_mw, power_end_mw,
incremental_heat_rate_mbtu_per_mwhr, fuel_use_rate_mmbtu_per_h
proj_inc_heat_rates.tab
project, power_start_mw, power_end_mw,
incremental_heat_rate_mbtu_per_mwhr, fuel_use_rate_mmbtu_per_h
"""
path = os.path.join(inputs_dir, 'gen_inc_heat_rates.tab')
if os.path.isfile(path):
(fuel_rate_segments, min_load, full_hr) = _parse_inc_heat_rate_file(
path, id_column="generation_technology")
# Check implied minimum loading level for consistency with
# g_min_load_fraction if g_min_load_fraction was provided. If
# g_min_load_fraction wasn't provided, set it to implied minimum
# loading level.
for g in min_load:
if 'g_min_load_fraction' not in switch_data.data():
switch_data.data()['g_min_load_fraction'] = {}
if g in switch_data.data(name='g_min_load_fraction'):
min_load_dat = switch_data.data(name='g_min_load_fraction')[g]
if not approx_equal(min_load[g], min_load_dat):
raise ValueError((
"g_min_load_fraction is inconsistant with " +
"incremental heat rate data for generation " +
"technology {}.").format(g))
else:
switch_data.data(name='g_min_load_fraction')[g] = min_load[g]
# Same thing, but for full load heat rate.
for g in full_hr:
if 'g_full_load_heat_rate' not in switch_data.data():
switch_data.data()['g_full_load_heat_rate'] = {}
if g in switch_data.data(name='g_full_load_heat_rate'):
full_hr_dat = switch_data.data(name='g_full_load_heat_rate')[g]
if abs((full_hr[g] - full_hr_dat) / full_hr_dat) > 0.01:
raise ValueError((
"g_full_load_heat_rate is inconsistant with " +
"incremental heat rate data for generation " +
"technology {}.").format(g))
else:
switch_data.data(name='g_full_load_heat_rate')[g] = full_hr[g]
# Copy parsed data into the data portal.
switch_data.data()['GEN_FUEL_USE_SEGMENTS'] = fuel_rate_segments
path = os.path.join(inputs_dir, 'proj_inc_heat_rates.tab')
if os.path.isfile(path):
(fuel_rate_segments, min_load, full_hr) = _parse_inc_heat_rate_file(
path, id_column="project")
# Check implied minimum loading level for consistency with
# proj_min_load_fraction if proj_min_load_fraction was provided. If
# proj_min_load_fraction wasn't provided, set it to implied minimum
# loading level.
for pr in min_load:
if 'proj_min_load_fraction' not in switch_data.data():
switch_data.data()['proj_min_load_fraction'] = {}
dp_dict = switch_data.data(name='proj_min_load_fraction')
if pr in dp_dict:
min_load_dat = dp_dict[pr]
if abs((min_load[pr] - min_load_dat) / min_load_dat) > 0.01:
raise ValueError((
"proj_min_load_fraction is inconsistant with " +
"incremental heat rate data for project " +
"{}.").format(pr))
else:
dp_dict[pr] = min_load[pr]
# Same thing, but for full load heat rate.
for pr in full_hr:
if 'proj_full_load_heat_rate' not in switch_data.data():
switch_data.data()['proj_full_load_heat_rate'] = {}
dp_dict = switch_data.data(name='proj_full_load_heat_rate')
if pr in dp_dict:
full_hr_dat = dp_dict[pr]
if abs((full_hr[pr] - full_hr_dat) / full_hr_dat) > 0.01:
raise ValueError((
"proj_full_load_heat_rate is inconsistant with " +
"incremental heat rate data for project " +
"{}.").format(pr))
else:
dp_dict[pr] = full_hr[pr]
# Copy parsed data into the data portal.
switch_data.data()['PROJ_FUEL_USE_SEGMENTS'] = fuel_rate_segments
def _parse_inc_heat_rate_file(path, id_column):
"""
Parse tabular incremental heat rate data, calculate a series of
lines that describe each segment, and perform various error checks.
SYNOPSIS:
>>> import switch_mod.project.unitcommit.fuel_use as f
>>> (fuel_rate_segments, min_load, full_hr) = f._parse_inc_heat_rate_file(
... 'test_dat/inc_heat_rates.tab', 'project')
>>> fuel_rate_segments
{'H8': [(0.6083951310861414, 10.579), (0.5587921348314604, 10.667), (0.4963352059925083, 10.755), (0.4211891385767775, 10.843)], 'foo': [(0.0, 5.0), (-6.666666666666667, 15.0)], 'AES': [(1.220351351351352, 15.805), (1.0633432432432417, 16.106), (0.8583378378378379, 16.407), (0.605335135135138, 16.708)]}
>>> min_load
{'H8': 0.41760299625468167, 'foo': 0.3333333333333333, 'AES': 0.3621621621621622}
>>> full_hr
{'H8': 11.264189138576777, 'foo': 8.333333333333334, 'AES': 17.313335135135137}
"""
# fuel_rate_points[unit] = {min_power: fuel_use_rate}
fuel_rate_points = {}
# fuel_rate_segments[unit] = [(intercept1, slope1), (int2, slope2)...]
# Stores the description of each linear segment of a fuel rate curve.
fuel_rate_segments = {}
# ihr_dat stores incremental heat rate records as a list for each unit
ihr_dat = {}
# min_cap_factor[unit] and full_load_hr[unit] are for error checking.
min_cap_factor = {}
full_load_hr = {}
# Parse the file and stuff the data into dictionaries indexed by unit for easy access.
with open(path, 'rb') as hr_file:
dat = list(csv.DictReader(hr_file, delimiter=' '))
for row in dat:
u = row[id_column]
p1 = float(row['power_start_mw'])
p2 = row['power_end_mw']
ihr = row['incremental_heat_rate_mbtu_per_mwhr']
fr = row['fuel_use_rate_mmbtu_per_h']
# Does this row give the first point?
if(p2 == '.' and ihr == '.'):
fr = float(fr)
if(u in fuel_rate_points):
raise ValueError(
"Error processing incremental heat rates for " +
u + " in " + path + ". More than one row has " +
"a fuel use rate specified.")
fuel_rate_points[u] = {p1: fr}
# Does this row give a line segment?
elif(fr == '.'):
p2 = float(p2)
ihr = float(ihr)
if(u not in ihr_dat):
ihr_dat[u] = []
ihr_dat[u].append((p1, p2, ihr))
# Throw an error if the row's format is not recognized.
else:
raise ValueError(
"Error processing incremental heat rates for row " +
u + " in " + path + ". Row format not recognized for " +
"row " + str(row) + ". See documentation for acceptable " +
"formats.")
# Make sure that each unit that has incremental heat rates defined
# also has a starting point defined.
if ihr_dat.keys() != fuel_rate_points.keys():
raise ValueError(
"One or more units did not define both a starting point " +
"and incremental heat rates for their fuel use curves.")
# Construct a convex combination of lines describing a fuel use
# curve for each representative unit "u".
for u in fuel_rate_points:
fuel_rate_segments[u] = []
fr_points = fuel_rate_points[u]
# Sort the line segments by their domains.
ihr_dat[u].sort()
# Assume that the maximum power output is the rated capacity.
(junk, capacity, junk) = ihr_dat[u][len(ihr_dat[u])-1]
# Retrieve the first incremental heat rate for error checking.
(min_power, junk, ihr_prev) = ihr_dat[u][0]
min_cap_factor[u] = min_power / capacity
# Process each line segment.
for segment in range(0, len(ihr_dat[u])):
(p_start, p_end, ihr) = ihr_dat[u][segment]
# Error check: This incremental heat rate cannot be less than
# the previous one.
if ihr_prev > ihr:
raise ValueError((
"Error processing incremental heat rates for " +
"{} in file {}. The incremental heat rate " +
"between power output levels {}-{} is less than " +
"that of the prior line segment.").format(
u, path, p_start, p_end))
# Error check: This segment needs to start at an existing point.
if p_start not in fr_points:
raise ValueError((
"Error processing incremental heat rates for " +
"{} in file {}. The incremental heat rate " +
"between power output levels {}-{} does not start at a " +
"previously defined point or line segment.").format(
u, path, p_start, p_end))
# Calculate the y-intercept then normalize it by the capacity.
intercept_norm = (fr_points[p_start] - ihr * p_start) / capacity
# Save the line segment's definition.
fuel_rate_segments[u].append((intercept_norm, ihr))
# Add a point for the end of the segment for the next iteration.
fr_points[p_end] = fr_points[p_start] + (p_end - p_start) * ihr
ihr_prev = ihr
# Calculate the max load heat rate for error checking
full_load_hr[u] = fr_points[capacity] / capacity
return (fuel_rate_segments, min_cap_factor, full_load_hr)
|
watonyweng/neutron
|
refs/heads/master
|
neutron/db/quota/driver.py
|
4
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import exceptions
from neutron.db.quota import models as quota_models
class DbQuotaDriver(object):
"""Driver to perform necessary checks to enforce quotas and obtain quota
information.
The default driver utilizes the local database.
"""
@staticmethod
def get_tenant_quotas(context, resources, tenant_id):
"""Given a list of resources, retrieve the quotas for the given
tenant.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resource keys.
:param tenant_id: The ID of the tenant to return quotas for.
:return dict: from resource name to dict of name and limit
"""
# init with defaults
tenant_quota = dict((key, resource.default)
for key, resource in resources.items())
# update with tenant specific limits
q_qry = context.session.query(quota_models.Quota).filter_by(
tenant_id=tenant_id)
tenant_quota.update((q['resource'], q['limit']) for q in q_qry)
return tenant_quota
@staticmethod
def delete_tenant_quota(context, tenant_id):
"""Delete the quota entries for a given tenant_id.
After deletion, this tenant will use the default quota values from the configuration.
"""
with context.session.begin():
tenant_quotas = context.session.query(quota_models.Quota)
tenant_quotas = tenant_quotas.filter_by(tenant_id=tenant_id)
tenant_quotas.delete()
@staticmethod
def get_all_quotas(context, resources):
"""Given a list of resources, retrieve the quotas for the all tenants.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resource keys.
:return quotas: list of dicts of the form
{tenant_id:, resourcekey1:, resourcekey2:, ...}
"""
tenant_default = dict((key, resource.default)
for key, resource in resources.items())
all_tenant_quotas = {}
for quota in context.session.query(quota_models.Quota):
tenant_id = quota['tenant_id']
# avoid setdefault() because only want to copy when actually req'd
tenant_quota = all_tenant_quotas.get(tenant_id)
if tenant_quota is None:
tenant_quota = tenant_default.copy()
tenant_quota['tenant_id'] = tenant_id
all_tenant_quotas[tenant_id] = tenant_quota
tenant_quota[quota['resource']] = quota['limit']
return list(all_tenant_quotas.values())
@staticmethod
def update_quota_limit(context, tenant_id, resource, limit):
with context.session.begin():
tenant_quota = context.session.query(quota_models.Quota).filter_by(
tenant_id=tenant_id, resource=resource).first()
if tenant_quota:
tenant_quota.update({'limit': limit})
else:
tenant_quota = quota_models.Quota(tenant_id=tenant_id,
resource=resource,
limit=limit)
context.session.add(tenant_quota)
def _get_quotas(self, context, tenant_id, resources):
"""Retrieves the quotas for specific resources.
A helper method which retrieves the quotas for the specific
resources identified by keys, and which apply to the current
context.
:param context: The request context, for access checks.
:param tenant_id: the tenant_id to check quota.
:param resources: A dictionary of the registered resources.
"""
# Grab and return the quotas (without usages)
quotas = DbQuotaDriver.get_tenant_quotas(
context, resources, tenant_id)
return dict((k, v) for k, v in quotas.items())
def limit_check(self, context, tenant_id, resources, values):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
synchronization function--this method checks that a set of
proposed values are permitted by the limit restriction.
If any of the proposed values is over the defined quota, an
OverQuota exception will be raised with the sorted list of the
resources which are too high. Otherwise, the method returns
nothing.
:param context: The request context, for access checks.
:param tenant_id: The tenant_id to check the quota.
:param resources: A dictionary of the registered resources.
:param values: A dictionary of the values to check against the
quota.
"""
# Ensure no value is less than zero
unders = [key for key, val in values.items() if val < 0]
if unders:
raise exceptions.InvalidQuotaValue(unders=sorted(unders))
# Get the applicable quotas
quotas = self._get_quotas(context, tenant_id, resources)
# Check the quotas and construct a list of the resources that
# would be put over limit by the desired values
overs = [key for key, val in values.items()
if quotas[key] >= 0 and quotas[key] < val]
if overs:
raise exceptions.OverQuota(overs=sorted(overs))
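# Hypothetical usage sketch (not part of the original module):
#   driver = DbQuotaDriver()
#   driver.limit_check(context, 'tenant-1', resources, {'port': 3})
# returns None when every proposed value fits within the tenant's quota,
# raises exceptions.InvalidQuotaValue for negative values, and raises
# exceptions.OverQuota listing the resources that would exceed their limits.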
|
msmolens/VTK
|
refs/heads/slicer-v6.3.0-2015-07-21-426987d
|
IO/Geometry/Testing/Python/Plot3DScalars.py
|
20
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
#
# All Plot3D scalar functions
#
# Create the RenderWindow, Renderer and both Actors
#
renWin = vtk.vtkRenderWindow()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
scalarLabels = ["Density", "Pressure", "Temperature", "Enthalpy",
"Internal_Energy", "Kinetic_Energy", "Velocity_Magnitude",
"Stagnation_Energy", "Entropy", "Swirl"]
scalarFunctions = ["100", "110", "120", "130",
"140", "144", "153",
"163", "170", "184"]
camera = vtk.vtkCamera()
light = vtk.vtkLight()
math = vtk.vtkMath()
# All text actors will share the same text prop
textProp = vtk.vtkTextProperty()
textProp.SetFontSize(10)
textProp.SetFontFamilyToArial()
textProp.SetColor(0, 0, 0)
i = 0
for scalarFunction in scalarFunctions:
exec("pl3d" + scalarFunction + " = vtk.vtkMultiBlockPLOT3DReader()")
eval("pl3d" + scalarFunction).SetXYZFileName(
VTK_DATA_ROOT + "/Data/bluntfinxyz.bin")
eval("pl3d" + scalarFunction).SetQFileName(
VTK_DATA_ROOT + "/Data/bluntfinq.bin")
eval("pl3d" + scalarFunction).SetScalarFunctionNumber(int(scalarFunction))
eval("pl3d" + scalarFunction).Update()
output = eval("pl3d" + scalarFunction).GetOutput().GetBlock(0)
exec("plane" + scalarFunction + " = vtk.vtkStructuredGridGeometryFilter()")
eval("plane" + scalarFunction).SetInputData(output)
eval("plane" + scalarFunction).SetExtent(25, 25, 0, 100, 0, 100)
exec("mapper" + scalarFunction + " = vtk.vtkPolyDataMapper()")
eval("mapper" + scalarFunction).SetInputConnection(
eval("plane" + scalarFunction).GetOutputPort())
eval("mapper" + scalarFunction).SetScalarRange(
output.GetPointData().GetScalars().GetRange())
exec("actor" + scalarFunction + " = vtk.vtkActor()")
eval("actor" + scalarFunction).SetMapper(eval("mapper" + scalarFunction))
exec("ren" + scalarFunction + " = vtk.vtkRenderer()")
eval("ren" + scalarFunction).SetBackground(0, 0, .5)
eval("ren" + scalarFunction).SetActiveCamera(camera)
eval("ren" + scalarFunction).AddLight(light)
renWin.AddRenderer(eval("ren" + scalarFunction))
eval("ren" + scalarFunction).SetBackground(
math.Random(.5, 1), math.Random(.5, 1), math.Random(.5, 1))
eval("ren" + scalarFunction).AddActor(eval("actor" + scalarFunction))
exec("textMapper" + scalarFunction + " = vtk.vtkTextMapper()")
eval("textMapper" + scalarFunction).SetInput(scalarLabels[i])
eval("textMapper" + scalarFunction).SetTextProperty(textProp)
# exec("text" + scalarFunction + " = vtk.vtkActor2D()")
# eval("text" + scalarFunction).SetMapper(eval("textMapper" + scalarFunction))
# eval("text" + scalarFunction).SetPosition(2, 3)
#
# eval("ren" + scalarFunction).AddActor2D(eval("text" + scalarFunction))
i += 1
#
# now layout the renderers
column = 1
row = 1
deltaX = 1.0 / 5.0
deltaY = 1.0 / 2.0
for scalarFunction in scalarFunctions:
eval("ren" + scalarFunction).SetViewport(
(column - 1) * deltaX, (row - 1) * deltaY, column * deltaX, row * deltaY)
column += 1
if (column > 5):
column = 1
row += 1
camera.SetViewUp(0, 1, 0)
camera.SetFocalPoint(0, 0, 0)
camera.SetPosition(1, 0, 0)
ren100.ResetCamera()
camera.Dolly(1.25)
ren100.ResetCameraClippingRange()
ren110.ResetCameraClippingRange()
ren120.ResetCameraClippingRange()
ren130.ResetCameraClippingRange()
ren140.ResetCameraClippingRange()
ren144.ResetCameraClippingRange()
ren153.ResetCameraClippingRange()
ren163.ResetCameraClippingRange()
ren170.ResetCameraClippingRange()
ren184.ResetCameraClippingRange()
light.SetPosition(camera.GetPosition())
light.SetFocalPoint(camera.GetFocalPoint())
renWin.SetSize(600, 180)
renWin.Render()
# render the image
#
iren.Initialize()
# iren.Start()
|
ClovisIRex/Snake-django
|
refs/heads/master
|
env/lib/python3.6/site-packages/django/contrib/gis/geos/prototypes/io.py
|
41
|
import threading
from ctypes import POINTER, Structure, byref, c_char, c_char_p, c_int, c_size_t
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_sized_string, check_string,
)
from django.contrib.gis.geos.prototypes.geom import c_uchar_p, geos_char_p
from django.utils import six
from django.utils.encoding import force_bytes
# ### The WKB/WKT Reader/Writer structures and pointers ###
class WKTReader_st(Structure):
pass
class WKTWriter_st(Structure):
pass
class WKBReader_st(Structure):
pass
class WKBWriter_st(Structure):
pass
WKT_READ_PTR = POINTER(WKTReader_st)
WKT_WRITE_PTR = POINTER(WKTWriter_st)
WKB_READ_PTR = POINTER(WKBReader_st)
WKB_WRITE_PTR = POINTER(WKBWriter_st)
# WKTReader routines
wkt_reader_create = GEOSFuncFactory('GEOSWKTReader_create', restype=WKT_READ_PTR)
wkt_reader_destroy = GEOSFuncFactory('GEOSWKTReader_destroy', argtypes=[WKT_READ_PTR])
wkt_reader_read = GEOSFuncFactory(
'GEOSWKTReader_read', argtypes=[WKT_READ_PTR, c_char_p], restype=GEOM_PTR, errcheck=check_geom
)
# WKTWriter routines
wkt_writer_create = GEOSFuncFactory('GEOSWKTWriter_create', restype=WKT_WRITE_PTR)
wkt_writer_destroy = GEOSFuncFactory('GEOSWKTWriter_destroy', argtypes=[WKT_WRITE_PTR])
wkt_writer_write = GEOSFuncFactory(
'GEOSWKTWriter_write', argtypes=[WKT_WRITE_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
wkt_writer_get_outdim = GEOSFuncFactory(
'GEOSWKTWriter_getOutputDimension', argtypes=[WKT_WRITE_PTR], restype=c_int
)
wkt_writer_set_outdim = GEOSFuncFactory(
'GEOSWKTWriter_setOutputDimension', argtypes=[WKT_WRITE_PTR, c_int]
)
wkt_writer_set_trim = GEOSFuncFactory('GEOSWKTWriter_setTrim', argtypes=[WKT_WRITE_PTR, c_char])
wkt_writer_set_precision = GEOSFuncFactory('GEOSWKTWriter_setRoundingPrecision', argtypes=[WKT_WRITE_PTR, c_int])
# WKBReader routines
wkb_reader_create = GEOSFuncFactory('GEOSWKBReader_create', restype=WKB_READ_PTR)
wkb_reader_destroy = GEOSFuncFactory('GEOSWKBReader_destroy', argtypes=[WKB_READ_PTR])
class WKBReadFunc(GEOSFuncFactory):
# Although the function definitions take `const unsigned char *`
# as their parameter, we use c_char_p here so the function may
# take Python strings directly as parameters. Inside Python there
# is not a difference between signed and unsigned characters, so
# it is not a problem.
argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
wkb_reader_read = WKBReadFunc('GEOSWKBReader_read')
wkb_reader_read_hex = WKBReadFunc('GEOSWKBReader_readHEX')
# WKBWriter routines
wkb_writer_create = GEOSFuncFactory('GEOSWKBWriter_create', restype=WKB_WRITE_PTR)
wkb_writer_destroy = GEOSFuncFactory('GEOSWKBWriter_destroy', argtypes=[WKB_WRITE_PTR])
# WKB Writing prototypes.
class WKBWriteFunc(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, GEOM_PTR, POINTER(c_size_t)]
restype = c_uchar_p
errcheck = staticmethod(check_sized_string)
wkb_writer_write = WKBWriteFunc('GEOSWKBWriter_write')
wkb_writer_write_hex = WKBWriteFunc('GEOSWKBWriter_writeHEX')
# WKBWriter property getter/setter prototypes.
class WKBWriterGet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR]
restype = c_int
class WKBWriterSet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, c_int]
wkb_writer_get_byteorder = WKBWriterGet('GEOSWKBWriter_getByteOrder')
wkb_writer_set_byteorder = WKBWriterSet('GEOSWKBWriter_setByteOrder')
wkb_writer_get_outdim = WKBWriterGet('GEOSWKBWriter_getOutputDimension')
wkb_writer_set_outdim = WKBWriterSet('GEOSWKBWriter_setOutputDimension')
wkb_writer_get_include_srid = WKBWriterGet('GEOSWKBWriter_getIncludeSRID', restype=c_char)
wkb_writer_set_include_srid = WKBWriterSet('GEOSWKBWriter_setIncludeSRID', argtypes=[WKB_WRITE_PTR, c_char])
# ### Base I/O Class ###
class IOBase(GEOSBase):
"Base class for GEOS I/O objects."
def __init__(self):
# Getting the pointer with the constructor.
self.ptr = self._constructor()
# Loading the real destructor function at this point as doing it in
# __del__ is too late (import error).
self.destructor.func = self.destructor.get_func(
*self.destructor.args, **self.destructor.kwargs
)
# ### Base WKB/WKT Reading and Writing objects ###
# Non-public WKB/WKT reader classes for internal use because
# their `read` methods return _pointers_ instead of GEOSGeometry
# objects.
class _WKTReader(IOBase):
_constructor = wkt_reader_create
ptr_type = WKT_READ_PTR
destructor = wkt_reader_destroy
def read(self, wkt):
if not isinstance(wkt, (bytes, six.string_types)):
raise TypeError
return wkt_reader_read(self.ptr, force_bytes(wkt))
class _WKBReader(IOBase):
_constructor = wkb_reader_create
ptr_type = WKB_READ_PTR
destructor = wkb_reader_destroy
def read(self, wkb):
"Returns a _pointer_ to C GEOS Geometry object from the given WKB."
if isinstance(wkb, six.memoryview):
wkb_s = bytes(wkb)
return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
elif isinstance(wkb, (bytes, six.string_types)):
return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
else:
raise TypeError
# ### WKB/WKT Writer Classes ###
class WKTWriter(IOBase):
_constructor = wkt_writer_create
ptr_type = WKT_WRITE_PTR
destructor = wkt_writer_destroy
_trim = False
_precision = None
def __init__(self, dim=2, trim=False, precision=None):
super(WKTWriter, self).__init__()
if bool(trim) != self._trim:
self.trim = trim
if precision is not None:
self.precision = precision
self.outdim = dim
def write(self, geom):
"Returns the WKT representation of the given geometry."
return wkt_writer_write(self.ptr, geom.ptr)
@property
def outdim(self):
return wkt_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKT output dimension must be 2 or 3')
wkt_writer_set_outdim(self.ptr, new_dim)
@property
def trim(self):
return self._trim
@trim.setter
def trim(self, flag):
if bool(flag) != self._trim:
self._trim = bool(flag)
wkt_writer_set_trim(self.ptr, b'\x01' if flag else b'\x00')
@property
def precision(self):
return self._precision
@precision.setter
def precision(self, precision):
if (not isinstance(precision, int) or precision < 0) and precision is not None:
raise AttributeError('WKT output rounding precision must be non-negative integer or None.')
if precision != self._precision:
self._precision = precision
wkt_writer_set_precision(self.ptr, -1 if precision is None else precision)
class WKBWriter(IOBase):
_constructor = wkb_writer_create
ptr_type = WKB_WRITE_PTR
destructor = wkb_writer_destroy
def __init__(self, dim=2):
super(WKBWriter, self).__init__()
self.outdim = dim
def _handle_empty_point(self, geom):
from django.contrib.gis.geos import Point
if isinstance(geom, Point) and geom.empty:
if self.srid:
# PostGIS uses POINT(NaN NaN) for WKB representation of empty
# points. Use it for EWKB as it's a PostGIS specific format.
# https://trac.osgeo.org/postgis/ticket/3181
geom = Point(float('NaN'), float('NaN'), srid=geom.srid)
else:
raise ValueError('Empty point is not representable in WKB.')
return geom
def write(self, geom):
"Returns the WKB representation of the given geometry."
from django.contrib.gis.geos import Polygon
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t()))
if isinstance(geom, Polygon) and geom.empty:
# Fix GEOS output for empty polygon.
# See https://trac.osgeo.org/geos/ticket/680.
wkb = wkb[:-8] + b'\0' * 4
return six.memoryview(wkb)
def write_hex(self, geom):
"Returns the HEXEWKB representation of the given geometry."
from django.contrib.gis.geos.polygon import Polygon
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
if isinstance(geom, Polygon) and geom.empty:
wkb = wkb[:-16] + b'0' * 8
return wkb
# ### WKBWriter Properties ###
# Property for getting/setting the byteorder.
def _get_byteorder(self):
return wkb_writer_get_byteorder(self.ptr)
def _set_byteorder(self, order):
if order not in (0, 1):
raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
wkb_writer_set_byteorder(self.ptr, order)
byteorder = property(_get_byteorder, _set_byteorder)
# Property for getting/setting the output dimension.
@property
def outdim(self):
return wkb_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKB output dimension must be 2 or 3')
wkb_writer_set_outdim(self.ptr, new_dim)
# Property for getting/setting the include srid flag.
@property
def srid(self):
return bool(ord(wkb_writer_get_include_srid(self.ptr)))
@srid.setter
def srid(self, include):
if include:
flag = b'\x01'
else:
flag = b'\x00'
wkb_writer_set_include_srid(self.ptr, flag)
# `ThreadLocalIO` object holds instances of the WKT and WKB reader/writer
# objects that are local to the thread. The `GEOSGeometry` internals
# access these instances by calling the module-level functions, defined
# below.
class ThreadLocalIO(threading.local):
wkt_r = None
wkt_w = None
wkb_r = None
wkb_w = None
ewkb_w = None
thread_context = ThreadLocalIO()
# These module-level routines return the I/O object that is local to the
# thread. If the I/O object does not exist yet it will be initialized.
def wkt_r():
if not thread_context.wkt_r:
thread_context.wkt_r = _WKTReader()
return thread_context.wkt_r
def wkt_w(dim=2, trim=False, precision=None):
if not thread_context.wkt_w:
thread_context.wkt_w = WKTWriter(dim=dim, trim=trim, precision=precision)
else:
thread_context.wkt_w.outdim = dim
thread_context.wkt_w.trim = trim
thread_context.wkt_w.precision = precision
return thread_context.wkt_w
def wkb_r():
if not thread_context.wkb_r:
thread_context.wkb_r = _WKBReader()
return thread_context.wkb_r
def wkb_w(dim=2):
if not thread_context.wkb_w:
thread_context.wkb_w = WKBWriter(dim=dim)
else:
thread_context.wkb_w.outdim = dim
return thread_context.wkb_w
def ewkb_w(dim=2):
if not thread_context.ewkb_w:
thread_context.ewkb_w = WKBWriter(dim=dim)
thread_context.ewkb_w.srid = True
else:
thread_context.ewkb_w.outdim = dim
return thread_context.ewkb_w
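# Usage sketch: one way the thread-local helpers above may be combined.
# Assumes a GEOSGeometry instance from django.contrib.gis.geos; the variable
# names below are illustrative only.
#
#   from django.contrib.gis.geos import GEOSGeometry
#   geom = GEOSGeometry('POINT (5 23)')
#   wkt = wkt_w(dim=2, trim=True).write(geom)   # WKT string, e.g. 'POINT (5 23)'
#   wkb = wkb_w(dim=2).write(geom)              # memoryview of WKB bytes
#   ptr = wkt_r().read(wkt)                     # raw GEOS pointer, not a GEOSGeometry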
|
shashank971/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/credit/models.py
|
8
|
# -*- coding: utf-8 -*-
"""
Models for Credit Eligibility for courses.
Credit courses allow students to receive university credit for
successful completion of a course on EdX
"""
import datetime
from collections import defaultdict
import logging
import pytz
from django.conf import settings
from django.core.cache import cache
from django.dispatch import receiver
from django.db import models, transaction, IntegrityError
from django.core.validators import RegexValidator
from simple_history.models import HistoricalRecords
from jsonfield.fields import JSONField
from model_utils.models import TimeStampedModel
from xmodule_django.models import CourseKeyField
from django.utils.translation import ugettext_lazy
log = logging.getLogger(__name__)
class CreditProvider(TimeStampedModel):
"""
This model represents an institution that can grant credit for a course.
    Each provider is identified by a unique ID (e.g., 'ASU'). CreditProvider also
    includes a `url` where the student will be sent when he/she tries to
    get credit for the course. The eligibility duration is used to set how long
    the credit-eligible message appears on the dashboard.
"""
provider_id = models.CharField(
max_length=255,
unique=True,
validators=[
RegexValidator(
regex=r"^[a-z,A-Z,0-9,\-]+$",
message="Only alphanumeric characters and hyphens (-) are allowed",
code="invalid_provider_id",
)
],
help_text=ugettext_lazy(
"Unique identifier for this credit provider. "
"Only alphanumeric characters and hyphens (-) are allowed. "
"The identifier is case-sensitive."
)
)
active = models.BooleanField(
default=True,
help_text=ugettext_lazy("Whether the credit provider is currently enabled.")
)
display_name = models.CharField(
max_length=255,
help_text=ugettext_lazy("Name of the credit provider displayed to users")
)
enable_integration = models.BooleanField(
default=False,
help_text=ugettext_lazy(
"When true, automatically notify the credit provider "
"when a user requests credit. "
"In order for this to work, a shared secret key MUST be configured "
"for the credit provider in secure auth settings."
)
)
provider_url = models.URLField(
default="",
help_text=ugettext_lazy(
"URL of the credit provider. If automatic integration is "
"enabled, this will the the end-point that we POST to "
"to notify the provider of a credit request. Otherwise, the "
"user will be shown a link to this URL, so the user can "
"request credit from the provider directly."
)
)
provider_status_url = models.URLField(
default="",
help_text=ugettext_lazy(
"URL from the credit provider where the user can check the status "
"of his or her request for credit. This is displayed to students "
"*after* they have requested credit."
)
)
provider_description = models.TextField(
default="",
help_text=ugettext_lazy(
"Description for the credit provider displayed to users."
)
)
fulfillment_instructions = models.TextField(
null=True,
blank=True,
help_text=ugettext_lazy(
"Plain text or html content for displaying further steps on "
"receipt page *after* paying for the credit to get credit for a "
"credit course against a credit provider."
)
)
eligibility_email_message = models.TextField(
default="",
help_text=ugettext_lazy(
"Plain text or html content for displaying custom message inside "
"credit eligibility email content which is sent when user has met "
"all credit eligibility requirements."
)
)
receipt_email_message = models.TextField(
default="",
help_text=ugettext_lazy(
"Plain text or html content for displaying custom message inside "
"credit receipt email content which is sent *after* paying to get "
"credit for a credit course."
)
)
thumbnail_url = models.URLField(
default="",
max_length=255,
help_text=ugettext_lazy(
"Thumbnail image url of the credit provider."
)
)
CREDIT_PROVIDERS_CACHE_KEY = "credit.providers.list"
@classmethod
def get_credit_providers(cls, providers_list=None):
"""
Retrieve a list of all credit providers or filter on providers_list, represented
as dictionaries.
Arguments:
            providers_list (list of strings or None): list of provider ids to filter
                the results by, or None for all providers.
Returns:
list of providers represented as dictionaries.
"""
# Attempt to retrieve the credit provider list from the cache if provider_list is None
# The cache key is invalidated when the provider list is updated
# (a post-save signal handler on the CreditProvider model)
# This doesn't happen very often, so we would expect a *very* high
# cache hit rate.
credit_providers = cache.get(cls.CREDIT_PROVIDERS_CACHE_KEY)
if credit_providers is None:
# Cache miss: construct the provider list and save it in the cache
credit_providers = CreditProvider.objects.filter(active=True)
credit_providers = [
{
"id": provider.provider_id,
"display_name": provider.display_name,
"url": provider.provider_url,
"status_url": provider.provider_status_url,
"description": provider.provider_description,
"enable_integration": provider.enable_integration,
"fulfillment_instructions": provider.fulfillment_instructions,
"thumbnail_url": provider.thumbnail_url,
}
for provider in credit_providers
]
cache.set(cls.CREDIT_PROVIDERS_CACHE_KEY, credit_providers)
if providers_list:
credit_providers = [provider for provider in credit_providers if provider['id'] in providers_list]
return credit_providers
@classmethod
def get_credit_provider(cls, provider_id):
"""
Retrieve a credit provider with provided 'provider_id'.
"""
try:
return CreditProvider.objects.get(active=True, provider_id=provider_id)
except cls.DoesNotExist:
return None
def __unicode__(self):
"""Unicode representation of the credit provider. """
return self.provider_id
@receiver(models.signals.post_save, sender=CreditProvider)
@receiver(models.signals.post_delete, sender=CreditProvider)
def invalidate_provider_cache(sender, **kwargs): # pylint: disable=unused-argument
"""Invalidate the cache of credit providers. """
cache.delete(CreditProvider.CREDIT_PROVIDERS_CACHE_KEY)
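# Usage sketch: how the cached provider list may be consumed. The provider id
# below is an example value, not one defined by this module.
#
#   all_providers = CreditProvider.get_credit_providers()
#   asu_only = CreditProvider.get_credit_providers(providers_list=["asu"])
#   provider = CreditProvider.get_credit_provider("asu")  # None if missing/inactive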
class CreditCourse(models.Model):
"""
Model for tracking a credit course.
"""
course_key = CourseKeyField(max_length=255, db_index=True, unique=True)
enabled = models.BooleanField(default=False)
CREDIT_COURSES_CACHE_KEY = "credit.courses.set"
@classmethod
def is_credit_course(cls, course_key):
"""
Check whether the course has been configured for credit.
Args:
course_key (CourseKey): Identifier of the course.
Returns:
bool: True iff this is a credit course.
"""
credit_courses = cache.get(cls.CREDIT_COURSES_CACHE_KEY)
if credit_courses is None:
credit_courses = set(
unicode(course.course_key)
for course in cls.objects.filter(enabled=True)
)
cache.set(cls.CREDIT_COURSES_CACHE_KEY, credit_courses)
return unicode(course_key) in credit_courses
@classmethod
def get_credit_course(cls, course_key):
"""
Get the credit course if exists for the given 'course_key'.
Args:
course_key(CourseKey): The course identifier
Raises:
DoesNotExist if no CreditCourse exists for the given course key.
Returns:
CreditCourse if one exists for the given course key.
"""
return cls.objects.get(course_key=course_key, enabled=True)
def __unicode__(self):
"""Unicode representation of the credit course. """
return unicode(self.course_key)
@receiver(models.signals.post_save, sender=CreditCourse)
@receiver(models.signals.post_delete, sender=CreditCourse)
def invalidate_credit_courses_cache(sender, **kwargs): # pylint: disable=unused-argument
"""Invalidate the cache of credit courses. """
cache.delete(CreditCourse.CREDIT_COURSES_CACHE_KEY)
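# Usage sketch: checking whether a course is credit-bearing. The course key
# string and the CourseKey import are assumptions for illustration.
#
#   from opaque_keys.edx.keys import CourseKey
#   course_key = CourseKey.from_string("course-v1:edX+DemoX+Demo_2015")
#   if CreditCourse.is_credit_course(course_key):
#       credit_course = CreditCourse.get_credit_course(course_key)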
class CreditRequirement(TimeStampedModel):
"""
This model represents a credit requirement.
Each requirement is uniquely identified by its 'namespace' and
'name' fields.
The 'name' field stores the unique name or location (in case of XBlock)
for a requirement, which serves as the unique identifier for that
requirement.
The 'display_name' field stores the display name of the requirement.
    The 'criteria' field dictionary provides additional information that clients
    may need to determine whether a user has satisfied the requirement.
"""
course = models.ForeignKey(CreditCourse, related_name="credit_requirements")
namespace = models.CharField(max_length=255)
name = models.CharField(max_length=255)
display_name = models.CharField(max_length=255, default="")
order = models.PositiveIntegerField(default=0)
criteria = JSONField()
active = models.BooleanField(default=True)
class Meta(object):
"""
Model metadata.
"""
unique_together = ('namespace', 'name', 'course')
ordering = ["order"]
@classmethod
def add_or_update_course_requirement(cls, credit_course, requirement, order):
"""
Add requirement to a given course.
Args:
            credit_course(CreditCourse): The credit course to attach the requirement to
            requirement(dict): Requirement dict to be added
            order(int): Position of the requirement within the course
Returns:
(CreditRequirement, created) tuple
"""
credit_requirement, created = cls.objects.get_or_create(
course=credit_course,
namespace=requirement["namespace"],
name=requirement["name"],
defaults={
"display_name": requirement["display_name"],
"criteria": requirement["criteria"],
"order": order,
"active": True
}
)
if not created:
credit_requirement.criteria = requirement["criteria"]
credit_requirement.active = True
credit_requirement.order = order
credit_requirement.display_name = requirement["display_name"]
credit_requirement.save()
return credit_requirement, created
@classmethod
def get_course_requirements(cls, course_key, namespace=None, name=None):
"""
Get credit requirements of a given course.
Args:
course_key (CourseKey): The identifier for a course
Keyword Arguments
namespace (str): Optionally filter credit requirements by namespace.
name (str): Optionally filter credit requirements by name.
Returns:
QuerySet of CreditRequirement model
"""
# order credit requirements according to their appearance in courseware
requirements = CreditRequirement.objects.filter(course__course_key=course_key, active=True)
if namespace is not None:
requirements = requirements.filter(namespace=namespace)
if name is not None:
requirements = requirements.filter(name=name)
return requirements
@classmethod
def disable_credit_requirements(cls, requirement_ids):
"""
Mark the given requirements inactive.
Args:
requirement_ids(list): List of ids
Returns:
None
"""
cls.objects.filter(id__in=requirement_ids).update(active=False)
@classmethod
def get_course_requirement(cls, course_key, namespace, name):
"""
Get credit requirement of a given course.
Args:
course_key(CourseKey): The identifier for a course
namespace(str): Namespace of credit course requirements
name(str): Name of credit course requirement
Returns:
CreditRequirement object if exists
"""
try:
return cls.objects.get(
course__course_key=course_key, active=True, namespace=namespace, name=name
)
except cls.DoesNotExist:
return None
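# Usage sketch: the shape of the `requirement` dict consumed by
# add_or_update_course_requirement above. Keys mirror the ones read in that
# method; the values are example data only.
#
#   requirement = {
#       "namespace": "grade",
#       "name": "grade",
#       "display_name": "Minimum Grade",
#       "criteria": {"min_grade": 0.8},
#   }
#   CreditRequirement.add_or_update_course_requirement(credit_course, requirement, order=0)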
class CreditRequirementStatus(TimeStampedModel):
"""
This model represents the status of each requirement.
For a particular credit requirement, a user can either:
1) Have satisfied the requirement (example: approved in-course reverification)
2) Have failed the requirement (example: denied in-course reverification)
3) Neither satisfied nor failed (example: the user hasn't yet attempted in-course reverification).
Cases (1) and (2) are represented by having a CreditRequirementStatus with
the status set to "satisfied" or "failed", respectively.
In case (3), no CreditRequirementStatus record will exist for the requirement and user.
"""
REQUIREMENT_STATUS_CHOICES = (
("satisfied", "satisfied"),
("failed", "failed"),
)
username = models.CharField(max_length=255, db_index=True)
requirement = models.ForeignKey(CreditRequirement, related_name="statuses")
status = models.CharField(max_length=32, choices=REQUIREMENT_STATUS_CHOICES)
# Include additional information about why the user satisfied or failed
# the requirement. This is specific to the type of requirement.
# For example, the minimum grade requirement might record the user's
# final grade when the user completes the course. This allows us to display
# the grade to users later and to send the information to credit providers.
reason = JSONField(default={})
# Maintain a history of requirement status updates for auditing purposes
history = HistoricalRecords()
class Meta(object): # pylint: disable=missing-docstring
unique_together = ('username', 'requirement')
@classmethod
def get_statuses(cls, requirements, username):
"""
        Get credit requirement statuses for the given requirements and username
        Args:
            requirements(QuerySet or list): CreditRequirement objects to look up
            username(str): username of the user
Returns:
Queryset 'CreditRequirementStatus' objects
"""
return cls.objects.filter(requirement__in=requirements, username=username)
@classmethod
@transaction.commit_on_success
def add_or_update_requirement_status(cls, username, requirement, status="satisfied", reason=None):
"""
Add credit requirement status for given username.
Args:
username(str): Username of the user
requirement(CreditRequirement): 'CreditRequirement' object
status(str): Status of the requirement
reason(dict): Reason of the status
"""
requirement_status, created = cls.objects.get_or_create(
username=username,
requirement=requirement,
defaults={"reason": reason, "status": status}
)
if not created:
requirement_status.status = status
requirement_status.reason = reason if reason else {}
requirement_status.save()
@classmethod
@transaction.commit_on_success
def remove_requirement_status(cls, username, requirement):
"""
Remove credit requirement status for given username.
Args:
username(str): Username of the user
requirement(CreditRequirement): 'CreditRequirement' object
"""
try:
requirement_status = cls.objects.get(username=username, requirement=requirement)
requirement_status.delete()
except cls.DoesNotExist:
log.exception(u'The requirement status does not exist against the username %s.', username)
return
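# Usage sketch: recording that a user satisfied a requirement. `requirement`
# is assumed to be a CreditRequirement instance; the reason payload is an
# example only.
#
#   CreditRequirementStatus.add_or_update_requirement_status(
#       "bob", requirement, status="satisfied", reason={"final_grade": 0.95}
#   )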
class CreditEligibility(TimeStampedModel):
"""
A record of a user's eligibility for credit from a specific credit
provider for a specific course.
"""
username = models.CharField(max_length=255, db_index=True)
course = models.ForeignKey(CreditCourse, related_name="eligibilities")
# Deadline for when credit eligibility will expire.
# Once eligibility expires, users will no longer be able to purchase
# or request credit.
# We save the deadline as a database field just in case
# we need to override the deadline for particular students.
deadline = models.DateTimeField(
default=lambda: (
datetime.datetime.now(pytz.UTC) + datetime.timedelta(
days=getattr(settings, "CREDIT_ELIGIBILITY_EXPIRATION_DAYS", 365)
)
),
help_text=ugettext_lazy("Deadline for purchasing and requesting credit.")
)
class Meta(object): # pylint: disable=missing-docstring
unique_together = ('username', 'course')
verbose_name_plural = "Credit eligibilities"
@classmethod
def update_eligibility(cls, requirements, username, course_key):
"""
Update the user's credit eligibility for a course.
A user is eligible for credit when the user has satisfied
all requirements for credit in the course.
Arguments:
requirements (Queryset): Queryset of `CreditRequirement`s to check.
username (str): Identifier of the user being updated.
course_key (CourseKey): Identifier of the course.
        Returns: (is_eligible, created) tuple
"""
# Check all requirements for the course to determine if the user
# is eligible. We need to check all the *requirements*
# (not just the *statuses*) in case the user doesn't yet have
# a status for a particular requirement.
status_by_req = defaultdict(lambda: False)
for status in CreditRequirementStatus.get_statuses(requirements, username):
status_by_req[status.requirement.id] = status.status
is_eligible = all(status_by_req[req.id] == "satisfied" for req in requirements)
# If we're eligible, then mark the user as being eligible for credit.
if is_eligible:
try:
CreditEligibility.objects.create(
username=username,
course=CreditCourse.objects.get(course_key=course_key),
)
return is_eligible, True
except IntegrityError:
return is_eligible, False
else:
return is_eligible, False
@classmethod
def get_user_eligibilities(cls, username):
"""
        Returns the eligibilities of the given user.
Args:
username(str): Username of the user
Returns:
CreditEligibility queryset for the user
"""
return cls.objects.filter(
username=username,
course__enabled=True,
deadline__gt=datetime.datetime.now(pytz.UTC)
).select_related('course')
@classmethod
def is_user_eligible_for_credit(cls, course_key, username):
"""
Check if the given user is eligible for the provided credit course
Args:
course_key(CourseKey): The course identifier
username(str): The username of the user
Returns:
            bool: True if the user is eligible for the credit course, else False
"""
return cls.objects.filter(
course__course_key=course_key,
course__enabled=True,
username=username,
deadline__gt=datetime.datetime.now(pytz.UTC),
).exists()
def __unicode__(self):
"""Unicode representation of the credit eligibility. """
return u"{user}, {course}".format(
user=self.username,
course=self.course.course_key,
)
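# Usage sketch: a typical eligibility update after a requirement status
# changes. The username and course_key are assumed example values.
#
#   requirements = CreditRequirement.get_course_requirements(course_key)
#   is_eligible, created = CreditEligibility.update_eligibility(
#       requirements, "bob", course_key
#   )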
class CreditRequest(TimeStampedModel):
"""
A request for credit from a particular credit provider.
When a user initiates a request for credit, a CreditRequest record will be created.
Each CreditRequest is assigned a unique identifier so we can find it when the request
is approved by the provider. The CreditRequest record stores the parameters to be sent
at the time the request is made. If the user re-issues the request
(perhaps because the user did not finish filling in forms on the credit provider's site),
the request record will be updated, but the UUID will remain the same.
"""
uuid = models.CharField(max_length=32, unique=True, db_index=True)
username = models.CharField(max_length=255, db_index=True)
course = models.ForeignKey(CreditCourse, related_name="credit_requests")
provider = models.ForeignKey(CreditProvider, related_name="credit_requests")
parameters = JSONField()
REQUEST_STATUS_PENDING = "pending"
REQUEST_STATUS_APPROVED = "approved"
REQUEST_STATUS_REJECTED = "rejected"
REQUEST_STATUS_CHOICES = (
(REQUEST_STATUS_PENDING, "Pending"),
(REQUEST_STATUS_APPROVED, "Approved"),
(REQUEST_STATUS_REJECTED, "Rejected"),
)
status = models.CharField(
max_length=255,
choices=REQUEST_STATUS_CHOICES,
default=REQUEST_STATUS_PENDING
)
history = HistoricalRecords()
class Meta(object): # pylint: disable=missing-docstring
# Enforce the constraint that each user can have exactly one outstanding
# request to a given provider. Multiple requests use the same UUID.
unique_together = ('username', 'course', 'provider')
get_latest_by = 'created'
@classmethod
def credit_requests_for_user(cls, username):
"""
Retrieve all credit requests for a user.
Arguments:
username (unicode): The username of the user.
Returns: list
Example Usage:
>>> CreditRequest.credit_requests_for_user("bob")
[
{
"uuid": "557168d0f7664fe59097106c67c3f847",
"timestamp": 1434631630,
"course_key": "course-v1:HogwartsX+Potions101+1T2015",
"provider": {
"id": "HogwartsX",
"display_name": "Hogwarts School of Witchcraft and Wizardry",
},
"status": "pending" # or "approved" or "rejected"
}
]
"""
return [
{
"uuid": request.uuid,
"timestamp": request.parameters.get("timestamp"),
"course_key": request.course.course_key,
"provider": {
"id": request.provider.provider_id,
"display_name": request.provider.display_name
},
"status": request.status
}
for request in cls.objects.select_related('course', 'provider').filter(username=username)
]
@classmethod
def get_user_request_status(cls, username, course_key):
"""
        Returns the latest credit request of the user for the given course.
Args:
username(str): The username of requesting user
course_key(CourseKey): The course identifier
Returns:
CreditRequest if any otherwise None
"""
try:
return cls.objects.filter(
username=username, course__course_key=course_key
).select_related('course', 'provider').latest()
except cls.DoesNotExist:
return None
def __unicode__(self):
"""Unicode representation of a credit request."""
return u"{course}, {provider}, {status}".format(
course=self.course.course_key,
provider=self.provider.provider_id, # pylint: disable=no-member
status=self.status,
)
|
georgid/sms-tools
|
refs/heads/georgid-withMelodia
|
lectures/5-Sinusoidal-model/plots-code/spectral-sine-synthesis.py
|
24
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
from scipy.fftpack import fft, ifft, fftshift
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import sineModel as SM
import utilFunctions as UF
Ns = 256
hNs = Ns/2
yw = np.zeros(Ns)
fs = 44100
freqs = np.array([1000.0, 4000.0, 8000.0])
amps = np.array([.6, .4, .6])
phases = ([0.5, 1.2, 2.3])
yploc = Ns*freqs/fs
ypmag = 20*np.log10(amps/2.0)
ypphase = phases
Y = UF.genSpecSines(freqs, ypmag, ypphase, Ns, fs)
mY = 20*np.log10(abs(Y[:hNs]))
pY = np.unwrap(np.angle(Y[:hNs]))
y= fftshift(ifft(Y))*sum(blackmanharris(Ns))
plt.figure(1, figsize=(9, 5))
plt.subplot(3,1,1)
plt.plot(fs*np.arange(Ns/2)/Ns, mY, 'r', lw=1.5)
plt.axis([0, fs/2.0,-100,0])
plt.title("mY, freqs (Hz) = 1000, 4000, 8000; amps = .6, .4, .6")
plt.subplot(3,1,2)
pY[pY==0]= np.nan
plt.plot(fs*np.arange(Ns/2)/Ns, pY, 'c', lw=1.5)
plt.axis([0, fs/2.0,-.01,3.0])
plt.title("pY, phases (radians) = .5, 1.2, 2.3")
plt.subplot(3,1,3)
plt.plot(np.arange(-hNs, hNs), y, 'b', lw=1.5)
plt.axis([-hNs, hNs,min(y),max(y)])
plt.title("y")
plt.tight_layout()
plt.savefig('spectral-sine-synthesis.png')
plt.show()
|
zorroblue/scikit-learn
|
refs/heads/master
|
benchmarks/bench_lasso.py
|
111
|
"""
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import matplotlib.pyplot as plt
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
plt.figure('scikit-learn LASSO benchmark results')
plt.subplot(211)
plt.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
plt.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
plt.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features,
alpha))
plt.legend(loc='upper left')
plt.xlabel('number of samples')
plt.ylabel('Time (s)')
plt.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
plt.subplot(212)
plt.plot(list_n_features, lasso_results, 'b-', label='Lasso')
plt.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
plt.title('%d samples, alpha=%s' % (n_samples, alpha))
plt.legend(loc='upper left')
plt.xlabel('number of features')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
|
lidavidm/mathics-heroku
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/contrib/admin/validation.py
|
108
|
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.forms.models import BaseModelForm, BaseModelFormSet, _get_foreign_key
from django.contrib.admin.util import get_fields_from_path, NotRelationField
"""
Does basic ModelAdmin option validation. Calls the custom validation
classmethod at the end if it is provided in cls. The signature of the
custom validation classmethod should be: def validate(cls, model).
"""
__all__ = ['BaseValidator', 'InlineValidator']
class BaseValidator(object):
def __init__(self):
# Before we can introspect models, they need to be fully loaded so that
# inter-relations are set up correctly. We force that here.
models.get_apps()
def validate(self, cls, model):
for m in dir(self):
if m.startswith('validate_'):
getattr(self, m)(cls, model)
def check_field_spec(self, cls, model, flds, label):
"""
Validate the fields specification in `flds` from a ModelAdmin subclass
`cls` for the `model` model. Use `label` for reporting problems to the user.
The fields specification can be a ``fields`` option or a ``fields``
sub-option from a ``fieldsets`` option component.
"""
for fields in flds:
# The entry in fields might be a tuple. If it is a standalone
# field, make it into a tuple to make processing easier.
if type(fields) != tuple:
fields = (fields,)
for field in fields:
if field in cls.readonly_fields:
# Stuff can be put in fields that isn't actually a
# model field if it's in readonly_fields,
# readonly_fields will handle the validation of such
# things.
continue
try:
f = model._meta.get_field(field)
except models.FieldDoesNotExist:
# If we can't find a field on the model that matches, it could be an
# extra field on the form; nothing to check so move on to the next field.
continue
if isinstance(f, models.ManyToManyField) and not f.rel.through._meta.auto_created:
raise ImproperlyConfigured("'%s.%s' "
"can't include the ManyToManyField field '%s' because "
"'%s' manually specifies a 'through' model." % (
cls.__name__, label, field, field))
def validate_raw_id_fields(self, cls, model):
" Validate that raw_id_fields only contains field names that are listed on the model. "
if hasattr(cls, 'raw_id_fields'):
check_isseq(cls, 'raw_id_fields', cls.raw_id_fields)
for idx, field in enumerate(cls.raw_id_fields):
f = get_field(cls, model, 'raw_id_fields', field)
if not isinstance(f, (models.ForeignKey, models.ManyToManyField)):
raise ImproperlyConfigured("'%s.raw_id_fields[%d]', '%s' must "
"be either a ForeignKey or ManyToManyField."
% (cls.__name__, idx, field))
def validate_fields(self, cls, model):
" Validate that fields only refer to existing fields, doesn't contain duplicates. "
# fields
if cls.fields: # default value is None
check_isseq(cls, 'fields', cls.fields)
self.check_field_spec(cls, model, cls.fields, 'fields')
if cls.fieldsets:
raise ImproperlyConfigured('Both fieldsets and fields are specified in %s.' % cls.__name__)
if len(cls.fields) > len(set(cls.fields)):
raise ImproperlyConfigured('There are duplicate field(s) in %s.fields' % cls.__name__)
def validate_fieldsets(self, cls, model):
" Validate that fieldsets is properly formatted and doesn't contain duplicates. "
from django.contrib.admin.options import flatten_fieldsets
if cls.fieldsets: # default value is None
check_isseq(cls, 'fieldsets', cls.fieldsets)
for idx, fieldset in enumerate(cls.fieldsets):
check_isseq(cls, 'fieldsets[%d]' % idx, fieldset)
if len(fieldset) != 2:
raise ImproperlyConfigured("'%s.fieldsets[%d]' does not "
"have exactly two elements." % (cls.__name__, idx))
check_isdict(cls, 'fieldsets[%d][1]' % idx, fieldset[1])
if 'fields' not in fieldset[1]:
raise ImproperlyConfigured("'fields' key is required in "
"%s.fieldsets[%d][1] field options dict."
% (cls.__name__, idx))
self.check_field_spec(cls, model, fieldset[1]['fields'], "fieldsets[%d][1]['fields']" % idx)
flattened_fieldsets = flatten_fieldsets(cls.fieldsets)
if len(flattened_fieldsets) > len(set(flattened_fieldsets)):
raise ImproperlyConfigured('There are duplicate field(s) in %s.fieldsets' % cls.__name__)
def validate_exclude(self, cls, model):
" Validate that exclude is a sequence without duplicates. "
if cls.exclude: # default value is None
check_isseq(cls, 'exclude', cls.exclude)
if len(cls.exclude) > len(set(cls.exclude)):
raise ImproperlyConfigured('There are duplicate field(s) in %s.exclude' % cls.__name__)
def validate_form(self, cls, model):
" Validate that form subclasses BaseModelForm. "
if hasattr(cls, 'form') and not issubclass(cls.form, BaseModelForm):
raise ImproperlyConfigured("%s.form does not inherit from "
"BaseModelForm." % cls.__name__)
def validate_filter_vertical(self, cls, model):
" Validate that filter_vertical is a sequence of field names. "
if hasattr(cls, 'filter_vertical'):
check_isseq(cls, 'filter_vertical', cls.filter_vertical)
for idx, field in enumerate(cls.filter_vertical):
f = get_field(cls, model, 'filter_vertical', field)
if not isinstance(f, models.ManyToManyField):
raise ImproperlyConfigured("'%s.filter_vertical[%d]' must be "
"a ManyToManyField." % (cls.__name__, idx))
def validate_filter_horizontal(self, cls, model):
" Validate that filter_horizontal is a sequence of field names. "
if hasattr(cls, 'filter_horizontal'):
check_isseq(cls, 'filter_horizontal', cls.filter_horizontal)
for idx, field in enumerate(cls.filter_horizontal):
f = get_field(cls, model, 'filter_horizontal', field)
if not isinstance(f, models.ManyToManyField):
raise ImproperlyConfigured("'%s.filter_horizontal[%d]' must be "
"a ManyToManyField." % (cls.__name__, idx))
def validate_radio_fields(self, cls, model):
" Validate that radio_fields is a dictionary of choice or foreign key fields. "
from django.contrib.admin.options import HORIZONTAL, VERTICAL
if hasattr(cls, 'radio_fields'):
check_isdict(cls, 'radio_fields', cls.radio_fields)
for field, val in cls.radio_fields.items():
f = get_field(cls, model, 'radio_fields', field)
if not (isinstance(f, models.ForeignKey) or f.choices):
raise ImproperlyConfigured("'%s.radio_fields['%s']' "
"is neither an instance of ForeignKey nor does "
"have choices set." % (cls.__name__, field))
if not val in (HORIZONTAL, VERTICAL):
raise ImproperlyConfigured("'%s.radio_fields['%s']' "
"is neither admin.HORIZONTAL nor admin.VERTICAL."
% (cls.__name__, field))
def validate_prepopulated_fields(self, cls, model):
" Validate that prepopulated_fields if a dictionary containing allowed field types. "
# prepopulated_fields
if hasattr(cls, 'prepopulated_fields'):
check_isdict(cls, 'prepopulated_fields', cls.prepopulated_fields)
for field, val in cls.prepopulated_fields.items():
f = get_field(cls, model, 'prepopulated_fields', field)
if isinstance(f, (models.DateTimeField, models.ForeignKey,
models.ManyToManyField)):
raise ImproperlyConfigured("'%s.prepopulated_fields['%s']' "
"is either a DateTimeField, ForeignKey or "
"ManyToManyField. This isn't allowed."
% (cls.__name__, field))
check_isseq(cls, "prepopulated_fields['%s']" % field, val)
for idx, f in enumerate(val):
get_field(cls, model, "prepopulated_fields['%s'][%d]" % (field, idx), f)
def validate_ordering(self, cls, model):
" Validate that ordering refers to existing fields or is random. "
# ordering = None
if cls.ordering:
check_isseq(cls, 'ordering', cls.ordering)
for idx, field in enumerate(cls.ordering):
if field == '?' and len(cls.ordering) != 1:
raise ImproperlyConfigured("'%s.ordering' has the random "
"ordering marker '?', but contains other fields as "
"well. Please either remove '?' or the other fields."
% cls.__name__)
if field == '?':
continue
if field.startswith('-'):
field = field[1:]
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '__' in field:
continue
get_field(cls, model, 'ordering[%d]' % idx, field)
def validate_readonly_fields(self, cls, model):
" Validate that readonly_fields refers to proper attribute or field. "
if hasattr(cls, "readonly_fields"):
check_isseq(cls, "readonly_fields", cls.readonly_fields)
for idx, field in enumerate(cls.readonly_fields):
if not callable(field):
if not hasattr(cls, field):
if not hasattr(model, field):
try:
model._meta.get_field(field)
except models.FieldDoesNotExist:
raise ImproperlyConfigured("%s.readonly_fields[%d], %r is not a callable or an attribute of %r or found in the model %r."
% (cls.__name__, idx, field, cls.__name__, model._meta.object_name))
class ModelAdminValidator(BaseValidator):
def validate_save_as(self, cls, model):
" Validate save_as is a boolean. "
check_type(cls, 'save_as', bool)
def validate_save_on_top(self, cls, model):
" Validate save_on_top is a boolean. "
check_type(cls, 'save_on_top', bool)
def validate_inlines(self, cls, model):
" Validate inline model admin classes. "
from django.contrib.admin.options import BaseModelAdmin
if hasattr(cls, 'inlines'):
check_isseq(cls, 'inlines', cls.inlines)
for idx, inline in enumerate(cls.inlines):
if not issubclass(inline, BaseModelAdmin):
raise ImproperlyConfigured("'%s.inlines[%d]' does not inherit "
"from BaseModelAdmin." % (cls.__name__, idx))
if not inline.model:
raise ImproperlyConfigured("'model' is a required attribute "
"of '%s.inlines[%d]'." % (cls.__name__, idx))
if not issubclass(inline.model, models.Model):
raise ImproperlyConfigured("'%s.inlines[%d].model' does not "
"inherit from models.Model." % (cls.__name__, idx))
inline.validate(inline.model)
self.check_inline(inline, model)
def check_inline(self, cls, parent_model):
" Validate inline class's fk field is not excluded. "
fk = _get_foreign_key(parent_model, cls.model, fk_name=cls.fk_name, can_fail=True)
if hasattr(cls, 'exclude') and cls.exclude:
if fk and fk.name in cls.exclude:
raise ImproperlyConfigured("%s cannot exclude the field "
"'%s' - this is the foreign key to the parent model "
"%s.%s." % (cls.__name__, fk.name, parent_model._meta.app_label, parent_model.__name__))
def validate_list_display(self, cls, model):
" Validate that list_display only contains fields or usable attributes. "
if hasattr(cls, 'list_display'):
check_isseq(cls, 'list_display', cls.list_display)
for idx, field in enumerate(cls.list_display):
if not callable(field):
if not hasattr(cls, field):
if not hasattr(model, field):
try:
model._meta.get_field(field)
except models.FieldDoesNotExist:
raise ImproperlyConfigured("%s.list_display[%d], %r is not a callable or an attribute of %r or found in the model %r."
% (cls.__name__, idx, field, cls.__name__, model._meta.object_name))
else:
# getattr(model, field) could be an X_RelatedObjectsDescriptor
f = fetch_attr(cls, model, "list_display[%d]" % idx, field)
if isinstance(f, models.ManyToManyField):
raise ImproperlyConfigured("'%s.list_display[%d]', '%s' is a ManyToManyField which is not supported."
% (cls.__name__, idx, field))
def validate_list_display_links(self, cls, model):
" Validate that list_display_links is a unique subset of list_display. "
if hasattr(cls, 'list_display_links'):
check_isseq(cls, 'list_display_links', cls.list_display_links)
for idx, field in enumerate(cls.list_display_links):
if field not in cls.list_display:
raise ImproperlyConfigured("'%s.list_display_links[%d]' "
"refers to '%s' which is not defined in 'list_display'."
% (cls.__name__, idx, field))
def validate_list_filter(self, cls, model):
"""
Validate that list_filter is a sequence of one of three options:
1: 'field' - a basic field filter, possibly w/ relationships (eg, 'field__rel')
2: ('field', SomeFieldListFilter) - a field-based list filter class
3: SomeListFilter - a non-field list filter class
"""
from django.contrib.admin import ListFilter, FieldListFilter
if hasattr(cls, 'list_filter'):
check_isseq(cls, 'list_filter', cls.list_filter)
for idx, item in enumerate(cls.list_filter):
if callable(item) and not isinstance(item, models.Field):
# If item is option 3, it should be a ListFilter...
if not issubclass(item, ListFilter):
raise ImproperlyConfigured("'%s.list_filter[%d]' is '%s'"
" which is not a descendant of ListFilter."
% (cls.__name__, idx, item.__name__))
# ... but not a FieldListFilter.
if issubclass(item, FieldListFilter):
raise ImproperlyConfigured("'%s.list_filter[%d]' is '%s'"
" which is of type FieldListFilter but is not"
" associated with a field name."
% (cls.__name__, idx, item.__name__))
else:
if isinstance(item, (tuple, list)):
# item is option #2
field, list_filter_class = item
if not issubclass(list_filter_class, FieldListFilter):
raise ImproperlyConfigured("'%s.list_filter[%d][1]'"
" is '%s' which is not of type FieldListFilter."
% (cls.__name__, idx, list_filter_class.__name__))
else:
# item is option #1
field = item
# Validate the field string
try:
get_fields_from_path(model, field)
except (NotRelationField, FieldDoesNotExist):
raise ImproperlyConfigured("'%s.list_filter[%d]' refers to '%s'"
" which does not refer to a Field."
% (cls.__name__, idx, field))
def validate_list_select_related(self, cls, model):
" Validate that list_select_related is a boolean, a list or a tuple. "
list_select_related = getattr(cls, 'list_select_related', None)
if list_select_related:
types = (bool, tuple, list)
if not isinstance(list_select_related, types):
raise ImproperlyConfigured("'%s.list_select_related' should be "
"either a bool, a tuple or a list" %
cls.__name__)
def validate_list_per_page(self, cls, model):
" Validate that list_per_page is an integer. "
check_type(cls, 'list_per_page', int)
def validate_list_max_show_all(self, cls, model):
" Validate that list_max_show_all is an integer. "
check_type(cls, 'list_max_show_all', int)
def validate_list_editable(self, cls, model):
"""
Validate that list_editable is a sequence of editable fields from
list_display without first element.
"""
if hasattr(cls, 'list_editable') and cls.list_editable:
check_isseq(cls, 'list_editable', cls.list_editable)
for idx, field_name in enumerate(cls.list_editable):
try:
field = model._meta.get_field_by_name(field_name)[0]
except models.FieldDoesNotExist:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to a "
"field, '%s', not defined on %s.%s."
% (cls.__name__, idx, field_name, model._meta.app_label, model.__name__))
if field_name not in cls.list_display:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to "
"'%s' which is not defined in 'list_display'."
% (cls.__name__, idx, field_name))
if field_name in cls.list_display_links:
raise ImproperlyConfigured("'%s' cannot be in both '%s.list_editable'"
" and '%s.list_display_links'"
% (field_name, cls.__name__, cls.__name__))
if not cls.list_display_links and cls.list_display[0] in cls.list_editable:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to"
" the first field in list_display, '%s', which can't be"
" used unless list_display_links is set."
% (cls.__name__, idx, cls.list_display[0]))
if not field.editable:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to a "
"field, '%s', which isn't editable through the admin."
% (cls.__name__, idx, field_name))
def validate_search_fields(self, cls, model):
" Validate search_fields is a sequence. "
if hasattr(cls, 'search_fields'):
check_isseq(cls, 'search_fields', cls.search_fields)
def validate_date_hierarchy(self, cls, model):
" Validate that date_hierarchy refers to DateField or DateTimeField. "
if cls.date_hierarchy:
f = get_field(cls, model, 'date_hierarchy', cls.date_hierarchy)
if not isinstance(f, (models.DateField, models.DateTimeField)):
raise ImproperlyConfigured("'%s.date_hierarchy is "
"neither an instance of DateField nor DateTimeField."
% cls.__name__)
class InlineValidator(BaseValidator):
def validate_fk_name(self, cls, model):
" Validate that fk_name refers to a ForeignKey. "
if cls.fk_name: # default value is None
f = get_field(cls, model, 'fk_name', cls.fk_name)
if not isinstance(f, models.ForeignKey):
raise ImproperlyConfigured("'%s.fk_name is not an instance of "
"models.ForeignKey." % cls.__name__)
def validate_extra(self, cls, model):
" Validate that extra is an integer. "
check_type(cls, 'extra', int)
def validate_max_num(self, cls, model):
" Validate that max_num is an integer. "
check_type(cls, 'max_num', int)
def validate_formset(self, cls, model):
" Validate formset is a subclass of BaseModelFormSet. "
if hasattr(cls, 'formset') and not issubclass(cls.formset, BaseModelFormSet):
raise ImproperlyConfigured("'%s.formset' does not inherit from "
"BaseModelFormSet." % cls.__name__)
def check_type(cls, attr, type_):
if getattr(cls, attr, None) is not None and not isinstance(getattr(cls, attr), type_):
raise ImproperlyConfigured("'%s.%s' should be a %s."
% (cls.__name__, attr, type_.__name__ ))
def check_isseq(cls, label, obj):
if not isinstance(obj, (list, tuple)):
raise ImproperlyConfigured("'%s.%s' must be a list or tuple." % (cls.__name__, label))
def check_isdict(cls, label, obj):
if not isinstance(obj, dict):
raise ImproperlyConfigured("'%s.%s' must be a dictionary." % (cls.__name__, label))
def get_field(cls, model, label, field):
try:
return model._meta.get_field(field)
except models.FieldDoesNotExist:
raise ImproperlyConfigured("'%s.%s' refers to field '%s' that is missing from model '%s.%s'."
% (cls.__name__, label, field, model._meta.app_label, model.__name__))
def fetch_attr(cls, model, label, field):
try:
return model._meta.get_field(field)
except models.FieldDoesNotExist:
pass
try:
return getattr(model, field)
except AttributeError:
raise ImproperlyConfigured("'%s.%s' refers to '%s' that is neither a field, method or property of model '%s.%s'."
% (cls.__name__, label, field, model._meta.app_label, model.__name__))
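# Usage sketch: how these validators are typically driven. The admin and model
# class names are examples; `validate(cls, model)` raises ImproperlyConfigured
# when an option is misconfigured.
#
#   validator = ModelAdminValidator()
#   validator.validate(BookAdmin, Book)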
|
patdaburu/mothergeo-py
|
refs/heads/master
|
mothergeo/db/postgis/__init__.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. currentmodule:: __init__.py
.. moduleauthor:: Pat Daburu <pat@daburu.net>
Provide a brief description of the module.
"""
|
apyrgio/synnefo
|
refs/heads/release-0.16
|
snf-django-lib/snf_django/utils/routers.py
|
8
|
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Router for the Astakos/Cyclades app. It is used to specify which database will
be used for each model.
"""
from snf_django.utils.db import select_db
class SynnefoRouter(object):
"""Router for Astakos/Cyclades models."""
def db_for_read(self, model, **hints):
"""Select db to read."""
app = model._meta.app_label
return select_db(app)
def db_for_write(self, model, **hints):
"""Select db to write."""
app = model._meta.app_label
return select_db(app)
    # The rest of the methods are omitted since relations and syncing should
# not affect the router.
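# Usage sketch: enabling this router from a Django settings module. The dotted
# path is inferred from this file's location and may differ per deployment.
#
#   DATABASE_ROUTERS = ['snf_django.utils.routers.SynnefoRouter']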
|
cidadania/e-cidadania
|
refs/heads/master
|
src/core/spaces/url_names.py
|
2
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Clione Software
# Copyright (c) 2010-2013 Cidadania S. Coop. Galega
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module to store space related url names.
"""
# Spaces
SPACE_ADD = 'create-space'
SPACE_EDIT = 'edit-space'
SPACE_DELETE = 'delete-space'
SPACE_INDEX = 'space-index'
SPACE_FEED = 'space-feed'
SPACE_LIST = 'list-spaces'
GOTO_SPACE = 'goto-space'
EDIT_ROLES = 'edit-roles'
SEARCH_USER = 'search-user'
# News
# Notes: SPACE_NEWS is kept only for backwards compatibility; it should be
# removed once every reverse is cleaned up.
SPACE_NEWS = 'list-space-news'
NEWS_ARCHIVE = 'post-archive'
NEWS_MONTH = 'post-archive-month'
NEWS_YEAR = 'post-archive-year'
# Documents
DOCUMENT_ADD = 'add-document'
DOCUMENT_EDIT = 'edit-document'
DOCUMENT_DELETE = 'delete-document'
DOCUMENT_LIST = 'list-documents'
# Events
EVENT_ADD = 'add-event'
EVENT_EDIT = 'edit-event'
EVENT_DELETE = 'delete-event'
EVENT_LIST = 'list-events'
EVENT_VIEW = 'view-event'
# Intents
INTENT_ADD = 'add-intent'
INTENT_VALIDATE = 'validate-intent'
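# Usage sketch: these constants are intended as Django URL names, e.g. with
# reverse(). The keyword arguments below are assumptions about the URLconf.
#
#   from django.core.urlresolvers import reverse
#   url = reverse(SPACE_INDEX, kwargs={'space_url': 'my-space'})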
|
wilblack/AutobahnPython
|
refs/heads/master
|
examples/twisted/wamp/basic/rpc/options/frontend.py
|
8
|
###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.wamp.types import CallOptions, RegisterOptions, PublishOptions
from autobahn.twisted.wamp import ApplicationSession
class Component(ApplicationSession):
"""
An application component calling the different backend procedures.
"""
@inlineCallbacks
def onJoin(self, details):
def on_event(val):
print("Someone requested to square non-positive: {}".format(val))
yield self.subscribe(on_event, 'com.myapp.square_on_nonpositive')
for val in [2, 0, -2]:
res = yield self.call('com.myapp.square', val, options = CallOptions(discloseMe = True))
print("Squared {} = {}".format(val, res))
self.leave()
def onDisconnect(self):
reactor.stop()
|