text stringlengths 4 1.02M | meta dict |
|---|---|
import json
import os
from core.AbstractConfig import AbstractConfig
__author__ = 'sweet'
# rootDir = os.path.join(os.path.split(os.path.realpath(__file__))[0], '..')
# config_path = os.path.join(rootDir, 'config', 'autosign.conf')
class JsonConfig(AbstractConfig):
    """Configuration source backed by a single JSON file.

    The whole document is parsed once at construction time; lookups are
    then served from the in-memory dict, so an invalid or missing file
    fails fast in ``__init__``.
    """

    def __init__(self, config_path):
        """Load and parse the JSON file at *config_path*.

        Raises OSError if the file cannot be opened and
        json.JSONDecodeError if it is not valid JSON.
        """
        self.path = config_path
        # json.load streams straight from the file object; explicit
        # encoding avoids depending on the platform default.
        with open(config_path, encoding="utf-8") as fr:
            self.conf = json.load(fr)

    def get(self, section, option):
        """Return conf[section][option]; raises KeyError when absent."""
        return self.conf[section][option]

    def get_dict(self):
        """Return the underlying configuration dict (not a copy)."""
        return self.conf
if __name__ == "__main__":
    # Ad-hoc smoke checks for path expansion and the project root path.
    print(os.path.expanduser('~/.myapp.cfg'))
    print(os.path.expanduser('.myapp.cfg'))
    module_dir = os.path.split(os.path.realpath(__file__))[0]
    rootDir = os.path.join(module_dir, '..')
    print(rootDir)
| {
"content_hash": "4350c480ad76a9679d4b0cc745227f5a",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 78,
"avg_line_length": 26.28205128205128,
"alnum_prop": 0.5980487804878049,
"repo_name": "john123951/gold.icbc.watcher",
"id": "9e4ab0303b352d1284a9d16ab8d7a40a69448c3f",
"size": "1025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/util/JsonConfig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8089"
}
],
"symlink_target": ""
} |
"""Support for Z-Wave."""
import asyncio
import copy
from importlib import import_module
import logging
from pprint import pprint
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import callback, CoreState
from homeassistant.helpers import discovery
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.entity_component import DEFAULT_SCAN_INTERVAL
from homeassistant.helpers.entity_platform import EntityPlatform
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.const import (
ATTR_ENTITY_ID,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.helpers.entity_values import EntityValues
from homeassistant.helpers.event import async_track_time_change
from homeassistant.util import convert
import homeassistant.util.dt as dt_util
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from . import const
from . import config_flow # noqa pylint: disable=unused-import
from . import websocket_api as wsapi
from .const import (
CONF_AUTOHEAL,
CONF_DEBUG,
CONF_POLLING_INTERVAL,
CONF_USB_STICK_PATH,
CONF_CONFIG_PATH,
CONF_NETWORK_KEY,
DEFAULT_CONF_AUTOHEAL,
DEFAULT_CONF_USB_STICK_PATH,
DEFAULT_POLLING_INTERVAL,
DEFAULT_DEBUG,
DOMAIN,
DATA_DEVICES,
DATA_NETWORK,
DATA_ENTITY_VALUES,
DATA_ZWAVE_CONFIG,
)
from .node_entity import ZWaveBaseEntity, ZWaveNodeEntity
from . import workaround
from .discovery_schemas import DISCOVERY_SCHEMAS
from .util import (
check_node_schema,
check_value_schema,
node_name,
check_has_unique_id,
is_node_parsed,
node_device_id_and_name,
)
_LOGGER = logging.getLogger(__name__)
# Attribute / option keys used throughout this integration.
CLASS_ID = "class_id"
ATTR_POWER = "power_consumption"
# Per-device configuration option names (see DEVICE_CONFIG_SCHEMA_ENTRY).
CONF_POLLING_INTENSITY = "polling_intensity"
CONF_IGNORED = "ignored"
CONF_INVERT_OPENCLOSE_BUTTONS = "invert_openclose_buttons"
CONF_INVERT_PERCENT = "invert_percent"
CONF_REFRESH_VALUE = "refresh_value"
CONF_REFRESH_DELAY = "delay"
# The three device-config override sections of the YAML config.
CONF_DEVICE_CONFIG = "device_config"
CONF_DEVICE_CONFIG_GLOB = "device_config_glob"
CONF_DEVICE_CONFIG_DOMAIN = "device_config_domain"
# Defaults for the per-device options above.
DEFAULT_CONF_IGNORED = False
DEFAULT_CONF_INVERT_OPENCLOSE_BUTTONS = False
DEFAULT_CONF_INVERT_PERCENT = False
DEFAULT_CONF_REFRESH_VALUE = False
DEFAULT_CONF_REFRESH_DELAY = 5
# Entity platforms this integration forwards its config entry to.
SUPPORTED_PLATFORMS = [
    "binary_sensor",
    "climate",
    "cover",
    "fan",
    "lock",
    "light",
    "sensor",
    "switch",
]
# Validation schemas for the zwave.* service calls registered below.
# Rename a node; optionally regenerate entity ids derived from the name.
RENAME_NODE_SCHEMA = vol.Schema(
    {
        vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
        vol.Required(const.ATTR_NAME): cv.string,
        vol.Optional(const.ATTR_UPDATE_IDS, default=False): cv.boolean,
    }
)
# Rename a single value (label) on a node.
RENAME_VALUE_SCHEMA = vol.Schema(
    {
        vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
        vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int),
        vol.Required(const.ATTR_NAME): cv.string,
        vol.Optional(const.ATTR_UPDATE_IDS, default=False): cv.boolean,
    }
)
# Write a configuration parameter; value may be numeric or a list label.
SET_CONFIG_PARAMETER_SCHEMA = vol.Schema(
    {
        vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
        vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Coerce(int),
        vol.Required(const.ATTR_CONFIG_VALUE): vol.Any(vol.Coerce(int), cv.string),
        vol.Optional(const.ATTR_CONFIG_SIZE, default=2): vol.Coerce(int),
    }
)
# Directly set an arbitrary value on a node.
SET_NODE_VALUE_SCHEMA = vol.Schema(
    {
        vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
        vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int),
        vol.Required(const.ATTR_CONFIG_VALUE): vol.Coerce(int),
    }
)
# Ask a node to re-report a single value.
REFRESH_NODE_VALUE_SCHEMA = vol.Schema(
    {
        vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
        vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int),
    }
)
# Change the polling intensity of one value (0 disables polling).
SET_POLL_INTENSITY_SCHEMA = vol.Schema(
    {
        vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
        vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int),
        vol.Required(const.ATTR_POLL_INTENSITY): vol.Coerce(int),
    }
)
# Log the current value of one configuration parameter.
PRINT_CONFIG_PARAMETER_SCHEMA = vol.Schema(
    {
        vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
        vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Coerce(int),
    }
)
# Generic "operate on one node" schema shared by several services.
NODE_SERVICE_SCHEMA = vol.Schema({vol.Required(const.ATTR_NODE_ID): vol.Coerce(int)})
REFRESH_ENTITY_SCHEMA = vol.Schema({vol.Required(ATTR_ENTITY_ID): cv.entity_id})
RESET_NODE_METERS_SCHEMA = vol.Schema(
    {
        vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
        vol.Optional(const.ATTR_INSTANCE, default=1): vol.Coerce(int),
    }
)
# Add or remove a group association between two nodes.
CHANGE_ASSOCIATION_SCHEMA = vol.Schema(
    {
        vol.Required(const.ATTR_ASSOCIATION): cv.string,
        vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
        vol.Required(const.ATTR_TARGET_NODE_ID): vol.Coerce(int),
        vol.Required(const.ATTR_GROUP): vol.Coerce(int),
        vol.Optional(const.ATTR_INSTANCE, default=0x00): vol.Coerce(int),
    }
)
# Wake-up interval must be a non-negative integer.
SET_WAKEUP_SCHEMA = vol.Schema(
    {
        vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
        vol.Required(const.ATTR_CONFIG_VALUE): vol.All(
            vol.Coerce(int), cv.positive_int
        ),
    }
)
HEAL_NODE_SCHEMA = vol.Schema(
    {
        vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
        vol.Optional(const.ATTR_RETURN_ROUTES, default=False): cv.boolean,
    }
)
TEST_NODE_SCHEMA = vol.Schema(
    {
        vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
        vol.Optional(const.ATTR_MESSAGES, default=1): cv.positive_int,
    }
)
# Shape of one per-device override entry (used by all three sections).
DEVICE_CONFIG_SCHEMA_ENTRY = vol.Schema(
    {
        vol.Optional(CONF_POLLING_INTENSITY): cv.positive_int,
        vol.Optional(CONF_IGNORED, default=DEFAULT_CONF_IGNORED): cv.boolean,
        vol.Optional(
            CONF_INVERT_OPENCLOSE_BUTTONS, default=DEFAULT_CONF_INVERT_OPENCLOSE_BUTTONS
        ): cv.boolean,
        vol.Optional(
            CONF_INVERT_PERCENT, default=DEFAULT_CONF_INVERT_PERCENT
        ): cv.boolean,
        vol.Optional(
            CONF_REFRESH_VALUE, default=DEFAULT_CONF_REFRESH_VALUE
        ): cv.boolean,
        vol.Optional(
            CONF_REFRESH_DELAY, default=DEFAULT_CONF_REFRESH_DELAY
        ): cv.positive_int,
    }
)
# Dispatcher signal name template; formatted with an entity_id.
SIGNAL_REFRESH_ENTITY_FORMAT = "zwave_refresh_entity_{}"
# Top-level YAML schema for the "zwave:" section.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_AUTOHEAL, default=DEFAULT_CONF_AUTOHEAL): cv.boolean,
                vol.Optional(CONF_CONFIG_PATH): cv.string,
                # Network key: 16 comma-separated "0xNN" byte literals.
                vol.Optional(CONF_NETWORK_KEY): vol.All(
                    cv.string, vol.Match(r"(0x\w\w,\s?){15}0x\w\w")
                ),
                vol.Optional(CONF_DEVICE_CONFIG, default={}): vol.Schema(
                    {cv.entity_id: DEVICE_CONFIG_SCHEMA_ENTRY}
                ),
                vol.Optional(CONF_DEVICE_CONFIG_GLOB, default={}): vol.Schema(
                    {cv.string: DEVICE_CONFIG_SCHEMA_ENTRY}
                ),
                vol.Optional(CONF_DEVICE_CONFIG_DOMAIN, default={}): vol.Schema(
                    {cv.string: DEVICE_CONFIG_SCHEMA_ENTRY}
                ),
                vol.Optional(CONF_DEBUG, default=DEFAULT_DEBUG): cv.boolean,
                vol.Optional(
                    CONF_POLLING_INTERVAL, default=DEFAULT_POLLING_INTERVAL
                ): cv.positive_int,
                vol.Optional(CONF_USB_STICK_PATH): cv.string,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
def _obj_to_dict(obj):
    """Convert an object into a hash for debug."""
    result = {}
    for key in dir(obj):
        if key.startswith("_"):
            continue
        attr = getattr(obj, key)
        # Only plain (non-callable) attributes are interesting for debug.
        if not callable(attr):
            result[key] = attr
    return result
def _value_name(value):
    """Return the display name of *value*: node name plus value label."""
    # f-string for consistency with the rest of this module's formatting;
    # strip() covers an empty node name or label.
    return f"{node_name(value.node)} {value.label}".strip()
def nice_print_node(node):
    """Log a nicely formatted dump of a node (debug method)."""
    dumped = _obj_to_dict(node)
    # Expand each contained value into its own attribute dict as well.
    dumped["values"] = {
        vid: _obj_to_dict(val) for vid, val in node.values.items()
    }
    _LOGGER.info("FOUND NODE %s \n%s", node.product_name, dumped)
def get_config_value(node, value_index, tries=5):
    """Return the current configuration value for a specific index.

    The node's value dict can be mutated concurrently while we scan it;
    a RuntimeError from the iteration is retried up to ``tries`` extra
    times before giving up and returning None.
    """
    while True:
        try:
            for value in node.values.values():
                is_config = (
                    value.command_class == const.COMMAND_CLASS_CONFIGURATION
                )
                if is_config and value.index == value_index:
                    return value.data
            # Scanned everything without a match.
            return None
        except RuntimeError:
            if tries <= 0:
                return None
            tries -= 1
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Z-Wave platform (generic part)."""
    # Only discovery-driven setup is supported, and only once the
    # network object exists.
    if discovery_info is None or DATA_NETWORK not in hass.data:
        return False
    device_key = discovery_info[const.DISCOVERY_DEVICE]
    device = hass.data[DATA_DEVICES].get(device_key)
    if device is None:
        return False
    async_add_entities([device])
    return True
async def async_setup(hass, config):
    """Set up Z-Wave components from YAML configuration."""
    if DOMAIN not in config:
        return True
    conf = config[DOMAIN]
    # Stash the YAML config so async_setup_entry can merge it later.
    hass.data[DATA_ZWAVE_CONFIG] = conf
    if not hass.config_entries.async_entries(DOMAIN):
        # No config entry yet: import the YAML settings into one.
        import_data = {
            CONF_USB_STICK_PATH: conf.get(
                CONF_USB_STICK_PATH, DEFAULT_CONF_USB_STICK_PATH
            ),
            CONF_NETWORK_KEY: conf.get(CONF_NETWORK_KEY),
        }
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN,
                context={"source": config_entries.SOURCE_IMPORT},
                data=import_data,
            )
        )
    return True
async def async_setup_entry(hass, config_entry):
    """Set up Z-Wave from a config entry.

    Will automatically load components to support devices found on the network.
    """
    # Imported here so Home Assistant can start without the optional
    # openzwave/pydispatch packages installed.
    from pydispatch import dispatcher
    # pylint: disable=import-error
    from openzwave.option import ZWaveOption
    from openzwave.network import ZWaveNetwork
    from openzwave.group import ZWaveGroup
    # Merge config entry and yaml config; YAML values win on key clashes
    # because the dict unpacked last takes precedence.
    config = config_entry.data
    if DATA_ZWAVE_CONFIG in hass.data:
        config = {**config, **hass.data[DATA_ZWAVE_CONFIG]}
    # Update hass.data with merged config so we can access it elsewhere
    hass.data[DATA_ZWAVE_CONFIG] = config
    # Load configuration
    use_debug = config.get(CONF_DEBUG, DEFAULT_DEBUG)
    autoheal = config.get(CONF_AUTOHEAL, DEFAULT_CONF_AUTOHEAL)
    device_config = EntityValues(
        config.get(CONF_DEVICE_CONFIG),
        config.get(CONF_DEVICE_CONFIG_DOMAIN),
        config.get(CONF_DEVICE_CONFIG_GLOB),
    )
    usb_path = config[CONF_USB_STICK_PATH]
    _LOGGER.info("Z-Wave USB path is %s", usb_path)
    # Setup options
    options = ZWaveOption(
        usb_path,
        user_path=hass.config.config_dir,
        config_path=config.get(CONF_CONFIG_PATH),
    )
    options.set_console_output(use_debug)
    if config.get(CONF_NETWORK_KEY):
        options.addOption("NetworkKey", config[CONF_NETWORK_KEY])
    # lock() is blocking, so run it in the executor pool.
    await hass.async_add_executor_job(options.lock)
    network = hass.data[DATA_NETWORK] = ZWaveNetwork(options, autostart=False)
    hass.data[DATA_DEVICES] = {}
    hass.data[DATA_ENTITY_VALUES] = []
    registry = await async_get_registry(hass)
    wsapi.async_load_websocket_api(hass)
    if use_debug:  # pragma: no cover
        def log_all(signal, value=None):
            """Log all the signals."""
            print("")
            print("SIGNAL *****", signal)
            # Only these signal types carry a value worth dumping.
            if value and signal in (
                ZWaveNetwork.SIGNAL_VALUE_CHANGED,
                ZWaveNetwork.SIGNAL_VALUE_ADDED,
                ZWaveNetwork.SIGNAL_SCENE_EVENT,
                ZWaveNetwork.SIGNAL_NODE_EVENT,
                ZWaveNetwork.SIGNAL_AWAKE_NODES_QUERIED,
                ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED,
                ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD,
            ):
                pprint(_obj_to_dict(value))
            print("")
        # Connected without a signal filter, so it sees every signal.
        dispatcher.connect(log_all, weak=False)
def value_added(node, value):
    """Handle new added value to a node on the network."""
    # Check if this value should be tracked by an existing entity
    for values in hass.data[DATA_ENTITY_VALUES]:
        values.check_value(value)
    # Otherwise see whether it is the primary value of a discovery
    # schema, in which case a new entity-values tracker is created.
    for schema in DISCOVERY_SCHEMAS:
        if not check_node_schema(node, schema):
            continue
        if not check_value_schema(
            value, schema[const.DISC_VALUES][const.DISC_PRIMARY]
        ):
            continue
        values = ZWaveDeviceEntityValues(
            hass, schema, value, config, device_config, registry
        )
        # We create a new list and update the reference here so that
        # the list can be safely iterated over in the main thread
        new_values = hass.data[DATA_ENTITY_VALUES] + [values]
        hass.data[DATA_ENTITY_VALUES] = new_values
# A dedicated entity platform for the node entities themselves (the
# value-based entities go through the regular component platforms).
platform = EntityPlatform(
    hass=hass,
    logger=_LOGGER,
    domain=DOMAIN,
    platform_name=DOMAIN,
    platform=None,
    scan_interval=DEFAULT_SCAN_INTERVAL,
    entity_namespace=None,
    async_entities_added_callback=lambda: None,
)
platform.config_entry = config_entry
def node_added(node):
    """Handle a new node on the network."""
    entity = ZWaveNodeEntity(node, network)
    async def _add_node_to_component():
        # Guard against adding the same node twice (ready/timeout race).
        if hass.data[DATA_DEVICES].get(entity.unique_id):
            return
        name = node_name(node)
        generated_id = generate_entity_id(DOMAIN + ".{}", name, [])
        node_config = device_config.get(generated_id)
        if node_config.get(CONF_IGNORED):
            _LOGGER.info(
                "Ignoring node entity %s due to device settings", generated_id
            )
            return
        hass.data[DATA_DEVICES][entity.unique_id] = entity
        await platform.async_add_entities([entity])
    if entity.unique_id:
        hass.async_add_job(_add_node_to_component())
        return
    @callback
    def _on_ready(sec):
        _LOGGER.info("Z-Wave node %d ready after %d seconds", entity.node_id, sec)
        hass.async_add_job(_add_node_to_component)
    @callback
    def _on_timeout(sec):
        _LOGGER.warning(
            "Z-Wave node %d not ready after %d seconds, " "continuing anyway",
            entity.node_id,
            sec,
        )
        hass.async_add_job(_add_node_to_component)
    # No unique id yet: poll via check_has_unique_id until the node has
    # been queried enough to produce one (or the wait times out).
    hass.add_job(check_has_unique_id, entity, _on_ready, _on_timeout)
def node_removed(node):
    """Handle a node being removed: drop its value entities, then itself."""
    node_id = node.node_id
    node_key = f"node-{node_id}"
    _LOGGER.info("Node Removed: %s", hass.data[DATA_DEVICES][node_key])
    # Value-entity keys are "<node_id>-<value_id>"; snapshot with list()
    # because entries are deleted while iterating.
    for key in list(hass.data[DATA_DEVICES]):
        if not key.startswith(f"{node_id}-"):
            continue
        entity = hass.data[DATA_DEVICES][key]
        _LOGGER.info(
            "Removing Entity - value: %s - entity_id: %s", key, entity.entity_id
        )
        hass.add_job(entity.node_removed())
        del hass.data[DATA_DEVICES][key]
    # Finally remove the node entity itself.
    entity = hass.data[DATA_DEVICES][node_key]
    hass.add_job(entity.node_removed())
    del hass.data[DATA_DEVICES][node_key]
def network_ready():
    """Announce that every awake node has been queried."""
    message = (
        "Z-Wave network is ready for use. All awake nodes "
        "have been queried. Sleeping nodes will be "
        "queried when they awake."
    )
    _LOGGER.info(message)
    hass.bus.fire(const.EVENT_NETWORK_READY)
def network_complete():
    """Announce that every node on the network has been queried."""
    _LOGGER.info(
        "Z-Wave network is complete. All nodes on the network have been queried"
    )
    hass.bus.fire(const.EVENT_NETWORK_COMPLETE)
def network_complete_some_dead():
    """Announce query completion with some nodes marked dead."""
    message = (
        "Z-Wave network is complete. All nodes on the network "
        "have been queried, but some nodes are marked dead"
    )
    _LOGGER.info(message)
    hass.bus.fire(const.EVENT_NETWORK_COMPLETE_SOME_DEAD)
# Wire the openzwave signals to the handlers above.  weak=False keeps
# strong references to these locally defined handler functions.
dispatcher.connect(value_added, ZWaveNetwork.SIGNAL_VALUE_ADDED, weak=False)
dispatcher.connect(node_added, ZWaveNetwork.SIGNAL_NODE_ADDED, weak=False)
dispatcher.connect(node_removed, ZWaveNetwork.SIGNAL_NODE_REMOVED, weak=False)
dispatcher.connect(
    network_ready, ZWaveNetwork.SIGNAL_AWAKE_NODES_QUERIED, weak=False
)
dispatcher.connect(
    network_complete, ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED, weak=False
)
dispatcher.connect(
    network_complete_some_dead,
    ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD,
    weak=False,
)
def add_node(service):
    """Put the controller into inclusion mode."""
    _LOGGER.info("Z-Wave add_node have been initialized")
    controller = network.controller
    controller.add_node()
def add_node_secure(service):
    """Put the controller into secure inclusion mode."""
    _LOGGER.info("Z-Wave add_node_secure have been initialized")
    controller = network.controller
    controller.add_node(True)
def remove_node(service):
    """Put the controller into exclusion mode."""
    _LOGGER.info("Z-Wave remove_node have been initialized")
    controller = network.controller
    controller.remove_node()
def cancel_command(service):
    """Cancel a running controller command."""
    _LOGGER.info("Cancel running Z-Wave command")
    controller = network.controller
    controller.cancel_command()
def heal_network(service):
    """Start a heal of the whole network."""
    _LOGGER.info("Z-Wave heal running")
    network.heal()
def soft_reset(service):
    """Soft reset the Z-Wave controller."""
    _LOGGER.info("Z-Wave soft_reset have been initialized")
    controller = network.controller
    controller.soft_reset()
def test_network(service):
    """Test the network by sending commands to all the nodes."""
    _LOGGER.info("Z-Wave test_network have been initialized")
    network.test()
def stop_network(_service_or_event):
    """Stop the Z-Wave network (service handler and shutdown hook)."""
    _LOGGER.info("Stopping Z-Wave network")
    network.stop()
    # During Home Assistant shutdown there is nobody left to hear the
    # event, so only fire it while still running.
    if hass.state == CoreState.running:
        hass.bus.fire(const.EVENT_NETWORK_STOP)
async def rename_node(service):
    """Rename a node."""
    node_id = service.data.get(const.ATTR_NODE_ID)
    node = network.nodes[node_id]
    name = service.data.get(const.ATTR_NAME)
    node.name = name
    _LOGGER.info("Renamed Z-Wave node %d to %s", node_id, name)
    update_ids = service.data.get(const.ATTR_UPDATE_IDS)
    # We want to rename the device, the node entity,
    # and all the contained entities
    node_key = f"node-{node_id}"
    entity = hass.data[DATA_DEVICES][node_key]
    await entity.node_renamed(update_ids)
    # Value entities are keyed "<node_id>-<value_id>".
    for key in list(hass.data[DATA_DEVICES]):
        if not key.startswith(f"{node_id}-"):
            continue
        entity = hass.data[DATA_DEVICES][key]
        await entity.value_renamed(update_ids)
async def rename_value(service):
    """Rename a node value."""
    node_id = service.data.get(const.ATTR_NODE_ID)
    value_id = service.data.get(const.ATTR_VALUE_ID)
    node = network.nodes[node_id]
    value = node.values[value_id]
    name = service.data.get(const.ATTR_NAME)
    value.label = name
    _LOGGER.info(
        "Renamed Z-Wave value (Node %d Value %d) to %s", node_id, value_id, name
    )
    update_ids = service.data.get(const.ATTR_UPDATE_IDS)
    # Propagate the rename to the entity tracking this value.
    value_key = f"{node_id}-{value_id}"
    entity = hass.data[DATA_DEVICES][value_key]
    await entity.value_renamed(update_ids)
def set_poll_intensity(service):
    """Set the polling intensity of a node value."""
    node_id = service.data.get(const.ATTR_NODE_ID)
    value_id = service.data.get(const.ATTR_VALUE_ID)
    node = network.nodes[node_id]
    value = node.values[value_id]
    intensity = service.data.get(const.ATTR_POLL_INTENSITY)
    # Intensity 0 means "disable polling"; anything else enables it.
    if intensity == 0:
        if value.disable_poll():
            _LOGGER.info("Polling disabled (Node %d Value %d)", node_id, value_id)
            return
        _LOGGER.info(
            "Polling disabled failed (Node %d Value %d)", node_id, value_id
        )
    else:
        if value.enable_poll(intensity):
            _LOGGER.info(
                "Set polling intensity (Node %d Value %d) to %s",
                node_id,
                value_id,
                intensity,
            )
            return
        _LOGGER.info(
            "Set polling intensity failed (Node %d Value %d)", node_id, value_id
        )
def remove_failed_node(service):
    """Drop a failed node from the network."""
    failed_id = service.data.get(const.ATTR_NODE_ID)
    _LOGGER.info("Trying to remove zwave node %d", failed_id)
    network.controller.remove_failed_node(failed_id)
def replace_failed_node(service):
    """Start replacement of a failed node."""
    failed_id = service.data.get(const.ATTR_NODE_ID)
    _LOGGER.info("Trying to replace zwave node %d", failed_id)
    network.controller.replace_failed_node(failed_id)
def set_config_parameter(service):
    """Set a config parameter to a node."""
    node_id = service.data.get(const.ATTR_NODE_ID)
    node = network.nodes[node_id]
    param = service.data.get(const.ATTR_CONFIG_PARAMETER)
    selection = service.data.get(const.ATTR_CONFIG_VALUE)
    size = service.data.get(const.ATTR_CONFIG_SIZE)
    for value in node.get_values(
        class_id=const.COMMAND_CLASS_CONFIGURATION
    ).values():
        if value.index != param:
            continue
        # Bool parameters only accept the literal string "True" as truthy.
        if value.type == const.TYPE_BOOL:
            value.data = int(selection == "True")
            _LOGGER.info(
                "Setting config parameter %s on Node %s " "with bool selection %s",
                param,
                node_id,
                str(selection),
            )
            return
        # List parameters are set by their item label, not a number.
        if value.type == const.TYPE_LIST:
            value.data = str(selection)
            _LOGGER.info(
                "Setting config parameter %s on Node %s " "with list selection %s",
                param,
                node_id,
                str(selection),
            )
            return
        # Button parameters are triggered by a press/release pair.
        if value.type == const.TYPE_BUTTON:
            network.manager.pressButton(value.value_id)
            network.manager.releaseButton(value.value_id)
            _LOGGER.info(
                "Setting config parameter %s on Node %s "
                "with button selection %s",
                param,
                node_id,
                selection,
            )
            return
        # Any other type is treated as a plain integer.
        value.data = int(selection)
        _LOGGER.info(
            "Setting config parameter %s on Node %s " "with selection %s",
            param,
            node_id,
            selection,
        )
        return
    # Parameter not among the node's known values: write it blind.
    node.set_config_param(param, selection, size)
    _LOGGER.info(
        "Setting unknown config parameter %s on Node %s " "with selection %s",
        param,
        node_id,
        selection,
    )
def refresh_node_value(service):
    """Ask a node to re-report one specific value."""
    data = service.data
    node_id = data.get(const.ATTR_NODE_ID)
    value_id = data.get(const.ATTR_VALUE_ID)
    network.nodes[node_id].values[value_id].refresh()
    _LOGGER.info("Node %s value %s refreshed", node_id, value_id)
def set_node_value(service):
    """Write a new value to one value object on a node."""
    data = service.data
    node_id = data.get(const.ATTR_NODE_ID)
    value_id = data.get(const.ATTR_VALUE_ID)
    new_value = data.get(const.ATTR_CONFIG_VALUE)
    network.nodes[node_id].values[value_id].data = new_value
    _LOGGER.info("Node %s value %s set to %s", node_id, value_id, new_value)
def print_config_parameter(service):
    """Log the current value of a node's config parameter."""
    data = service.data
    node_id = data.get(const.ATTR_NODE_ID)
    param = data.get(const.ATTR_CONFIG_PARAMETER)
    current = get_config_value(network.nodes[node_id], param)
    _LOGGER.info(
        "Config parameter %s on Node %s: %s",
        param,
        node_id,
        current,
    )
def print_node(service):
    """Dump all information about a z-wave node to the log."""
    target = network.nodes[service.data.get(const.ATTR_NODE_ID)]
    nice_print_node(target)
def set_wakeup(service):
    """Set wake-up interval of a node."""
    node_id = service.data.get(const.ATTR_NODE_ID)
    node = network.nodes[node_id]
    value = service.data.get(const.ATTR_CONFIG_VALUE)
    if node.can_wake_up():
        # Write the interval to every wake-up value the node exposes.
        for value_id in node.get_values(class_id=const.COMMAND_CLASS_WAKE_UP):
            node.values[value_id].data = value
            _LOGGER.info("Node %s wake-up set to %d", node_id, value)
    else:
        _LOGGER.info("Node %s is not wakeable", node_id)
def change_association(service):
    """Change an association in the zwave network."""
    association_type = service.data.get(const.ATTR_ASSOCIATION)
    node_id = service.data.get(const.ATTR_NODE_ID)
    target_node_id = service.data.get(const.ATTR_TARGET_NODE_ID)
    group = service.data.get(const.ATTR_GROUP)
    instance = service.data.get(const.ATTR_INSTANCE)
    # NOTE(review): despite the name, this is a ZWaveGroup handle for the
    # node's association group, not a node object.
    node = ZWaveGroup(group, network, node_id)
    if association_type == "add":
        node.add_association(target_node_id, instance)
        _LOGGER.info(
            "Adding association for node:%s in group:%s "
            "target node:%s, instance=%s",
            node_id,
            group,
            target_node_id,
            instance,
        )
    if association_type == "remove":
        node.remove_association(target_node_id, instance)
        _LOGGER.info(
            "Removing association for node:%s in group:%s "
            "target node:%s, instance=%s",
            node_id,
            group,
            target_node_id,
            instance,
        )
async def async_refresh_entity(service):
    """Refresh the values a specific entity depends on."""
    entity_id = service.data.get(ATTR_ENTITY_ID)
    signal = SIGNAL_REFRESH_ENTITY_FORMAT.format(entity_id)
    async_dispatcher_send(hass, signal)
def refresh_node(service):
    """Refresh all info for one node."""
    target = network.nodes[service.data.get(const.ATTR_NODE_ID)]
    target.refresh_info()
def reset_node_meters(service):
    """Reset meter counters of a node."""
    node_id = service.data.get(const.ATTR_NODE_ID)
    instance = service.data.get(const.ATTR_INSTANCE)
    node = network.nodes[node_id]
    for value in node.get_values(class_id=const.COMMAND_CLASS_METER).values():
        if value.index != const.INDEX_METER_RESET:
            continue
        if value.instance != instance:
            continue
        # The reset is exposed as a button value: press + release fires it.
        network.manager.pressButton(value.value_id)
        network.manager.releaseButton(value.value_id)
        _LOGGER.info(
            "Resetting meters on node %s instance %s....", node_id, instance
        )
        return
    _LOGGER.info(
        "Node %s on instance %s does not have resettable " "meters.",
        node_id,
        instance,
    )
def heal_node(service):
    """Heal a single node on the network."""
    data = service.data
    node_id = data.get(const.ATTR_NODE_ID)
    update_return_routes = data.get(const.ATTR_RETURN_ROUTES)
    _LOGGER.info("Z-Wave node heal running for node %s", node_id)
    network.nodes[node_id].heal(update_return_routes)
def test_node(service):
    """Send test messages to one node on the network."""
    data = service.data
    node_id = data.get(const.ATTR_NODE_ID)
    messages = data.get(const.ATTR_MESSAGES)
    _LOGGER.info("Sending %s test-messages to node %s.", messages, node_id)
    network.nodes[node_id].test(messages)
def start_zwave(_service_or_event):
    """Startup Z-Wave network."""
    _LOGGER.info("Starting Z-Wave network...")
    network.start()
    hass.bus.fire(const.EVENT_NETWORK_START)
    async def _check_awaked():
        """Wait for Z-wave awaked state (or timeout) and finalize start."""
        _LOGGER.debug("network state: %d %s", network.state, network.state_str)
        start_time = dt_util.utcnow()
        while True:
            waited = int((dt_util.utcnow() - start_time).total_seconds())
            if network.state >= network.STATE_AWAKED:
                # Need to be in STATE_AWAKED before talking to nodes.
                _LOGGER.info("Z-Wave ready after %d seconds", waited)
                break
            if waited >= const.NETWORK_READY_WAIT_SECS:
                # Wait up to NETWORK_READY_WAIT_SECS seconds for the Z-Wave
                # network to be ready.
                _LOGGER.warning(
                    "Z-Wave not ready after %d seconds, continuing anyway", waited
                )
                _LOGGER.info(
                    "final network state: %d %s", network.state, network.state_str
                )
                break
            await asyncio.sleep(1)
        # Finish setup (service registration etc.) whether ready or not.
        hass.async_add_job(_finalize_start)
    hass.add_job(_check_awaked)
def _finalize_start():
    """Perform final initializations after Z-Wave network is awaked."""
    polling_interval = convert(config.get(CONF_POLLING_INTERVAL), int)
    if polling_interval is not None:
        network.set_poll_interval(polling_interval, False)
    poll_interval = network.get_poll_interval()
    _LOGGER.info("Z-Wave polling interval set to %d ms", poll_interval)
    # Make sure the network is stopped cleanly on Home Assistant shutdown.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_network)
    # Register node services for Z-Wave network
    hass.services.register(DOMAIN, const.SERVICE_ADD_NODE, add_node)
    hass.services.register(DOMAIN, const.SERVICE_ADD_NODE_SECURE, add_node_secure)
    hass.services.register(DOMAIN, const.SERVICE_REMOVE_NODE, remove_node)
    hass.services.register(DOMAIN, const.SERVICE_CANCEL_COMMAND, cancel_command)
    hass.services.register(DOMAIN, const.SERVICE_HEAL_NETWORK, heal_network)
    hass.services.register(DOMAIN, const.SERVICE_SOFT_RESET, soft_reset)
    hass.services.register(DOMAIN, const.SERVICE_TEST_NETWORK, test_network)
    hass.services.register(DOMAIN, const.SERVICE_STOP_NETWORK, stop_network)
    hass.services.register(
        DOMAIN, const.SERVICE_RENAME_NODE, rename_node, schema=RENAME_NODE_SCHEMA
    )
    hass.services.register(
        DOMAIN, const.SERVICE_RENAME_VALUE, rename_value, schema=RENAME_VALUE_SCHEMA
    )
    hass.services.register(
        DOMAIN,
        const.SERVICE_SET_CONFIG_PARAMETER,
        set_config_parameter,
        schema=SET_CONFIG_PARAMETER_SCHEMA,
    )
    hass.services.register(
        DOMAIN,
        const.SERVICE_SET_NODE_VALUE,
        set_node_value,
        schema=SET_NODE_VALUE_SCHEMA,
    )
    hass.services.register(
        DOMAIN,
        const.SERVICE_REFRESH_NODE_VALUE,
        refresh_node_value,
        schema=REFRESH_NODE_VALUE_SCHEMA,
    )
    hass.services.register(
        DOMAIN,
        const.SERVICE_PRINT_CONFIG_PARAMETER,
        print_config_parameter,
        schema=PRINT_CONFIG_PARAMETER_SCHEMA,
    )
    hass.services.register(
        DOMAIN,
        const.SERVICE_REMOVE_FAILED_NODE,
        remove_failed_node,
        schema=NODE_SERVICE_SCHEMA,
    )
    hass.services.register(
        DOMAIN,
        const.SERVICE_REPLACE_FAILED_NODE,
        replace_failed_node,
        schema=NODE_SERVICE_SCHEMA,
    )
    hass.services.register(
        DOMAIN,
        const.SERVICE_CHANGE_ASSOCIATION,
        change_association,
        schema=CHANGE_ASSOCIATION_SCHEMA,
    )
    hass.services.register(
        DOMAIN, const.SERVICE_SET_WAKEUP, set_wakeup, schema=SET_WAKEUP_SCHEMA
    )
    hass.services.register(
        DOMAIN, const.SERVICE_PRINT_NODE, print_node, schema=NODE_SERVICE_SCHEMA
    )
    hass.services.register(
        DOMAIN,
        const.SERVICE_REFRESH_ENTITY,
        async_refresh_entity,
        schema=REFRESH_ENTITY_SCHEMA,
    )
    hass.services.register(
        DOMAIN, const.SERVICE_REFRESH_NODE, refresh_node, schema=NODE_SERVICE_SCHEMA
    )
    hass.services.register(
        DOMAIN,
        const.SERVICE_RESET_NODE_METERS,
        reset_node_meters,
        schema=RESET_NODE_METERS_SCHEMA,
    )
    hass.services.register(
        DOMAIN,
        const.SERVICE_SET_POLL_INTENSITY,
        set_poll_intensity,
        schema=SET_POLL_INTENSITY_SCHEMA,
    )
    hass.services.register(
        DOMAIN, const.SERVICE_HEAL_NODE, heal_node, schema=HEAL_NODE_SCHEMA
    )
    hass.services.register(
        DOMAIN, const.SERVICE_TEST_NODE, test_node, schema=TEST_NODE_SCHEMA
    )
    # Setup autoheal
    if autoheal:
        _LOGGER.info("Z-Wave network autoheal is enabled")
        # Heal the whole network every night at midnight.
        async_track_time_change(hass, heal_network, hour=0, minute=0, second=0)
# Defer the actual network start until Home Assistant has fully started;
# the start service allows restarting it manually after a stop.
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_zwave)
hass.services.async_register(DOMAIN, const.SERVICE_START_NETWORK, start_zwave)
# Forward this config entry to every entity platform we support.
for entry_component in SUPPORTED_PLATFORMS:
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(config_entry, entry_component)
    )
return True
class ZWaveDeviceEntityValues:
    """Manages entity access to the underlying zwave value objects."""
    def __init__(
        self, hass, schema, primary_value, zwave_config, device_config, registry
    ):
        """Initialize the values object with the passed entity schema."""
        self._hass = hass
        self._zwave_config = zwave_config
        self._device_config = device_config
        # Deep-copied because the schema is mutated (instance/node filters).
        self._schema = copy.deepcopy(schema)
        self._values = {}
        self._entity = None
        self._workaround_ignore = False
        self._registry = registry
        # Start every schema slot empty and pin all value filters to the
        # primary value's instance.
        for name in self._schema[const.DISC_VALUES].keys():
            self._values[name] = None
            self._schema[const.DISC_VALUES][name][const.DISC_INSTANCE] = [
                primary_value.instance
            ]
        self._values[const.DISC_PRIMARY] = primary_value
        self._node = primary_value.node
        self._schema[const.DISC_NODE_ID] = [self._node.node_id]
        # Check values that have already been discovered for node
        for value in self._node.values.values():
            self.check_value(value)
        self._check_entity_ready()
    def __getattr__(self, name):
        """Get the specified value for this entity.

        NOTE(review): an unknown name raises KeyError (from the dict
        lookup) rather than the conventional AttributeError.
        """
        return self._values[name]
    def __iter__(self):
        """Allow iteration over all values."""
        return iter(self._values.values())
    def check_value(self, value):
        """Check if the new value matches a missing value for this entity.

        If a match is found, it is added to the values mapping.
        """
        if not check_node_schema(value.node, self._schema):
            return
        for name in self._values:
            # Only fill slots that are still empty.
            if self._values[name] is not None:
                continue
            if not check_value_schema(value, self._schema[const.DISC_VALUES][name]):
                continue
            self._values[name] = value
            # If the entity already exists, notify it about the new value.
            if self._entity:
                self._entity.value_added()
                self._entity.value_changed()
            self._check_entity_ready()
    def _check_entity_ready(self):
        """Check if all required values are discovered and create entity."""
        if self._workaround_ignore:
            return
        if self._entity is not None:
            return
        # Every non-optional schema slot must be filled first.
        for name in self._schema[const.DISC_VALUES]:
            if self._values[name] is None and not self._schema[const.DISC_VALUES][
                name
            ].get(const.DISC_OPTIONAL):
                return
        component = self._schema[const.DISC_COMPONENT]
        # Device-specific workarounds may redirect (or suppress) the
        # component this value maps to.
        workaround_component = workaround.get_device_component_mapping(self.primary)
        if workaround_component and workaround_component != component:
            if workaround_component == workaround.WORKAROUND_IGNORE:
                _LOGGER.info(
                    "Ignoring Node %d Value %d due to workaround.",
                    self.primary.node.node_id,
                    self.primary.value_id,
                )
                # No entity will be created for this value
                self._workaround_ignore = True
                return
            _LOGGER.debug("Using %s instead of %s", workaround_component, component)
            component = workaround_component
        # Prefer the entity id already known to the registry; otherwise
        # generate one from the value name.
        entity_id = self._registry.async_get_entity_id(
            component, DOMAIN, compute_value_unique_id(self._node, self.primary)
        )
        if entity_id is None:
            value_name = _value_name(self.primary)
            entity_id = generate_entity_id(component + ".{}", value_name, [])
        node_config = self._device_config.get(entity_id)
        # Configure node
        _LOGGER.debug(
            "Adding Node_id=%s Generic_command_class=%s, "
            "Specific_command_class=%s, "
            "Command_class=%s, Value type=%s, "
            "Genre=%s as %s",
            self._node.node_id,
            self._node.generic,
            self._node.specific,
            self.primary.command_class,
            self.primary.type,
            self.primary.genre,
            component,
        )
        if node_config.get(CONF_IGNORED):
            _LOGGER.info("Ignoring entity %s due to device settings", entity_id)
            # No entity will be created for this value
            self._workaround_ignore = True
            return
        polling_intensity = convert(node_config.get(CONF_POLLING_INTENSITY), int)
        if polling_intensity:
            self.primary.enable_poll(polling_intensity)
        # The component module (e.g. .light) supplies the device class.
        platform = import_module(f".{component}", __name__)
        device = platform.get_device(
            node=self._node, values=self, node_config=node_config, hass=self._hass
        )
        if device is None:
            # No entity will be created for this value
            self._workaround_ignore = True
            return
        self._entity = device
        @callback
        def _on_ready(sec):
            _LOGGER.info(
                "Z-Wave entity %s (node_id: %d) ready after %d seconds",
                device.name,
                self._node.node_id,
                sec,
            )
            self._hass.async_add_job(discover_device, component, device)
        @callback
        def _on_timeout(sec):
            _LOGGER.warning(
                "Z-Wave entity %s (node_id: %d) not ready after %d seconds, "
                "continuing anyway",
                device.name,
                self._node.node_id,
                sec,
            )
            self._hass.async_add_job(discover_device, component, device)
        async def discover_device(component, device):
            """Put device in a dictionary and call discovery on it."""
            # Guard against double discovery of the same unique id.
            if self._hass.data[DATA_DEVICES].get(device.unique_id):
                return
            self._hass.data[DATA_DEVICES][device.unique_id] = device
            if component in SUPPORTED_PLATFORMS:
                async_dispatcher_send(self._hass, f"zwave_new_{component}", device)
            else:
                await discovery.async_load_platform(
                    self._hass,
                    component,
                    DOMAIN,
                    {const.DISCOVERY_DEVICE: device.unique_id},
                    self._zwave_config,
                )
        if device.unique_id:
            self._hass.add_job(discover_device, component, device)
        else:
            # Wait until the device can produce a unique id (or times out).
            self._hass.add_job(check_has_unique_id, device, _on_ready, _on_timeout)
class ZWaveDeviceEntity(ZWaveBaseEntity):
    """Representation of a Z-Wave node entity."""
    def __init__(self, values, domain):
        """Initialize the z-Wave device.

        values: the discovered value group; ``values.primary`` is the value
            this entity represents.
        domain: entity domain string; not stored here.
        """
        # pylint: disable=import-error
        super().__init__()
        from openzwave.network import ZWaveNetwork
        from pydispatch import dispatcher
        self.values = values
        self.node = values.primary.node
        # Accept pushed value changes without OZW's change-verification step.
        self.values.primary.set_change_verified(False)
        self._name = _value_name(self.values.primary)
        self._unique_id = self._compute_unique_id()
        self._update_attributes()
        # Subscribe to every value change on the network; filtering down to
        # this entity's own values happens in network_value_changed.
        dispatcher.connect(
            self.network_value_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED
        )
    def network_value_changed(self, value):
        """Handle a value change on the network."""
        # Only react if the changed value belongs to this entity's value set.
        if value.value_id in [v.value_id for v in self.values if v]:
            return self.value_changed()
    def value_added(self):
        """Handle a new value of this entity."""
        pass
    def value_changed(self):
        """Handle a changed value for this entity's node."""
        self._update_attributes()
        self.update_properties()
        self.maybe_schedule_update()
    async def value_renamed(self, update_ids=False):
        """Rename the node and update any IDs."""
        self._name = _value_name(self.values.primary)
        if update_ids:
            # Update entity ID.
            ent_reg = await async_get_registry(self.hass)
            new_entity_id = ent_reg.async_generate_entity_id(
                self.platform.domain,
                self._name,
                # Exclude our own ID so it can be regenerated unchanged.
                self.platform.entities.keys() - {self.entity_id},
            )
            if new_entity_id != self.entity_id:
                # Don't change the name attribute, it will be None unless
                # customised and if it's been customised, keep the
                # customisation.
                ent_reg.async_update_entity(self.entity_id, new_entity_id=new_entity_id)
                return
        # else for the above two ifs, update if not using update_entity
        self.async_schedule_update_ha_state()
    async def async_added_to_hass(self):
        """Add device to dict."""
        # Allow targeted refreshes of this entity via the dispatcher signal.
        async_dispatcher_connect(
            self.hass,
            SIGNAL_REFRESH_ENTITY_FORMAT.format(self.entity_id),
            self.refresh_from_network,
        )
    def _update_attributes(self):
        """Update the node attributes. May only be used inside callback."""
        self.node_id = self.node.node_id
        self._name = _value_name(self.values.primary)
        if not self._unique_id:
            # The unique ID may only become computable once the node is
            # parsed/ready; retry here and re-register the entity if it
            # appears late.
            self._unique_id = self._compute_unique_id()
            if self._unique_id:
                self.try_remove_and_add()
        if self.values.power:
            self.power_consumption = round(
                self.values.power.data, self.values.power.precision
            )
        else:
            self.power_consumption = None
    def update_properties(self):
        """Update on data changes for node values."""
        pass
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._unique_id
    @property
    def device_info(self):
        """Return device information."""
        identifier, name = node_device_id_and_name(
            self.node, self.values.primary.instance
        )
        info = {
            "name": name,
            "identifiers": {identifier},
            "manufacturer": self.node.manufacturer_name,
            "model": self.node.product_name,
        }
        if self.values.primary.instance > 1:
            # Secondary instances hang off the node's primary device entry.
            info["via_device"] = (DOMAIN, self.node_id)
        elif self.node_id > 1:
            # Non-controller nodes are reached via the controller (node 1).
            info["via_device"] = (DOMAIN, 1)
        return info
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def device_state_attributes(self):
        """Return the device specific state attributes."""
        attrs = {
            const.ATTR_NODE_ID: self.node_id,
            const.ATTR_VALUE_INDEX: self.values.primary.index,
            const.ATTR_VALUE_INSTANCE: self.values.primary.instance,
            const.ATTR_VALUE_ID: str(self.values.primary.value_id),
        }
        if self.power_consumption is not None:
            attrs[ATTR_POWER] = self.power_consumption
        return attrs
    def refresh_from_network(self):
        """Refresh all dependent values from zwave network."""
        for value in self.values:
            if value is not None:
                self.node.refresh_value(value.value_id)
    def _compute_unique_id(self):
        # A stable unique ID is only available once the node has been parsed
        # (with a known label) or reports ready; otherwise return None.
        if (
            is_node_parsed(self.node) and self.values.primary.label != "Unknown"
        ) or self.node.is_ready:
            return compute_value_unique_id(self.node, self.values.primary)
        return None
def compute_value_unique_id(node, value):
    """Return the unique_id that *value* on *node* would be assigned."""
    return "{}-{}".format(node.node_id, value.object_id)
| {
"content_hash": "15ee015aa46b2c97370ec0891e40fad2",
"timestamp": "",
"source": "github",
"line_count": 1318,
"max_line_length": 88,
"avg_line_length": 35.15857359635812,
"alnum_prop": 0.5876475538962861,
"repo_name": "Cinntax/home-assistant",
"id": "841b283a98dd1fa1aec7bfb3460643ac404ddb18",
"size": "46339",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zwave/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17374056"
},
{
"name": "Shell",
"bytes": "6792"
}
],
"symlink_target": ""
} |
import nnabla.functions as F
def roi_align_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, output_size=(1, 1), spatial_scale=None, sampling_ratio=None, aligned=None, channel_last=False):
    """Backward pass of roi_align (placeholder — not yet implemented).

    Args:
        grad_inputs (list of :obj:`nnabla.Variable`): Gradients propagated to
            this backward function.
        inputs (list of :obj:`nnabla.Variable` and None): Forward inputs this
            backward function depends on; unused entries are None.
        input_shapes (list of tuple of :obj:`int`): Shapes of the forward
            inputs (available even where the input itself is None).
        outputs (list of :obj:`nnabla.Variable` and None): Forward outputs
            this backward function depends on; unused entries are None.
        output_shapes (list of tuple of :obj:`int`): Shapes of the forward
            outputs (available even where the output itself is None).
        kwargs (dict of arguments): Corresponding function arguments.

    Raises:
        NotImplementedError: always — the gradient has not been implemented.
    """
    # Bind the leading grad/input for a future implementation, matching the
    # convention of the other backward functions in this package.
    dy, x0 = grad_inputs[0], inputs[0]
    raise NotImplementedError("roi_align_backward is not implemented.")
| {
"content_hash": "d3d26eab195d3b371961a01f18d0eec7",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 177,
"avg_line_length": 57.08695652173913,
"alnum_prop": 0.7075399847677075,
"repo_name": "sony/nnabla",
"id": "4ac122a7af4087d2cdfc867b89e9f69f269be2f5",
"size": "2184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/src/nnabla/backward_function/roi_align.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "25938"
},
{
"name": "C++",
"bytes": "2590231"
},
{
"name": "CMake",
"bytes": "35358"
},
{
"name": "Cython",
"bytes": "180959"
},
{
"name": "Dockerfile",
"bytes": "5431"
},
{
"name": "Jupyter Notebook",
"bytes": "540006"
},
{
"name": "Makefile",
"bytes": "24294"
},
{
"name": "Python",
"bytes": "5311538"
},
{
"name": "Shell",
"bytes": "4750"
}
],
"symlink_target": ""
} |
from statsmodels.tools.parallel import parallel_func
from numpy import arange, testing
from math import sqrt
def test_parallel():
    """sqrt fanned out via parallel_func must reproduce arange(10.)."""
    expected = arange(10.)
    parallel, wrapped_sqrt, n_jobs = parallel_func(sqrt, n_jobs=-1, verbose=0)
    computed = parallel(wrapped_sqrt(i ** 2) for i in range(10))
    testing.assert_equal(expected, computed)
| {
"content_hash": "607a1f00525ec877b1ead9d4b30ef722",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 33.77777777777778,
"alnum_prop": 0.7006578947368421,
"repo_name": "bavardage/statsmodels",
"id": "7af5fb28d69728893332fe35c41e645ed398da12",
"size": "304",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "statsmodels/tools/tests/test_parallel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "11707"
},
{
"name": "CSS",
"bytes": "6244"
},
{
"name": "JavaScript",
"bytes": "16353"
},
{
"name": "Python",
"bytes": "6113420"
},
{
"name": "R",
"bytes": "12495"
},
{
"name": "Shell",
"bytes": "5156"
}
],
"symlink_target": ""
} |
import unittest
from airflow import configuration as conf
from airflow.configuration import AirflowConfigException
from airflow.lineage.backend.atlas import AtlasBackend
from airflow.lineage.datasets import File
from airflow.models import DAG, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils import timezone
from tests.compat import mock
from backports.configparser import DuplicateSectionError
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
class TestAtlas(unittest.TestCase):
    """Tests for the Atlas lineage backend against a mocked Atlas client."""
    def setUp(self):
        # Ensure an [atlas] config section with dummy credentials exists so
        # AtlasBackend can initialise without a real Atlas server.
        conf.load_test_config()
        try:
            conf.conf.add_section("atlas")
        except AirflowConfigException:
            pass
        except DuplicateSectionError:
            # Section already present (e.g. from a previous test run).
            pass
        conf.conf.set("atlas", "username", "none")
        conf.conf.set("atlas", "password", "none")
        conf.conf.set("atlas", "host", "none")
        conf.conf.set("atlas", "port", "0")
        self.atlas = AtlasBackend()
    @mock.patch("airflow.lineage.backend.atlas.Atlas")
    def test_lineage_send(self, atlas_mock):
        """send_lineage should create typedefs once and post all entities."""
        td = mock.MagicMock()
        en = mock.MagicMock()
        # The backend instantiates Atlas(...); hand it our typedef/entity mocks.
        atlas_mock.return_value = mock.Mock(typedefs=td, entity_post=en)
        dag = DAG(
            dag_id='test_prepare_lineage',
            start_date=DEFAULT_DATE
        )
        f1 = File("/tmp/does_not_exist_1")
        f2 = File("/tmp/does_not_exist_2")
        inlets_d = [f1, ]
        outlets_d = [f2, ]
        with dag:
            op1 = DummyOperator(task_id='leave1',
                                inlets={"datasets": inlets_d},
                                outlets={"datasets": outlets_d})
        ctx = {"ti": TI(task=op1, execution_date=DEFAULT_DATE)}
        self.atlas.send_lineage(operator=op1, inlets=inlets_d,
                                outlets=outlets_d, context=ctx)
        self.assertEqual(td.create.call_count, 1)
        self.assertTrue(en.create.called)
        # NOTE(review): 3 mock calls presumably = inlet + outlet + operator
        # entities; confirm against AtlasBackend.send_lineage internals.
        self.assertEqual(len(en.mock_calls), 3)
        # test can be broader
| {
"content_hash": "ff9c0bd2352b1e31eb2200ec04f63760",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 72,
"avg_line_length": 31.184615384615384,
"alnum_prop": 0.6151948692649235,
"repo_name": "r39132/airflow",
"id": "5898e683336d53e91a5372b186b6cc59b5109254",
"size": "2838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/lineage/backend/test_atlas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4111"
},
{
"name": "HTML",
"bytes": "128531"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5928206"
},
{
"name": "Shell",
"bytes": "41869"
}
],
"symlink_target": ""
} |
from django.db.models import Count
from django.test import TestCase
from django.test import Client
from django.urls import reverse
from .models import Card, Dish
class CardTestCase(TestCase):
    """Smoke tests for the Card/Dish models and the menu views.

    NOTE(review): setUp assumes data comes from fixtures, but no ``fixtures``
    attribute is declared on this class — confirm the data (9 cards, the
    'Hamburger' dish, etc.) is loaded by other means, otherwise the count
    assertions below will fail on an empty test database.
    """
    def setUp(self):
        pass # data loaded from fixtures
    def test_filter_dishes(self):
        # Exactly one dish named 'Hamburger' is expected in the test data.
        dishes = Dish.objects.filter(name=u'Hamburger')
        self.assertEqual(dishes.count(), 1)
    def test_card_display_all(self):
        cards = Card.objects.all()
        self.assertEqual(cards.count(), 9)
    def test_card_display_not_empty(self):
        # Of the 9 cards, 8 are expected to have at least one dish attached.
        cards = Card.objects.filter(dishes__gt=0).annotate(dishes_count=Count('dishes'))
        self.assertEqual(cards.count(), 8)
    def test_index_response(self):
        client = Client()
        response = client.get(reverse('index') + '?sort_by=name')
        self.assertEqual(response.status_code, 200)
    def test_index_response_empty_page(self):
        # An out-of-range page must still render (not 404).
        client = Client()
        response = client.get(reverse('index') + '?page=100')
        self.assertEqual(response.status_code, 200)
    def test_creating_dish(self):
        dish = Dish.objects.create(name='Test dish', description='Test desc', price=5, preparation_time=3, is_vegetarian=False)
        dish.save()
        # The freshly created dish must be the newest row.
        last_dish = Dish.objects.all().latest('id')
        self.assertEqual(dish.id, last_dish.id)
    def test_creating_card(self):
        card = Card.objects.create(name='Test card', description='Test description')
        card.save()
        last_card = Card.objects.all().latest('id')
        self.assertEqual(card.id, last_card.id)
    def test_api_response(self):
        client = Client()
        response = client.get(reverse('index_asynchronous'))
        self.assertEqual(response.status_code, 200)
    def test_api_cards_response(self):
        client = Client()
        response = client.get('/api/cards/1/')
        self.assertEqual(response.status_code, 200)
    def test_detail_response(self):
        client = Client()
        response = client.get(reverse('details', args=[2]))
        self.assertEqual(response.status_code, 200)
| {
"content_hash": "1116d2001348bcfc1ed6c50c6b81d74a",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 127,
"avg_line_length": 34.8,
"alnum_prop": 0.6484674329501916,
"repo_name": "mkost/eMenu",
"id": "462c2baa2ca28f58e66986f9e74c25a678f6c88b",
"size": "2088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emenu/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "12127"
},
{
"name": "Python",
"bytes": "17914"
}
],
"symlink_target": ""
} |
"""Corpus backup model"""
from sqlalchemy import Column, Sequence
from sqlalchemy.types import Integer, Unicode, UnicodeText, DateTime
from onlinelinguisticdatabase.model.meta import Base, now
import simplejson as json
class CorpusBackup(Base):
    """Define the corpus backup model.
    .. note::
        Unlike with the collection backup model, the corpus backup model does
        not backup references to forms. This is because corpora will generally
        reference many, many forms and it would be inefficient to store all of
        these references as massive (mostly redundant) JSON arrays...
    """
    __tablename__ = 'corpusbackup'
    def __repr__(self):
        return "<CorpusBackup (%s)>" % self.id
    # Primary key of the backup row itself.
    id = Column(Integer, Sequence('corpusbackup_seq_id', optional=True), primary_key=True)
    # id of the corpus this row is a backup of (no FK; the corpus may be gone).
    corpus_id = Column(Integer)
    # UUID shared by a corpus and all of its backups.
    UUID = Column(Unicode(36))
    name = Column(Unicode(255))
    type = Column(Unicode(255))
    description = Column(UnicodeText)
    # Full corpus content; sized for very large texts (up to 2 GiB).
    content = Column(UnicodeText(length=2**31))
    # The following relational attributes are stored as JSON-serialized
    # (truncated) snapshots, not as relations — see the class note above.
    enterer = Column(UnicodeText)
    modifier = Column(UnicodeText)
    form_search = Column(UnicodeText)
    datetime_entered = Column(DateTime)
    datetime_modified = Column(DateTime, default=now)
    tags = Column(UnicodeText)
    def vivify(self, corpus_dict):
        """The vivify method gives life to a corpus_backup by specifying its
        attributes using the to-be-backed-up corpus as represented in
        ``corpus_dict``. The relational attributes of the backup are converted
        to (truncated) JSON objects.
        """
        self.UUID = corpus_dict['UUID']
        self.corpus_id = corpus_dict['id']
        self.name = corpus_dict['name']
        self.description = corpus_dict['description']
        self.content = corpus_dict['content']
        # Python 2: json.dumps returns str; coerce to unicode for the columns.
        self.enterer = unicode(json.dumps(corpus_dict['enterer']))
        self.modifier = unicode(json.dumps(corpus_dict['modifier']))
        self.form_search = unicode(json.dumps(corpus_dict['form_search']))
        self.datetime_entered = corpus_dict['datetime_entered']
        self.datetime_modified = corpus_dict['datetime_modified']
        self.tags = unicode(json.dumps(corpus_dict['tags']))
    def get_dict(self):
        """Return a Python dictionary representation of the Corpus. This
        facilitates JSON-stringification, cf. utils.JSONOLDEncoder. Relational
        data are truncated, e.g., corpus_dict['enterer'] is a dict with keys
        for 'id', 'first_name' and 'last_name' (cf. get_mini_user_dict) and
        lacks keys for other attributes such as 'username',
        'personal_page_content', etc.
        """
        return {
            'id': self.id,
            'corpus_id': self.corpus_id,
            'UUID': self.UUID,
            'name': self.name,
            'type': self.type,
            'description': self.description,
            'content': self.content,
            # Stored JSON strings are decoded back into Python objects here.
            'enterer': self.json_loads(self.enterer),
            'modifier': self.json_loads(self.modifier),
            'form_search': self.json_loads(self.form_search),
            'datetime_entered': self.datetime_entered,
            'datetime_modified': self.datetime_modified,
            'tags': self.json_loads(self.tags)
        }
| {
"content_hash": "011a468183c33b945ad8be3d026dbaa9",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 90,
"avg_line_length": 39.58536585365854,
"alnum_prop": 0.6426370918052988,
"repo_name": "jrwdunham/old",
"id": "9c3fe64bbabbb3db41fc4b926de22c268a752272",
"size": "3830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onlinelinguisticdatabase/model/corpusbackup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "66"
},
{
"name": "Python",
"bytes": "2840936"
},
{
"name": "Shell",
"bytes": "778"
}
],
"symlink_target": ""
} |
import sys, os, warnings
warnings.simplefilter("ignore", DeprecationWarning)
from ncclient import manager
def ericsson_connect(host, port, user, password, device_params):
    """Open a NETCONF session to an Ericsson device.

    Host-key verification is disabled because this is a demo script.

    Returns the ncclient manager session (usable as a context manager).
    """
    # Bug fix: the original passed ``hostkey_verify-false`` (a positional
    # expression after keyword arguments), which is a SyntaxError; the
    # intended keyword argument is ``hostkey_verify=False``.
    return manager.connect(host=host,
                           port=port,
                           username=user,
                           password=password,
                           device_params=device_params,
                           hostkey_verify=False)
def enable_nc_prefix(host, user, password):
    """Fetch and print the running config with the 'nc' namespace prefix on."""
    # 'with_ns': True makes the Ericsson handler keep the 'nc' prefix in
    # serialized replies.
    params = {'name': 'ericsson', 'with_ns': True}
    with ericsson_connect(host,
                          port=22,
                          user=user,
                          password=password,
                          device_params=params) as session:
        config_xml = session.get_config(source="running").data_xml
        print(config_xml)
def disable_nc_prefix(host, user, password):
    """Fetch and print the running config with the 'nc' namespace prefix off."""
    # 'with_ns': False strips the 'nc' prefix from serialized replies.
    params = {'name': 'ericsson', 'with_ns': False}
    with ericsson_connect(host,
                          port=22,
                          user=user,
                          password=password,
                          device_params=params) as session:
        config_xml = session.get_config(source="running").data_xml
        print(config_xml)
def demo(host, user, password):
    """Run both prefix variants back to back, separated by a divider line."""
    enable_nc_prefix(host, user, password)
    print("#" * 50)
    disable_nc_prefix(host, user, password)
if __name__ == '__main__':
    # CLI usage: script.py <host> <user> <password>
    demo(sys.argv[1], sys.argv[2], sys.argv[3])
| {
"content_hash": "a53f0ba69e974517b8897e6cef662684",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 64,
"avg_line_length": 31.448979591836736,
"alnum_prop": 0.5288773523685918,
"repo_name": "earies/ncclient",
"id": "29e569a9fd6a463cd728f2a3d3fd08a61b7c953c",
"size": "1912",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/vendor/ericsson/ericsson_nc_prefix_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "194806"
}
],
"symlink_target": ""
} |
"""
===============================================
.. module:: evodjango.i18n.models
:platform: Django
:synopsis:
.. moduleauthor:: (C) 2014 Oliver Gutiérrez
# TODO: Automatic translation support using Google Translate
"""
# Django imports
from django.db import models
from django.utils.translation import get_language, ugettext_lazy as _
from django.utils.functional import curry
from django.conf import settings
from django import forms
# EVODjango imports
from sitetools.models import JSONField
from sitetools.forms.widgets import TinyMCEWidget
from sitetools.i18n.forms import I18NField
class I18NTextField(JSONField):
    """
    Internationalization TextField.

    Stores per-language text as a JSON mapping of language code to value and
    exposes convenience properties on the owning model (see
    contribute_to_class).
    """
    description = _('Internationalization TextField')
    def formfield(self, **kwargs):
        """
        Form field method overload: render with the multi-language I18NField.
        """
        kwargs.setdefault('required',not self.blank)
        kwargs.setdefault('label',self.verbose_name)
        return I18NField(**kwargs)
    def contribute_to_class(self, cls, name):
        """
        Contribute to class adding localized_FIELD methods to the model containing this field
        """
        def get_localized_version(modelobj,lang=None):
            """
            Function to show localized version of a field
            """
            data=getattr(modelobj,name)
            if lang is None:
                # Default to the currently active Django language.
                lang=get_language()
            value=''
            if data:
                if lang in data:
                    value=data[lang]
                if not value:
                    # Fall back to the site's default language.
                    if settings.LANGUAGE_CODE in data:
                        value=data[settings.LANGUAGE_CODE]
            return value
        get_localized_version.short_description = name
        # model.localized_<field> -> value in the active language.
        setattr(cls, 'localized_%s' % name, property(get_localized_version))
        # model.<field>_<lang> -> value in that specific language; curry
        # binds `lang` so each property is fixed to one language.
        for lang,langname in settings.LANGUAGES:
            setattr(cls, '%s_%s' % (name,lang), property(curry(get_localized_version,lang=lang)))
        # Call original method
        super(I18NTextField,self).contribute_to_class(cls, name)
class I18NCharField(I18NTextField):
    """Single-line internationalized field (CharField counterpart)."""
    description = _('Internationalization CharField')
    def formfield(self, **kwargs):
        """Build the form field, forcing a plain single-line text input."""
        kwargs.setdefault('max_length', self.max_length)
        kwargs['widget'] = forms.TextInput
        return super(I18NCharField, self).formfield(**kwargs)
class I18NHTMLField(I18NTextField):
    """Internationalized rich-text field edited with TinyMCE."""
    description = _('Internationalization HTMLField')
    def formfield(self, **kwargs):
        """Build the form field, swapping in the TinyMCE widget."""
        kwargs['widget'] = TinyMCEWidget
        return super(I18NHTMLField, self).formfield(**kwargs)
| {
"content_hash": "baf3b45427b884ade4d52d009971965e",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 97,
"avg_line_length": 30.32608695652174,
"alnum_prop": 0.6204301075268818,
"repo_name": "olivergs/django-sitetools",
"id": "4a952bf042a59350f728e56b542b3cd9d3193347",
"size": "2815",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sitetools/i18n/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "788"
},
{
"name": "Python",
"bytes": "130433"
}
],
"symlink_target": ""
} |
# CMSSW configuration: fill Bambu trees (MIT format) from AODSIM input.
import FWCore.ParameterSet.Config as cms
process = cms.Process('FILEFI')
# import of standard configurations
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration/StandardSequences/MagneticField_38T_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration/EventContent/EventContent_cff')
process.load('RecoVertex/PrimaryVertexProducer/OfflinePrimaryVertices_cfi')
# Bookkeeping metadata attached to the produced dataset.
process.configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('Mit_029'),
    annotation = cms.untracked.string('AODSIM'),
    name = cms.untracked.string('BambuProduction')
)
# -1 = process all events in the input files.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
process.options = cms.untracked.PSet(
    Rethrow = cms.untracked.vstring('ProductNotFound'),
    fileMode = cms.untracked.string('NOMERGE'),
)
# input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('/store/relval/CMSSW_5_3_2-START53_V6/RelValProdTTbar/AODSIM/v2/0000/9A630BD7-C3B9-E111-BAB3-00304867918E.root')
)
# Drop products that clash with downstream processing.
process.source.inputCommands = cms.untracked.vstring("keep *",
    "drop *_MEtoEDMConverter_*_*",
    "drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT")
# other statements
process.GlobalTag.globaltag = 'START53_V10::All'
process.add_(cms.Service("ObjectService"))
process.load("MitProd.BAMBUSequences.BambuFillAODSIM_cfi")
# Placeholder replaced by the production system with the dataset name.
process.MitTreeFiller.TreeWriter.fileName = 'XX-MITDATASET-XX'
#process.output = cms.OutputModule("PoolOutputModule",
#    outputCommands = cms.untracked.vstring('keep *'),
#    fileName = cms.untracked.string ("test.root")
#)
process.bambu_step = cms.Path(process.BambuFillAODSIM)
# schedule definition
process.schedule = cms.Schedule(process.bambu_step)
#process.outpath = cms.EndPath(process.output)
| {
"content_hash": "34a54ebb9adac4ee1c7d94190a6247d7",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 148,
"avg_line_length": 37.875,
"alnum_prop": 0.7185289957567186,
"repo_name": "cpausmit/Kraken",
"id": "642f0e32485f7530c0d0db78517784d1aa247312",
"size": "2187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filefi/029/mc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3913"
},
{
"name": "M4",
"bytes": "49993"
},
{
"name": "Monkey C",
"bytes": "246623"
},
{
"name": "PHP",
"bytes": "4827"
},
{
"name": "Python",
"bytes": "1402251"
},
{
"name": "Shell",
"bytes": "119172"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
__author__ = 'katharine'
import argparse
import os
import subprocess
import time
from pebble_tool.exceptions import BuildError
from pebble_tool.util.analytics import post_event
import pebble_tool.util.npm as npm
from pebble_tool.commands.sdk.project import SDKProjectCommand
class BuildCommand(SDKProjectCommand):
    """Builds the current project."""
    command = "build"
    def __call__(self, args):
        """Install npm dependencies (if declared), then run waf configure+build.

        Posts analytics events for success/failure and raises BuildError on
        any npm or waf failure.
        """
        super(BuildCommand, self).__call__(args)
        start_time = time.time()
        if len(self.project.dependencies) > 0:
            post_event('app_build_with_npm_deps')
            try:
                npm.invoke_npm(["install"])
                npm.invoke_npm(["dedupe"])
            except subprocess.CalledProcessError:
                post_event("app_build_failed_npm")
                raise BuildError("npm failed.")
        try:
            waf = list(args.args)
            try:
                # argparse.REMAINDER may include the '--' separator; drop it.
                waf.remove('--')
            except ValueError:
                pass
            extra_env = {}
            if args.debug:
                # -O0 keeps binaries debuggable; appended after user CFLAGS.
                extra_env = {'CFLAGS': os.environ.get('CFLAGS', '') + ' -O0'}
            self._waf("configure", extra_env=extra_env, args=waf)
            self._waf("build", args=waf)
        except subprocess.CalledProcessError:
            duration = time.time() - start_time
            post_event("app_build_failed", build_time=duration)
            raise BuildError("Build failed.")
        else:
            duration = time.time() - start_time
            has_js = os.path.exists(os.path.join('src', 'js'))
            post_event("app_build_succeeded", has_js=has_js, line_counts=self._get_line_counts(), build_time=duration)
    @classmethod
    def _get_line_counts(cls):
        """Return C and JS source line counts under src/ (for analytics)."""
        c_line_count = 0
        js_line_count = 0
        if os.path.exists('src'):
            c_line_count += cls._count_lines('src', ['.h', '.c'])
            js_line_count += cls._count_lines('src', ['.js'])
        return {'c_line_count': c_line_count, 'js_line_count': js_line_count}
    @classmethod
    def _count_lines(cls, path, extensions):
        """Recursively count lines of files under *path* whose extension is in
        *extensions*. Skips dot-files and does not follow directory symlinks.
        """
        src_lines = 0
        files = os.listdir(path)
        for name in files:
            if name.startswith('.'):
                continue
            if os.path.isdir(os.path.join(path, name)):
                if not os.path.islink(os.path.join(path, name)):
                    src_lines += cls._count_lines(os.path.join(path, name), extensions)
                continue
            ext = os.path.splitext(name)[1]
            if ext in extensions:
                # Bug fix: the original used a bare open() inside sum(...),
                # leaking the file handle until GC; a context manager closes
                # it deterministically.
                with open(os.path.join(path, name)) as source_file:
                    src_lines += sum(1 for _ in source_file)
        return src_lines
    @classmethod
    def add_parser(cls, parser):
        """Register this command's arguments on the shared argparse parser."""
        parser = super(BuildCommand, cls).add_parser(parser)
        parser.add_argument('--debug', action='store_true', help="Build without optimisations for easier debugging. "
                                                                 "This may cause apps to run slower or not fit at all.")
        parser.add_argument('args', nargs=argparse.REMAINDER, help="Extra arguments to pass to waf.")
        return parser
class CleanCommand(SDKProjectCommand):
    """Remove all build artifacts from the current project."""
    command = "clean"
    def __call__(self, args):
        super(CleanCommand, self).__call__(args)
        try:
            # waf distclean deletes the build directory and lock files.
            self._waf("distclean")
        except subprocess.CalledProcessError:
            # Best-effort: report and carry on rather than raising.
            print("Clean failed.")
| {
"content_hash": "13769960ba8cfc0dcbb970f2b839cb3f",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 120,
"avg_line_length": 37.02150537634409,
"alnum_prop": 0.5660760964275341,
"repo_name": "pebble/pebble-tool",
"id": "d7cd1a44f3abcf9dfd33dbf4e43558d664b68ab5",
"size": "3443",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pebble_tool/commands/sdk/project/build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1864"
},
{
"name": "CSS",
"bytes": "2938"
},
{
"name": "JavaScript",
"bytes": "8517"
},
{
"name": "Python",
"bytes": "177525"
},
{
"name": "Shell",
"bytes": "269"
}
],
"symlink_target": ""
} |
import logging
import logging.config
import json
import argparse
import jobs
import utils
logger = logging.getLogger(__name__)
run_logger = logging.getLogger('josync_run')
def main():
    """Send one test e-mail to the address given on the command line,
    using the merged default + user Josync configuration."""
    parser = argparse.ArgumentParser(description='Test e-mail sending of Josync.')
    parser.add_argument('address',help='e-mail address to send test message to',type=str)
    args = parser.parse_args()
    logger.info("E-mail test started. Josync version {}.".format(utils.version))
    # parse global settings file
    utils.read_config(default_cfg='default.josync-config',user_cfg='user.josync-config')
    try:
        utils.send_email(args.address, "Josync test e-mail", """
    Congratulations, this e-mail was successfully sent from Josync.
    """)
    except Exception as e:
        # Failures are logged to the dedicated run logger instead of raised,
        # so the "Session ended" message below is always emitted.
        run_logger.exception(e)
    logger.info("Session ended.")
if __name__ == '__main__':
    # Configure logging from the bundled JSON config, then force DEBUG
    # on the root logger before running.
    with open('default.josync-logging') as f:
        log_config = json.loads(f.read())
    logging.config.dictConfig(log_config)
    logging.getLogger().setLevel(logging.DEBUG)
    main()
| {
"content_hash": "ae752707855647f80bac96f52fb40325",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 89,
"avg_line_length": 25.25581395348837,
"alnum_prop": 0.6721915285451197,
"repo_name": "jeinarsson/josync",
"id": "ea550769fb43413305fd2fd4405b84c4a494ce1e",
"size": "1086",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "testmail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33349"
},
{
"name": "Shell",
"bytes": "6459"
}
],
"symlink_target": ""
} |
__all__ = ['normal', 'uniform', 'poisson']
from ..defmatrix import *
# Special object used internally to specify the placeholder which will be replaced by output ID
# This helps to provide dml containing output ID in constructSamplingNode
OUTPUT_ID = '$$OutputID$$'
def constructSamplingNode(inputs, dml):
    """
    Build a lazily-evaluated matrix AST node for a sampling operation.
    Parameters
    ----------
    inputs = list of input matrix objects and/or DMLOp
    dml = list of DML string fragments (joined before execution); use the
          OUTPUT_ID placeholder wherever the output's ID must appear.
    """
    op = DMLOp(inputs)
    result = matrix(None, op=op)
    # Substitute the placeholder with the freshly assigned output ID.
    substituted = []
    for fragment in dml:
        substituted.append(result.ID if fragment == OUTPUT_ID else fragment)
    op.dml = substituted
    return result
INPUTS = []
def asStr(arg):
    """
    Internal use only: record matrix arguments in the module-level INPUTS
    list and return the DML string form of *arg*.
    """
    # Bug fix: without this declaration the assignment below made INPUTS
    # function-local, raising UnboundLocalError whenever arg was a matrix.
    # NOTE(review): callers also shadow INPUTS with a local list that this
    # function cannot see — confirm the intended accumulation scheme.
    global INPUTS
    if isinstance(arg, matrix):
        INPUTS = INPUTS + [arg]
        return arg.ID
    else:
        return str(arg)
def normal(loc=0.0, scale=1.0, size=(1, 1), sparsity=1.0):
    """
    Draw random samples from a normal (Gaussian) distribution.
    Parameters
    ----------
    loc: Mean ("centre") of the distribution.
    scale: Standard deviation (spread or "width") of the distribution.
    size: Output shape (only tuple of length 2, i.e. (m, n), supported).
    sparsity: Sparsity (between 0.0 and 1.0).
    Examples
    --------
    >>> import systemml as sml
    >>> import numpy as np
    >>> sml.setSparkContext(sc)
    >>> from systemml import random
    >>> m1 = sml.random.normal(loc=3, scale=2, size=(3,3))
    >>> m1.toNumPy()
    array([[ 3.48857226,  6.17261819,  2.51167259],
           [ 3.60506708, -1.90266305,  3.97601633],
           [ 3.62245706,  5.9430881 ,  2.53070413]])
    """
    if len(size) != 2:
        raise TypeError('Incorrect type for size. Expected tuple of length 2')
    inputs = []
    def _to_str(arg):
        # Bug fix: register matrix-valued parameters as inputs of the
        # sampling node (the old module-level asStr/INPUTS handling raised
        # UnboundLocalError and never populated this function's list).
        if isinstance(arg, matrix):
            inputs.append(arg)
            return arg.ID
        return str(arg)
    rows = _to_str(size[0])
    cols = _to_str(size[1])
    loc = _to_str(loc)
    scale = _to_str(scale)
    sparsity = _to_str(sparsity)
    # loc + scale*standard normal
    return constructSamplingNode(inputs, [
        OUTPUT_ID, ' = ', loc, ' + ', scale, ' * random.normal(', rows, ',', cols, ',', sparsity, ')\n'])
def uniform(low=0.0, high=1.0, size=(1, 1), sparsity=1.0):
    """
    Draw samples from a uniform distribution.
    Parameters
    ----------
    low: Lower boundary of the output interval.
    high: Upper boundary of the output interval.
    size: Output shape (only tuple of length 2, i.e. (m, n), supported).
    sparsity: Sparsity (between 0.0 and 1.0).
    Examples
    --------
    >>> import systemml as sml
    >>> import numpy as np
    >>> sml.setSparkContext(sc)
    >>> from systemml import random
    >>> m1 = sml.random.uniform(size=(3,3))
    >>> m1.toNumPy()
    array([[ 0.54511396,  0.11937437,  0.72975775],
           [ 0.14135946,  0.01944448,  0.52544478],
           [ 0.67582422,  0.87068849,  0.02766852]])
    """
    if len(size) != 2:
        raise TypeError('Incorrect type for size. Expected tuple of length 2')
    inputs = []
    def _to_str(arg):
        # Bug fix: register matrix-valued parameters as inputs of the
        # sampling node (the old module-level asStr/INPUTS handling raised
        # UnboundLocalError and never populated this function's list).
        if isinstance(arg, matrix):
            inputs.append(arg)
            return arg.ID
        return str(arg)
    rows = _to_str(size[0])
    cols = _to_str(size[1])
    low = _to_str(low)
    high = _to_str(high)
    sparsity = _to_str(sparsity)
    return constructSamplingNode(inputs, [
        OUTPUT_ID, ' = random.uniform(', rows, ',', cols, ',', sparsity, ',', low, ',', high, ')\n'])
def poisson(lam=1.0, size=(1, 1), sparsity=1.0):
    """
    Draw samples from a Poisson distribution.
    Parameters
    ----------
    lam: Expectation of interval, should be > 0.
    size: Output shape (only tuple of length 2, i.e. (m, n), supported).
    sparsity: Sparsity (between 0.0 and 1.0).
    Examples
    --------
    >>> import systemml as sml
    >>> import numpy as np
    >>> sml.setSparkContext(sc)
    >>> from systemml import random
    >>> m1 = sml.random.poisson(lam=1, size=(3,3))
    >>> m1.toNumPy()
    array([[ 1., 0., 2.],
    [ 1., 0., 0.],
    [ 0., 0., 0.]])
    """
    if len(size) != 2:
        raise TypeError('Incorrect type for size. Expected tuple of length 2')
    num_rows = asStr(size[0])
    num_cols = asStr(size[1])
    # Emit the DML call: random.poisson(rows, cols, sparsity, lam).
    script_parts = [OUTPUT_ID, ' = random.poisson(', num_rows, ',', num_cols,
                    ',', asStr(sparsity), ',', asStr(lam), ')\n']
    return constructSamplingNode([], script_parts)
| {
"content_hash": "64ce1ceb223685fc806c69287063c4e4",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 130,
"avg_line_length": 30.05442176870748,
"alnum_prop": 0.5796740606609325,
"repo_name": "gweidner/systemml",
"id": "ab74250a5dfdf7dce33ede01e815a4d42f35915a",
"size": "5336",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/main/python/systemml/random/sampling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "31629"
},
{
"name": "Batchfile",
"bytes": "23989"
},
{
"name": "C",
"bytes": "9256"
},
{
"name": "C++",
"bytes": "35736"
},
{
"name": "CMake",
"bytes": "10373"
},
{
"name": "Cuda",
"bytes": "93696"
},
{
"name": "Java",
"bytes": "14308742"
},
{
"name": "Jupyter Notebook",
"bytes": "107164"
},
{
"name": "Makefile",
"bytes": "2470"
},
{
"name": "Python",
"bytes": "363051"
},
{
"name": "R",
"bytes": "825297"
},
{
"name": "Scala",
"bytes": "250021"
},
{
"name": "Shell",
"bytes": "162490"
}
],
"symlink_target": ""
} |
# Copyright (c) 2018, The MITRE Corporation
# All rights reserved
from mixbox import fields
from mixbox import idgen
import maec
from . import _namespace
import maec.bindings.maec_bundle as bundle_binding
from cybox.core.action_reference import ActionReference
from cybox.common.measuresource import MeasureSource
from cybox.common.platform_specification import PlatformSpecification
from cybox.objects.code_object import Code
class BehavioralActionEquivalenceReference(maec.Entity):
    """Reference (by idref) to an Action Equivalence, ordered within a behavior."""
    _binding = bundle_binding
    _binding_class = bundle_binding.BehavioralActionEquivalenceReferenceType
    _namespace = _namespace
    # ID of the referenced Action Equivalence and its position in the behavior.
    action_equivalence_idref = fields.TypedField('action_equivalence_idref')
    behavioral_ordering = fields.TypedField('behavioral_ordering')
class BehavioralActionReference(ActionReference):
    """CybOX ActionReference extended with an ordering within a behavior."""
    _binding = bundle_binding
    _binding_class = bundle_binding.BehavioralActionReferenceType
    _namespace = _namespace
    behavioral_ordering = fields.TypedField('behavioral_ordering')
class BehavioralAction(maec.Entity):
    """A malware action carrying its ordering within an enclosing behavior."""
    _binding = bundle_binding
    _binding_class = bundle_binding.BehavioralActionType
    _namespace = _namespace
    behavioral_ordering = fields.TypedField('behavioral_ordering')
class BehavioralActions(maec.Entity):
    """Container for the actions (direct, referenced, or equivalent) of a behavior."""
    _binding = bundle_binding
    _binding_class = bundle_binding.BehavioralActionsType
    _namespace = _namespace
    #TODO: action_collection.type_ is set below to avoid circular import.
    action_collection = fields.TypedField('Action_Collection', None, multiple=True)
    action = fields.TypedField('Action', BehavioralAction, multiple=True)
    action_reference = fields.TypedField('Action_Reference', BehavioralActionReference, multiple=True)
    action_equivalence_reference = fields.TypedField('Action_Equivalence_Reference', BehavioralActionEquivalenceReference, multiple=True)
class PlatformList(maec.EntityList):
    """List of CybOX platform specifications."""
    _binding = bundle_binding
    _binding_class = bundle_binding.PlatformListType
    _namespace = _namespace
    platform = fields.TypedField("Platform", PlatformSpecification, multiple=True)
class CVEVulnerability(maec.Entity):
    """A vulnerability identified by its CVE id, with an optional description."""
    _binding = bundle_binding
    _binding_class = bundle_binding.CVEVulnerabilityType
    _namespace = _namespace
    cve_id = fields.TypedField('cve_id')
    description = fields.TypedField('Description')
class Exploit(maec.Entity):
    """An exploit of a (possibly known) vulnerability, with CVE/CWE references."""
    _binding = bundle_binding
    _binding_class = bundle_binding.ExploitType
    _namespace = _namespace
    # Flag indicating whether the exploited vulnerability is publicly known.
    known_vulnerability = fields.TypedField('known_vulnerability')
    cve = fields.TypedField('CVE', CVEVulnerability)
    cwe_id = fields.TypedField('CWE_ID', multiple=True)
    targeted_platforms = fields.TypedField('Targeted_Platforms', PlatformList)
class BehaviorPurpose(maec.Entity):
    """The intent of a behavior, optionally tied to a vulnerability exploit."""
    _binding = bundle_binding
    _binding_class = bundle_binding.BehaviorPurposeType
    _namespace = _namespace
    description = fields.TypedField('Description')
    vulnerability_exploit = fields.TypedField('Vulnerability_Exploit', Exploit)
class AssociatedCode(maec.EntityList):
    """List of CybOX code snippets associated with a behavior."""
    _binding = bundle_binding
    _binding_class = bundle_binding.AssociatedCodeType
    _namespace = _namespace
    code_snippet = fields.TypedField("Code_Snippet", Code, multiple=True)
class Behavior(maec.Entity):
    """A discrete unit of malware functionality, composed of ordered actions."""
    _binding = bundle_binding
    _binding_class = bundle_binding.BehaviorType
    _namespace = _namespace
    id_ = fields.TypedField('id')
    ordinal_position = fields.TypedField('ordinal_position')
    status = fields.TypedField('status')
    duration = fields.TypedField('duration')
    purpose = fields.TypedField('Purpose', BehaviorPurpose)
    description = fields.TypedField('Description')
    discovery_method = fields.TypedField('Discovery_Method', MeasureSource)
    action_composition = fields.TypedField('Action_Composition', BehavioralActions)
    associated_code = fields.TypedField('Associated_Code', AssociatedCode)
    #relationships = fields.TypedField('Relationships', BehaviorRelationshipList) # TODO: implement
    def __init__(self, id = None, description = None):
        """Create a Behavior; generates a namespaced id when none is supplied.

        NOTE(review): the `id` parameter shadows the builtin; kept as-is for
        backward compatibility with existing callers.
        """
        super(Behavior, self).__init__()
        if id:
            self.id_ = id
        else:
            self.id_ = idgen.create_id(prefix="behavior")
        self.description = description
# Imported here, after the class definitions, to avoid a circular import with
# maec.bundle.bundle; patches the field type left as None above.
from maec.bundle.bundle import ActionCollection
BehavioralActions.action_collection.type_ = ActionCollection
| {
"content_hash": "bc3a76cd1dc56aa9ca06165db6ae983b",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 137,
"avg_line_length": 39.6283185840708,
"alnum_prop": 0.733363108530594,
"repo_name": "MAECProject/python-maec",
"id": "b4dc62e7bf4e51c87562f5c137bad7c4f69637ef",
"size": "4501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maec/bundle/behavior.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "862178"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add per-user notification flags for new cases and unassigned letters."""
    dependencies = [("users", "0015_profile_email_footer")]
    operations = [
        migrations.AddField(
            model_name="user",
            name="notify_new_case",
            field=models.BooleanField(
                default=False,
                help_text="Whether or not to notify user about all new cases",
                verbose_name="Notify about new case",
            ),
        ),
        migrations.AddField(
            model_name="user",
            name="notify_unassigned_letter",
            field=models.BooleanField(
                default=False,
                help_text="Whether or not to notify user about any letter in free cases",
                verbose_name="Notify about letter in free cases",
            ),
        ),
    ]
| {
"content_hash": "5c932d2822e578b9812e20fe62d5e842",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 89,
"avg_line_length": 31.51851851851852,
"alnum_prop": 0.54524089306698,
"repo_name": "watchdogpolska/poradnia",
"id": "ba45a74535117f1353329c0d7a8d2f53cf4a7feb",
"size": "900",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "poradnia/users/migrations/0016_auto_20170929_0612.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "213565"
},
{
"name": "Dockerfile",
"bytes": "212"
},
{
"name": "HTML",
"bytes": "149976"
},
{
"name": "JavaScript",
"bytes": "1251748"
},
{
"name": "Makefile",
"bytes": "912"
},
{
"name": "Python",
"bytes": "461894"
},
{
"name": "SCSS",
"bytes": "55433"
},
{
"name": "Shell",
"bytes": "320"
}
],
"symlink_target": ""
} |
from manila.api import common
from manila.common import constants
class ViewBuilder(common.ViewBuilder):
    """Model a server API response as a python dictionary."""
    _collection_name = 'security_services'
    # Modifier methods applied to detail views based on the request
    # microversion (see add_ou_to_security_service below).
    _detail_version_modifiers = [
        'add_ou_to_security_service',
    ]
    def summary_list(self, request, security_services):
        """Show a list of security services without many details."""
        return self._list_view(self.summary, request, security_services)
    def detail_list(self, request, security_services):
        """Detailed view of a list of security services."""
        return self._list_view(self.detail, request, security_services)
    def summary(self, request, security_service):
        """Generic, non-detailed view of a security service."""
        return {
            'security_service': {
                'id': security_service.get('id'),
                'name': security_service.get('name'),
                'type': security_service.get('type'),
                # NOTE(vponomaryov): attr "status" was removed from model and
                # is left in view for compatibility purposes since it affects
                # user-facing API. This should be removed right after no one
                # uses it anymore.
                'status': constants.STATUS_NEW,
            }
        }
    def detail(self, request, security_service):
        """Detailed view of a single security service."""
        view = self.summary(request, security_service)
        keys = (
            'created_at', 'updated_at', 'description', 'dns_ip', 'server',
            'domain', 'user', 'password', 'project_id')
        for key in keys:
            view['security_service'][key] = security_service.get(key)
        # Apply microversion-dependent modifiers listed in
        # _detail_version_modifiers to the response dict.
        self.update_versioned_resource_dict(
            request, view['security_service'], security_service)
        return view
    @common.ViewBuilder.versioned_method("2.44")
    def add_ou_to_security_service(self, context, ss_dict, ss):
        # Exposed starting with API microversion 2.44.
        ss_dict['ou'] = ss.get('ou')
    def _list_view(self, func, request, security_services):
        """Provide a view for a list of security services."""
        security_services_list = [func(request, service)['security_service']
                                  for service in security_services]
        security_services_dict = dict(security_services=security_services_list)
        return security_services_dict
| {
"content_hash": "ee6852bd21f1c725e1d5e80159557a51",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 42.36842105263158,
"alnum_prop": 0.6128364389233955,
"repo_name": "openstack/manila",
"id": "62dd6a976f1218507101dde63bed2672539b807e",
"size": "3045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/api/views/security_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "12728998"
},
{
"name": "Shell",
"bytes": "107601"
}
],
"symlink_target": ""
} |
from twisted.internet import defer
from twisted.trial import unittest
from go_api.queue import PausingDeferredQueue
class ImmediateFailureMixin(object):
    """
    Add additional assertion methods.
    """
    def assertImmediateFailure(self, deferred, exception):
        """
        Assert that the given Deferred current result is a Failure with the
        given exception.
        @return: The exception instance in the Deferred.
        """
        failures = []
        deferred.addErrback(failures.append)
        # Exactly one synchronous errback must have fired, with the given type.
        self.assertEqual(len(failures), 1)
        self.assertTrue(failures[0].check(exception))
        return failures[0].value
class TestPausingDeferredQueue(
        unittest.SynchronousTestCase, ImmediateFailureMixin):
    """Tests for PausingDeferredQueue size/backlog limits and cancellation."""
    def test_empty_queue_underflow(self):
        """
        When the total amount of deferred gets is exceeded, a L{QueueUnderflow}
        error is raised.
        """
        backlog = 2
        q = PausingDeferredQueue(size=3, backlog=2)
        for i in range(backlog):
            q.get()
        self.assertRaises(defer.QueueUnderflow, q.get)
    def test_backlog_queue(self):
        """
        If there is a backlog of gets for a queue, they are fulfilled when
        values are placed into the queue.
        """
        backlog = 2
        q = PausingDeferredQueue(size=3, backlog=backlog)
        gotten = []
        # Create backlog
        for i in range(backlog):
            q.get().addCallback(gotten.append)
        # Fill queue to satisfy backlog
        for i in range(backlog):
            d = q.put(i)
            self.assertEqual(self.successResultOf(d), None)
            self.assertEqual(gotten, list(range(i + 1)))
    def test_fill_queue(self):
        """
        A queue of size size is created and filled. If we try to add another
        object to the queue, the returned defer will only fire if an object is
        removed from the queue.
        """
        size = 3
        q = PausingDeferredQueue(size=size, backlog=2)
        for i in range(size - 1):
            d = q.put(i)
            self.assertEqual(self.successResultOf(d), None)
        # This next put fills the queue, so the deferred we return will only
        # get its result when the queue shrinks.
        put_d = q.put(size)
        self.assertNoResult(put_d)
        # When we pull something out of the queue, put_d fires and we're able
        # to put another thing into the queue.
        gotten = []
        q.get().addCallback(gotten.append)
        self.assertEqual(gotten, [0])
        self.assertEqual(self.successResultOf(put_d), None)
        put_d = q.put(size)
        self.assertNoResult(put_d)
    def test_get_with_pending_put(self):
        """
        A put() call in a callback on a deferred returned from put() may be
        called synchronously before the get() that triggers it returns, so
        get() must handle this safely.
        """
        @defer.inlineCallbacks
        def fill_queue(q):
            for i in [0, 1, 2]:
                yield q.put(i)
        q = PausingDeferredQueue(size=1)
        fill_d = fill_queue(q)
        # Each get() unblocks the next pending put() in fill_queue.
        self.assertNoResult(fill_d)
        gotten = []
        q.get().addCallback(gotten.append)
        self.assertEqual(gotten, [0])
        self.assertNoResult(fill_d)
        q.get().addCallback(gotten.append)
        self.assertEqual(gotten, [0, 1])
        self.assertNoResult(fill_d)
        q.get().addCallback(gotten.append)
        self.assertEqual(gotten, [0, 1, 2])
        self.successResultOf(fill_d)
    def test_queue_overflow(self):
        """
        If you try to add more elements than size, a L{QueueOverflow} error
        will be thrown.
        """
        size = 3
        q = PausingDeferredQueue(size=size, backlog=2)
        for i in range(size):
            q.put(i)
        self.assertRaises(defer.QueueOverflow, q.put, None)
    def test_queue_no_limits(self):
        """
        You can put and get objects from the queue when there are no limits
        supplied.
        """
        q = PausingDeferredQueue()
        gotten = []
        for i in range(3):
            q.get().addCallback(gotten.append)
        for i in range(3):
            d = q.put(i)
            self.assertEqual(self.successResultOf(d), None)
        self.assertEqual(gotten, list(range(3)))
    def test_zero_size_overflow(self):
        """
        A L{QueueOverflow} error is raised when there is a put request on a
        queue of size 0
        """
        q = PausingDeferredQueue(size=0)
        self.assertRaises(defer.QueueOverflow, q.put, None)
    def test_zero_backlog_underflow(self):
        """
        A L{QueueUnderflow} error is raised when there is a get request on a
        queue with a backlog of 0.
        """
        queue = PausingDeferredQueue(backlog=0)
        self.assertRaises(defer.QueueUnderflow, queue.get)
    def test_cancelQueueAfterSynchronousGet(self):
        """
        When canceling a L{Deferred} from a L{PausingDeferredQueue} that
        already has a result, the cancel should have no effect.
        """
        def _failOnErrback(_):
            self.fail("Unexpected errback call!")
        queue = PausingDeferredQueue()
        d = queue.get()
        d.addErrback(_failOnErrback)
        queue.put(None)
        d.cancel()
    def test_cancelQueueAfterGet(self):
        """
        When canceling a L{Deferred} from a L{PausingDeferredQueue} that does
        not have a result (i.e., the L{Deferred} has not fired), the cancel
        causes a L{defer.CancelledError} failure. If the queue has a result
        later on, it doesn't try to fire the deferred.
        """
        queue = PausingDeferredQueue()
        d = queue.get()
        d.cancel()
        self.assertImmediateFailure(d, defer.CancelledError)
        def cb(ignore):
            # If the deferred is still linked with the deferred queue, it will
            # fail with an AlreadyCalledError
            queue.put(None)
            return queue.get().addCallback(self.assertIdentical, None)
        d.addCallback(cb)
        done = []
        d.addCallback(done.append)
        self.assertEqual(len(done), 1)
| {
"content_hash": "3419dce6f810f612ea1addefda84c23d",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 79,
"avg_line_length": 32.8563829787234,
"alnum_prop": 0.5977011494252874,
"repo_name": "praekelt/go-api-toolkit",
"id": "55cb25b6040eeae015af3cd197d868766fea37de",
"size": "6177",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "go_api/queue/tests/test_pausing_deferred_queue.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "101203"
},
{
"name": "Shell",
"bytes": "595"
}
],
"symlink_target": ""
} |
import time
import random
def add(x,y):
    """Return the sum of x and y."""
    total = x + y
    return total
def subtract(x,y):
    """Return x minus y."""
    difference = x - y
    return difference
def multiply(x,y):
    """Return the product of x and y."""
    product = x * y
    return product
score = 0
question = 0
# Running totals for the quiz: questions asked so far and correct answers.
print('Welcome to my brilliant maths quiz\n')
time.sleep(1.5)
print()
print('What is your name?')
name = input('Name: ')
print()
print('Welcome to the quiz', name)
# Ask for the pupil's class so the score is appended to the right results file.
time.sleep(2)
print('Which class are you in? A,B or C')
group = input('Class: ')
while group.lower() not in ('a', 'b', 'c'):
    # Previously an unrecognised class left `file` undefined and the script
    # crashed with a NameError when writing results; re-prompt instead.
    print('Please enter A, B or C')
    group = input('Class: ')
file = open('Class_%s_Results.txt' % group.upper(), 'a')
while question < 10:
    # Each question uses two random numbers in 0-12 and a random operator
    # (1 = add, 2 = subtract, 3 = multiply).
    n1 = random.randint(0, 12)
    n2 = random.randint(0, 12)
    operator = random.randint(1, 3)
    if operator == 1:
        print(n1, "+", n2)
        ans = add(n1, n2)
    elif operator == 2:
        print(n1, "-", n2)
        ans = subtract(n1, n2)
    else:
        print(n1, "*", n2)
        ans = multiply(n1, n2)
    # Read the pupil's answer and score it.
    answer = int(input())
    if answer == ans:
        print("Correct")
        score = score + 1
    else:
        print("Incorrect")
    question = question + 1
print()
print()
print('Score = ', score)
# Append "name:score" to the class results file.
file.write(name)
file.write(':')
file.write(str(score))
file.write("\n")
file.close()
if score <= 5:
    print('Unlucky')
else:
    print('Well done')
| {
"content_hash": "60c4f1e1523e73b16320aa23517fcd3f",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 88,
"avg_line_length": 24.88095238095238,
"alnum_prop": 0.6291866028708134,
"repo_name": "Green-octopus678/Computer-Science",
"id": "2028979bb67b4ae80d50a8661f06a385d879e429",
"size": "2090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Need gelp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5763"
}
],
"symlink_target": ""
} |
from tempest.api.object_storage import base
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
# Module-level handle to the tempest configuration singleton.
CONF = config.CONF
class AccountQuotasTest(base.BaseObjectTest):
    """Tests for the Swift account_quotas middleware.

    Sets X-Account-Meta-Quota-Bytes as a ResellerAdmin before each test and
    removes it afterwards.
    """
    @classmethod
    def resource_setup(cls):
        super(AccountQuotasTest, cls).resource_setup()
        cls.container_name = data_utils.rand_name(name="TestContainer")
        cls.container_client.create_container(cls.container_name)
        cls.data.setup_test_user(reseller=True)
        cls.os_reselleradmin = clients.Manager(cls.data.test_credentials)
        # Retrieve a ResellerAdmin auth data and use it to set a quota
        # on the client's account
        cls.reselleradmin_auth_data = \
            cls.os_reselleradmin.auth_provider.auth_data
    def setUp(self):
        super(AccountQuotasTest, self).setUp()
        # Set the reselleradmin auth in headers for next custom_account_client
        # request
        self.custom_account_client.auth_provider.set_alt_auth_data(
            request_part='headers',
            auth_data=self.reselleradmin_auth_data
        )
        # Set a quota of 20 bytes on the user's account before each test
        headers = {"X-Account-Meta-Quota-Bytes": "20"}
        self.os.custom_account_client.request("POST", url="", headers=headers,
                                              body="")
    def tearDown(self):
        # Set the reselleradmin auth in headers for next custom_account_client
        # request
        self.custom_account_client.auth_provider.set_alt_auth_data(
            request_part='headers',
            auth_data=self.reselleradmin_auth_data
        )
        # remove the quota from the container
        headers = {"X-Remove-Account-Meta-Quota-Bytes": "x"}
        self.os.custom_account_client.request("POST", url="", headers=headers,
                                              body="")
        super(AccountQuotasTest, self).tearDown()
    @classmethod
    def resource_cleanup(cls):
        if hasattr(cls, "container_name"):
            cls.delete_containers([cls.container_name])
        super(AccountQuotasTest, cls).resource_cleanup()
    @test.attr(type="smoke")
    @test.requires_ext(extension='account_quotas', service='object')
    def test_upload_valid_object(self):
        # An object smaller than the 20-byte quota must be accepted.
        object_name = data_utils.rand_name(name="TestObject")
        data = data_utils.arbitrary_string()
        resp, _ = self.object_client.create_object(self.container_name,
                                                   object_name, data)
        self.assertHeaders(resp, 'Object', 'PUT')
    @test.attr(type=["smoke"])
    @test.requires_ext(extension='account_quotas', service='object')
    def test_admin_modify_quota(self):
        """Test that the ResellerAdmin is able to modify and remove the quota
        on a user's account.
        Using the custom_account client, the test modifies the quota
        successively to:
        * "25": a random value different from the initial quota value.
        * "" : an empty value, equivalent to the removal of the quota.
        * "20": set the quota to its initial value.
        """
        for quota in ("25", "", "20"):
            self.custom_account_client.auth_provider.set_alt_auth_data(
                request_part='headers',
                auth_data=self.reselleradmin_auth_data
            )
            headers = {"X-Account-Meta-Quota-Bytes": quota}
            resp, _ = self.os.custom_account_client.request("POST", url="",
                                                            headers=headers,
                                                            body="")
            self.assertEqual(resp["status"], "204")
            self.assertHeaders(resp, 'Account', 'POST')
| {
"content_hash": "4c8fa4cf107483b64f0c4bbb6d8023a8",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 78,
"avg_line_length": 38.61224489795919,
"alnum_prop": 0.5993657505285412,
"repo_name": "Lilywei123/tempest",
"id": "e75e971b74793a832d7b1b7e4c4f7702b3066ccf",
"size": "4447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/object_storage/test_account_quotas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2780467"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
} |
import os
import glob
from .basic_script import BasicScript
class Tlacuilo(BasicScript):
    """Compile the XML code for the workflows based on the given templates and configuration files"""
    def get_arguments(self):
        """
        Get the arguments to configure current script
        :return: list of dicts understood by the argument parser
        """
        return [
            {
                'short': '-w',
                'long': '--wf-dir',
                'help': 'Folder where all the workflows files are present'
            },
            {
                'short': '-t',
                'long': '--template-dir',
                'help': 'Folder where all the templates can be found, it should be the root folder inside them all the templates can be found',
                'required': True
            },
            {
                'short': '-o',
                'long': '--output-dir',
                'help': 'Directory where to store the compiled code'
            },
            {
                'short': '-e',
                'long': '--extra',
                'help': 'Extra variables to send to the templates',
                'action': 'append'
            },
            {
                'short': '-i',
                'long': '--cluster-information',
                'action': 'store_true',
                'help': 'Ask interactively for cluster information in order to replace variables in the template',
                'default': False
            },
            {
                'short': '-l',
                'long': '--local-mode',
                'action': 'store_true',
                'help': 'If set, we assume that we\'re on the cluster and no remote connection needs to be made',
                'default': False
            }
        ]
    def run(self, args, injector):
        """
        Run the component to compile the XML workflows using the given templates and configuration files
        :param args: Namespace
        :param injector: Injector
        :raises IOError: when the resolved output directory is not absolute
        """
        logger = injector.get('logger')
        # Fall back to a generated temporary directory when no output
        # directory was supplied on the command line.
        output_directory = args.output_dir if args.output_dir is not None else injector.get(
            'filesystem').generate_temp_dir()
        if not os.path.isabs(output_directory):
            raise IOError('Output directory should be an absolute path in order to save the files')
        logger.info('Getting configuration from files')
        configuration = self.get_wf_configuration(args, injector)
        if args.cluster_information:
            # Interactively resolve the cluster and merge its details into the
            # template configuration.
            cluster_id = injector.get('interactive_cluster_id').get()
            for k, v in injector.get('emr_cluster').get_cluster_information(cluster_id).items():
                configuration[k] = v
        configuration.output_directory = output_directory
        if isinstance(args.extra, list):
            logger.info('Generating configuration from extra variables')
            for value in args.extra:
                # Split on the first '=' only so values may themselves contain
                # '=' (the previous split() silently dropped everything after
                # a second '='). Bare keys map to an empty string.
                key, sep, extra_value = value.partition('=')
                if sep:
                    configuration[key] = extra_value
                else:
                    configuration[value] = ''
        logger.info('Rendering workflows code')
        injector.get('wf_templates_render').render_workflow_folder(
            args.template_dir,
            configuration.template,
            output_directory,
            configuration
        )
        logger.info('Checking for .py spark files on selected configuration folders')
        python_files = []
        for path in configuration.config_paths:
            # Collect sibling .py files next to every configuration file.
            root_path = os.path.dirname(path)
            current_path_files = glob.glob(os.path.join(root_path, '*.py'))
            python_files.extend(current_path_files)
        if python_files:
            logger.info('Copying .py spark files into tmp folder')
            injector.get('filesystem').mkdir(os.path.join(output_directory, 'lib/'))
            for py in python_files:
                injector.get('filesystem').cp(
                    py,
                    os.path.join(output_directory, 'lib/')
                )
) | {
"content_hash": "ea6dadc84342fe5381b308fb0e5b622b",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 143,
"avg_line_length": 37.574074074074076,
"alnum_prop": 0.5288319369147363,
"repo_name": "scm-spain/slippin-jimmy",
"id": "7f69bafc3cd4f93131bcc229b70bbc7679c66976",
"size": "4058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/slippinj/cli/scripts/tlacuilo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "648"
},
{
"name": "Python",
"bytes": "173154"
}
],
"symlink_target": ""
} |
"""
This module defines exceptions for Presto operations. It follows the structure
defined in pep-0249.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import random
import time
import prestodb.logging
# Module-level logger shared by the retry helpers below.
logger = prestodb.logging.get_logger(__name__)
class HttpError(Exception):
    """Generic HTTP-level error raised while talking to the server."""
    pass
class Http503Error(HttpError):
    """Raised when the server responds with HTTP 503 (Service Unavailable)."""
    pass
class PrestoError(Exception):
    """Generic Presto client error."""
    pass
class TimeoutError(Exception):
    """Raised when an operation times out.

    NOTE(review): shadows the Python 3 builtin ``TimeoutError``; kept for
    backward compatibility with existing callers.
    """
    pass
class PrestoQueryError(Exception):
    """Error reported by the Presto coordinator for a submitted query.

    Wraps the ``error`` JSON object of a Presto response and exposes its
    fields through read-only properties.
    """
    def __init__(self, error, query_id=None):
        self._error = error
        self._query_id = query_id

    @property
    def error_code(self):
        return self._error.get('errorCode', None)

    @property
    def error_name(self):
        return self._error.get('errorName', None)

    @property
    def error_type(self):
        return self._error.get('errorType', None)

    @property
    def error_exception(self):
        # Server-side exception class name, only present with failureInfo.
        return self.failure_info.get('type', None) if self.failure_info else None

    @property
    def failure_info(self):
        return self._error.get('failureInfo', None)

    @property
    def message(self):
        # Fall back to a readable default when the server omits a message.
        # (Fixed typo: "did no return" -> "did not return".)
        return self._error.get(
            'message',
            'Presto did not return an error message',
        )

    @property
    def error_location(self):
        """Return (line, column) of the error; raises KeyError when absent."""
        location = self._error['errorLocation']
        return (location['lineNumber'], location['columnNumber'])

    @property
    def query_id(self):
        return self._query_id

    def __repr__(self):
        return '{}(type={}, name={}, message="{}", query_id={})'.format(
            self.__class__.__name__,
            self.error_type,
            self.error_name,
            self.message,
            self.query_id,
        )

    def __str__(self):
        return repr(self)
class PrestoExternalError(PrestoQueryError):
    """Query error of server-reported type EXTERNAL."""
    pass
class PrestoInternalError(PrestoQueryError):
    """Query error of server-reported type INTERNAL_ERROR."""
    pass
class PrestoUserError(PrestoQueryError):
    """Query error of server-reported type USER_ERROR."""
    pass
def retry_with(handle_retry, exceptions, conditions, max_attempts):
    """Build a decorator that retries the wrapped callable.

    A retry is triggered when the callable raises one of ``exceptions`` or
    when its result satisfies any predicate in ``conditions``. The
    ``handle_retry`` strategy is consulted between attempts (e.g. to sleep),
    up to ``max_attempts`` tries in total. Any other exception stops retrying
    immediately and is re-raised.
    """
    def wrapper(func):
        @functools.wraps(func)
        def decorated(*args, **kwargs):
            last_error = None
            outcome = None
            for attempt in range(1, max_attempts + 1):
                try:
                    outcome = func(*args, **kwargs)
                    if any(guard(outcome) for guard in conditions):
                        # Result looks retryable; back off and try again.
                        handle_retry.retry(func, args, kwargs, None, attempt)
                        continue
                    return outcome
                except Exception as exc:
                    last_error = exc
                    if any(isinstance(exc, retryable) for retryable in exceptions):
                        handle_retry.retry(func, args, kwargs, exc, attempt)
                        continue
                    # Non-retryable failure: stop immediately.
                    break
            logger.info('failed after {} attempts'.format(attempt))
            if last_error is not None:
                raise last_error
            return outcome
        return decorated
    return wrapper
class DelayExponential(object):
    """Callable mapping an attempt number to a backoff delay in seconds.

    The delay is ``base * exponent ** attempt``, multiplied by a random
    factor in [0, 1) when jitter is enabled, and capped at ``max_delay``.
    """
    def __init__(
        self,
        base=0.1,  # 100ms
        exponent=2,
        jitter=True,
        max_delay=2 * 3600,  # 2 hours
    ):
        self._base = base
        self._exponent = exponent
        self._jitter = jitter
        self._max_delay = max_delay

    def __call__(self, attempt):
        computed = float(self._base) * self._exponent ** attempt
        if self._jitter:
            # Spread retries of concurrent clients over [0, computed).
            computed *= random.random()
        return min(float(self._max_delay), computed)
class RetryWithExponentialBackoff(object):
    """Retry strategy that sleeps for an exponentially growing delay."""
    def __init__(
        self,
        base=0.1,  # 100ms
        exponent=2,
        jitter=True,
        max_delay=2 * 3600  # 2 hours
    ):
        self._get_delay = DelayExponential(base, exponent, jitter, max_delay)

    def retry(self, func, args, kwargs, err, attempt):
        # Block for the computed backoff before the caller retries.
        pause = self._get_delay(attempt)
        time.sleep(pause)
# PEP 249
class Error(Exception):
    """Base class of all DB-API error exceptions (PEP 249)."""
    pass
class Warning(Exception):
    """PEP 249 warning exception.

    NOTE(review): shadows the builtin ``Warning``; the name is mandated by
    the DB-API specification.
    """
    pass
class InterfaceError(Error):
    """Error related to the database interface rather than the database (PEP 249)."""
    pass
class DatabaseError(Error):
    """Error related to the database itself (PEP 249)."""
    pass
class InternalError(DatabaseError):
    """Internal database error (PEP 249)."""
    pass
class OperationalError(DatabaseError):
    """Error related to the database's operation (PEP 249)."""
    pass
class ProgrammingError(DatabaseError):
    """Programming error, e.g. bad SQL or wrong parameter count (PEP 249)."""
    pass
class IntegrityError(DatabaseError):
    """Relational integrity violation (PEP 249)."""
    pass
class DataError(DatabaseError):
    """Error due to problems with the processed data (PEP 249)."""
    pass
class NotSupportedError(DatabaseError):
    """Method or database API not supported (PEP 249)."""
    pass
| {
"content_hash": "ff52771edb3629cd8e6a75858c8dc324",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 81,
"avg_line_length": 21.676470588235293,
"alnum_prop": 0.5850293984622342,
"repo_name": "ggreg/presto-python-client",
"id": "a30a0759cc1454a478ed1b7569e827449a1bbd6a",
"size": "4966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prestodb/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1139"
},
{
"name": "Python",
"bytes": "85654"
},
{
"name": "Shell",
"bytes": "118"
}
],
"symlink_target": ""
} |
"""Constants used by the Withings component."""
import homeassistant.const as const
# Keys used in hass.data / config entries for this integration.
DATA_MANAGER = "data_manager"
BASE_URL = "base_url"
CLIENT_ID = "client_id"
CLIENT_SECRET = "client_secret"
CODE = "code"
CONFIG = "config"
CREDENTIALS = "credentials"
DOMAIN = "withings"
LOG_NAMESPACE = "homeassistant.components.withings"
MEASURES = "measures"
PROFILE = "profile"
PROFILES = "profiles"
# OAuth callback endpoint registered with Home Assistant.
AUTH_CALLBACK_PATH = "/api/withings/authorize"
AUTH_CALLBACK_NAME = "withings:authorize"
THROTTLE_INTERVAL = 60
# Sleep-state sensor values.
STATE_UNKNOWN = const.STATE_UNKNOWN
STATE_AWAKE = "awake"
STATE_DEEP = "deep"
STATE_LIGHT = "light"
STATE_REM = "rem"
# Measure type identifiers used when querying the Withings API; integer ids
# address body measures, string ids address sleep-summary fields.
MEASURE_TYPE_BODY_TEMP = 71
MEASURE_TYPE_BONE_MASS = 88
MEASURE_TYPE_DIASTOLIC_BP = 9
MEASURE_TYPE_FAT_MASS = 8
MEASURE_TYPE_FAT_MASS_FREE = 5
MEASURE_TYPE_FAT_RATIO = 6
MEASURE_TYPE_HEART_PULSE = 11
MEASURE_TYPE_HEIGHT = 4
MEASURE_TYPE_HYDRATION = 77
MEASURE_TYPE_MUSCLE_MASS = 76
MEASURE_TYPE_PWV = 91
MEASURE_TYPE_SKIN_TEMP = 73
MEASURE_TYPE_SLEEP_DEEP_DURATION = "deepsleepduration"
MEASURE_TYPE_SLEEP_HEART_RATE_AVERAGE = "hr_average"
MEASURE_TYPE_SLEEP_HEART_RATE_MAX = "hr_max"
MEASURE_TYPE_SLEEP_HEART_RATE_MIN = "hr_min"
MEASURE_TYPE_SLEEP_LIGHT_DURATION = "lightsleepduration"
MEASURE_TYPE_SLEEP_REM_DURATION = "remsleepduration"
MEASURE_TYPE_SLEEP_RESPIRATORY_RATE_AVERAGE = "rr_average"
MEASURE_TYPE_SLEEP_RESPIRATORY_RATE_MAX = "rr_max"
MEASURE_TYPE_SLEEP_RESPIRATORY_RATE_MIN = "rr_min"
MEASURE_TYPE_SLEEP_STATE_AWAKE = 0
MEASURE_TYPE_SLEEP_STATE_DEEP = 2
MEASURE_TYPE_SLEEP_STATE_LIGHT = 1
MEASURE_TYPE_SLEEP_STATE_REM = 3
MEASURE_TYPE_SLEEP_TOSLEEP_DURATION = "durationtosleep"
MEASURE_TYPE_SLEEP_TOWAKEUP_DURATION = "durationtowakeup"
MEASURE_TYPE_SLEEP_WAKEUP_DURATION = "wakeupduration"
MEASURE_TYPE_SLEEP_WAKUP_COUNT = "wakeupcount"
MEASURE_TYPE_SPO2 = 54
MEASURE_TYPE_SYSTOLIC_BP = 10
MEASURE_TYPE_TEMP = 12
MEASURE_TYPE_WEIGHT = 1
# Sensor attribute/slug names for each supported measure.
MEAS_BODY_TEMP_C = "body_temperature_c"
MEAS_BONE_MASS_KG = "bone_mass_kg"
MEAS_DIASTOLIC_MMHG = "diastolic_blood_pressure_mmhg"
MEAS_FAT_FREE_MASS_KG = "fat_free_mass_kg"
MEAS_FAT_MASS_KG = "fat_mass_kg"
MEAS_FAT_RATIO_PCT = "fat_ratio_pct"
MEAS_HEART_PULSE_BPM = "heart_pulse_bpm"
MEAS_HEIGHT_M = "height_m"
MEAS_HYDRATION = "hydration"
MEAS_MUSCLE_MASS_KG = "muscle_mass_kg"
MEAS_PWV = "pulse_wave_velocity"
MEAS_SKIN_TEMP_C = "skin_temperature_c"
MEAS_SLEEP_DEEP_DURATION_SECONDS = "sleep_deep_duration_seconds"
MEAS_SLEEP_HEART_RATE_AVERAGE = "sleep_heart_rate_average_bpm"
MEAS_SLEEP_HEART_RATE_MAX = "sleep_heart_rate_max_bpm"
MEAS_SLEEP_HEART_RATE_MIN = "sleep_heart_rate_min_bpm"
MEAS_SLEEP_LIGHT_DURATION_SECONDS = "sleep_light_duration_seconds"
MEAS_SLEEP_REM_DURATION_SECONDS = "sleep_rem_duration_seconds"
MEAS_SLEEP_RESPIRATORY_RATE_AVERAGE = "sleep_respiratory_average_bpm"
MEAS_SLEEP_RESPIRATORY_RATE_MAX = "sleep_respiratory_max_bpm"
MEAS_SLEEP_RESPIRATORY_RATE_MIN = "sleep_respiratory_min_bpm"
MEAS_SLEEP_STATE = "sleep_state"
MEAS_SLEEP_TOSLEEP_DURATION_SECONDS = "sleep_tosleep_duration_seconds"
MEAS_SLEEP_TOWAKEUP_DURATION_SECONDS = "sleep_towakeup_duration_seconds"
MEAS_SLEEP_WAKEUP_COUNT = "sleep_wakeup_count"
MEAS_SLEEP_WAKEUP_DURATION_SECONDS = "sleep_wakeup_duration_seconds"
MEAS_SPO2_PCT = "spo2_pct"
MEAS_SYSTOLIC_MMGH = "systolic_blood_pressure_mmhg"
MEAS_TEMP_C = "temperature_c"
MEAS_WEIGHT_KG = "weight_kg"
# Units of measurement reported alongside sensor values.
UOM_BEATS_PER_MINUTE = "bpm"
UOM_BREATHS_PER_MINUTE = "br/m"
UOM_FREQUENCY = "times"
UOM_METERS_PER_SECOND = "m/s"
UOM_MMHG = "mmhg"
UOM_PERCENT = "%"
UOM_LENGTH_M = const.LENGTH_METERS
UOM_MASS_KG = const.MASS_KILOGRAMS
UOM_SECONDS = "seconds"
UOM_TEMP_C = const.TEMP_CELSIUS
| {
"content_hash": "7e79a8810bf8f795012e11c32c6acf9a",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 72,
"avg_line_length": 34.95145631067961,
"alnum_prop": 0.7569444444444444,
"repo_name": "Cinntax/home-assistant",
"id": "79527d9d557be492f7c58aa482433913fb074586",
"size": "3600",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/withings/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17374056"
},
{
"name": "Shell",
"bytes": "6792"
}
],
"symlink_target": ""
} |
import sys

import AStar
import CommExplore
import ConfigFileReader
# grid = GridWorld.GridWorld(10, 10, [[4, 4], [4, 5]])
# # print grid.get8Neighbors(8, 8)
# aStar = AStar.AStar()
# start = (4, 6)
# goal = (4, 0)
# path, cost = aStar.aStarSearch(grid, start, goal)
# print 'path: ', path
# print 'cost: ', cost[goal]
# print grid.checkCommand(9, 9, 0)
obstacles = [[0, 0], [1, 0],[2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 8]]
initLocs = [[0, 9], [1, 9]]
# initLocs = [[2, 3], [3, 4]]
# print algo.isObstacleEncountered()
# print algo.isWithinR([4, 3])
# print algo.computeUtility([4, 4])
# print algo.gridworld.inBounds((0, 9))
# print algo.gridworld.inBounds((1, 9))
# print algo.gridworld.inBounds((0, 8))
# for j in range(1000):
# algo.runOneIter()
# algo.printVisitedStatus()
cfgReader = ConfigFileReader.ConfigFileReader("barmaze.config")
ret, height, width, numRobots, R, baseX, baseY, initLocs, obstacles = cfgReader.readCfg()
if ret == -1:
print 'readCfg() Unsuccessful!'
sys.exit(-1)
print 'height', height
print 'width', width
print 'numRobots', numRobots
print 'R', R
print 'baseX', baseX
print 'baseY', baseY
print 'initLocs', initLocs
print 'obstacles', obstacles
k = 10
T = 100
algo = CommExplore.CommExplore(height, width, obstacles, numRobots, initLocs, R, k)
algo.printGrid()
print ''
print ''
cfgc = algo.generateCfgcPopulation()
for j in range(1000):
algo.runOneIter()
algo.printVisitedStatus()
# # The MIT License (MIT)
# # Copyright (c) 2014 INSPIRE Lab, BITS Pilani
# # Permission is hereby granted, free of charge, to any person obtaining a copy
# # of this software and associated documentation files (the "Software"), to deal
# # in the Software without restriction, including without limitation the rights
# # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# # copies of the Software, and to permit persons to whom the Software is
# # furnished to do so, subject to the following conditions:
# # The above copyright notice and this permission notice shall be included in all
# # copies or substantial portions of the Software.
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# # SOFTWARE.
# """
# Provides a demo of the Communicative Exploration algorithm for a fixed base station.
# """
# from math import floor
# from time import sleep
# import time
# import Tkinter
# import AStar
# import CommExplore
# import ConfigFileReader
# import GridUI
# cfgReader = ConfigFileReader.ConfigFileReader("freeworld.config")
# ret, height, width, numRobots, R, baseX, baseY, initLocs, obstacles = cfgReader.readCfg()
# if ret == -1:
# print 'readCfg() Unsuccessful!'
# sys.exit(-1)
# # print 'height', height
# # print 'width', width
# # print 'numRobots', numRobots
# # print 'R', R
# # print 'baseX', baseX
# # print 'baseY', baseY
# # print 'initLocs', initLocs
# # print 'obstacles', obstacles
# k = 10
# T = 100
# algo = CommExplore.CommExplore(height, width, obstacles, numRobots, initLocs, R, k)
# algo.printGrid()
# print ''
# print ''
# cfgc = algo.generateCfgcPopulation()
# for j in range(1000):
# algo.runOneIter()
# algo.printVisitedStatus()
# ##
# # algo.runOneIter()
# # algo.runOneIter()
# # algo.runOneIter()
# # algo.runOneIter()
# # print 'frontier', algo.frontier
# ##
# # if height <= 10:
# # xoffset = 300
# # else:
# # xoffset = 100
# # if width <= 10:
# # yoffset = 300
# # else:
# # yoffset = 100
# # maxScreenHeight = 700
# # cellSize = int(floor(maxScreenHeight / (height + 2)))
# # root = Tkinter.Tk()
# # # ex = Example(root)
# # # root.geometry('400x100+500+500')
# # # root.mainloop()
# # gui = GridUI.GridUI(root, height, width, cellSize, algo.gridworld, algo.robots, algo.frontier)
# # guiHeight = str((height + 2) * cellSize)
# # guiWidth = str((width + 2) * cellSize)
# # xOffset = str(xoffset)
# # yOffset = str(yoffset)
# # geometryParam = guiWidth + 'x' + guiHeight + '+' + xOffset + '+' + yOffset
# # root.geometry(geometryParam)
# # def hello():
# # print 'Hello'
# # root.after(500, hello)
# # def run():
# # algo.runOneIter()
# # gui.redraw(height, width, cellSize, algo.gridworld, algo.robots, algo.frontier)
# # root.after(500, run)
# # root.after(500, run)
# # root.mainloop() | {
"content_hash": "8721ece07b5fd9b8d8b933d20e9231d7",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 98,
"avg_line_length": 26.310734463276837,
"alnum_prop": 0.6776894996779043,
"repo_name": "krrish94/commExplore",
"id": "49b4f10e1b1955b2d4d8a6f235f5407f946dff37",
"size": "4657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41901"
}
],
"symlink_target": ""
} |
from version import __version__
| {
"content_hash": "b7663250d175e27e0001595cc5598f54",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 32,
"alnum_prop": 0.75,
"repo_name": "accuen/rqt",
"id": "c6bf7a63fc64f60e25169cf398467d1833319c0e",
"size": "614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/rqt/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46233"
}
],
"symlink_target": ""
} |
import unittest
import os
import textract
class No_Ext_TestCase(unittest.TestCase):
    """Exercise textract.process() on files that carry no filename extension.

    Each fixture in tests/no_ext/ is a real document saved without its
    extension, so the format must be supplied explicitly through the
    ``extension`` keyword argument.
    """

    @staticmethod
    def _extract(fixture_name, extension):
        """Run textract on tests/no_ext/<fixture_name> with an explicit extension."""
        current_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        fixture_path = os.path.join(current_dir, "tests/no_ext", fixture_name)
        return textract.process(fixture_path, extension=extension)

    def test_docx(self):
        print(self._extract("docx_paragraphs_and_tables", 'docx'))

    def test_msg(self):
        print(self._extract("msg_standardized_text", 'msg'))

    def test_pdf(self):
        # Deliberately uses a dot-prefixed extension to cover that spelling too.
        print(self._extract("pdf_standardized_text", '.pdf'))
| {
"content_hash": "11edceb7a76121bf4d5dac9843d1f8c3",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 88,
"avg_line_length": 42.666666666666664,
"alnum_prop": 0.6692708333333334,
"repo_name": "deanmalmgren/textract",
"id": "cf47970ffc3a3a0cb7907122ff34ac7c59a19297",
"size": "1152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_no_ext.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "521"
},
{
"name": "HTML",
"bytes": "491919"
},
{
"name": "Makefile",
"bytes": "1548"
},
{
"name": "PostScript",
"bytes": "968"
},
{
"name": "Python",
"bytes": "58239"
},
{
"name": "Rich Text Format",
"bytes": "78792"
},
{
"name": "Shell",
"bytes": "3383"
}
],
"symlink_target": ""
} |
from keystoneclient.generic import client
from keystoneclient.tests.unit.v2_0 import utils
class DiscoverKeystoneTests(utils.UnauthenticatedTestCase):
    """Tests for generic version discovery against a v2.0 identity endpoint."""

    def setUp(self):
        super(DiscoverKeystoneTests, self).setUp()
        # Canned discovery payload, shaped like keystone's response to a GET
        # on its root URL.
        doc_links = [
            {"rel": "self",
             "href": "http://127.0.0.1:5000/v2.0/"},
            {"rel": "describedby",
             "type": "text/html",
             "href": "http://docs.openstack.org/api/"
                     "openstack-identity-service/2.0/content/"},
            {"rel": "describedby",
             "type": "application/pdf",
             "href": "http://docs.openstack.org/api/"
                     "openstack-identity-service/2.0/"
                     "identity-dev-guide-2.0.pdf"},
            {"rel": "describedby",
             "type": "application/vnd.sun.wadl+xml",
             "href": "http://127.0.0.1:5000/v2.0/identity.wadl"},
        ]
        media_types = [
            {"base": "application/xml",
             "type": "application/vnd.openstack.identity-v2.0+xml"},
            {"base": "application/json",
             "type": "application/vnd.openstack.identity-v2.0+json"},
        ]
        self.TEST_RESPONSE_DICT = {
            "versions": {
                "values": [{
                    "id": "v2.0",
                    "status": "beta",
                    "updated": "2011-11-19T00:00:00Z",
                    "links": doc_links,
                    "media-types": media_types,
                }],
            },
        }

    def _check_discovery_result(self, versions):
        """Assertions shared by both discovery tests."""
        self.assertIsInstance(versions, dict)
        self.assertIn('message', versions)
        self.assertIn('v2.0', versions)
        self.assertEqual(
            versions['v2.0']['url'],
            self.TEST_RESPONSE_DICT['versions']['values'][0]['links'][0]
            ['href'])

    def test_get_versions(self):
        self.stub_url('GET', base_url=self.TEST_ROOT_URL,
                      json=self.TEST_RESPONSE_DICT)
        # Creating a HTTPClient not using session is deprecated.
        with self.deprecations.expect_deprecations_here():
            cs = client.Client()
        self._check_discovery_result(cs.discover(self.TEST_ROOT_URL))

    def test_get_version_local(self):
        self.stub_url('GET', base_url="http://localhost:35357/",
                      json=self.TEST_RESPONSE_DICT)
        # Creating a HTTPClient not using session is deprecated.
        with self.deprecations.expect_deprecations_here():
            cs = client.Client()
        self._check_discovery_result(cs.discover())
| {
"content_hash": "ce651defa3ac0de41e77e5e27a37871b",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 42.12676056338028,
"alnum_prop": 0.4744232698094283,
"repo_name": "klmitch/python-keystoneclient",
"id": "5afe59ab1db7c8b72be5dc3d0250f0c76f931e26",
"size": "3565",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "keystoneclient/tests/unit/v2_0/test_discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1230691"
},
{
"name": "Shell",
"bytes": "1776"
}
],
"symlink_target": ""
} |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import platform
from gppylib.commands.base import Command
def build_info():
    """
    @description: This is mainly for build integration.
                  Maps the detected platform name to the matching
                  installation bundle suffix.
    @return: related installation file name
    @rtype : String
    """
    # Platform-name -> installer-suffix lookup.  Note that RHEL6 maps to the
    # RHEL5 binary here, matching the original behavior -- presumably the
    # RHEL5 build is shipped for both RHEL releases (TODO confirm).
    bundle_by_platform = {
        'RHEL5': 'RHEL5-x86_64',
        'RHEL6': 'RHEL5-x86_64',
        'OSX': 'OSX-i386',
        'SUSE': 'SuSE10-x86_64',
        'SOL': 'SOL-x86_64',
    }
    platform_name = get_info()
    if platform_name not in bundle_by_platform:
        raise Exception("We do not support this platform")
    return bundle_by_platform[platform_name]
def get_info():
    """
    Get the current platform

    @return: one of 'OSX', 'SUSE', 'RHEL5', 'RHEL6', 'SOL', or None when the
             platform cannot be identified
    @rtype : String
    """
    myos = platform.system()
    if myos == "Darwin":
        return 'OSX'
    elif myos == "Linux":
        if os.path.exists("/etc/SuSE-release"):
            return 'SUSE'
        elif os.path.exists("/etc/redhat-release"):
            cmd_str = "cat /etc/redhat-release"
            cmd = Command("run cat for RHEL version", cmd_str)
            cmd.run()
            result = cmd.get_results()
            msg = result.stdout
            # Match the major version right after "release " so that e.g.
            # "... release 6.5 ..." is not misclassified as RHEL5.
            # (The previous check matched a literal "5" anywhere in the
            # string, so any 6.x minor release containing a 5 broke it.)
            if "release 5" in msg:
                return 'RHEL5'
            else:
                return 'RHEL6'
    elif myos == "SunOS":
        return 'SOL'
    return None
| {
"content_hash": "b18fdc61553778d37e96646552771f29",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 72,
"avg_line_length": 30.135135135135137,
"alnum_prop": 0.6134529147982063,
"repo_name": "CraigHarris/gpdb",
"id": "34dffcd8a776b019a65cada815867821b3edbcbd",
"size": "2230",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "src/test/tinc/tinctest/lib/gpplatform.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5196"
},
{
"name": "Batchfile",
"bytes": "11028"
},
{
"name": "C",
"bytes": "35172475"
},
{
"name": "C++",
"bytes": "8253554"
},
{
"name": "CMake",
"bytes": "47394"
},
{
"name": "CSS",
"bytes": "7068"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "928387"
},
{
"name": "HTML",
"bytes": "218703"
},
{
"name": "Java",
"bytes": "1011277"
},
{
"name": "Lex",
"bytes": "210708"
},
{
"name": "M4",
"bytes": "106028"
},
{
"name": "Makefile",
"bytes": "497812"
},
{
"name": "Objective-C",
"bytes": "7799"
},
{
"name": "PLSQL",
"bytes": "236252"
},
{
"name": "PLpgSQL",
"bytes": "53471803"
},
{
"name": "Perl",
"bytes": "4082990"
},
{
"name": "Perl6",
"bytes": "14219"
},
{
"name": "Python",
"bytes": "9788722"
},
{
"name": "Roff",
"bytes": "703079"
},
{
"name": "Ruby",
"bytes": "4910"
},
{
"name": "SQLPL",
"bytes": "3870842"
},
{
"name": "Shell",
"bytes": "504133"
},
{
"name": "XS",
"bytes": "8309"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "485235"
}
],
"symlink_target": ""
} |
import datetime
import json
from couchdbkit import ResourceConflict
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, ungettext
from couchexport.models import SavedExportSchema
from corehq import privileges
from corehq.apps.accounting.utils import (
get_active_reminders_by_domain_name,
log_accounting_error,
get_privileges,
)
from corehq.apps.app_manager.dbaccessors import get_all_apps
from corehq.apps.app_manager.models import Application
from corehq.apps.cloudcare.dbaccessors import get_cloudcare_apps
from corehq.apps.data_interfaces.models import AutomaticUpdateRule
from corehq.apps.domain.models import Domain
from corehq.apps.fixtures.models import FixtureDataType
from corehq.apps.hqmedia.models import HQMediaMixin
from corehq.apps.reminders.models import METHOD_SMS_SURVEY, METHOD_IVR_SURVEY
from corehq.apps.users.models import CommCareUser, UserRole
from corehq.apps.userreports.exceptions import DataSourceConfigurationNotFoundError
from corehq.const import USER_DATE_FORMAT
class BaseModifySubscriptionHandler(object):
    """Shared machinery for reacting to a subscription plan change.

    Subclasses declare, via ``privilege_to_response_function``, a mapping
    from privilege name to the function handling that privilege's change;
    ``get_response`` runs the handler for every supported changed privilege
    and collects the non-None results.
    """

    def __init__(self, domain, new_plan_version, changed_privs, date_start=None):
        # Accept either a Domain object or a plain domain name.
        if isinstance(domain, Domain):
            self.domain = domain
        else:
            self.domain = Domain.get_by_name(domain)
        self.date_start = date_start or datetime.date.today()
        self.new_plan_version = new_plan_version
        # Only keep the changed privileges this handler knows how to process.
        self.privileges = [priv for priv in changed_privs
                           if priv in self.supported_privileges()]

    def get_response(self):
        """Run each privilege handler, retrying once on a stale domain doc."""
        responses = []
        for privilege in self.privileges:
            try:
                result = self.privilege_to_response_function()[privilege](
                    self.domain, self.new_plan_version)
            except ResourceConflict:
                # Something else updated the domain. Reload and try again.
                self.domain = Domain.get_by_name(self.domain.name)
                result = self.privilege_to_response_function()[privilege](
                    self.domain, self.new_plan_version)
            if result is not None:
                responses.append(result)
        return responses

    @property
    def action_type(self):
        raise NotImplementedError

    @classmethod
    def privilege_to_response_function(cls):
        raise NotImplementedError

    @classmethod
    def supported_privileges(cls):
        return cls.privilege_to_response_function().keys()
class BaseModifySubscriptionActionHandler(BaseModifySubscriptionHandler):
    """Base for handlers whose privilege responses are booleans.

    The aggregate result is True only when every individual response
    succeeded (no falsy entries in the collected responses).
    """

    def get_response(self):
        results = super(BaseModifySubscriptionActionHandler, self).get_response()
        return all(results)
# TODO - cache
def _active_reminders(domain):
    # Thin wrapper returning the active reminder documents for *domain*;
    # kept as a seam so the lookup can later be cached (see TODO above).
    return get_active_reminders_by_domain_name(domain.name)
class DomainDowngradeActionHandler(BaseModifySubscriptionActionHandler):
    """
    This carries out the downgrade action based on each privilege.

    Each response returns a boolean: True when the teardown step succeeded
    (or there was nothing to do), False when it failed.
    """
    action_type = "downgrade"

    @classmethod
    def privilege_to_response_function(cls):
        # Maps each privilege being lost to the function tearing down the
        # corresponding project state.
        privs_to_responses = {
            privileges.OUTBOUND_SMS: cls.response_outbound_sms,
            privileges.INBOUND_SMS: cls.response_inbound_sms,
            privileges.ROLE_BASED_ACCESS: cls.response_role_based_access,
            privileges.DATA_CLEANUP: cls.response_data_cleanup,
            privileges.COMMCARE_LOGO_UPLOADER: cls.response_commcare_logo_uploader,
            privileges.ADVANCED_DOMAIN_SECURITY: cls.response_domain_security,
        }
        # All report-builder add-on privileges share a single handler.
        privs_to_responses.update({
            p: cls.response_report_builder
            for p in privileges.REPORT_BUILDER_ADD_ON_PRIVS
        })
        return privs_to_responses

    @staticmethod
    def response_outbound_sms(domain, new_plan_version):
        """
        Reminder rules will be deactivated.
        """
        try:
            for reminder in _active_reminders(domain):
                reminder.active = False
                reminder.save()
        except Exception:
            log_accounting_error(
                "Failed to downgrade outbound sms for domain %s."
                % domain.name
            )
            return False
        return True

    @staticmethod
    def response_inbound_sms(domain, new_plan_version):
        """
        All Reminder rules utilizing "survey" will be deactivated.
        """
        try:
            surveys = filter(
                lambda x: x.method in [METHOD_IVR_SURVEY, METHOD_SMS_SURVEY],
                _active_reminders(domain)
            )
            for survey in surveys:
                survey.active = False
                survey.save()
        except Exception:
            log_accounting_error(
                "Failed to downgrade inbound sms for domain %s."
                % domain.name
            )
            return False
        return True

    @staticmethod
    def response_role_based_access(domain, new_plan_version):
        """
        Perform Role Based Access Downgrade
        - Archive custom roles.
        - Set user roles using custom roles to Read Only.
        - Reset initial roles to standard permissions.
        """
        custom_roles = [r.get_id for r in UserRole.get_custom_roles_by_domain(domain.name)]
        if not custom_roles:
            return True
        # temporarily disable this part of the downgrade until we
        # have a better user experience for notifying the downgraded user
        # read_only_role = UserRole.get_read_only_role_by_domain(self.domain.name)
        # web_users = WebUser.by_domain(self.domain.name)
        # for web_user in web_users:
        #     if web_user.get_domain_membership(self.domain.name).role_id in custom_roles:
        #         web_user.set_role(self.domain.name, read_only_role.get_qualified_id())
        #         web_user.save()
        # for cc_user in CommCareUser.by_domain(self.domain.name):
        #     if cc_user.get_domain_membership(self.domain.name).role_id in custom_roles:
        #         cc_user.set_role(self.domain.name, 'none')
        #         cc_user.save()
        UserRole.archive_custom_roles_for_domain(domain.name)
        UserRole.reset_initial_roles_for_domain(domain.name)
        return True

    @staticmethod
    def response_data_cleanup(domain, new_plan_version):
        """
        Any active automatic case update rules should be deactivated.
        """
        try:
            AutomaticUpdateRule.objects.filter(
                domain=domain.name,
                deleted=False,
                active=True,
            ).update(active=False)
            return True
        except Exception:
            log_accounting_error(
                "Failed to deactivate automatic update rules "
                "for domain %s." % domain.name
            )
            return False

    @staticmethod
    def response_commcare_logo_uploader(domain, new_plan_version):
        """Make sure no existing applications are using a logo.
        """
        try:
            for app in get_all_apps(domain.name):
                if isinstance(app, Application):
                    has_archived = app.archive_logos()
                    if has_archived:
                        app.save()
            return True
        except Exception:
            log_accounting_error(
                "Failed to remove all commcare logos for domain %s."
                % domain.name
            )
            return False

    @staticmethod
    def response_domain_security(domain, new_plan_version):
        """
        Disable any domain-enforced security features.

        Fixed for consistency: this previously returned None implicitly while
        every sibling action response returns True/False.  None was already
        treated as success by the base class (non-None responses only are
        collected), so returning True preserves the outcome.
        """
        if domain.two_factor_auth or domain.secure_sessions or domain.strong_mobile_passwords:
            domain.two_factor_auth = False
            domain.secure_sessions = False
            domain.strong_mobile_passwords = False
            domain.save()
        return True

    @staticmethod
    def response_report_builder(project, new_plan_version):
        """
        Hide report-builder reports and deactivate their data sources when
        the new plan carries no report builder add-on.
        """
        if not _has_report_builder_add_on(new_plan_version):
            # Clear paywall flags
            project.requested_report_builder_trial = []
            project.requested_report_builder_subscription = []
            project.save()

            # Deactivate all report builder data sources
            builder_reports = _get_report_builder_reports(project)
            for report in builder_reports:
                try:
                    report.config.deactivate()
                except DataSourceConfigurationNotFoundError:
                    # Data source already gone; nothing to deactivate.
                    pass
                report.visible = False
                report.save()
        return True
class DomainUpgradeActionHandler(BaseModifySubscriptionActionHandler):
    """
    Carries out the upgrade action for each privilege being gained.
    Each response returns a boolean indicating success.
    """
    action_type = "upgrade"

    @classmethod
    def privilege_to_response_function(cls):
        mapping = {
            privileges.ROLE_BASED_ACCESS: cls.response_role_based_access,
            privileges.COMMCARE_LOGO_UPLOADER: cls.response_commcare_logo_uploader,
        }
        # Every report-builder add-on privilege shares one handler.
        for priv in privileges.REPORT_BUILDER_ADD_ON_PRIVS:
            mapping[priv] = cls.response_report_builder
        return mapping

    @staticmethod
    def response_role_based_access(domain, new_plan_version):
        """
        Perform Role Based Access Upgrade
        - Un-archive custom roles.
        """
        UserRole.unarchive_roles_for_domain(domain.name)
        return True

    @staticmethod
    def response_commcare_logo_uploader(domain, new_plan_version):
        """Restore any archived CommCare logos on the domain's applications."""
        try:
            for app in get_all_apps(domain.name):
                if isinstance(app, HQMediaMixin) and app.restore_logos():
                    app.save()
            return True
        except Exception:
            log_accounting_error(
                "Failed to restore all commcare logos for domain %s."
                % domain.name
            )
            return False

    @staticmethod
    def response_report_builder(project, new_plan_version):
        """Re-show report-builder reports and reactivate their data sources."""
        from corehq.apps.userreports.models import ReportConfiguration
        from corehq.apps.userreports.tasks import rebuild_indicators
        all_reports = ReportConfiguration.by_domain(project.name)
        for report in [r for r in all_reports if r.report_meta.created_by_builder]:
            try:
                report.visible = True
                report.save()
                if report.config.is_deactivated:
                    report.config.is_deactivated = False
                    report.config.save()
                    rebuild_indicators.delay(report.config._id)
            except DataSourceConfigurationNotFoundError:
                # Data source is gone; nothing to reactivate for this report.
                pass
        return True
# TODO - cache
def _active_reminder_methods(domain):
    """Return the delivery method of every active reminder rule on *domain*."""
    return [rule.method
            for rule in get_active_reminders_by_domain_name(domain.name)]
def _fmt_alert(message, details=None):
if details is not None and not isinstance(details, list):
raise ValueError("details should be a list.")
return {
'message': message,
'details': details,
}
def _has_report_builder_add_on(plan_version):
    """
    Return True if the given SoftwarePlanVersion has a report builder add-on
    privilege.
    """
    if plan_version is None:
        return False
    granted = get_privileges(plan_version)
    return not privileges.REPORT_BUILDER_ADD_ON_PRIVS.isdisjoint(granted)
def _get_report_builder_reports(project):
    """Return the project's ReportConfigurations created by report builder."""
    from corehq.apps.userreports.models import ReportConfiguration
    all_reports = ReportConfiguration.by_domain(project.name)
    return [report for report in all_reports
            if report.report_meta.created_by_builder]
class DomainDowngradeStatusHandler(BaseModifySubscriptionHandler):
"""
This returns a list of alerts for the user if their current domain is using features that
will be removed during the downgrade.
"""
action_type = "notification"
@classmethod
def privilege_to_response_function(cls):
privs_to_responses = {
privileges.CLOUDCARE: cls.response_cloudcare,
privileges.LOOKUP_TABLES: cls.response_lookup_tables,
privileges.CUSTOM_BRANDING: cls.response_custom_branding,
privileges.OUTBOUND_SMS: cls.response_outbound_sms,
privileges.INBOUND_SMS: cls.response_inbound_sms,
privileges.DEIDENTIFIED_DATA: cls.response_deidentified_data,
privileges.ROLE_BASED_ACCESS: cls.response_role_based_access,
privileges.DATA_CLEANUP: cls.response_data_cleanup,
privileges.ADVANCED_DOMAIN_SECURITY: cls.response_domain_security,
}
privs_to_responses.update({
p: cls.response_report_builder
for p in privileges.REPORT_BUILDER_ADD_ON_PRIVS
})
return privs_to_responses
def get_response(self):
response = super(DomainDowngradeStatusHandler, self).get_response()
response.extend(filter(
lambda response: response is not None,
[
self.response_later_subscription,
self.response_mobile_worker_creation,
]
))
return response
@staticmethod
def response_cloudcare(domain, new_plan_version):
"""
CloudCare enabled apps will have cloudcare_enabled set to false on downgrade.
"""
cloudcare_enabled_apps = get_cloudcare_apps(domain.name)
if not cloudcare_enabled_apps:
return None
num_apps = len(cloudcare_enabled_apps)
return _fmt_alert(
ungettext(
"You have %(num_apps)d application that will lose CloudCare "
"access if you select this plan.",
"You have %(num_apps)d applications that will lose CloudCare "
"access if you select this plan.",
num_apps
) % {
'num_apps': num_apps,
},
[mark_safe('<a href="%(url)s">%(title)s</a>') % {
'title': app['name'],
'url': reverse('view_app', args=[domain.name, app['_id']])
} for app in cloudcare_enabled_apps],
)
@staticmethod
def response_lookup_tables(domain, new_plan_version):
"""
Lookup tables will be deleted on downgrade.
"""
num_fixtures = FixtureDataType.total_by_domain(domain.name)
if num_fixtures > 0:
return _fmt_alert(
ungettext(
"You have %(num_fix)s Lookup Table set up. Selecting this "
"plan will delete this Lookup Table.",
"You have %(num_fix)s Lookup Tables set up. Selecting "
"this plan will delete these Lookup Tables.",
num_fixtures
) % {'num_fix': num_fixtures}
)
@staticmethod
def response_custom_branding(domain, new_plan_version):
"""
Custom logos will be removed on downgrade.
"""
if domain.has_custom_logo:
return _fmt_alert(_(
"You are using custom branding. "
"Selecting this plan will remove this feature."
))
@staticmethod
def response_outbound_sms(domain, new_plan_version):
"""
Reminder rules will be deactivated.
"""
num_active = len(_active_reminder_methods(domain))
if num_active > 0:
return _fmt_alert(
ungettext(
"You have %(num_active)d active Reminder Rule. Selecting "
"this plan will deactivate this rule.",
"You have %(num_active)d active Reminder Rules. Selecting "
"this plan will deactivate these rules.",
num_active
) % {
'num_active': num_active,
}
)
@staticmethod
def response_inbound_sms(domain, new_plan_version):
"""
All Reminder rules utilizing "survey" will be deactivated.
"""
surveys = filter(lambda x: x in [METHOD_IVR_SURVEY, METHOD_SMS_SURVEY], _active_reminder_methods(domain))
num_survey = len(surveys)
if num_survey > 0:
return _fmt_alert(
ungettext(
"You have %(num_active)d active Reminder Rule for a Survey. "
"Selecting this plan will deactivate this rule.",
"You have %(num_active)d active Reminder Rules for a Survey. "
"Selecting this plan will deactivate these rules.",
num_survey
) % {
'num_active': num_survey,
}
)
@staticmethod
def response_deidentified_data(domain, new_plan_version):
"""
De-id exports will be hidden
"""
startkey = json.dumps([domain.name, ""])[:-3]
endkey = "%s{" % startkey
reports = SavedExportSchema.view(
"couchexport/saved_export_schemas",
startkey=startkey,
endkey=endkey,
include_docs=True,
reduce=False,
)
num_deid_reports = len(filter(lambda r: r.is_safe, reports))
if num_deid_reports > 0:
return _fmt_alert(
ungettext(
"You have %(num)d De-Identified Export. Selecting this "
"plan will remove it.",
"You have %(num)d De-Identified Exports. Selecting this "
"plan will remove them.",
num_deid_reports
) % {
'num': num_deid_reports,
}
)
@property
def response_mobile_worker_creation(self):
"""
Get the allowed number of mobile workers based on plan version.
"""
from corehq.apps.accounting.models import FeatureType, FeatureRate, UNLIMITED_FEATURE_USAGE
num_users = CommCareUser.total_by_domain(self.domain.name, is_active=True)
try:
user_rate = self.new_plan_version.feature_rates.filter(
feature__feature_type=FeatureType.USER).latest('date_created')
if user_rate.monthly_limit == UNLIMITED_FEATURE_USAGE:
return
num_allowed = user_rate.monthly_limit
num_extra = num_users - num_allowed
if num_extra > 0:
return _fmt_alert(
ungettext(
"You have %(num_users)d Mobile Worker over the monthly "
"limit of %(monthly_limit)d for this new plan. There "
"will be an additional monthly charge of USD "
"%(excess_fee)s per Mobile Worker, totalling USD "
"%(monthly_total)s per month, if you select this plan.",
"You have %(num_users)d Mobile Workers over the "
"monthly limit of %(monthly_limit)d for this new plan. "
"There will be an additional monthly charge "
"of USD %(excess_fee)s per Mobile Worker, totalling "
"USD %(monthly_total)s per month, if you "
"select this plan.",
num_extra
) % {
'num_users': num_extra,
'monthly_limit': user_rate.monthly_limit,
'excess_fee': user_rate.per_excess_fee,
'monthly_total': user_rate.per_excess_fee * num_extra,
}
)
except FeatureRate.DoesNotExist:
log_accounting_error(
"It seems that the plan %s did not have rate for Mobile "
"Workers. This is problematic."
% self.new_plan_version.plan.name
)
@staticmethod
def response_role_based_access(domain, new_plan_version):
"""
Alert the user if there are currently custom roles set up for the domain.
"""
custom_roles = [r.name for r in UserRole.get_custom_roles_by_domain(domain.name)]
num_roles = len(custom_roles)
if num_roles > 0:
return _fmt_alert(
ungettext(
"You have %(num_roles)d Custom Role configured for your "
"project. If you select this plan, all users with that "
"role will change to having the Read Only role.",
"You have %(num_roles)d Custom Roles configured for your "
"project . If you select this plan, all users with these "
"roles will change to having the Read Only role.",
num_roles
) % {
'num_roles': num_roles,
}, custom_roles)
@property
def response_later_subscription(self):
"""
Alert the user if they have subscriptions scheduled to start
in the future.
"""
from corehq.apps.accounting.models import Subscription
later_subs = Subscription.objects.filter(
subscriber__domain=self.domain.name,
date_start__gt=self.date_start
).order_by('date_start')
if later_subs.exists():
next_subscription = later_subs[0]
plan_desc = next_subscription.plan_version.user_facing_description
return _fmt_alert(_(
"You have a subscription SCHEDULED TO START on %(date_start)s. "
"Changing this plan will CANCEL that %(plan_name)s "
"subscription."
) % {
'date_start': next_subscription.date_start.strftime(USER_DATE_FORMAT),
'plan_name': plan_desc['name'],
})
@staticmethod
def response_data_cleanup(domain, new_plan_version):
"""
Any active automatic case update rules should be deactivated.
"""
rule_count = AutomaticUpdateRule.objects.filter(
domain=domain.name,
deleted=False,
active=True,
).count()
if rule_count > 0:
return _fmt_alert(
ungettext(
"You have %(rule_count)d automatic case update rule "
"configured in your project. If you select this plan, "
"this rule will be deactivated.",
"You have %(rule_count)d automatic case update rules "
"configured in your project. If you select this plan, "
"these rules will be deactivated.",
rule_count
) % {
'rule_count': rule_count,
}
)
@staticmethod
def response_domain_security(domain, new_plan_version):
"""
turn off any domain enforced security features and alert user of deactivated features
"""
two_factor = domain.two_factor_auth
secure_sessions = domain.secure_sessions
strong_mobile_passwords = domain.strong_mobile_passwords
msgs = []
if secure_sessions:
msgs.append(_("Your project has enabled a 30 minute session timeout setting. "
"By changing to a different plan, you will lose the ability to "
"enforce this shorter timeout policy."))
if two_factor:
msgs.append(_("Two factor authentication is currently required of all of your "
"web users for this project space. By changing to a different "
"plan you will lose the ability to enforce this requirement. "
"However, any web user who still wants to use two factor "
"authentication will be able to continue using it."))
if strong_mobile_passwords:
msgs.append(_("Your project currently requires all mobile workers to have "
"strong passwords. By changing to a different plan, you will "
"lose the ability to enforce these password requirements."))
if msgs:
return _fmt_alert(
_("The following security features will be affected if you select this plan:"),
msgs
)
@staticmethod
def response_report_builder(project, new_plan_version):
    """
    Warn that report builder reports will become inaccessible when the
    new plan does not include the report builder add-on.

    Returns an alert when there are such reports, otherwise None.
    """
    if not _has_report_builder_add_on(new_plan_version):
        reports = _get_report_builder_reports(project)
        if reports:
            # Bug fix: the original format spec "%(number_of_reports) r..."
            # was missing its 'd' conversion character, so ' r' was consumed
            # as a flag + %r conversion and the message rendered as
            # "You have 3eport builder reports.". Also restored the missing
            # space between the two concatenated sentence literals.
            return _fmt_alert(_(
                "You have %(number_of_reports)d report builder reports. "
                "By selecting this plan you will lose access to those reports."
            ) % {'number_of_reports': len(reports)})
| {
"content_hash": "3edb7e7627069689c81b2bea08d92fa6",
"timestamp": "",
"source": "github",
"line_count": 639,
"max_line_length": 113,
"avg_line_length": 39.33646322378717,
"alnum_prop": 0.5821928707829408,
"repo_name": "qedsoftware/commcare-hq",
"id": "3f3ba6cbcb56ebfc220ab31166aff130f4234e49",
"size": "25136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/accounting/subscription_changes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
"""
Geographically weighted regression
"""
import numpy as np
from .gwr.base.gwr import GWR as PySAL_GWR
from .gwr.base.sel_bw import Sel_BW
import json
from crankshaft.analysis_data_provider import AnalysisDataProvider
import plpy
class GWR:
    """Geographically weighted regression runner for CARTO/crankshaft.

    Pulls data through an AnalysisDataProvider, fits a PySAL GWR model and
    returns per-row JSON-encoded model outputs. Errors are reported through
    ``plpy.error`` because this runs inside PL/Python.
    """

    def __init__(self, data_provider=None):
        # Allow injecting a provider for testing; default to the live one.
        if data_provider:
            self.data_provider = data_provider
        else:
            self.data_provider = AnalysisDataProvider()

    def gwr(self, subquery, dep_var, ind_vars,
            bw=None, fixed=False, kernel='bisquare',
            geom_col='the_geom', id_col='cartodb_id'):
        """
        Fit a GWR model over the rows produced by `subquery`.

        subquery: 'select * from demographics'
        dep_var: 'pctbachelor'
        ind_vars: ['intercept', 'pctpov', 'pctrural', 'pctblack']
        bw: value of bandwidth, if None then select optimal
        fixed: False (kNN) or True ('distance')
        kernel: 'bisquare' (default), or 'exponential', 'gaussian'

        Returns a list of tuples, one per input row:
        (coeffs_json, stand_errs_json, t_vals_json, filtered_t_vals_json,
         predicted, residual, local_r_squared, bandwidth, rowid).
        """
        params = {'geom_col': geom_col,
                  'id_col': id_col,
                  'subquery': subquery,
                  'dep_var': dep_var,
                  'ind_vars': ind_vars}

        # get data from data provider
        query_result = self.data_provider.get_gwr(params)

        # exit if data to analyze is empty
        if len(query_result) == 0:
            plpy.error('No data passed to analysis or independent variables '
                       'are all null-valued')
        # unique ids and variable names list
        # NOTE(review): np.int / np.float are deprecated aliases in modern
        # NumPy -- confirm the pinned NumPy version before upgrading.
        rowid = np.array(query_result[0]['rowid'], dtype=np.int)

        # x, y are centroids of input geometries
        x = np.array(query_result[0]['x'], dtype=np.float)
        y = np.array(query_result[0]['y'], dtype=np.float)
        coords = list(zip(x, y))

        # extract dependent variable
        Y = np.array(query_result[0]['dep_var'], dtype=np.float).reshape((-1, 1))

        n = Y.shape[0]
        k = len(ind_vars)
        X = np.zeros((n, k))

        # extract query result: provider exposes predictors as attr1..attrK
        for attr in range(0, k):
            attr_name = 'attr' + str(attr + 1)
            X[:, attr] = np.array(
                query_result[0][attr_name], dtype=np.float).flatten()

        # add intercept variable name
        # NOTE(review): this mutates the caller's ind_vars list in place.
        ind_vars.insert(0, 'intercept')

        # calculate bandwidth if none is supplied
        if bw is None:
            bw = Sel_BW(coords, Y, X,
                        fixed=fixed, kernel=kernel).search()
        model = PySAL_GWR(coords, Y, X, bw,
                          fixed=fixed, kernel=kernel).fit()

        # containers for outputs
        coeffs = []
        stand_errs = []
        t_vals = []
        filtered_t_vals = []

        # extracted model information
        c_alpha = model.adj_alpha
        filtered_t = model.filter_tvals(c_alpha[1])
        predicted = model.predy.flatten()
        residuals = model.resid_response
        r_squared = model.localR2.flatten()
        # broadcast the (scalar) bandwidth so every output row carries it
        bw = np.repeat(float(bw), n)

        # create lists of json objs for model outputs
        # (the comprehension variable k shadows the outer k only inside the
        # comprehension's own scope)
        for idx in range(n):
            coeffs.append(json.dumps({var: model.params[idx, k]
                                      for k, var in enumerate(ind_vars)}))
            stand_errs.append(json.dumps({var: model.bse[idx, k]
                                          for k, var in enumerate(ind_vars)}))
            t_vals.append(json.dumps({var: model.tvalues[idx, k]
                                      for k, var in enumerate(ind_vars)}))
            filtered_t_vals.append(
                json.dumps({var: filtered_t[idx, k]
                            for k, var in enumerate(ind_vars)}))

        return list(zip(coeffs, stand_errs, t_vals, filtered_t_vals,
                        predicted, residuals, r_squared, bw, rowid))

    def gwr_predict(self, subquery, dep_var, ind_vars,
                    bw=None, fixed=False, kernel='bisquare',
                    geom_col='the_geom', id_col='cartodb_id'):
        """
        Train a GWR model on rows with a non-null dependent variable and
        predict at the rows where it is null.

        subquery: 'select * from demographics'
        dep_var: 'pctbachelor'
        ind_vars: ['intercept', 'pctpov', 'pctrural', 'pctblack']
        bw: value of bandwidth, if None then select optimal
        fixed: False (kNN) or True ('distance')
        kernel: 'bisquare' (default), or 'exponential', 'gaussian'

        Returns a list of tuples for the prediction rows only:
        (coeffs_json, stand_errs_json, t_vals_json,
         local_r_squared, predicted, rowid).
        """
        params = {'geom_col': geom_col,
                  'id_col': id_col,
                  'subquery': subquery,
                  'dep_var': dep_var,
                  'ind_vars': ind_vars}

        # get data from data provider
        query_result = self.data_provider.get_gwr_predict(params)

        # exit if data to analyze is empty
        if len(query_result) == 0:
            plpy.error('No data passed to analysis or independent variables '
                       'are all null-valued')
        # unique ids and variable names list
        rowid = np.array(query_result[0]['rowid'], dtype=np.int)

        x = np.array(query_result[0]['x'], dtype=np.float)
        y = np.array(query_result[0]['y'], dtype=np.float)
        coords = np.array(list(zip(x, y)), dtype=np.float)

        # extract dependent variable
        # (no dtype here: null rows arrive as Python None objects)
        Y = np.array(query_result[0]['dep_var']).reshape((-1, 1))

        n = Y.shape[0]
        k = len(ind_vars)
        X = np.empty((n, k), dtype=np.float)

        for attr in range(0, k):
            attr_name = 'attr' + str(attr + 1)
            X[:, attr] = np.array(
                query_result[0][attr_name], dtype=np.float).flatten()

        # add intercept variable name
        # NOTE(review): mutates the caller's ind_vars list in place.
        ind_vars.insert(0, 'intercept')

        # split data into "training" and "test" for predictions
        # create index to split based on null y values
        train = np.where(Y != np.array(None))[0]
        test = np.where(Y == np.array(None))[0]

        # report error if there is no data to predict
        if len(test) < 1:
            plpy.error('No rows flagged for prediction: verify that rows '
                       'denoting prediction locations have a dependent '
                       'variable value of `null`')

        # split dependent variable (only need training which is non-Null's)
        Y_train = Y[train].reshape((-1, 1))
        Y_train = Y_train.astype(np.float)

        # split coords
        coords_train = coords[train]
        coords_test = coords[test]

        # split explanatory variables
        X_train = X[train]
        X_test = X[test]

        # calculate bandwidth if none is supplied
        if bw is None:
            bw = Sel_BW(coords_train, Y_train, X_train,
                        fixed=fixed, kernel=kernel).search()

        # estimate model and predict at new locations
        model = PySAL_GWR(coords_train, Y_train, X_train,
                          bw, fixed=fixed,
                          kernel=kernel).predict(coords_test, X_test)

        # containers for per-prediction-row outputs
        coeffs = []
        stand_errs = []
        t_vals = []

        r_squared = model.localR2.flatten()
        predicted = model.predy.flatten()

        m = len(model.predy)
        for idx in range(m):
            coeffs.append(json.dumps({var: model.params[idx, k]
                                      for k, var in enumerate(ind_vars)}))
            stand_errs.append(json.dumps({var: model.bse[idx, k]
                                          for k, var in enumerate(ind_vars)}))
            t_vals.append(json.dumps({var: model.tvalues[idx, k]
                                      for k, var in enumerate(ind_vars)}))

        return list(zip(coeffs, stand_errs, t_vals,
                        r_squared, predicted, rowid[test]))
| {
"content_hash": "d3b1aeead0d663a42a07fe487a283101",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 81,
"avg_line_length": 37.31188118811881,
"alnum_prop": 0.5329706779885897,
"repo_name": "CartoDB/crankshaft",
"id": "04e78a3728db6702996ccc326f2c586d295a8288",
"size": "7537",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/py/crankshaft/crankshaft/regression/gwr_cs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "104176"
},
{
"name": "Makefile",
"bytes": "5965"
},
{
"name": "PLpgSQL",
"bytes": "2108153"
},
{
"name": "Python",
"bytes": "4215676"
},
{
"name": "Shell",
"bytes": "5663"
}
],
"symlink_target": ""
} |
"""Definitions for the semantics segment of the Cranelift language."""
from cdsl.ti import TypeEnv, ti_rtl, get_type_env
from cdsl.operands import ImmediateKind
from cdsl.ast import Var
try:
from typing import List, Dict, Tuple # noqa
from cdsl.ast import VarAtomMap # noqa
from cdsl.xform import XForm, Rtl # noqa
from cdsl.ti import VarTyping # noqa
from cdsl.instructions import Instruction, InstructionSemantics # noqa
except ImportError:
pass
def verify_semantics(inst, src, xforms):
    # type: (Instruction, Rtl, InstructionSemantics) -> None
    """
    Verify that the semantics transforms in xforms correctly describe the
    instruction described by the src Rtl. This involves checking that:
        0) src is a single instance of inst
        1) For all x \\in xforms x.src is a single instance of inst
        2) For any concrete values V of Literals in inst:
            For all concrete typing T of inst:
                Exists single x \\in xforms that applies to src conretazied to
                V and T
    """
    # 0) The source rtl is always a single instance of inst
    assert len(src.rtl) == 1 and src.rtl[0].expr.inst == inst

    # 1) For all XForms x, x.src is a single instance of inst
    for x in xforms:
        assert len(x.src.rtl) == 1 and x.src.rtl[0].expr.inst == inst

    variants = [src]  # type: List[Rtl]

    # 2) For all enumerated immediates, compute all the possible
    # versions of src with the concrete value filled in.
    for i in inst.imm_opnums:
        op = inst.ins[i]
        if not (isinstance(op.kind, ImmediateKind) and
                op.kind.is_enumerable()):
            continue

        new_variants = []  # type: List[Rtl]
        for rtl_var in variants:
            s = {v: v for v in rtl_var.vars()}  # type: VarAtomMap
            arg = rtl_var.rtl[0].expr.args[i]
            assert isinstance(arg, Var)
            for val in op.kind.possible_values():
                s[arg] = val
                new_variants.append(rtl_var.copy(s))
        variants = new_variants

    # For any possible version of the src with concrete enumerated immediates
    for src in variants:
        # 2) Any possible typing should be covered by exactly ONE semantic
        # XForm
        src = src.copy({})
        typenv = get_type_env(ti_rtl(src, TypeEnv()))
        typenv.normalize()
        typenv = typenv.extract()

        for t in typenv.concrete_typings():
            matching_xforms = []  # type: List[XForm]
            for x in xforms:
                if src.substitution(x.src, {}) is None:
                    continue

                # Translate t into x's namespace using x.symtab.
                # Bug fix: bind the translated typing to a fresh name
                # instead of rebinding t itself -- the original overwrote
                # the loop variable, so every xform after the first was
                # permits-checked against an already-translated typing.
                translated = {x.symtab[str(v)]: tv
                              for (v, tv) in t.items()}
                if x.ti.permits(translated):
                    matching_xforms.append(x)

            assert len(matching_xforms) == 1,\
                ("Possible typing {} of {} not matched by exactly one case " +
                 ": {}").format(t, src.rtl[0], matching_xforms)
| {
"content_hash": "b38f64c1a710bd17fd4ccdf732fb5481",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 78,
"avg_line_length": 39.103896103896105,
"alnum_prop": 0.5978080371969445,
"repo_name": "nrc/rustc-perf",
"id": "1ce6b46712602d49880da34bf99d5b4b9313eff8",
"size": "3011",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "collector/benchmarks/cranelift-codegen/cranelift-codegen/meta-python/semantics/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1954"
},
{
"name": "HTML",
"bytes": "26683"
},
{
"name": "JavaScript",
"bytes": "41635"
},
{
"name": "Shell",
"bytes": "114"
}
],
"symlink_target": ""
} |
"""Utils for managing different mode strings used by Keras and Estimator models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections.abc as collections_abc
class KerasModeKeys(object):
    """Canonical names for the modes a Keras model can run in.

    Attributes:
      TRAIN: training/fitting mode.
      TEST: testing/evaluation mode.
      PREDICT: prediction/inference mode.
    """

    TRAIN = 'train'
    TEST = 'test'
    PREDICT = 'predict'
# TODO(kathywu): Remove copy in Estimator after nightlies
class EstimatorModeKeys(object):
    """Standard names for Estimator model modes.

    The following standard keys are defined:

    * `TRAIN`: training/fitting mode.
    * `EVAL`: testing/evaluation mode.
    * `PREDICT`: prediction/inference mode.
    """

    TRAIN = 'train'
    EVAL = 'eval'
    # Note: Estimator's prediction mode is the string 'infer', not 'predict';
    # the helpers below treat both as "predict" modes.
    PREDICT = 'infer'
def is_predict(mode):
    """Return True iff `mode` is a prediction/inference mode (Keras or Estimator)."""
    return mode in (KerasModeKeys.PREDICT, EstimatorModeKeys.PREDICT)
def is_eval(mode):
    """Return True iff `mode` is an evaluation/test mode (Keras or Estimator)."""
    return mode in (KerasModeKeys.TEST, EstimatorModeKeys.EVAL)
def is_train(mode):
    """Return True iff `mode` is a training mode (Keras or Estimator)."""
    return mode in (KerasModeKeys.TRAIN, EstimatorModeKeys.TRAIN)
class ModeKeyMap(collections_abc.Mapping):
    """Immutable mapping keyed by model modes rather than raw strings.

    Keras's and Estimator's spellings of the same mode (e.g. 'predict' and
    'infer') are canonicalized to a single internal key, so either spelling
    looks up the same value. Used e.g. when mapping modes to SavedModel
    MetaGraph tags / SignatureDef keys.
    """

    def __init__(self, **kwargs):
        self._internal_dict = {}
        self._keys = []
        for key, value in kwargs.items():
            self._keys.append(key)
            dict_key = self._get_internal_key(key)
            if dict_key in self._internal_dict:
                raise ValueError(
                    'Error creating ModeKeyMap. Multiple keys/values found for {} mode.'
                    .format(dict_key))
            self._internal_dict[dict_key] = value

    def _get_internal_key(self, key):
        """Canonicalize `key` to the Keras ModeKey used internally."""
        for predicate, canonical in (
                (is_train, KerasModeKeys.TRAIN),
                (is_eval, KerasModeKeys.TEST),
                (is_predict, KerasModeKeys.PREDICT)):
            if predicate(key):
                return canonical
        raise ValueError('Invalid mode key: {}.'.format(key))

    def __getitem__(self, key):
        canonical_key = self._get_internal_key(key)
        return self._internal_dict[canonical_key]

    def __iter__(self):
        # Iterate the keys exactly as they were originally supplied.
        return iter(self._keys)

    def __len__(self):
        return len(self._keys)
# LINT.ThenChange(//tensorflow/python/saved_model/model_utils/mode_keys.py)
| {
"content_hash": "4c4871db1af3c41d55c2385659f9955e",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 80,
"avg_line_length": 27.197916666666668,
"alnum_prop": 0.693603983148219,
"repo_name": "petewarden/tensorflow",
"id": "e26bf031513a4ae9a95d220eb3364cb8e7adac35",
"size": "3316",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/saving/utils_v1/mode_keys.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31796"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "895451"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82100676"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867248"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "984477"
},
{
"name": "Jupyter Notebook",
"bytes": "550862"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1982867"
},
{
"name": "Makefile",
"bytes": "66496"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "317461"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37425809"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700106"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3613406"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import logging
import os
import pickle
import torch
from fvcore.common.checkpoint import Checkpointer
from torch.nn.parallel import DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.utils.file_io import PathManager
from .c2_model_loading import align_and_update_state_dicts
class DetectionCheckpointer(Checkpointer):
    """
    Same as :class:`Checkpointer`, but is able to:
    1. handle models in detectron & detectron2 model zoo, and apply conversions for legacy models.
    2. correctly load checkpoints that are only available on the master worker
    """

    def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables):
        # Only the main process writes checkpoints unless told otherwise.
        is_main_process = comm.is_main_process()
        super().__init__(
            model,
            save_dir,
            save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
            **checkpointables,
        )
        self.path_manager = PathManager

    def load(self, path, *args, **kwargs):
        """Load a checkpoint, tolerating files visible only to the main worker."""
        need_sync = False

        if path and isinstance(self.model, DistributedDataParallel):
            logger = logging.getLogger(__name__)
            path = self.path_manager.get_local_path(path)
            has_file = os.path.isfile(path)
            # Check visibility of the file on every worker.
            all_has_file = comm.all_gather(has_file)
            if not all_has_file[0]:
                raise OSError(f"File {path} not found on main worker.")
            if not all(all_has_file):
                logger.warning(
                    f"Not all workers can read checkpoint {path}. "
                    "Training may fail to fully resume."
                )
                # TODO: broadcast the checkpoint file contents from main
                # worker, and load from it instead.
                need_sync = True
            if not has_file:
                path = None  # don't load if not readable

        ret = super().load(path, *args, **kwargs)

        if need_sync:
            # Workers that could not read the file get the main worker's
            # states via DDP's parameter/buffer broadcast.
            logger.info("Broadcasting model states from main worker ...")
            self.model._sync_params_and_buffers()
        return ret

    def _load_file(self, filename):
        """Read a checkpoint file, converting legacy formats to a state dict."""
        if filename.endswith(".pkl"):
            with PathManager.open(filename, "rb") as f:
                data = pickle.load(f, encoding="latin1")
            if "model" in data and "__author__" in data:
                # file is in Detectron2 model zoo format
                self.logger.info("Reading a file from '{}'".format(data["__author__"]))
                return data
            else:
                # assume file is from Caffe2 / Detectron1 model zoo
                if "blobs" in data:
                    # Detection models have "blobs", but ImageNet models don't
                    data = data["blobs"]
                data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
                return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
        elif filename.endswith(".pyth"):
            # assume file is from pycls; no one else seems to use the ".pyth" extension
            with PathManager.open(filename, "rb") as f:
                data = torch.load(f)
            # Bug fix: the original assertion message was an f-string with no
            # placeholder ("(unknown)"), losing the offending filename.
            assert (
                "model_state" in data
            ), f"Cannot load .pyth file {filename}; pycls checkpoints must contain 'model_state'."
            model_state = {
                k: v
                for k, v in data["model_state"].items()
                if not k.endswith("num_batches_tracked")
            }
            return {"model": model_state, "__author__": "pycls", "matching_heuristics": True}

        loaded = super()._load_file(filename)  # load native pth checkpoint
        if "model" not in loaded:
            loaded = {"model": loaded}
        loaded["matching_heuristics"] = True
        return loaded

    def _load_model(self, checkpoint):
        """Load a state dict into the model, applying name-matching heuristics
        for legacy checkpoints and filtering benign incompatibilities."""
        if checkpoint.get("matching_heuristics", False):
            self._convert_ndarray_to_tensor(checkpoint["model"])
            # convert weights by name-matching heuristics
            checkpoint["model"] = align_and_update_state_dicts(
                self.model.state_dict(),
                checkpoint["model"],
                c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
            )
        # for non-caffe2 models, use standard ways to load it
        incompatible = super()._load_model(checkpoint)

        model_buffers = dict(self.model.named_buffers(recurse=False))
        for k in ["pixel_mean", "pixel_std"]:
            # Ignore missing key message about pixel_mean/std.
            # Though they may be missing in old checkpoints, they will be correctly
            # initialized from config anyway.
            if k in model_buffers:
                try:
                    incompatible.missing_keys.remove(k)
                except ValueError:
                    pass
        for k in incompatible.unexpected_keys[:]:
            # Ignore unexpected keys about cell anchors. They exist in old checkpoints
            # but now they are non-persistent buffers and will not be in new checkpoints.
            if "anchor_generator.cell_anchors" in k:
                incompatible.unexpected_keys.remove(k)
        return incompatible
| {
"content_hash": "c2e707e023926ffa1610347929f98b51",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 98,
"avg_line_length": 43.391666666666666,
"alnum_prop": 0.5742270021125409,
"repo_name": "facebookresearch/detectron2",
"id": "6620a2b5781a4f2feb24bf320291c5ba17fc52e6",
"size": "5258",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "detectron2/checkpoint/detection_checkpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "79417"
},
{
"name": "CMake",
"bytes": "616"
},
{
"name": "Cuda",
"bytes": "112955"
},
{
"name": "Dockerfile",
"bytes": "3209"
},
{
"name": "Python",
"bytes": "3261609"
},
{
"name": "Shell",
"bytes": "14448"
}
],
"symlink_target": ""
} |
from ConfigParser import ConfigParser
import logging
import re
import sys
import zmq
from longman_parser import XMLParser
class ParserException(Exception):
    """Raised when a CoreNLP coreference mention cannot be parsed."""
class Parser(XMLParser):
    """Regex-based extractor for Stanford CoreNLP XML output: dependencies,
    coreference chains and constituency parse trees."""

    sen_regex = re.compile(
        '<sentence id="[0-9]*">(.*?)</sentence>', re.S)
    basic_deps_regex = re.compile(
        '<dependencies type="collapsed-ccprocessed-dependencies">(.*?)</dependencies>', re.S)  # nopep8
    all_corefs_regex = re.compile(
        '<coreference>(.*)</coreference>', re.S)  # greedy star, has to be
    # Rewritten from a backslash-continued literal (which embedded
    # layout-dependent whitespace in the pattern) to implicit string
    # concatenation; with re.S the lazy `.*?` absorbs inter-tag whitespace.
    dep_regex = re.compile(
        '<dep type="(.*?)">.*?'
        '<governor idx="([0-9]*)">(.*?)</governor>.*?'
        '<dependent idx="([0-9]*)">(.*?)</dependent>', re.S)
    repr_mention_regex = re.compile(
        '<mention representative="true">(.*?)</mention>', re.S)
    parse_tree_regex = re.compile('<parse>(.*?)</parse>', re.S)

    @staticmethod
    def parse_mention(mention):
        """Return (head_word, sentence_number) for one <mention> block.

        Raises ParserException when the head word cannot be extracted from
        the mention text.
        """
        sen_no = int(Parser.get_section('sentence', mention))
        start_index = int(Parser.get_section('start', mention))
        head_index = int(Parser.get_section('head', mention))
        try:
            word = Parser.get_section(
                'text', mention).split()[head_index-start_index]
        # Bug fix: the bare `except:` also trapped SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        except Exception:
            # logging.error('failed on mention: {0}'.format(mention))
            raise ParserException()
        return word, sen_no

    @staticmethod
    def parse_corefs(corefs):
        """Parse the <coreference> section into a list of
        ((repr_word, sen_no), [(word, sen_no), ...]) pairs, skipping
        mentions whose text cannot be parsed."""
        parsed_corefs = []
        for coref in Parser.iter_sections("coreference", corefs):
            repr_mention = Parser.repr_mention_regex.search(coref).group(1)
            mentions = Parser.iter_sections('mention', coref)
            try:
                repr_word, sen_no = Parser.parse_mention(repr_mention)
                other_words = map(Parser.parse_mention, mentions)
                parsed_corefs.append(((repr_word, sen_no), other_words))
            except ParserException:
                logging.warning('skipping mention with no text')
                # logging.error('failed on coref: {0}'.format(coref))
        return parsed_corefs

    @staticmethod
    def parse_sen(sen):
        """Return [(dep_type, (gov_word, gov_idx), (dep_word, dep_idx)), ...]
        from one <sentence> block; [] when it has no collapsed deps."""
        deps_match = Parser.basic_deps_regex.search(sen)
        if deps_match is None:
            return []
        deps_string = deps_match.group(1)
        return [(dep, (word1, id1), (word2, id2))
                for dep, id1, word1, id2, word2 in Parser.dep_regex.findall(
                    deps_string)]

    @staticmethod
    def parse_corenlp_output(output):
        """Parse the full CoreNLP XML document.

        Returns (parsed_sens, corefs, parse_trees): per-sentence dependency
        lists, parsed coreference chains (or []), and raw parse-tree strings.
        """
        # CoreNLP occasionally emits non-breaking spaces; normalize them.
        cl_output = output.decode('utf-8').replace(u"\xa0", u" ")
        parsed_sens = [Parser.parse_sen(sen)
                       for sen in Parser.sen_regex.findall(cl_output)]
        parse_trees = [match
                       for match in Parser.parse_tree_regex.findall(cl_output)]
        corefs_match = Parser.all_corefs_regex.search(cl_output)
        if corefs_match is None:
            corefs = []
        else:
            corefs = Parser.parse_corefs(corefs_match.group(1))

        return parsed_sens, corefs, parse_trees
class CoreNLPWrapper():
    """Client for a CoreNLP service reachable over a ZeroMQ REQ socket."""

    def __init__(self, cfg, is_server=False):
        self.cfg = cfg
        context = zmq.Context()
        self.socket = context.socket(zmq.REQ)
        self.socket.connect("tcp://localhost:5900")

    def parse_text(self, text):
        """Send `text` to the server; return (deps, corefs, parse_trees)."""
        request = "process {0}".format(text.encode('utf-8'))
        self.socket.send(request)
        reply = self.socket.recv()
        return Parser.parse_corenlp_output(reply)

    def parse_sentences(self, sens):
        """Parse an iterable of sentences as one newline-joined document."""
        return self.parse_text("\n".join(sens))

    def parse_entries(self, entries):
        """Replace each sense's plain-text definition with its parsed form
        ({'sen', 'deps', 'parse'}); senses with a None definition are kept
        untouched. Mutates and returns `entries`."""
        for entry in entries:
            for sense in entry['senses']:
                definition_text = sense['definition']
                if definition_text is None:
                    continue
                deps, corefs, parse_trees = self.parse_text(definition_text)
                sense['definition'] = {
                    "sen": definition_text,
                    "deps": deps[0],
                    "parse": parse_trees}
        return entries
def test():
cfg_file = 'conf/default.cfg' if len(sys.argv) < 2 else sys.argv[1]
cfg = ConfigParser()
cfg.read([cfg_file])
wrapper = CoreNLPWrapper(cfg)
parsed_sens, corefs = wrapper.parse_text(
open('test/input/mrhug_story.sens').read())
print 'parsed_sens:', parsed_sens
print 'corefs:', corefs
if __name__ == '__main__':
    # Configure root logging, then run the smoke test.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s : " +
        "%(module)s (%(lineno)s) - %(levelname)s - %(message)s")
    test()
| {
"content_hash": "2955924e08369257e7a9007d55a97ca3",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 103,
"avg_line_length": 34.42424242424242,
"alnum_prop": 0.5724031690140845,
"repo_name": "recski/4lang",
"id": "3095b037912573905092c131286332d7fe38bb07",
"size": "4544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/fourlang/corenlp_wrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1556"
},
{
"name": "Perl",
"bytes": "1237"
},
{
"name": "Python",
"bytes": "192731"
},
{
"name": "Shell",
"bytes": "3344"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import time
# Build timestamp -- presumably used by the theme templates for static-asset
# cache busting; confirm before removing.
VERSIONING = int(time.time())

# Basic config
AUTHOR = u'Florent Messa'
SITENAME = u'Ulule'
SITEURL = ''

PATH = 'content'

TIMEZONE = 'Europe/Paris'

# Define theme and static path to img folder
THEME = 'themes/ulule'
STATIC_PATHS = [
    'themes/ulule/static/build/',
]

DEFAULT_DATE_FORMAT = '%d %B %Y'

# Remove all unwanted files: empty save-as targets and None feed settings
# disable generation of author/category/tag/archive pages and all feeds.
AUTHORS_SAVE_AS = ''
CATEGORIES_SAVE_AS = ''
TAGS_SAVE_AS = ''
ARCHIVES_SAVE_AS = ''
FEEDS_SAVE_AS = ''
AUTHOR_SAVE_AS = ''
THEME_SAVE_AS = ''
CATEGORY_SAVE_AS = ''
FEED_DOMAIN = None
FEED_ATOM = None
FEED_RSS = None
FEED_ALL_ATOM = None
FEED_ALL_RSS = None
CATEGORY_FEED_ATOM = None
CATEGORY_FEED_RSS = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
TAG_FEED_ATOM = None
TAG_FEED_RSS = None

DEFAULT_LANG = u'en'

# Social links rendered by the theme: (network name, profile URL)
SOCIAL_LINKS = (
    ('twitter', 'https://twitter.com/ulule'),
    ('github', 'https://github.com/ulule'),
    ('facebook', 'https://facebook.com/ulule'),
)

BASE_URL = 'http://ulule.engineering'
| {
"content_hash": "f1ef6f947a0e19c0ca365380dce467e1",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 47,
"avg_line_length": 19.576923076923077,
"alnum_prop": 0.6709233791748527,
"repo_name": "ulule/engineering.ulule.com",
"id": "01c5758c5871b3b640b89d828a79abc82972284d",
"size": "1066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pelicanconf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "61131"
},
{
"name": "HTML",
"bytes": "7478"
},
{
"name": "JavaScript",
"bytes": "1633"
},
{
"name": "Makefile",
"bytes": "926"
},
{
"name": "Python",
"bytes": "1066"
}
],
"symlink_target": ""
} |
from os.path import join, dirname
from setuptools import setup
import ymir
# Package metadata for the `ymir` distribution.
setup(
    name='ymir',
    version=ymir.__version__,  # single-sourced from the package itself
    # NOTE(review): the file handle is never closed and no encoding is
    # specified -- harmless in a short-lived setup process, but worth tidying.
    long_description=open(join(dirname(__file__), 'README.md')).read(),
    entry_points={
        # Installs a `ymir` console command that invokes ymir.main().
        'console_scripts': [
            'ymir = ymir:main',
        ],
    },
    install_requires=[
        "flask",
        "sqlalchemy",
        "flask-cors",
    ]
)
| {
"content_hash": "108293873d361056bd37722c97ee42c9",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 71,
"avg_line_length": 18.318181818181817,
"alnum_prop": 0.533498759305211,
"repo_name": "skylerberg/ymir",
"id": "97c46320d73453b58b1698893b6682c3f359ef1d",
"size": "426",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37303"
},
{
"name": "HTML",
"bytes": "3355"
},
{
"name": "JavaScript",
"bytes": "20079"
},
{
"name": "Python",
"bytes": "20633"
},
{
"name": "RAML",
"bytes": "6880"
},
{
"name": "Shell",
"bytes": "338"
}
],
"symlink_target": ""
} |
import json
from decimal import Decimal as D
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from ..exceptions import (OriginCityNotFoundError,
CityNotFoundError,
ApiOfflineError,
TooManyFoundError,
CalculationError)
# Process-local cache mapping "<facade name>:<origin title>" to the API's
# city/branch code for that origin.
origin_code = {}

# Workaround for cases when the city name was filled into the shipping
# address form via third-party plugins and APIs (such as KLADR-API or
# Dadata) and is prefixed with an abbreviated settlement type. That prefix
# is usually separated by ". " and must be stripped before searching city
# codes. Set this setting explicitly to enable the feature.
CITY_PREFIX_SEPARATOR = getattr(settings, 'OSCAR_CITY_PREFIX_SEPARATOR', None)
class AbstractShippingFacade(object):
    """Base class for shipping-carrier facades.

    Concrete subclasses wire up a carrier API client (``api``), give the
    facade a unique ``name`` (used in cache keys), and implement the
    NotImplementedError stubs at the bottom of this class.
    """
    # instantiated API class from corresponding package
    # should be initiated in __init__
    api = None
    name = ''

    def get_cached_origin_code(self, origin):
        """Resolve the origin city title to its API code, memoizing the
        result in the module-level ``origin_code`` dict.

        Raises ImproperlyConfigured when the API cannot validate the origin.
        """
        code = None
        cache_key = ':'.join([self.name, origin])
        try:
            code = origin_code[cache_key]
        except KeyError:
            pass
        if code:
            return code
        else:
            cities, error = self.api.findbytitle(origin)
            if not error and len(cities) > 0:
                # WARNING! The only first found code used as origin
                origin_code[cache_key] = cities[0][0]
                return origin_code[cache_key]
            else:
                raise ImproperlyConfigured("It seems like origin point '%s'"
                                           "could'nt be validated for the method. Errors: %s" % (origin, error))

    def get_cached_codes(self, city):
        """Look up destination city codes by title, via Django's cache.

        Returns (codes, errors). When more than one code matches, ``errors``
        carries the full API answer so the caller can offer a choice.
        """
        errors = False
        codes = []
        res = []
        cache_key = ':'.join([self.name, city])
        # should return a list of tuples like the facade does, but as JSON
        res = cache.get(cache_key)
        if not res:
            res, errors = self.api.findbytitle(city)
            if not errors:
                cache.set(cache_key, json.dumps(res))
            else:
                res = []
        else:
            res = json.loads(res)
        codes = [r[0] for r in res]
        if len(codes) > 1:
            # return full API answer to let user make a choice
            errors = res
        return codes, errors

    def clean_city_name(self, city):
        """Strip an abbreviated settlement-type prefix (e.g. "г. ") from the
        city name when CITY_PREFIX_SEPARATOR is configured."""
        if CITY_PREFIX_SEPARATOR:
            try:
                # take all after separator
                city = city.split(CITY_PREFIX_SEPARATOR, 1)[1]
            except IndexError:
                # no separator present -- keep the name as-is
                pass
        return city

    def get_city_codes(self, origin, dest):
        """
        Returns tuple of verified origin and destination codes

        ``dest`` may be either a raw code or an address-like object with
        ``line4`` (city) and ``state`` attributes. Raises
        OriginCityNotFoundError, CityNotFoundError or TooManyFoundError.
        """
        # NOTE(review): this local shadows the module-level origin_code dict.
        origin_code = None  # city or branch code
        dest_codes = []  # city or branch codes list
        calc_result = err = errors = None  # calc_result/err appear unused
        city = ''
        origin_code = self.validate_code(origin) or self.get_cached_origin_code(origin)
        if origin_code is None:
            raise OriginCityNotFoundError(origin)
        dest_codes.append(self.validate_code(dest))
        if not dest_codes[0]:
            # dest is not a code -- treat it as an address object
            city = dest.line4
            region = dest.state
            if not city:
                raise CityNotFoundError('city_not_set')
            dest_codes, errors = self.get_cached_codes(self.clean_city_name(city))
        if not dest_codes:
            raise CityNotFoundError(city or dest, errors)
        if len(dest_codes) > 1:
            raise TooManyFoundError(city or dest, errors)
        else:
            return origin_code, dest_codes[0]

    def get_all_branches(self):
        """Return the carrier's branch list (cached), or the API's error
        object when retrieval failed."""
        cache_key = "%s_branches" % self.name
        errors = False
        res = cache.get(cache_key)
        if not res:
            res, errors = self.api.get_branches()
            if not errors:
                cache.set(cache_key, json.dumps(res))
            else:
                res = []
        else:
            res = json.loads(res)
        return errors or res

    def get_by_code(self, code):
        """
        Returns False if code is not valid API city code,
        if not, returns code casted to int.
        Subclasses should implement it.
        """
        raise NotImplementedError

    def get_extra_form(self, *args, **kwargs):
        """
        Return additional form if ambiguous data posted
        via shipping address form so calculate() method requires
        user action.
        Subclasses should implement it.
        """
        pass

    def validate_code(self, code):
        """
        Returns False if code is not valid PEC city code,
        if not, returns code casted to int
        Subclasses should implement it.
        """
        raise NotImplementedError

    def get_charges(self, weight, packs, origin, dest):
        """
        Query the carrier for shipping charges.
        Subclasses should implement it.
        """
        raise NotImplementedError

    def get_charge(self, origin, dest, packs, options=None):
        """
        Return a single charge for the given route and packages.
        Subclasses should implement it.
        """
        raise NotImplementedError

    def parse_results(self, results, **kwargs):
        """
        Parses results returned by get_charges() method.
        Get some additional kwargs for detailed info or extra form.
        Returns tuple (charge, messages, errors, extra_form)
        Subclasses should implement it.
        """
        raise NotImplementedError

    def get_queryset(self):
        """ Return normalized queryset-like list of dicts
            { 'id' : <city code>, 'branch' : <branch title>, 'text': <city title> }
            Subclasses should implement it.
        """
        raise NotImplementedError

    def format_objects(self, qs):
        """ Prepare data for select2 option list.
            Should return smth like grouped
            [{ 'text' : <branch_name>,
               'children' : { 'id' : <city_id>,
                              'text' : <city_name> }
               ...
             },...]
            or
            [{ 'id' : <city_id>,
               'text' : <city_name> },...]
            for non-categorized lists.
            Subclasses should implement it.
        """
        raise NotImplementedError
| {
"content_hash": "5fd0afb210382a2a8fa32833128d2b0a",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 112,
"avg_line_length": 33.14,
"alnum_prop": 0.5449607724803862,
"repo_name": "okfish/django-oscar-shipping",
"id": "688cef07a07f499b22ed86ee5a6464d6c29e33e9",
"size": "6628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar_shipping/facade/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "6892"
},
{
"name": "JavaScript",
"bytes": "2268"
},
{
"name": "Makefile",
"bytes": "1278"
},
{
"name": "Python",
"bytes": "80135"
}
],
"symlink_target": ""
} |
import os, sys
# Python 2-only hack: re-expose sys.setdefaultencoding (deleted by site.py)
# so the process-wide default encoding can be forced to UTF-8.
reload(sys)
sys.setdefaultencoding("utf-8")
from flask import Flask, render_template, request, url_for
from datetime import datetime
# Directory containing this module (not otherwise used below in this view).
_wd = os.path.dirname(os.path.realpath(__file__))
# Flask application serving static assets under /static.
app = Flask(__name__, static_url_path='/static')
@app.route('/')
def home():
    """Render the landing page from templates/home.html."""
    return render_template('home.html')
if __name__ == '__main__':
    # Development server only: debug=True enables the reloader/debugger and
    # must not be used in production.
    app.run(debug=True)
| {
"content_hash": "3966c69d97114e0224ca80b18b8fadbc",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 58,
"avg_line_length": 22.75,
"alnum_prop": 0.6868131868131868,
"repo_name": "mabdrabo/FlaskHero",
"id": "ab15a7e6f1d8c98e1f921147e1542de7a812ae0e",
"size": "380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/proj.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "302"
},
{
"name": "Python",
"bytes": "380"
}
],
"symlink_target": ""
} |
from tests.testing_harness import TestHarness
def test_lattice_mixed():
    """Run the regression test harness against statepoint.10.h5."""
    TestHarness('statepoint.10.h5').main()
| {
"content_hash": "fc08f723cf1f7791a400f916beeb8c6d",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 45,
"avg_line_length": 23.166666666666668,
"alnum_prop": 0.7338129496402878,
"repo_name": "johnnyliu27/openmc",
"id": "3df1d7a4f3994f8d2dcb00a878d13d7d3c5b1360",
"size": "139",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/regression_tests/lattice_mixed/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7619"
},
{
"name": "C++",
"bytes": "825710"
},
{
"name": "CMake",
"bytes": "33163"
},
{
"name": "Dockerfile",
"bytes": "1427"
},
{
"name": "Fortran",
"bytes": "1089808"
},
{
"name": "Python",
"bytes": "2433489"
},
{
"name": "Shell",
"bytes": "2986"
}
],
"symlink_target": ""
} |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
# Under PY3, alias the stdlib `builtins` module to the PY2 name
# `__builtin__` and map the removed `long` type onto `int`; under PY2,
# import `__builtin__` directly. Later code uses `__builtin__.property`.
if six.PY3:
    import builtins as __builtin__

    long = int
elif six.PY2:
    import __builtin__
from . import config
from . import state
# NOTE: auto-generated by the pyangbind PythonClass plugin; hand edits will
# be lost if these bindings are regenerated from the YANG model.
class as_path_options(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/as-path-options. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: AS_PATH manipulation parameters for the BGP neighbor or
    group
    """
    __slots__ = ("_path_helper", "_extmethods", "__config", "__state")
    _yang_name = "as-path-options"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Child containers backing the `config` and `state` properties below.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        # Optional copy-construction from a single compatible object.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Absolute YANG path of this container when not attached to a parent.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "neighbors",
                "neighbor",
                "as-path-options",
            ]
    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/as_path_options/config (container)
        YANG Description: Configuration parameters relating to AS_PATH manipulation
        for the BGP peer or group
        """
        return self.__config
    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/as_path_options/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.
        YANG Description: Configuration parameters relating to AS_PATH manipulation
        for the BGP peer or group
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__config = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_config(self):
        # Reset `config` to a fresh default container.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/as_path_options/state (container)
        YANG Description: State information relating to the AS_PATH manipulation
        mechanisms for the BGP peer or group
        """
        return self.__state
    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/as_path_options/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.
        YANG Description: State information relating to the AS_PATH manipulation
        mechanisms for the BGP peer or group
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_state(self):
        # Reset `state` to a fresh default container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
    # Public accessors, defined via the PY2/PY3-compatible __builtin__ alias.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)
    _pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
# NOTE: second auto-generated variant of this container (per its docstring,
# from YANG module openconfig-network-instance-l2). Binding the same class
# name again at module level replaces the definition generated above.
class as_path_options(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/as-path-options. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: AS_PATH manipulation parameters for the BGP neighbor or
    group
    """
    __slots__ = ("_path_helper", "_extmethods", "__config", "__state")
    _yang_name = "as-path-options"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Child containers backing the `config` and `state` properties below.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        # Optional copy-construction from a single compatible object.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Absolute YANG path of this container when not attached to a parent.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "neighbors",
                "neighbor",
                "as-path-options",
            ]
    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/as_path_options/config (container)
        YANG Description: Configuration parameters relating to AS_PATH manipulation
        for the BGP peer or group
        """
        return self.__config
    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/as_path_options/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.
        YANG Description: Configuration parameters relating to AS_PATH manipulation
        for the BGP peer or group
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__config = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_config(self):
        # Reset `config` to a fresh default container.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/as_path_options/state (container)
        YANG Description: State information relating to the AS_PATH manipulation
        mechanisms for the BGP peer or group
        """
        return self.__state
    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/as_path_options/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.
        YANG Description: State information relating to the AS_PATH manipulation
        mechanisms for the BGP peer or group
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_state(self):
        # Reset `state` to a fresh default container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
    # Public accessors, defined via the PY2/PY3-compatible __builtin__ alias.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)
    _pyangbind_elements = OrderedDict([("config", config), ("state", state)])
| {
"content_hash": "30b2e3a891f866bb5fff60c59f5a68de",
"timestamp": "",
"source": "github",
"line_count": 480,
"max_line_length": 377,
"avg_line_length": 38.74583333333333,
"alnum_prop": 0.5832885256479191,
"repo_name": "napalm-automation/napalm-yang",
"id": "2b62a9a606c947abb7def30a4fe81407ce6bb022",
"size": "18622",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/as_path_options/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
} |
import socket,string
# TCP client socket connected to the local LZW server on port 8886.
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(('localhost',8886))
def compress(uncompressed):
    """Compress a string to a list of integer LZW output codes.

    Fixes the original dictionary initialization, which mapped each
    single character to itself (``chr(i) -> chr(i)``) instead of to its
    integer code, so the returned list mixed one-character strings with
    the integer codes added later. All output symbols are now ints.

    Returns [] for empty input.
    """
    # Build the initial dictionary: each single character maps to its code.
    dict_size = 256
    dictionary = {chr(i): i for i in range(dict_size)}
    w = ""
    result = []
    for c in uncompressed:
        wc = w + c
        if wc in dictionary:
            w = wc
        else:
            result.append(dictionary[w])
            # Add wc to the dictionary under the next free code.
            dictionary[wc] = dict_size
            dict_size += 1
            w = c
    # Output the code for the final pending sequence, if any.
    if w:
        result.append(dictionary[w])
    return result
def decompress(compressed):
    """Decompress a list of integer LZW codes back to a string.

    Inverse of compress(). Fixes the original, whose initial dictionary
    was keyed by characters (``chr(i) -> chr(i)``) while grown entries
    were keyed by integer codes, and which wrote the first popped code
    without translating it — so valid code lists failed to round-trip.
    Also no longer mutates the caller's list and returns '' for empty
    input instead of raising IndexError.

    Raises ValueError for a code that a compressor could not have emitted.
    """
    if not compressed:
        return ''
    # Work on a copy so the caller's list is not consumed.
    compressed = list(compressed)
    # Build the initial dictionary: each code below 256 maps to its character.
    dict_size = 256
    dictionary = {i: chr(i) for i in range(dict_size)}
    parts = []
    w = dictionary[compressed.pop(0)]
    parts.append(w)
    for k in compressed:
        if k in dictionary:
            entry = dictionary[k]
        elif k == dict_size:
            # Special LZW case: the code refers to the entry being built.
            entry = w + w[0]
        else:
            raise ValueError('Bad compressed k: %s' % k)
        parts.append(entry)
        # Add w+entry[0] to the dictionary.
        dictionary[dict_size] = w + entry[0]
        dict_size += 1
        w = entry
    return ''.join(parts)
# Compress a sample payload and ship the joined codes to the server.
compressed = compress('Delta Kapa')
print (compressed)
string = ' '
# NOTE(review): each iteration recomputes the same join over the whole
# `compressed` list; a single join outside the loop would be equivalent.
# Also, this rebinding shadows the `string` module imported above.
for p in compressed:
    string=''.join(map(str,compressed))
string=string.encode('ascii')
s.send(string)
print string
s.close()
| {
"content_hash": "2fe004d9045f9805668cc2ee14e17133",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 74,
"avg_line_length": 26.40625,
"alnum_prop": 0.5792899408284023,
"repo_name": "CSE-SOE-CUSAT/NOSLab",
"id": "77cb680bf8307366c7dcb40b66b7c66381405e5f",
"size": "1690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CSA/unsorted/12-10-15/lzw_cl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "28101"
},
{
"name": "HTML",
"bytes": "189807"
},
{
"name": "Python",
"bytes": "48707"
}
],
"symlink_target": ""
} |
"""
Just a script to make it easy to test testing infrastracture itself.
"""
import coverage
import os
import subprocess
import sys
def subprocess_main():
    """Work performed in the spawned child process; prints a marker line."""
    print('subprocess_main')
def main():
    """Dispatch on argv: with the single argument 'subprocess', act as the
    coverage-measured child; with no arguments, re-invoke this script as
    that child via subprocess.

    The original bound coverage.process_startup()'s return value to an
    unused local `cov`; the binding is dropped and the call kept purely
    for its side effect.
    """
    if sys.argv[1:] == ['subprocess']:
        print('subprocess')
        # Side effect only: start coverage collection in this child process.
        coverage.process_startup()
        subprocess_main()
    elif sys.argv[1:] == []:
        print('process')
        subprocess.check_call((sys.executable, __file__, 'subprocess'))
if __name__ == "__main__":
main()
| {
"content_hash": "2b6be6a2ea336f6f8bc834f0064bebb5",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 21.652173913043477,
"alnum_prop": 0.6104417670682731,
"repo_name": "elmopl/ktba",
"id": "62053c8013316c5023c50560bda48e0b1a0f4a3d",
"size": "498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_dummy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59711"
},
{
"name": "Shell",
"bytes": "1228"
}
],
"symlink_target": ""
} |
import socket
import threading
import time
import xmlrpclib
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_arg import bitcoin, util
from electrum_arg import transaction
from electrum_arg.plugins import BasePlugin, hook
from electrum_arg.i18n import _
from electrum_arg.wallet import Multisig_Wallet
from electrum_arg_gui.qt.transaction_dialog import show_transaction
import sys
import traceback
# Shared cosigner-pool relay: an XML-RPC server used as a mailbox where
# cosigners put/get encrypted transactions keyed by key hash.
PORT = 12344
HOST = 'cosigner.electrum.org'
server = xmlrpclib.ServerProxy('http://%s:%d'%(HOST,PORT), allow_none=True)
class Listener(util.DaemonThread):
    """Daemon thread that polls the cosigner-pool server for messages
    addressed to any of our key hashes and forwards them to the plugin
    via the Qt 'cosigner:receive' signal."""
    def __init__(self, parent):
        util.DaemonThread.__init__(self)
        self.daemon = True
        self.parent = parent
        # Key hashes whose messages were already fetched this session.
        self.received = set()
        # Key hashes (ours) to poll the server for.
        self.keyhashes = []
    def set_keyhashes(self, keyhashes):
        self.keyhashes = keyhashes
    def clear(self, keyhash):
        # Delete the message server-side and allow that keyhash to receive again.
        server.delete(keyhash)
        self.received.remove(keyhash)
    def run(self):
        while self.running:
            if not self.keyhashes:
                time.sleep(2)
                continue
            for keyhash in self.keyhashes:
                if keyhash in self.received:
                    continue
                try:
                    message = server.get(keyhash)
                except Exception as e:
                    # Network failure: back off before retrying the pool.
                    self.print_error("cannot contact cosigner pool")
                    time.sleep(30)
                    continue
                if message:
                    # Remember it so we do not re-emit the same message.
                    self.received.add(keyhash)
                    self.print_error("received message for", keyhash)
                    self.parent.obj.emit(SIGNAL("cosigner:receive"), keyhash,
                                         message)
            # poll every 30 seconds
            time.sleep(30)
class Plugin(BasePlugin):
    """Cosigner-pool plugin: exchanges partially-signed multisig
    transactions between cosigners through the shared XML-RPC relay."""
    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        self.listener = None
        self.obj = QObject()
        # Route messages from the Listener thread into on_receive on the Qt side.
        self.obj.connect(self.obj, SIGNAL('cosigner:receive'), self.on_receive)
        # (key, keyhash, window) tuples for keystores we can sign with.
        self.keys = []
        # (window, xpub, K, keyhash) tuples for watching-only cosigners.
        self.cosigner_list = []
    @hook
    def init_qt(self, gui):
        for window in gui.windows:
            self.on_new_window(window)
    @hook
    def on_new_window(self, window):
        self.update(window)
    @hook
    def on_close_window(self, window):
        self.update(window)
    def is_available(self):
        return True
    def update(self, window):
        """Rebuild key/cosigner lists for a multisig wallet window and
        manage the polling listener thread."""
        wallet = window.wallet
        if type(wallet) != Multisig_Wallet:
            return
        if self.listener is None:
            self.print_error("starting listener")
            self.listener = Listener(self)
            self.listener.start()
        # NOTE(review): with an existing listener this branch stops it on
        # every call — looks like a start/stop toggle; confirm intended.
        elif self.listener:
            self.print_error("shutting down listener")
            self.listener.stop()
            self.listener = None
        self.keys = []
        self.cosigner_list = []
        for key, keystore in wallet.keystores.items():
            xpub = keystore.get_master_public_key()
            K = bitcoin.deserialize_xpub(xpub)[-1].encode('hex')
            _hash = bitcoin.Hash(K).encode('hex')
            if not keystore.is_watching_only():
                self.keys.append((key, _hash, window))
            else:
                self.cosigner_list.append((window, xpub, K, _hash))
        if self.listener:
            self.listener.set_keyhashes([t[1] for t in self.keys])
    @hook
    def transaction_dialog(self, d):
        # Add a "Send to cosigner" button to the transaction dialog.
        d.cosigner_send_button = b = QPushButton(_("Send to cosigner"))
        b.clicked.connect(lambda: self.do_send(d.tx))
        d.buttons.insert(0, b)
        self.transaction_dialog_update(d)
    @hook
    def transaction_dialog_update(self, d):
        # Only show the button for incomplete txs we cannot sign ourselves
        # but a known cosigner of this wallet can.
        if d.tx.is_complete() or d.wallet.can_sign(d.tx):
            d.cosigner_send_button.hide()
            return
        for window, xpub, K, _hash in self.cosigner_list:
            if window.wallet == d.wallet and self.cosigner_can_sign(d.tx, xpub):
                d.cosigner_send_button.show()
                break
        else:
            d.cosigner_send_button.hide()
    def cosigner_can_sign(self, tx, cosigner_xpub):
        """Return True if any of tx's input x_pubkeys derive from cosigner_xpub."""
        from electrum_arg.keystore import is_xpubkey, parse_xpubkey
        from electrum_arg.transaction import x_to_xpub
        xpub_set = set([])
        for txin in tx.inputs():
            for x_pubkey in txin['x_pubkeys']:
                if is_xpubkey(x_pubkey):
                    xpub, s = parse_xpubkey(x_pubkey)
                    xpub_set.add(xpub)
        return cosigner_xpub in xpub_set
    def do_send(self, tx):
        """Encrypt tx for each relevant cosigner and put it on the pool server."""
        for window, xpub, K, _hash in self.cosigner_list:
            if not self.cosigner_can_sign(tx, xpub):
                continue
            message = bitcoin.encrypt_message(tx.raw, K)
            try:
                server.put(_hash, message)
            except Exception as e:
                traceback.print_exc(file=sys.stdout)
                window.show_message("Failed to send transaction to cosigning pool.")
                return
            window.show_message("Your transaction was sent to the cosigning pool.\nOpen your cosigner wallet to retrieve it.")
    def on_receive(self, keyhash, message):
        """Decrypt a message fetched by the Listener and open the transaction."""
        self.print_error("signal arrived for", keyhash)
        for key, _hash, window in self.keys:
            if _hash == keyhash:
                break
        else:
            self.print_error("keyhash not found")
            return
        wallet = window.wallet
        if wallet.has_password():
            password = window.password_dialog('An encrypted transaction was retrieved from cosigning pool.\nPlease enter your password to decrypt it.')
            if not password:
                return
        else:
            password = None
            if not window.question(_("An encrypted transaction was retrieved from cosigning pool.\nDo you want to open it now?")):
                return
        xprv = wallet.keystore.get_master_private_key(password)
        if not xprv:
            return
        try:
            # Decrypt with the EC key derived from our master private key.
            k = bitcoin.deserialize_xprv(xprv)[-1].encode('hex')
            EC = bitcoin.EC_KEY(k.decode('hex'))
            message = EC.decrypt_message(message)
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            window.show_message(str(e))
            return
        # Acknowledge server-side so the message is not delivered again.
        self.listener.clear(keyhash)
        tx = transaction.Transaction(message)
        show_transaction(tx, window, prompt_if_unsaved=True)
| {
"content_hash": "7782104db9d1e94d93fd581f2cd61ec6",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 151,
"avg_line_length": 33.708333333333336,
"alnum_prop": 0.5738566131025958,
"repo_name": "argentumproject/electrum-arg",
"id": "a2bb522b388195d290517e6016668077ed0725c3",
"size": "7635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/cosigner_pool/qt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3869"
},
{
"name": "Makefile",
"bytes": "848"
},
{
"name": "NSIS",
"bytes": "7179"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Python",
"bytes": "1244527"
},
{
"name": "Shell",
"bytes": "7098"
}
],
"symlink_target": ""
} |
"""Overwrite `KEY=VALUE` lines in the given config files with matching
values from the process environment. The file paths to patch are taken
from argv; the two env scripts are always included."""
import os, sys

if len(sys.argv) < 2:
    print('Usage: %s' % (sys.argv[0]))
    sys.exit(1)

# Always apply env config to env scripts as well
conf_files = ['conf/pulsar_env.sh', 'conf/bkenv.sh'] + sys.argv[1:]

for conf_filename in conf_files:
    lines = []  # List of config file lines
    keys = {}   # Map a key to its line number in the file

    # Load conf file. Using a context manager closes the handle promptly;
    # the original `for line in open(...)` leaked it until GC.
    with open(conf_filename) as conf_file:
        for line in conf_file:
            lines.append(line)
            stripped = line.strip()
            if not stripped or stripped.startswith('#'):
                continue
            k, v = stripped.split('=', 1)
            keys[k] = len(lines) - 1

    # Update values from Env
    for k in sorted(os.environ.keys()):
        v = os.environ[k]
        if k in keys:
            print('[%s] Applying config %s = %s' % (conf_filename, k, v))
            lines[keys[k]] = '%s=%s\n' % (k, v)

    # Store back the updated config in the same file
    with open(conf_filename, 'w') as conf_file:
        conf_file.writelines(lines)
| {
"content_hash": "ee5fb91f8bb6e5786200dd779264b5f4",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 73,
"avg_line_length": 27.805555555555557,
"alnum_prop": 0.5494505494505495,
"repo_name": "nkurihar/pulsar",
"id": "0ad9b2b8238543e972ba71c5045230d514215fc4",
"size": "1966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker/pulsar/scripts/apply-config-from-env.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2170"
},
{
"name": "C",
"bytes": "145811"
},
{
"name": "C++",
"bytes": "1327528"
},
{
"name": "CMake",
"bytes": "23019"
},
{
"name": "CSS",
"bytes": "31825"
},
{
"name": "Dockerfile",
"bytes": "26393"
},
{
"name": "Go",
"bytes": "109755"
},
{
"name": "Groovy",
"bytes": "20767"
},
{
"name": "HCL",
"bytes": "13762"
},
{
"name": "HTML",
"bytes": "133834"
},
{
"name": "Java",
"bytes": "13217208"
},
{
"name": "JavaScript",
"bytes": "80337"
},
{
"name": "Makefile",
"bytes": "2322"
},
{
"name": "Python",
"bytes": "442677"
},
{
"name": "Ruby",
"bytes": "20575"
},
{
"name": "Shell",
"bytes": "163519"
},
{
"name": "Smarty",
"bytes": "1042"
}
],
"symlink_target": ""
} |
import json
import logging
import time
import unittest
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_false, assert_equal
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access, add_to_group
from desktop.models import Document
from hadoop import cluster
from hadoop.conf import YARN_CLUSTERS
from hadoop.pseudo_hdfs4 import is_live_cluster
from hadoop.yarn import resource_manager_api, mapreduce_api, history_server_api
from liboozie.oozie_api_tests import OozieServerProvider
from oozie.models import Workflow
from jobbrowser import models, views
from jobbrowser.conf import SHARE_JOBS
from jobbrowser.models import can_view_job, can_modify_job, Job, LinkJobLogs
# Module-level logger for these tests.
LOG = logging.getLogger(__name__)
# Guard so install_examples() runs at most once per test session.
_INITIALIZED = False
class TestBrowser():

  def test_dots_to_camel_case(self):
    """dots_to_camel_case folds dot-separated names into camelCase."""
    for expected, raw in (("fooBar", "foo.bar"),
                          ("fooBarBaz", "foo.bar.baz"),
                          ("foo", "foo"),
                          ("foo.", "foo.")):
      assert_equal(expected, models.dots_to_camel_case(raw))

  def test_get_path(self):
    """get_path drops the scheme and host from an HDFS URI."""
    assert_equal("/foo/bar", models.get_path("hdfs://host/foo/bar"))

  def test_format_counter_name(self):
    """format_counter_name turns camelCase/UPPER_SNAKE counters into words."""
    for expected, raw in (("Foo Bar", "fooBar"),
                          ("Foo Bar Baz", "fooBarBaz"),
                          ("Foo", "foo"),
                          ("Foo.", "foo."),
                          ("A Bbb Ccc", "A_BBB_CCC")):
      assert_equal(expected, views.format_counter_name(raw))
def get_hadoop_job_id(oozie_api, oozie_jobid, action_index=1, timeout=60, step=5):
  """Poll Oozie every `step` seconds, for up to `timeout` seconds, until the
  action at `action_index` of workflow `oozie_jobid` exposes its external
  Hadoop job id.

  Returns the Hadoop job id, or raises Exception (after logging the Oozie
  job log) if none appeared before the deadline.
  """
  deadline = time.time() + timeout
  hadoop_job_id = None
  while not hadoop_job_id and time.time() < deadline:
    time.sleep(step)
    hadoop_job_id = oozie_api.get_job(oozie_jobid).actions[action_index].externalId
  if not hadoop_job_id:
    logs = OozieServerProvider.oozie.get_job_log(oozie_jobid)
    msg = "[%d] %s took more than %d to create a job: %s" % (time.time(), oozie_jobid, timeout, logs)
    LOG.info(msg)
    raise Exception(msg)
  return hadoop_job_id
class TestJobBrowserWithHadoop(unittest.TestCase, OozieServerProvider):
requires_hadoop = True
  @classmethod
  def setup_class(cls):
    """Create a test user/home dir, install the Oozie examples, submit the
    sleep-job workflow and remember its Hadoop job id for the tests."""
    OozieServerProvider.setup_class()
    cls.username = 'hue_jobbrowser_test'
    cls.home_dir = '/user/%s' % cls.username
    cls.cluster.fs.do_as_user(cls.username, cls.cluster.fs.create_home_dir, cls.home_dir)
    cls.client = make_logged_in_client(username=cls.username, is_superuser=False, groupname='test')
    cls.user = User.objects.get(username=cls.username)
    grant_access(cls.username, 'test', 'jobsub')
    grant_access(cls.username, 'test', 'jobbrowser')
    grant_access(cls.username, 'test', 'oozie')
    add_to_group(cls.username)
    # Run everything below as the test user; restored in teardown_class.
    cls.prev_user = cls.cluster.fs.user
    cls.cluster.fs.setuser(cls.username)
    cls.install_examples()
    cls.design = cls.create_design()
    # Run the sleep example, since it doesn't require user home directory
    design_id = cls.design.id
    response = cls.client.post(reverse('oozie:submit_workflow',
                               args=[design_id]),
                               data={u'form-MAX_NUM_FORMS': [u''],
                                     u'form-INITIAL_FORMS': [u'1'],
                                     u'form-0-name': [u'REDUCER_SLEEP_TIME'],
                                     u'form-0-value': [u'1'],
                                     u'form-TOTAL_FORMS': [u'1']},
                               follow=True)
    oozie_jobid = response.context['oozie_workflow'].id
    OozieServerProvider.wait_until_completion(oozie_jobid)
    cls.hadoop_job_id = get_hadoop_job_id(cls.oozie, oozie_jobid, 1)
    cls.hadoop_job_id_short = views.get_shorter_id(cls.hadoop_job_id)
  @classmethod
  def teardown_class(cls):
    """Best-effort cleanup: delete test documents/workflows and the test
    user's home directory, then restore the previous filesystem user."""
    try:
      Document.objects.filter(name__contains=cls.username).delete()
      Workflow.objects.filter(name__contains=cls.username).delete()
      # Remove user home directories.
      cls.cluster.fs.do_as_superuser(cls.cluster.fs.rmtree, cls.home_dir)
    except:
      # Bare except: cleanup failures are logged but never fail teardown.
      LOG.exception('failed to teardown %s' % cls.home_dir)
    cls.cluster.fs.setuser(cls.prev_user)
  @classmethod
  def create_design(cls):
    """Return (creating if absent) the user's MapReduce sleep-job design.

    The job_properties payload configures org.apache.hadoop.examples.SleepJob
    with a parameterized ${REDUCER_SLEEP_TIME}.
    """
    job_name = '%s_%s' % (cls.username, 'sleep_job')
    # Only create the design once per user; otherwise reuse the existing doc.
    if not Document.objects.available_docs(Workflow, cls.user).filter(name=job_name).exists():
      response = cls.client.post(reverse('jobsub.views.new_design',
        kwargs={'node_type': 'mapreduce'}),
        data={'name': job_name,
              'description': '',
              'node_type': 'mapreduce',
              'jar_path': '/user/hue/oozie/workspaces/lib/hadoop-examples.jar',
              'prepares': '[]',
              'files': '[]',
              'archives': '[]',
              'job_properties': '[{\"name\":\"mapred.reduce.tasks\",\"value\":\"1\"},{\"name\":\"mapred.mapper.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},{\"name\":\"mapred.reducer.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},{\"name\":\"mapred.mapoutput.key.class\",\"value\":\"org.apache.hadoop.io.IntWritable\"},{\"name\":\"mapred.mapoutput.value.class\",\"value\":\"org.apache.hadoop.io.NullWritable\"},{\"name\":\"mapred.output.format.class\",\"value\":\"org.apache.hadoop.mapred.lib.NullOutputFormat\"},{\"name\":\"mapred.input.format.class\",\"value\":\"org.apache.hadoop.examples.SleepJob$SleepInputFormat\"},{\"name\":\"mapred.partitioner.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},{\"name\":\"mapred.speculative.execution\",\"value\":\"false\"},{\"name\":\"sleep.job.map.sleep.time\",\"value\":\"0\"},{\"name\":\"sleep.job.reduce.sleep.time\",\"value\":\"${REDUCER_SLEEP_TIME}\"}]'
              },
              HTTP_X_REQUESTED_WITH='XMLHttpRequest')
      assert_equal(response.status_code, 200)
    return Document.objects.available_docs(Workflow, cls.user).get(name=job_name).content_object
@classmethod
def install_examples(cls):
    """Install the Oozie examples and create the user's home dir, once per process.

    Guarded by the module-level _INITIALIZED flag so repeated class setups
    do not reinstall.
    """
    global _INITIALIZED
    if _INITIALIZED:
        return
    cls.client.post(reverse('oozie:install_examples'))
    cls.cluster.fs.do_as_user(cls.username, cls.cluster.fs.create_home_dir, cls.home_dir)
    # World-writable (0777, Python 2 octal literal) so jobs submitted as other
    # users can write under the test home dir; recursive chmod.
    cls.cluster.fs.do_as_superuser(cls.cluster.fs.chmod, cls.home_dir, 0777, True)
    _INITIALIZED = True
def test_uncommon_views(self):
    """
    These views exist, but tend not to be ever called, because they're not in the normal UI.
    """
    raise SkipTest  # Always skipped; the smoke-test GETs below are dead code kept for reference.
    TestJobBrowserWithHadoop.client.get("/jobbrowser/clusterstatus")
    TestJobBrowserWithHadoop.client.get("/jobbrowser/queues")
    TestJobBrowserWithHadoop.client.get("/jobbrowser/jobbrowser")
def test_failed_jobs(self):
    """
    Test jobs with genuine failure, not just killed.

    Submits a MapReduce design whose mapper/combiner/reducer classes do not
    exist, waits for Oozie to finish, and checks the job shows up under the
    'failed' filter but not 'killed'.
    """
    if is_live_cluster():
        raise SkipTest('HUE-2902: Skipping because test is not reentrant')
    # Create design that will fail because the script file isn't there
    INPUT_DIR = TestJobBrowserWithHadoop.home_dir + '/input'
    OUTPUT_DIR = TestJobBrowserWithHadoop.home_dir + '/output'
    try:
        TestJobBrowserWithHadoop.cluster.fs.mkdir(TestJobBrowserWithHadoop.home_dir + "/jt-test_failed_jobs")
        TestJobBrowserWithHadoop.cluster.fs.mkdir(INPUT_DIR)
        TestJobBrowserWithHadoop.cluster.fs.rmtree(OUTPUT_DIR)
    except:
        # Best-effort pre-clean; leftovers from a previous run are tolerated.
        LOG.exception('failed to teardown tests')
    job_name = '%s_%s' % (TestJobBrowserWithHadoop.username, 'test_failed_jobs-1')
    response = TestJobBrowserWithHadoop.client.post(reverse('jobsub.views.new_design', kwargs={'node_type': 'mapreduce'}), {
        'name': [job_name],
        'description': ['description test_failed_jobs-1'],
        'args': '',
        'jar_path': '/user/hue/oozie/workspaces/lib/hadoop-examples.jar',
        'prepares': '[]',
        'archives': '[]',
        'files': '[]',
        # 'dne' classes do not exist, which is what makes the job fail.
        'job_properties': ['[{"name":"mapred.input.dir","value":"%s"},\
{"name":"mapred.output.dir","value":"%s"},\
{"name":"mapred.mapper.class","value":"org.apache.hadoop.mapred.lib.dne"},\
{"name":"mapred.combiner.class","value":"org.apache.hadoop.mapred.lib.dne"},\
{"name":"mapred.reducer.class","value":"org.apache.hadoop.mapred.lib.dne"}]' % (INPUT_DIR, OUTPUT_DIR)]
    }, HTTP_X_REQUESTED_WITH='XMLHttpRequest', follow=True)
    # Submit the job
    design_dict = json.loads(response.content)
    design_id = int(design_dict['id'])
    response = TestJobBrowserWithHadoop.client.post(reverse('oozie:submit_workflow',
        args=[design_id]),
        data={u'form-MAX_NUM_FORMS': [u''],
              u'form-INITIAL_FORMS': [u'1'],
              u'form-0-name': [u'REDUCER_SLEEP_TIME'],
              u'form-0-value': [u'1'],
              u'form-TOTAL_FORMS': [u'1']},
        follow=True)
    oozie_jobid = response.context['oozie_workflow'].id
    job = OozieServerProvider.wait_until_completion(oozie_jobid)
    hadoop_job_id = get_hadoop_job_id(TestJobBrowserWithHadoop.oozie, oozie_jobid, 1)
    hadoop_job_id_short = views.get_shorter_id(hadoop_job_id)
    # Select only killed jobs (should be absent)
    # Taking advantage of the fact new jobs are at the top of the list!
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/?format=json&state=killed')
    assert_false(hadoop_job_id_short in response.content)
    # Select only failed jobs (should be present)
    # Map job should succeed. Reduce job should fail.
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/?format=json&state=failed')
    assert_true(hadoop_job_id_short in response.content)
    raise SkipTest  # Not compatible with MR2 -- everything below is dead code kept for reference.
    # The single job view should have the failed task table
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s' % (hadoop_job_id,))
    html = response.content.lower()
    assert_true('failed task' in html, html)
    # The map task should say success (empty input)
    map_task_id = TestJobBrowserWithHadoop.hadoop_job_id.replace('job', 'task') + '_m_000000'
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, map_task_id))
    assert_true('succeed' in response.content)
    assert_true('failed' not in response.content)
    # The reduce task should say failed
    reduce_task_id = hadoop_job_id.replace('job', 'task') + '_r_000000'
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, reduce_task_id))
    assert_true('succeed' not in response.content)
    assert_true('failed' in response.content)
    # Selecting by failed state should include the failed map
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks?taskstate=failed' % (hadoop_job_id,))
    assert_true('r_000000' in response.content)
    assert_true('m_000000' not in response.content)
def test_jobs_page(self):
    """The listing shows our job as completed, and under no other state filter."""
    # All jobs page and fetch job ID.
    # Taking advantage of the fact new jobs are at the top of the list!
    listing = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/?format=json')
    assert_true(TestJobBrowserWithHadoop.hadoop_job_id_short in listing.content, listing.content)
    # The job succeeded, so it must appear under the 'completed' filter...
    listing = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/?format=json&state=completed')
    assert_true(TestJobBrowserWithHadoop.hadoop_job_id_short in listing.content)
    # ...and under none of the other state filters.
    for state in ('failed', 'running', 'killed'):
        listing = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/?format=json&state=%s' % state)
        assert_false(TestJobBrowserWithHadoop.hadoop_job_id_short in listing.content)
def test_tasks_page(self):
    # Always skipped; the tracker-page check below is dead code kept for reference.
    raise SkipTest
    # Test tracker page
    early_task_id = TestJobBrowserWithHadoop.hadoop_job_id.replace('job', 'task') + '_m_000000'
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks/%s' % (TestJobBrowserWithHadoop.hadoop_job_id, early_task_id))
    # Follow the tracker link scraped out of the task page HTML.
    tracker_url = re.search('<a href="(/jobbrowser/trackers/.+?)"', response.content).group(1)
    response = TestJobBrowserWithHadoop.client.get(tracker_url)
    assert_true('Tracker at' in response.content)
def test_job_permissions(self):
    """Job visibility: owners always see their jobs; other users only while
    SHARE_JOBS is enabled."""
    def check_listing(client, should_see):
        resp = client.get('/jobbrowser/jobs/?format=json&user=')
        if should_see:
            assert_true(TestJobBrowserWithHadoop.hadoop_job_id_short in resp.content)
        else:
            assert_false(TestJobBrowserWithHadoop.hadoop_job_id_short in resp.content)

    # Login as ourself: the job is visible regardless of the sharing setting.
    for share in (True, False):
        finish = SHARE_JOBS.set_for_testing(share)
        try:
            check_listing(TestJobBrowserWithHadoop.client, True)
        finally:
            finish()

    # Login as someone else: visible only while sharing is on.
    client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
    grant_access("not_me", "test", "jobbrowser")
    for share, expected in ((True, True), (False, False)):
        finish = SHARE_JOBS.set_for_testing(share)
        try:
            check_listing(client_not_me, expected)
        finally:
            finish()
def test_job_counter(self):
    # Always skipped; the counter checks below are dead code kept for reference.
    raise SkipTest
    # Single job page
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s' % TestJobBrowserWithHadoop.hadoop_job_id)
    # Check some counters for single job.
    counters = response.context['job'].counters
    counters_file_bytes_written = counters['org.apache.hadoop.mapreduce.FileSystemCounter']['counters']['FILE_BYTES_WRITTEN']
    # Both phases should have written at least something to the local FS.
    assert_true(counters_file_bytes_written['map'] > 0)
    assert_true(counters_file_bytes_written['reduce'] > 0)
def test_task_page(self):
    # Always skipped; the filter checks below are dead code kept for reference.
    # NOTE(review): assert_true(len(...), N) only asserts the length is truthy --
    # the N is treated as the failure message. assert_equal was probably intended.
    raise SkipTest
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks' % (TestJobBrowserWithHadoop.hadoop_job_id,))
    assert_true(len(response.context['page'].object_list), 4)
    # Select by tasktype
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks?tasktype=reduce' % (TestJobBrowserWithHadoop.hadoop_job_id,))
    assert_true(len(response.context['page'].object_list), 1)
    # Select by taskstate
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (TestJobBrowserWithHadoop.hadoop_job_id,))
    assert_true(len(response.context['page'].object_list), 4)
    # Select by text
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks?tasktext=clean' % (TestJobBrowserWithHadoop.hadoop_job_id,))
    assert_true(len(response.context['page'].object_list), 1)
def test_job_single_logs_page(self):
    # Always skipped; the log-tab checks below are dead code kept for reference.
    raise SkipTest
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/single_logs' % (TestJobBrowserWithHadoop.hadoop_job_id))
    assert_true('syslog' in response.content, response.content)
    # Which log tab is active varies with the Hadoop version / CI environment.
    assert_true('<div class="tab-pane active" id="logsSysLog">' in response.content or
                '<div class="tab-pane active" id="logsStdErr">' in response.content or  # Depending on Hadoop
                '<div class="tab-pane active" id="logsStdOut">' in response.content,  # For jenkins
                response.content)
class TestMapReduce1NoHadoop:

    def test_acls_job(self):
        """MR1 ACLs: the configured user may view/modify the job, others may not."""
        job = MockMr1Job()

        for username, allowed in (('test', True), ('test2', False)):
            check = assert_true if allowed else assert_false
            check(can_view_job(username, job))
            check(can_modify_job(username, job))
class MockMr1Job(Job):
    """Minimal MR1 job stand-in whose ACLs allow only user 'test'."""

    def __init__(self):
        self.is_mr2 = False
        allowed_user = 'test'
        self._full_job_conf = {
            'mapreduce.cluster.acls.enabled': True,
            'mapreduce.job.acl-modify-job': allowed_user,
            'mapreduce.job.acl-view-job': allowed_user,
        }
class TestMapReduce2NoHadoop:
    """JobBrowser tests against mocked YARN/MR2/history-server APIs (no cluster)."""

    def setUp(self):
        # Beware: Monkey patching.
        # Save the real API factories exactly once so tearDown can restore them.
        # Fixed: the guards previously tested attribute names that were never set
        # ('old_get_resource_manager_api', and 'old_get_mapreduce_api' on the
        # wrong module), so a setUp after a failed tearDown could overwrite the
        # saved originals with the mocks.
        if not hasattr(resource_manager_api, 'old_get_resource_manager'):
            resource_manager_api.old_get_resource_manager = resource_manager_api.get_resource_manager
        if not hasattr(mapreduce_api, 'old_get_mapreduce_api'):
            mapreduce_api.old_get_mapreduce_api = mapreduce_api.get_mapreduce_api
        if not hasattr(history_server_api, 'old_get_history_server_api'):
            history_server_api.old_get_history_server_api = history_server_api.get_history_server_api

        self.c = make_logged_in_client(is_superuser=False)
        grant_access("test", "test", "jobbrowser")
        self.user = User.objects.get(username='test')

        self.c2 = make_logged_in_client(is_superuser=False, username="test2")
        grant_access("test2", "test2", "jobbrowser")
        self.user2 = User.objects.get(username='test2')

        # Swap in the mocks defined below in this module.
        resource_manager_api.get_resource_manager = lambda username: MockResourceManagerApi(username)
        mapreduce_api.get_mapreduce_api = lambda username: MockMapreduceApi(username)
        history_server_api.get_history_server_api = lambda: HistoryServerApi()

        self.finish = [
            YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True),
            SHARE_JOBS.set_for_testing(False)
        ]
        assert_true(cluster.is_yarn())

    def tearDown(self):
        # Restore the real API factories and undo config overrides.
        resource_manager_api.get_resource_manager = getattr(resource_manager_api, 'old_get_resource_manager')
        mapreduce_api.get_mapreduce_api = getattr(mapreduce_api, 'old_get_mapreduce_api')
        history_server_api.get_history_server_api = getattr(history_server_api, 'old_get_history_server_api')

        for f in self.finish:
            f()

    def test_jobs(self):
        """Listing returns all mocked apps; text filter narrows to one."""
        response = self.c.get('/jobbrowser/?format=json')
        response_content = json.loads(response.content)
        assert_equal(len(response_content['jobs']), 4)

        response = self.c.get('/jobbrowser/jobs/?format=json&text=W=MapReduce-copy2')
        response_content = json.loads(response.content)
        assert_equal(len(response_content['jobs']), 1)

    def test_applications_no_start_time(self):
        """An app with startedTime == 0 renders empty time strings in the list."""
        response = self.c.get('/jobbrowser/?format=json')
        data = json.loads(response.content)
        job = [j for j in data['jobs'] if j['id'] == 'application_1428442704693_0007']
        assert_true(job, job)
        job = job[0]
        assert_equal('', job['startTimeFormatted'], data)
        assert_equal('', job['durationFormatted'], data)

    def test_running_job(self):
        # Both the application_ and job_ id forms resolve to the running job.
        response = self.c.get('/jobbrowser/jobs/application_1356251510842_0054')
        assert_true('job_1356251510842_0054' in response.content, response.content)
        assert_true('RUNNING' in response.content)

        response = self.c.get('/jobbrowser/jobs/job_1356251510842_0054')
        assert_true('job_1356251510842_0054' in response.content)
        assert_true('RUNNING' in response.content)

    def test_application_no_start_time(self):
        response = self.c.get('/jobbrowser/jobs/application_1428442704693_0007?format=json')
        data = json.loads(response.content)
        assert_equal('', data['job']['startTimeFormatted'], data)
        assert_equal('', data['job']['durationFormatted'], data)

    def test_finished_job(self):
        response = self.c.get('/jobbrowser/jobs/application_1356251510842_0009')
        assert_equal(response.context['job'].jobId, 'job_1356251510842_0009')

        response = self.c.get('/jobbrowser/jobs/job_1356251510842_0009')
        assert_equal(response.context['job'].jobId, 'job_1356251510842_0009')

    def test_spark_job(self):
        response = self.c.get('/jobbrowser/jobs/application_1428442704693_0006')
        assert_equal(response.context['job'].jobId, 'application_1428442704693_0006')

    def test_yarn_job(self):
        response = self.c.get('/jobbrowser/jobs/application_1428442704693_0007')
        assert_equal(response.context['job'].jobId, 'application_1428442704693_0007')

    def job_not_assigned(self):
        response = self.c.get('/jobbrowser/jobs/job_1356251510842_0009/job_not_assigned//my_url')
        assert_equal(response.context['jobid'], 'job_1356251510842_0009')
        assert_equal(response.context['path'], '/my_url')

        response = self.c.get('/jobbrowser/jobs/job_1356251510842_0009/job_not_assigned//my_url?format=json')
        result = json.loads(response.content)
        assert_equal(result['status'], 0)

    def test_acls_job(self):
        response = self.c.get('/jobbrowser/jobs/job_1356251510842_0054')  # Check in perm decorator
        assert_true(can_view_job('test', response.context['job']))
        assert_true(can_modify_job('test', response.context['job']))

        response2 = self.c2.get('/jobbrowser/jobs/job_1356251510842_0054')
        # Fixed: the apostrophe previously terminated the single-quoted literal
        # (a SyntaxError). NOTE(review): if the server HTML-escapes the message,
        # this needs to be 'don&#39;t have permission to access job' -- confirm.
        assert_true("don't have permission to access job" in response2.content, response2.content)

        assert_false(can_view_job('test2', response.context['job']))
        assert_false(can_modify_job('test2', response.context['job']))

    def test_kill_job(self):
        job_id = 'application_1356251510842_0054'
        try:
            response = self.c.post('/jobbrowser/jobs/%s/kill?format=json' % job_id)
            assert_equal(json.loads(response.content), {"status": 0})
        finally:
            # Reset the shared mock state so other tests still see it RUNNING.
            MockResourceManagerApi.APPS[job_id]['state'] = 'RUNNING'
class MockResourceManagerApi:
    """In-memory stand-in for the YARN ResourceManager REST API.

    APPS maps application id -> the attribute dict the real RM would return.
    Tests mutate entries in place (e.g. 'state' during kill tests), so the
    data is shared, mutable class state.
    """

    APPS = {
        # A RUNNING MapReduce app (also used by the kill test).
        'application_1356251510842_0054': {
            u'finishedTime': 1356961070119,
            u'name': u'oozie:launcher:T=map-reduce:W=MapReduce-copy:A=Sleep:ID=0000004-121223003201296-oozie-oozi-W',
            u'amContainerLogs': u'http://localhost:8042/node/containerlogs/container_1356251510842_0054_01_000001/romain',
            u'clusterId': 1356251510842,
            u'trackingUrl': u'http://localhost:8088/proxy/application_1356251510842_0054/jobhistory/job/job_1356251510842_0054',
            u'amHostHttpAddress': u'localhost:8042',
            u'startedTime': 1356961057225,
            u'queue': u'default',
            u'state': u'RUNNING',
            u'elapsedTime': 12894,
            u'finalStatus': u'UNDEFINED',
            u'diagnostics': u'',
            u'progress': 100.0,
            u'trackingUI': u'History',
            u'id': u'application_1356251510842_0054',
            u'user': u'test',
            # For when the job is KILLED
            u'startTime': 1356961057226,
            u'finishTime': 1356961057226,
            u'applicationType': 'MAPREDUCE'
        },
        # A FINISHED MapReduce app.
        'application_1356251510842_0009': {
            u'finishedTime': 1356467118570,
            u'name': u'oozie:action:T=map-reduce:W=MapReduce-copy2:A=Sleep:ID=0000002-121223003201296-oozie-oozi-W',
            u'amContainerLogs': u'http://localhost:8042/node/containerlogs/container_1356251510842_0009_01_000001/romain',
            u'clusterId': 1356251510842,
            u'trackingUrl': u'http://localhost:8088/proxy/application_1356251510842_0009/jobhistory/job/job_1356251510842_0009',
            u'amHostHttpAddress': u'localhost:8042',
            u'startedTime': 1356467081121,
            u'queue': u'default',
            u'state': u'FINISHED',
            u'elapsedTime': 37449,
            u'finalStatus': u'SUCCEEDED',
            u'diagnostics': u'',
            u'progress': 100.0,
            u'trackingUI': u'History',
            u'id': u'application_1356251510842_0009',
            u'user': u'test',
            u'applicationType': 'MAPREDUCE'
        },
        # A RUNNING Spark app.
        'application_1428442704693_0006': {
            u'allocatedMB': 4096,
            u'allocatedVCores': 3,
            u'amContainerLogs': u'http://localhost:8042/node/containerlogs/container_1428442704693_0006_01_000001/erickt',
            u'amHostHttpAddress': u'localhost:8042',
            u'applicationTags': u'',
            u'applicationType': u'SPARK',
            u'clusterId': 1428442704693,
            u'diagnostics': u'',
            u'elapsedTime': 529040,
            u'finalStatus': u'UNDEFINED',
            u'finishedTime': 0,
            u'id': u'application_1428442704693_0006',
            u'memorySeconds': 2138468,
            u'name': u'Spark shell',
            u'numAMContainerPreempted': 0,
            u'numNonAMContainerPreempted': 0,
            u'preemptedResourceMB': 0,
            u'preemptedResourceVCores': 0,
            u'progress': 10.0,
            u'queue': u'root.erickt',
            u'runningContainers': 3,
            u'startedTime': 1428443335161,
            u'state': u'RUNNING',
            u'trackingUI': u'ApplicationMaster',
            u'trackingUrl': u'http://localhost:8088/proxy/application_1428442704693_0006/',
            u'user': u'test',
            u'vcoreSeconds': 1567,
        },
        # A FINISHED plain-YARN app with startedTime == 0 (no start time).
        'application_1428442704693_0007': {
            u'allocatedMB': -1,
            u'allocatedVCores': -1,
            u'applicationTags': u'',
            u'applicationType': u'YARN',
            u'clusterId': 1428442704693,
            u'diagnostics': u'',
            u'elapsedTime': 4056,
            u'finalStatus': u'SUCCEEDED',
            u'finishedTime': 1428454945371,
            u'id': u'application_1428442704693_0007',
            u'memorySeconds': 2290,
            u'name': u'UnmanagedAM',
            u'numAMContainerPreempted': 0,
            u'numNonAMContainerPreempted': 0,
            u'preemptedResourceMB': 0,
            u'preemptedResourceVCores': 0,
            u'progress': 100.0,
            u'queue': u'root.erickt',
            u'runningContainers': -1,
            u'startedTime': 0,
            u'state': u'FINISHED',
            u'trackingUI': u'History',
            u'trackingUrl': u'http://N/A',
            u'user': u'test',
            u'vcoreSeconds': 1,
        },
    }

    def __init__(self, user, rm_url=None): pass

    def apps(self, **kwargs):
        """Return all mocked apps, shaped like the RM 'apps' response."""
        return {
            'apps': {
                'app': [
                    # RUNNING
                    MockResourceManagerApi.APPS['application_1356251510842_0054'],
                    # FINISHED
                    MockResourceManagerApi.APPS['application_1356251510842_0009'],
                    # SPARK
                    MockResourceManagerApi.APPS['application_1428442704693_0006'],
                    # YARN
                    MockResourceManagerApi.APPS['application_1428442704693_0007'],
                ]
            }
        }

    def app(self, job_id):
        """Return a single app by id; raises KeyError for unknown ids."""
        return {
            u'app': MockResourceManagerApi.APPS[job_id]
        }
class MockMapreduce2Api(object):
    """
    MockMapreduceApi and HistoryServerApi are very similar and inherit from it.

    Each method returns a canned payload shaped like the corresponding MR2
    REST response; the ids embedded in the payloads are fixed test fixtures.
    """

    def __init__(self, mr_url=None): pass

    def tasks(self, job_id):
        # One SUCCEEDED map task and one SCHEDULED reduce task.
        return {
            u'tasks': {
                u'task': [{
                    u'finishTime': 1357153330271, u'successfulAttempt': u'attempt_1356251510842_0062_m_000000_0', u'elapsedTime': 1901, u'state': u'SUCCEEDED',
                    u'startTime': 1357153328370, u'progress': 100.0, u'type': u'MAP', u'id': u'task_1356251510842_0062_m_000000'},
                    {
                    u'finishTime': 0, u'successfulAttempt': u'', u'elapsedTime': 0, u'state': u'SCHEDULED', u'startTime': 1357153326322, u'progress': 0.0,
                    u'type': u'REDUCE', u'id': u'task_1356251510842_0062_r_000000'}
                ]
            }
        }

    def conf(self, job_id):
        # Includes the ACL properties the permission tests rely on.
        return {
            "conf" : {
                "path" : "hdfs://host.domain.com:9000/user/user1/.staging/job_1326232085508_0004/job.xml",
                "property" : [
                    {
                        "name" : "dfs.datanode.data.dir",
                        "value" : "/home/hadoop/hdfs/data",
                    }, {
                        "name" : "mapreduce.job.acl-modify-job",
                        "value" : "test",
                    }, {
                        "name" : "mapreduce.job.acl-view-job",
                        "value" : "test",
                    }
                ]
            }
        }

    def job_attempts(self, job_id):
        return {
            "jobAttempts" : {
                "jobAttempt" : [
                    {
                        "nodeId" : "host.domain.com:8041",
                        "nodeHttpAddress" : "host.domain.com:8042",
                        "startTime" : 1326238773493,
                        "id" : 1,
                        "logsLink" : "http://host.domain.com:8042/node/containerlogs/container_1326232085508_0004_01_000001",
                        "containerId" : "container_1326232085508_0004_01_000001"
                    }
                ]
            }
        }

    def task_attempts(self, job_id, task_id):
        return {
            "taskAttempts" : {
                "taskAttempt" : [
                    {
                        "elapsedMergeTime" : 47,
                        "shuffleFinishTime" : 1326238780052,
                        "assignedContainerId" : "container_1326232085508_0004_01_000003",
                        "progress" : 100,
                        "elapsedTime" : 0,
                        "state" : "RUNNING",
                        "elapsedShuffleTime" : 2592,
                        "mergeFinishTime" : 1326238780099,
                        "rack" : "/98.139.92.0",
                        "elapsedReduceTime" : 0,
                        "nodeHttpAddress" : "host.domain.com:8042",
                        "type" : "REDUCE",
                        "startTime" : 1326238777460,
                        "id" : "attempt_1326232085508_4_4_r_0_0",
                        "finishTime" : 0
                    }
                ]
            }
        }

    def counters(self, job_id):
        return {
            "jobCounters" : {
                "id" : "job_1326232085508_4_4",
                "counterGroup" : [
                    {
                        "counterGroupName" : "org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter",
                        "counter" : [
                            {
                                "reduceCounterValue" : 0,
                                "mapCounterValue" : 0,
                                "totalCounterValue" : 0,
                                "name" : "BYTES_READ"
                            }
                        ]
                    },
                    {
                        "counterGroupName" : "org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter",
                        "counter" : [
                            {
                                "reduceCounterValue" : 0,
                                "mapCounterValue" : 0,
                                "totalCounterValue" : 0,
                                "name" : "BYTES_WRITTEN"
                            }
                        ]
                    }
                ]
            }
        }

    def kill(self, job_id):
        # Mutates the shared MockResourceManagerApi.APPS fixture: flips the
        # corresponding application's state to KILLED.
        job_id = job_id.replace('job', 'application')
        MockResourceManagerApi.APPS[job_id]['state'] = 'KILLED'
        return {}
class MockMapreduceApi(MockMapreduce2Api):
    def job(self, user, job_id):
        """Return the canned RUNNING job payload with its id rewritten to job_id.

        NOTE(review): the flattened source lost indentation; the finished job
        (ids containing 1356251510842_0009) appears to fall through and return
        None, with HistoryServerApi serving it instead -- confirm against the
        original file.
        """
        if '1356251510842_0009' not in job_id:
            job = {
                u'job': {
                    u'reducesCompleted': 0, u'mapsRunning': 1, u'id': u'job_1356251510842_0054', u'successfulReduceAttempts': 0, u'successfulMapAttempts': 0,
                    u'uberized': False, u'reducesTotal': 1, u'elapsedTime': 3426, u'mapsPending': 0, u'state': u'RUNNING', u'failedReduceAttempts': 0,
                    u'mapsCompleted': 0, u'killedMapAttempts': 0, u'killedReduceAttempts': 0, u'runningReduceAttempts': 0, u'failedMapAttempts': 0, u'mapsTotal': 1,
                    u'user': u'test', u'startTime': 1357152972886, u'reducesPending': 1, u'reduceProgress': 0.0, u'finishTime': 0,
                    u'name': u'select avg(salary) from sample_07(Stage-1)', u'reducesRunning': 0, u'newMapAttempts': 0, u'diagnostics': u'', u'mapProgress': 0.0,
                    u'runningMapAttempts': 1, u'newReduceAttempts': 1,
                    # Does not seems to exist in API, we actually skip it in case.
                    "acls" : [{
                        "value" : "test",
                        "name" : "mapreduce.job.acl-modify-job"
                    }, {
                        "value" : "test",
                        "name" : "mapreduce.job.acl-view-job"
                    }
                    ],
                }
            }
            job['job']['id'] = job_id
            return job
class HistoryServerApi(MockMapreduce2Api):
    """Mock of the MR2 history server: serves finished/killed job payloads."""

    def __init__(self, hs_url=None): pass

    def job(self, user, job_id):
        # The 0054 id is reported as KILLED; every other id gets the
        # canned SUCCEEDED 0009 job.
        if '1356251510842_0054' == job_id:
            return {
                u'job': {
                    u'reducesCompleted': 1, u'avgMapTime': 1798, u'avgMergeTime': 1479, u'id': job_id,
                    u'successfulReduceAttempts': 1, u'successfulMapAttempts': 2, u'uberized': False, u'reducesTotal': 1,
                    u'state': u'KILLED', u'failedReduceAttempts': 0, u'mapsCompleted': 2,
                    u'killedMapAttempts': 0, u'diagnostics': u'', u'mapsTotal': 2, u'user': u'test',
                    u'startTime': 1357151916268, u'avgReduceTime': 137,
                    u'finishTime': 1357151923925, u'name': u'oozie:action:T=map-reduce:W=MapReduce-copy:A=Sleep:ID=0000004-121223003201296-oozie-oozi-W',
                    u'avgShuffleTime': 1421, u'queue': u'default', u'killedReduceAttempts': 0, u'failedMapAttempts': 0
                }
            }
        else:
            return {
                u'job': {
                    u'reducesCompleted': 1, u'avgMapTime': 1798, u'avgMergeTime': 1479, u'id': u'job_1356251510842_0009',
                    u'successfulReduceAttempts': 1, u'successfulMapAttempts': 2, u'uberized': False, u'reducesTotal': 1,
                    u'state': u'SUCCEEDED', u'failedReduceAttempts': 0, u'mapsCompleted': 2,
                    u'killedMapAttempts': 0, u'diagnostics': u'', u'mapsTotal': 2, u'user': u'test',
                    u'startTime': 1357151916268, u'avgReduceTime': 137,
                    u'finishTime': 1357151923925, u'name': u'oozie:action:T=map-reduce:W=MapReduce-copy:A=Sleep:ID=0000004-121223003201296-oozie-oozi-W',
                    u'avgShuffleTime': 1421, u'queue': u'default', u'killedReduceAttempts': 0, u'failedMapAttempts': 0
                }
            }
def test_make_log_links():
    """
    Unit test for models.LinkJobLogs._make_links

    Checks that HDFS paths become FileBrowser links and job ids become
    JobBrowser links, with surrounding text and punctuation preserved.
    """
    # FileBrowser
    assert_equal(
        """<a href="/filebrowser/view=/user/romain/tmp" target="_blank">hdfs://localhost:8020/user/romain/tmp</a> &lt;dir&gt;""".replace('&lt;', '<').replace('&gt;', '>') if False else """<a href="/filebrowser/view=/user/romain/tmp" target="_blank">hdfs://localhost:8020/user/romain/tmp</a> <dir>""",
        LinkJobLogs._make_links('hdfs://localhost:8020/user/romain/tmp <dir>')
    )
    assert_equal(
        """<a href="/filebrowser/view=/user/romain/tmp" target="_blank">hdfs://localhost:8020/user/romain/tmp</a><dir>""",
        LinkJobLogs._make_links('hdfs://localhost:8020/user/romain/tmp<dir>')
    )
    assert_equal(
        """output: <a href="/filebrowser/view=/user/romain/tmp" target="_blank">/user/romain/tmp</a> <dir>""",
        LinkJobLogs._make_links('output: /user/romain/tmp <dir>')
    )
    assert_equal(
        'Successfully read 3760 records (112648 bytes) from: "<a href="/filebrowser/view=/user/hue/pig/examples/data/midsummer.txt" target="_blank">/user/hue/pig/examples/data/midsummer.txt</a>"',
        LinkJobLogs._make_links('Successfully read 3760 records (112648 bytes) from: "/user/hue/pig/examples/data/midsummer.txt"')
    )
    assert_equal(
        'data,upper_case MAP_ONLY <a href="/filebrowser/view=/user/romain/out/fffff" target="_blank">hdfs://localhost:8020/user/romain/out/fffff</a>,',
        LinkJobLogs._make_links('data,upper_case MAP_ONLY hdfs://localhost:8020/user/romain/out/fffff,')
    )
    assert_equal(
        'MAP_ONLY <a href="/filebrowser/view=/user/romain/out/fffff" target="_blank">hdfs://localhost:8020/user/romain/out/fffff</a>\n2013',
        LinkJobLogs._make_links('MAP_ONLY hdfs://localhost:8020/user/romain/out/fffff\n2013')
    )
    assert_equal(
        ' <a href="/filebrowser/view=/jobs.tsv" target="_blank">/jobs.tsv</a> ',
        LinkJobLogs._make_links(' /jobs.tsv ')
    )
    assert_equal(
        '<a href="/filebrowser/view=/user/romain/job_pos_2012.tsv" target="_blank">hdfs://localhost:8020/user/romain/job_pos_2012.tsv</a>',
        LinkJobLogs._make_links('hdfs://localhost:8020/user/romain/job_pos_2012.tsv')
    )
    # JobBrowser
    assert_equal(
        """<a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
        LinkJobLogs._make_links('job_201306261521_0058')
    )
    assert_equal(
        """Hadoop Job IDs executed by Pig: <a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
        LinkJobLogs._make_links('Hadoop Job IDs executed by Pig: job_201306261521_0058')
    )
    assert_equal(
        """MapReduceLauncher - HadoopJobId: <a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
        LinkJobLogs._make_links('MapReduceLauncher - HadoopJobId: job_201306261521_0058')
    )
    assert_equal(
        """- More information at: http://localhost:50030/jobdetails.jsp?jobid=<a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
        LinkJobLogs._make_links('- More information at: http://localhost:50030/jobdetails.jsp?jobid=job_201306261521_0058')
    )
    assert_equal(
        """ Logging error messages to: <a href="/jobbrowser/jobs/job_201307091553_0028" target="_blank">job_201307091553_0028</a>/attempt_201307091553_002""",
        LinkJobLogs._make_links(' Logging error messages to: job_201307091553_0028/attempt_201307091553_002')
    )
    assert_equal(
        """ pig-<a href="/jobbrowser/jobs/job_201307091553_0028" target="_blank">job_201307091553_0028</a>.log""",
        LinkJobLogs._make_links(' pig-job_201307091553_0028.log')
    )
    assert_equal(
        """MapReduceLauncher - HadoopJobId: <a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>. Look at the UI""",
        LinkJobLogs._make_links('MapReduceLauncher - HadoopJobId: job_201306261521_0058. Look at the UI')
    )
| {
"content_hash": "02a456eaef39e8a0751d7c5db47954d4",
"timestamp": "",
"source": "github",
"line_count": 844,
"max_line_length": 937,
"avg_line_length": 43.77843601895734,
"alnum_prop": 0.6429943976832931,
"repo_name": "MobinRanjbar/hue",
"id": "0cc2784288cdbe8e369eb764e0afff16e027bd23",
"size": "37742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/jobbrowser/src/jobbrowser/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13685"
},
{
"name": "C",
"bytes": "2397157"
},
{
"name": "C++",
"bytes": "177090"
},
{
"name": "CSS",
"bytes": "453436"
},
{
"name": "Emacs Lisp",
"bytes": "12145"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Groff",
"bytes": "14877"
},
{
"name": "HTML",
"bytes": "24042046"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "3220761"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Makefile",
"bytes": "114862"
},
{
"name": "Mako",
"bytes": "2450286"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "38423121"
},
{
"name": "Scala",
"bytes": "215057"
},
{
"name": "Shell",
"bytes": "54810"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "Thrift",
"bytes": "259222"
},
{
"name": "XSLT",
"bytes": "516845"
}
],
"symlink_target": ""
} |
from interfaces import Interface
from primitives import Cathode
from components import Split, Join, Transistor
from logicgates import Or, Not, And
from multiplexers import TwoToFourDecoder
from mixins import InputSetMixin, InputResetMixin, InputDataMixin, InputWriteMixin, InputDataEightBitMixin, InputDataInEightBitMixin, InputWriteMixin, InputWriteEnableMixin, InputReadEnableMixin, InputDataInMixin, InputAddressFourBitMixin, OutputMixin, OutputEightBitMixin, OutputDataOutMixin, OutputDataOutEightBitMixin
class AndOrLatch(Interface, InputSetMixin, InputResetMixin, OutputMixin):
    """An AND-OR latch: pulsing `set` drives the output high and it stays
    high until `reset` is pulsed."""

    def __init__(self):
        or_gate = Or()
        and_gate = And()
        inverter = Not()

        # External pins: `set` feeds the OR, `reset` feeds the inverter.
        inputs = {"set": or_gate.input_b, "reset": inverter.input}

        out = Cathode()
        outputs = {"output": out}

        # OR and inverted-reset meet at the AND gate.
        or_gate.output.connect(and_gate.input_a)
        inverter.output.connect(and_gate.input_b)

        # Feedback: the AND output loops back into the OR, holding the bit,
        # and also drives the visible output.
        feedback = Split()
        and_gate.output.connect(feedback.input)
        feedback.connect(or_gate.input_a)
        feedback.connect(out)

        super(AndOrLatch, self).__init__(inputs, outputs)

    def __str__(self):
        return "And Or Latch: " + super(AndOrLatch, self).__str__()
class GatedLatch(Interface, InputDataMixin, InputWriteMixin, OutputMixin):
    """A gated (D-type) latch: while `write` is high, `data` is stored;
    while `write` is low, the stored bit is held on `output`."""

    def __init__(self):
        data_split = Split()
        write_split = Split()

        inputs = {}
        inputs["data"] = data_split.input
        inputs["write"] = write_split.input

        outputs = {}
        output = Cathode()
        outputs["output"] = output

        n1 = Not()
        n2 = Not()
        a1 = And()
        a2 = And()
        a3 = And()
        o = Or()

        # Set path: a1 = data AND write. Reset path: a2 = NOT(data) AND write.
        data_split.connect(a1.input_a)
        data_split.connect(n1.input)
        n1.output.connect(a2.input_a)
        write_split.connect(a1.input_b)
        write_split.connect(a2.input_b)

        # a3 combines the OR'd set/feedback signal with the inverted reset path.
        a1.output.connect(o.input_b)
        a2.output.connect(n2.input)
        o.output.connect(a3.input_a)
        n2.output.connect(a3.input_b)

        # Feedback: the latch output loops back into the OR gate to hold state,
        # and also drives the visible output.
        a_output_split = Split()
        a3.output.connect(a_output_split.input)
        a_output_split.connect(o.input_a)
        a_output_split.connect(output)

        super(GatedLatch, self).__init__(inputs, outputs)

    def __str__(self):
        return "Gated Latch: " + super(GatedLatch, self).__str__()
class RAMCell(Interface, InputWriteEnableMixin, InputReadEnableMixin, InputDataInMixin, OutputDataOutMixin):
    """One addressable memory bit: a GatedLatch gated by row/col selects,
    with a transistor isolating `data_out` unless the cell is being read."""

    def __init__(self):
        a1 = And()
        a2 = And()
        a3 = And()
        t = Transistor(connect_to_power = False)
        gl = GatedLatch()

        #wire up row and col selector
        # a1 = row AND col: the cell is selected only when both lines are high.
        row = a1.input_a
        col = a1.input_b
        a1_split = Split(a2.input_b, a3.input_a)
        a1.output.connect(a1_split.input)

        #write / read enable inputs
        # a2 = selected AND write_enable; a3 = selected AND read_enable.
        write_enable = a2.input_a
        read_enable = a3.input_b

        #wire up gated latch inputs
        a2.output.connect(gl.write)
        data_in = gl.data

        inputs = {}
        inputs["row"] = row
        inputs["col"] = col
        inputs["write_enable"] = write_enable
        inputs["read_enable"] = read_enable
        inputs["data_in"] = data_in

        #wire up the data out transistor
        # The latch drives the collector; the read-select (a3) gates the base,
        # so the stored bit only reaches data_out during a read.
        gl.output.connect(t.collector)
        a3.output.connect(t.base)
        data_out = t.emitter

        outputs = {}
        outputs["data_out"] = data_out

        super(RAMCell, self).__init__(inputs, outputs)

    @property
    def row(self):
        # Row-select anode (the a1 AND input), exposed for external wiring.
        return self.inputs["row"]

    @property
    def col(self):
        # Column-select anode (the a1 AND input), exposed for external wiring.
        return self.inputs["col"]

    def __str__(self):
        return "RAM Cell: " + super(RAMCell, self).__str__()
class SixteenBitMemory(Interface, InputAddressFourBitMixin, InputWriteEnableMixin, InputReadEnableMixin, InputDataInMixin, OutputDataOutMixin):
    """A 4x4 matrix of RAMCells: 16 individually addressable bits.

    Two address bits feed a row decoder and two feed a column decoder; a
    cell is active only when both its one-hot row and column lines are high.
    """

    def __init__(self):
        # Decode the 4-bit address into one-hot row/col select lines.
        col_decoder = TwoToFourDecoder()
        row_decoder = TwoToFourDecoder()
        address = [row_decoder.input_a, row_decoder.input_b, col_decoder.input_a, col_decoder.input_b]

        write_enable = Split()
        read_enable = Split()
        data_in = Split()
        data_out = Join()

        # Fan each decoder output line out to the four cells it selects.
        row_selects = []
        for line in (row_decoder.output_a, row_decoder.output_b, row_decoder.output_c, row_decoder.output_d):
            fanout = Split()
            line.connect(fanout.input)
            row_selects.append(fanout)

        col_selects = []
        for line in (col_decoder.output_a, col_decoder.output_b, col_decoder.output_c, col_decoder.output_d):
            fanout = Split()
            line.connect(fanout.input)
            col_selects.append(fanout)

        # Build the 4x4 cell matrix and wire every shared line to each cell.
        for row in range(4):
            for col in range(4):
                cell = RAMCell()
                row_selects[row].connect(cell.row)
                col_selects[col].connect(cell.col)
                read_enable.connect(cell.read_enable)
                write_enable.connect(cell.write_enable)
                data_in.connect(cell.data_in)
                data_out.connect(cell.data_out)

        inputs = {}
        inputs["address"] = address
        inputs["write_enable"] = write_enable.input
        inputs["read_enable"] = read_enable.input
        inputs["data_in"] = data_in.input

        outputs = {}
        outputs["data_out"] = data_out.output

        super(SixteenBitMemory, self).__init__(inputs, outputs)

    def __str__(self):
        return "Sixteen Bit Memory: " + super(SixteenBitMemory, self).__str__()
class SixteenByteMemory(Interface, InputAddressFourBitMixin, InputWriteEnableMixin, InputReadEnableMixin, InputDataInEightBitMixin, OutputDataOutEightBitMixin):
    """Sixteen bytes of RAM built from eight SixteenBitMemory bit-planes.

    All eight planes share one 4-bit address bus and common read/write
    enables; bit *i* of the 8-bit data-in/data-out bus is wired to plane
    *i*, so one address selects the same cell in every plane (one byte).
    """
    def __init__(self):
        # create 8 16 bit memory modules and chain them together
        # One Split per address bit fans that bit out to every plane.
        address_splits = [Split(), Split(), Split(), Split()]
        write_split = Split()
        read_split = Split()
        data_out = []
        data_in = []
        # create and connect up the 16 bit memory modules
        for i in range(8):
            bit = SixteenBitMemory()
            # connect up the address
            for j in range(4):
                address_splits[j].connect(bit.address.get_bit(j))
            # connect write and read enable
            write_split.connect(bit.write_enable)
            read_split.connect(bit.read_enable)
            # create lists for data in and out
            data_out.append(bit.data_out)
            data_in.append(bit.data_in)
        inputs = {}
        inputs["address"] = [address_splits[0].input, address_splits[1].input, address_splits[2].input, address_splits[3].input]
        inputs["write_enable"] = write_split.input
        inputs["read_enable"] = read_split.input
        inputs["data_in"] = data_in
        outputs = {}
        outputs["data_out"] = data_out
        super(SixteenByteMemory, self).__init__(inputs, outputs)

    def __str__(self):
        # Bug fix: this previously called super(SixteenBitMemory, self),
        # naming the wrong class — which would resolve the MRO relative to
        # SixteenBitMemory (not a base of this class) and break/mislabel
        # the description. Use this class's own name, matching the pattern
        # of every other component's __str__.
        return "Sixteen Byte Memory: " + super(SixteenByteMemory, self).__str__()
class EightBitRegister(Interface, InputDataEightBitMixin, InputWriteMixin, OutputEightBitMixin):
    """An 8-bit register: eight gated latches sharing one write line.

    Bit *i* of the data bus feeds latch *i*; a single write signal is
    fanned out to every latch, and each latch's stored value appears on
    bit *i* of the output bus.
    """
    def __init__(self):
        # Eight identical latches, one per bit of the register.
        latches = [GatedLatch() for _ in range(8)]
        # Fan the single write line out to every latch's write input.
        write_split = Split(*[latch.write for latch in latches])
        inputs = {}
        inputs["write"] = write_split.input
        inputs["data"] = [latch.data for latch in latches]
        outputs = {}
        outputs["output"] = [latch.output for latch in latches]
        super(EightBitRegister, self).__init__(inputs, outputs)

    def __str__(self):
        return "8 bit register: inputs = {{data = {}, write = {}}}, outputs = {{output = {}}}".format(self.data, self.write, self.output)
| {
"content_hash": "6982a8d9d43f92d0e00fbc63451ac55d",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 320,
"avg_line_length": 35.154761904761905,
"alnum_prop": 0.5760243819844226,
"repo_name": "martinohanlon/PhilbinSS",
"id": "f298c2fc295631fd8f6b3e96dfafd8767c4aa688",
"size": "8859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "philbinss/memory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "83254"
}
],
"symlink_target": ""
} |
from tests.unit import unittest
from boto.sts.connection import STSConnection
from tests.unit import AWSMockServiceTestCase
class TestSTSConnection(AWSMockServiceTestCase):
    """Unit tests for STSConnection.assume_role against a mocked HTTP service."""
    connection_class = STSConnection

    def setUp(self):
        super(TestSTSConnection, self).setUp()

    def default_body(self):
        # Canned AssumeRole XML response the mock service returns for
        # every request in this test case.
        return """
            <AssumeRoleResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
              <AssumeRoleResult>
                <AssumedRoleUser>
                  <Arn>arn:role</Arn>
                  <AssumedRoleId>roleid:myrolesession</AssumedRoleId>
                </AssumedRoleUser>
                <Credentials>
                  <SessionToken>session_token</SessionToken>
                  <SecretAccessKey>secretkey</SecretAccessKey>
                  <Expiration>2012-10-18T10:18:14.789Z</Expiration>
                  <AccessKeyId>accesskey</AccessKeyId>
                </Credentials>
              </AssumeRoleResult>
              <ResponseMetadata>
                <RequestId>8b7418cb-18a8-11e2-a706-4bd22ca68ab7</RequestId>
              </ResponseMetadata>
            </AssumeRoleResponse>
        """

    def test_assume_role(self):
        """assume_role sends the right parameters and parses the response."""
        self.set_http_response(status_code=200)
        response = self.service_connection.assume_role('arn:role', 'mysession')
        # Signature/timestamp parameters vary per request, so they are ignored.
        self.assert_request_parameters(
            {'Action': 'AssumeRole',
             'RoleArn': 'arn:role',
             'RoleSessionName': 'mysession'},
            ignore_params_values=['Timestamp', 'AWSAccessKeyId',
                                  'SignatureMethod', 'SignatureVersion',
                                  'Version'])
        # Parsed fields must match the canned XML in default_body().
        self.assertEqual(response.credentials.access_key, 'accesskey')
        self.assertEqual(response.credentials.secret_key, 'secretkey')
        self.assertEqual(response.credentials.session_token, 'session_token')
        self.assertEqual(response.user.arn, 'arn:role')
        self.assertEqual(response.user.assume_role_id, 'roleid:myrolesession')
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "8b1635073df971ab60abf3bf5b6267ef",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 82,
"avg_line_length": 40.411764705882355,
"alnum_prop": 0.5992236778262979,
"repo_name": "jameslegg/boto",
"id": "f874cafd4c862bbbb91f413062f47ab580c2ddb0",
"size": "3207",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "tests/unit/sts/test_connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3224"
},
{
"name": "Python",
"bytes": "4227744"
},
{
"name": "Shell",
"bytes": "3071"
}
],
"symlink_target": ""
} |
import shutil
import os
import subprocess
import datetime
import re
import ulysses_sync_lib_1_0_2 as Ulib # Main library for syncing, xml2md- and md2xml-conversions.
# --- User-tunable configuration for the export/sync script ---
make_marked_files = True  # If True: Make Marked-files on top and bottom group level.
add_ul_uuid_to_export_filenames = True  # Have to be True to sync changes back to same Sheet
# Users home folder:
HOME = os.getenv("HOME", "") + "/"
# Full rsync backup of Ulysses library (except Daedalus Touch):
# (Max two backups kept for each day: AM and PM)
# (Use Hazel or similar apps, for cleanup of old backups)
backup_path = HOME + "Ulysses Backup/"
# Backup is run before each sync.
# Here, all Ulysses sheets are exported as Markdown files,
# in a folder structure same as original Ulysses groups:
sync_path_mac = HOME + "Dropbox/Notebooks/My Writings/Ulysses Mac Export/"
sync_path_icloud = HOME + "Dropbox/Notebooks/My Writings/Ulysses iCloud Export/"
# sync_path_demo = HOME + "Ulysses Demo Export/"
# Here, all sheets under each top level group, are joined as single Markdown files:
md_joined_path = HOME + "Ulysses MMD Joined_temp/"
# Note "md_joined_path" is a "temporary" folder, and md-files here will be deleted
# and regenerated each time this script is run.
# So, if you edit any of these files, please save them in a diferent folder!
def copy_media(from_path, media_path, to_root):
    """Copy every file in *from_path* into *media_path*.

    Unless *media_path* belongs to the Inbox, the files are also mirrored
    into the top-level group's common "Media" folder under *to_root*, so
    Marked can resolve image links from the group-level .marked file.
    """
    def _fill(dest_dir):
        # Create the destination on demand, then copy each source file into it.
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        for name in os.listdir(from_path):
            shutil.copy2(from_path + "/" + name, dest_dir)

    _fill(media_path)
    if "/_Inbox/" in media_path:
        # The Inbox only has a single level, so no top-level mirror is needed.
        return
    # Derive "<top group>/Media" from the destination path relative to to_root.
    relative = media_path.replace(to_root, "")
    top_media = to_root + relative.split("/")[0] + "/Media"
    _fill(top_media)
def backup_ulysses(from_path, backup_path, branch):
    """rsync a full copy of the Ulysses library at *from_path* into a
    timestamped folder under *backup_path*.

    The timestamp uses an AM/PM suffix, so at most two backups are kept
    per day per *branch* (e.g. "On My Mac", "iCloud"); re-running within
    the same half-day overwrites into the same folder.
    """
    stamp = datetime.datetime.now().strftime("%Y-%m-%d_%p")
    target = "{0}{1}_Library_{2}/".format(backup_path, branch, stamp)
    if not os.path.exists(target):
        os.makedirs(target)
    print("=================================================================================")
    print("*** BACKUP TO:", target)
    # -t preserves modification times, -r recurses; no --delete, so this
    # only ever adds/refreshes files in the backup.
    subprocess.call(['rsync', '-t', '-r', from_path, target])
    print("*** End Backup")
    print()
def export_files(file_list, sync_temp, md_joined_path, last_synced, log, sync_path):
    """Export the sheets listed in *file_list* as Markdown files under *sync_temp*.

    *file_list* is a newline-separated list of tab-separated records:
    source sheet path, modification timestamp, destination sub-path, and
    destination file name (produced by Ulib.list_all_files).  Per top-level
    group this also accumulates a joined Markdown file (written to
    *md_joined_path*) and, when make_marked_files is set, .marked include
    files at the top and bottom group levels.  Sheets newer than
    *last_synced* whose export target under *sync_path* is older get a log
    entry via *log*.
    """
    marked_text_top = ""
    marked_text_bottom = ""
    marked_top_modified = 0
    marked_bottom_modified = 0
    last_group_path = ""
    last_path = ""
    md_main_text = ""
    ul2md = Ulib.UlyssesToMarkdown()
    for line in file_list.split("\n"):
        if line == "":
            continue
        # Record layout: from_path \t modified \t to_path \t to_file
        columns = line.split("\t")
        from_path = columns[0]
        modified = columns[1]
        to_path = columns[2]
        to_file = columns[3]
        to_full_path = sync_temp + to_path
        if not os.path.exists(to_full_path):
            os.makedirs(to_full_path)
        md_text = ul2md.xml2markdown(from_path)
        if os.path.exists(from_path + "/Media"):
            media_path = to_full_path + "Media"
            copy_media(from_path + "/Media", media_path, sync_temp)
            # Rewrite Ulysses "#fileref" placeholders into links to the
            # copied media files (spaces URL-encoded for Markdown).
            for media_file in os.listdir(media_path):
                # print(from_path, media_file)
                try:
                    media_ref = media_file.split(".")[-2]
                    media_file = media_file.replace(" ", "%20")
                    # print(media_ref, media_file)
                    md_text = md_text.replace(media_ref + ".#fileref", media_file)
                except:
                    # NOTE(review): bare except deliberately skips files
                    # without an extension (IndexError from the split).
                    pass
        to_file_full = to_path + to_file + ".md"
        ts_modified = Ulib.write_file_modified(sync_temp + to_file_full, md_text, modified)
        # Check only to making log entries for exported files:
        if ts_modified > last_synced:
            dest_file = sync_path + to_file_full
            dest_modified = Ulib.get_file_date(dest_file)
            if ts_modified > dest_modified:
                if "_Inbox" in sync_temp:
                    file_name = "_Inbox/" + to_file
                else:
                    file_name = to_path + to_file
                modified_date = datetime.datetime.\
                    fromtimestamp(ts_modified).strftime("%Y-%m-%d %H:%M:%S")
                # file_name = re.sub(r"/\d\d - ", r"/", file_name)
                # Strip the trailing " - <32-hex-uuid>" suffix from the name.
                file_name = re.sub(r" - [0-9a-f]{32}$", r"", file_name)
                # Ulib.debug(190, file_name)
                log.add_line("Sheet edited at: ", modified_date, file_name, " - Exported to:")
        # Track the newest sheet timestamp seen per top/bottom group, so the
        # generated .marked files carry a matching modification time.
        if ts_modified > marked_top_modified:
            marked_top_modified = ts_modified
        if ts_modified > marked_bottom_modified:
            marked_bottom_modified = ts_modified
        sub_paths = to_path.split("/")
        group_path = sub_paths[0] + "/"
        # make marked-file for top groups:
        # (flush accumulated state whenever the top-level group changes)
        if group_path != last_group_path and last_group_path != "":
            if make_marked_files:
                marked_file = sync_temp + last_group_path + "_" + last_group_path[5:-1] + ".marked"
                Ulib.write_file_modified(marked_file, marked_text_top, marked_top_modified)
            marked_text_top = ""
            marked_top_modified = 0
            # Complete Markdown file for top level group:
            Ulib.write_file(md_joined_path + last_group_path[:-1] + ".md", md_main_text)
            md_main_text = ""
        # make marked-file for bottom groups:
        if to_path != last_path and last_path != "":
            sub_paths = last_path.split("/")
            pos = len(sub_paths) - 2
            if make_marked_files:
                marked_file = sync_temp + last_path + "_" + sub_paths[pos][5:] + ".marked"
                Ulib.write_file_modified(marked_file, marked_text_bottom, marked_bottom_modified)
            marked_bottom_modified = 0
            marked_text_bottom = ""
        comment = ""  # "{>>@: " + to_file_full + "<<}\n"
        md_main_text += comment + md_text.rstrip() + "\n\n\n"
        to_file_first = to_file_full.replace(group_path, "")
        # ".marked" files are lists of "<<[file]" transclusion directives.
        marked_text_top += "<<[" + to_file_first + "]\n"
        marked_text_bottom += "<<[" + to_file + ".md]\n"
        last_group_path = group_path
        last_path = to_path
    # endfor line in file_list.split("\n")
    # Write leftovers after end of for loop:
    if make_marked_files:
        if marked_text_top != "":
            marked_file = sync_temp + last_group_path + "_" + last_group_path[5:-1] + ".marked"
            Ulib.write_file_modified(marked_file, marked_text_top, marked_top_modified)
        if marked_text_bottom != "":
            sub_paths = last_path.split("/")
            pos = len(sub_paths) - 2
            marked_file = sync_temp + last_path + "_" + sub_paths[pos][5:] + ".marked"
            Ulib.write_file_modified(marked_file, marked_text_bottom, marked_bottom_modified)
    if md_main_text != "":
        Ulib.write_file(md_joined_path + last_group_path[:-1] + ".md", md_main_text)
    return
#end_def export_files
def main(ulysses_path, sync_path, md_joined_path):
    """Run one full sync cycle for a single Ulysses library.

    Syncs externally-edited Markdown files back into the library, exports
    every sheet (including the Inbox) to Markdown under *sync_path*, and
    regenerates the joined per-group Markdown files under *md_joined_path*.
    Returns the Markdown-formatted log of what changed.
    """
    print()
    print("==============================================================================")
    print("Exporting files ...")
    print("From:", ulysses_path)
    print(" --> ", sync_path)
    print()
    # WARNING! DO NOT CHANGE THIS FOLDER/ PATH:
    sync_temp = HOME + ".ulysses_temp/"
    ulgroup_path = ulysses_path + "Groups-ulgroup/"
    file_list = ""
    last_synced = 0
    if os.path.exists(md_joined_path):
        if md_joined_path != HOME and md_joined_path + "/" != HOME \
                and "." not in md_joined_path:
            # Make sure md_joined_path is not HOME path!!!
            # We don't want to delete all users files by mistake!!!
            shutil.rmtree(md_joined_path)
    if not os.path.exists(md_joined_path):
        os.makedirs(md_joined_path)
    sync_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # log = Ulib.LogFileSheet(inbox_path, sync_date)
    log = Ulib.LogFileSheet(ulgroup_path, sync_date)
    if os.path.exists(sync_path):
        # A previous export exists: sync external Markdown edits back first.
        sync_file = sync_path + ".ulysses_sync.log"
        last_synced = Ulib.get_file_date(sync_file)
        # Syncs markdown files changed since last sync,
        # back to corresponding sheets MardownXL and XML in Ulysses library:
        log.add_entry("**Markdown to Ulysses Sync:**")
        Ulib.sync_files(sync_path, ulysses_path, log)
    else:
        os.makedirs(sync_path)
    # Generate file list to be used by "export_files" below:
    (file_list, pc) = Ulib.list_all_files(ulysses_path + "Groups-ulgroup/", "", 1,
                                          add_ul_uuid_to_export_filenames)
    # Exports all files in Library ulysses_path to temp path: sync_temp
    # and also makes complete joined MD files for each Top Level Group to: md_joined_path
    # Ulysses Markdown XL is converted to MultiMarkdown with CriticsMarkup or
    # to plain Markdown combined with HTML comment-tags or span-tags. Set flag in lib-file
    log.add_entry("**Ulysses to Markdown Export:**")
    log.line_count = 0
    export_files(file_list, sync_temp, md_joined_path, last_synced, log, sync_path)
    # To include Default group (Unfiled-ulgroup or Inbox):
    (file_list, pc) = Ulib.list_all_files(ulysses_path + "Unfiled-ulgroup/", "", 1,
                                          add_ul_uuid_to_export_filenames)
    export_files(file_list, sync_temp + "_Inbox/", md_joined_path, last_synced,
                 log, sync_path + "_Inbox/")
    # Use rsync to copy files changed since last sync to export path: sync_path,
    # and deletes files if sheet have been deleted in Ulysses.
    subprocess.call(['rsync', '-t', '-r', '--delete', '--progress', sync_temp, sync_path])
    # sync_file must be written here after rsync, otherwise rsync will delete the file.
    sync_file = sync_path + ".ulysses_sync.log"
    Ulib.write_file(sync_file, file_list)
    # Extra check, just to make sure nothing bad happens:
    if sync_temp == HOME or sync_temp.endswith(".") \
            or sync_temp.strip() == "" or sync_temp.strip() == "/":
        print("*** Warning BAD temp-folder name:", sync_temp)
        print("*** Program aborted! Fix it in line 261!!")
        quit()
    else:
        shutil.rmtree(sync_temp)
    print()
    print("Export Done to: " + sync_path)
    log.write_log_sheet(False)
    return log.get_md_log()
#end_def main(ulysses_path, sync_path):
# ================
# =  Main Script =
# ================
main_log = ""
# NOTE(review): HOME is already defined near the top of the file; this
# re-definition is redundant but harmless (same expression).
HOME = os.getenv("HOME", "") + "/"
# Library locations for the two synced branches (plus the unused demo build).
ulysses_path_mac = HOME + "Library/Containers/com.soulmen.ulysses3/Data/"\
    + "Documents/Library/"
ulysses_path_icloud = HOME + "Library/Mobile Documents/X5AZV975AG~com~soulmen~ulysses3/"\
    + "Documents/Library/"
ulysses_path_demo = HOME + "Library/Containers/com.soulmen.ulysses3.demo/Data/"\
    + "Documents/Library/"
# Always back up both branches before touching anything.
backup_ulysses(ulysses_path_mac, backup_path, "On My Mac")
backup_ulysses(ulysses_path_icloud, backup_path, "iCloud")
main_log += "Synced from: " + sync_path_mac + "\n"
main_log += main(ulysses_path_mac, sync_path_mac, md_joined_path + "On My Mac/")
main_log += "\nSynced from: " + sync_path_icloud + "\n"
main_log += main(ulysses_path_icloud, sync_path_icloud, md_joined_path + "iCloud/")
# main_log += "\nSynced from: " + sync_path_demo + "\n"
# main_log += main(ulysses_path_demo, sync_path_demo, md_joined_path + "Demo/")
# print()
print()
print("==============================================================================")
# Print the log with escaped newlines restored and Ulysses line-separator
# bytes (U+2028, utf-8 e2 80 a8) rendered as tabs.
print(str(main_log.encode("utf-8")).replace("\\n", "\n")[2:-1].replace("\\xe2\\x80\\xa8", "\t"))
print("==============================================================================")
# Ulib.notify("Ulysses sync completed")
# print("==============================================================================")
| {
"content_hash": "ede6ed7c5800a5a2747b8a277d3385a7",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 99,
"avg_line_length": 40.23870967741936,
"alnum_prop": 0.5782427449094115,
"repo_name": "MrBlaschke/Ulysses3_to_md_sync",
"id": "61825b3947d298e7f25268865b7cfeab36680282",
"size": "15155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ulysses2md_export_sync_1_0_2.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import SimpleITK as sitk
import sys
# Demonstrates SimpleITK image grid manipulation: joining, channel
# extraction/recomposition, and three equivalent ways to take a sub-region.
if len(sys.argv) < 3:
    print("Usage: " + sys.argv[0] + " <input-1> <input-2>")
    sys.exit(1)
# Two vector images of same pixel type and dimension expected
image_1 = sitk.ReadImage(sys.argv[1])
image_2 = sitk.ReadImage(sys.argv[2])
# Join two N-D Vector images to form an (N+1)-D image
join = sitk.JoinSeriesImageFilter()
joined_image = join.Execute(image_1, image_2)
# Extract first three channels of joined image (assuming RGB)
select = sitk.VectorIndexSelectionCastImageFilter()
channel1_image = select.Execute(joined_image, 0, sitk.sitkUInt8)
channel2_image = select.Execute(joined_image, 1, sitk.sitkUInt8)
channel3_image = select.Execute(joined_image, 2, sitk.sitkUInt8)
# Recompose image (should be same as joined_image)
compose = sitk.ComposeImageFilter()
composed_image = compose.Execute(channel1_image, channel2_image,
                                 channel3_image)
# Select same subregion using image slicing operator
sliced_image = composed_image[100:400, 100:400, 0]
# Select same subregion using ExtractImageFilter
# (size 0 along an axis collapses that dimension, like the slice above)
extract = sitk.ExtractImageFilter()
extract.SetSize([300, 300, 0])
extract.SetIndex([100, 100, 0])
extracted_image = extract.Execute(composed_image)
# Select same sub-region using CropImageFilter (NOTE: CropImageFilter cannot
# reduce dimensions unlike ExtractImageFilter, so cropped_image is a three
# dimensional image with depth of 1)
crop = sitk.CropImageFilter()
crop.SetLowerBoundaryCropSize([100, 100, 0])
crop.SetUpperBoundaryCropSize([composed_image.GetWidth() - 400,
                               composed_image.GetHeight() - 400, 1])
cropped_image = crop.Execute(composed_image)
| {
"content_hash": "2e7036c2e0fbc9222c0257e71e9ae26f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 38.83720930232558,
"alnum_prop": 0.7353293413173653,
"repo_name": "richardbeare/SimpleITK",
"id": "409932b654fa360a0474cb36d1ff6037ed6e341d",
"size": "2434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Examples/ImageGridManipulation/ImageGridManipulation.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "32664"
},
{
"name": "C#",
"bytes": "5324"
},
{
"name": "C++",
"bytes": "1933234"
},
{
"name": "CMake",
"bytes": "265951"
},
{
"name": "CSS",
"bytes": "31103"
},
{
"name": "Dockerfile",
"bytes": "1074"
},
{
"name": "HTML",
"bytes": "3744"
},
{
"name": "Java",
"bytes": "7242"
},
{
"name": "Lua",
"bytes": "25805"
},
{
"name": "Makefile",
"bytes": "145"
},
{
"name": "Python",
"bytes": "199006"
},
{
"name": "R",
"bytes": "54684"
},
{
"name": "SWIG",
"bytes": "2602002"
},
{
"name": "Shell",
"bytes": "109644"
},
{
"name": "Tcl",
"bytes": "3501"
}
],
"symlink_target": ""
} |
"""The super-group for the sql CLI.
The fact that this is a directory with
an __init__.py in it makes it a command group. The methods written below will
all be called by calliope (though they are all optional).
"""
import argparse
import os
import re
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import apis
from googlecloudsdk.core import config
from googlecloudsdk.core import properties
from googlecloudsdk.core import resolvers
from googlecloudsdk.core import resources as cloud_resources
from googlecloudsdk.core.credentials import store as c_store
# Cloud SQL API versions this command group can target (choices for the
# hidden --api-version flag added in _Args below).
_ACTIVE_VERSIONS = [
    'v1beta3',
    'v1beta4',
]
def _Args(parser):
    """Add the shared, hidden --api-version override flag to *parser*."""
    parser.add_argument(
        '--api-version',
        help=argparse.SUPPRESS,  # hidden from --help output
        choices=_ACTIVE_VERSIONS,
        action=actions.StoreProperty(
            properties.VALUES.api_endpoint_overrides.sql))
def _DoFilter(context, api_version_default):
    """Set up and return the context to be used by all SQL release tracks."""
    # Default the 'project' param for sql resources from the core/project
    # gcloud property so users don't have to pass it on every command.
    cloud_resources.REGISTRY.SetParamDefault(
        api='sql', collection=None, param='project',
        resolver=resolvers.FromProperty(properties.VALUES.core.project))
    # *context* is mutated in place; the return value is a convenience
    # (callers below ignore it).
    context['sql_client'] = apis.GetClientInstance('sql', api_version_default)
    context['sql_messages'] = apis.GetMessagesModule('sql', api_version_default)
    context['registry'] = cloud_resources.REGISTRY.Clone()
    context['registry'].RegisterApiByName('sql', api_version_default)
    return context
@base.ReleaseTracks(base.ReleaseTrack.GA)
class SQL(base.Group):
    """Manage Cloud SQL databases."""

    @staticmethod
    def Args(parser):
        # Shared flag definitions for the GA track.
        _Args(parser)

    @exceptions.RaiseToolExceptionInsteadOf(c_store.Error)
    def Filter(self, context, args):
        # GA track pins the older v1beta3 API surface.
        _DoFilter(context, 'v1beta3')
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class SQLBeta(base.Group):
    """Manage Cloud SQL databases."""

    @staticmethod
    def Args(parser):
        # Same shared flag definitions as the GA track.
        _Args(parser)

    @exceptions.RaiseToolExceptionInsteadOf(c_store.Error)
    def Filter(self, context, args):
        # Beta track targets the newer v1beta4 API surface.
        _DoFilter(context, 'v1beta4')
| {
"content_hash": "78b60c5ae12239491c7f12763ef07e98",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 78,
"avg_line_length": 28.89041095890411,
"alnum_prop": 0.7472735893788526,
"repo_name": "KaranToor/MA450",
"id": "7e147efe5e3b0a1a1815d1bd47fa2f5de303a6dc",
"size": "2705",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/surface/sql/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
} |
"""Factories for making test data"""
from factory import Faker
from factory.django import DjangoModelFactory, ImageField
from faker.providers import BaseProvider
from profiles.models import Profile, UserWebsite
class LocationProvider(BaseProvider):
    """Factory for location JSON"""

    # Fixed pool of "City, Region, Country" display strings (intentionally
    # includes non-Latin scripts to exercise unicode handling).
    cities = [
        "Kathmandu, मध्यमाञ्चल विकास क्षेत्र, Nepal",
        "Paris, Île-de-France, France",
        "Cairo, محافظة القاهرة, Egypt",
        "Tokyo, 東京都, Japan",
        "Medellín, Antioquia, Colombia",
    ]

    def location(self):
        """Return location JSON with random city name"""
        return {"value": self.random_element(self.cities)}
Faker.add_provider(LocationProvider)
class ProfileFactory(DjangoModelFactory):
    """Factory for Profiles"""
    name = Faker("name")
    # String path fields for the three image sizes...
    image = Faker("file_path", extension="jpg")
    image_small = Faker("file_path", extension="jpg")
    image_medium = Faker("file_path", extension="jpg")
    # ...and actual uploaded-file fields for the same three sizes.
    image_file = ImageField()
    image_small_file = ImageField()
    image_medium_file = ImageField()
    email_optin = Faker("boolean")
    location = Faker("location")  # custom provider registered above

    class Meta:
        model = Profile
class UserWebsiteFactory(DjangoModelFactory):
    """Factory for UserWebsite"""
    url = Faker("url")

    class Meta:
        model = UserWebsite
| {
"content_hash": "cafa50f6345a7651debe285087a14c8e",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 58,
"avg_line_length": 23.98181818181818,
"alnum_prop": 0.6588324488248674,
"repo_name": "mitodl/open-discussions",
"id": "5487884a0aa85be0b7dec93f45f71b37b568a8e1",
"size": "1384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiles/factories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1040"
},
{
"name": "HTML",
"bytes": "78316"
},
{
"name": "JavaScript",
"bytes": "1704037"
},
{
"name": "Procfile",
"bytes": "675"
},
{
"name": "Python",
"bytes": "2264549"
},
{
"name": "SCSS",
"bytes": "133442"
},
{
"name": "Shell",
"bytes": "11787"
},
{
"name": "TypeScript",
"bytes": "307134"
}
],
"symlink_target": ""
} |
import os
import random
import json
import collections
from PIL import Image
# Convert (r, g, b) into #rrggbb color
def getRGBstring( (r, g, b) ):
s = "#"
s = s + format(r, '02x')
s = s + format(g, '02x')
s = s + format(b, '02x')
return s
def do_compute():
    """Load the source image and write its color-frequency data to res/freq.json."""
    # Open the image
    origImgFile = 'res/bryce.jpg'
    origImg = Image.open(origImgFile)
    # Process the image
    # NOTE(review): this appears to be a starter scaffold (repo path is
    # "demo_colorFreq_start") — `freq` is never assigned, so calling
    # do_compute() raises NameError until the color-counting step that
    # builds `freq` from `origImg` is filled in here.
    # Save the processed information
    output = { 'file': origImgFile,
               'freq': freq }
    f = open("res/freq.json",'w')
    s = json.dumps(output, indent = 4)
    f.write(s)
    # NOTE(review): `f` is never closed — prefer `with open(...)`.
| {
"content_hash": "b3bba37afda2917f1857135ca60a8251",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 38,
"avg_line_length": 16,
"alnum_prop": 0.5954861111111112,
"repo_name": "CS205IL-sp15/workbook",
"id": "194121d3f705439a553cceafc683d93f439e0c65",
"size": "594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo_colorFreq_start/py/compute.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "291"
},
{
"name": "HTML",
"bytes": "27022"
},
{
"name": "Handlebars",
"bytes": "522"
},
{
"name": "JavaScript",
"bytes": "116411"
},
{
"name": "Python",
"bytes": "58061"
}
],
"symlink_target": ""
} |
import unittest
from src.models.model import db
from src.models.user import User
from src.models.chat import Chat
from database.factories.model_factory import factory
class TestUser(unittest.TestCase):
    """Unit tests for the User model's status and friend-pairing behavior."""

    def setUp(self):
        # Run every test inside a transaction...
        db.begin_transaction()

    def tearDown(self):
        # ...and roll it back so no test data persists between tests.
        db.rollback()

    def test_set_status_active(self):
        user = factory(User).create(status=User.statuses['idle'])
        user.set_status('active')
        self.assertEqual(user.status, User.statuses['active'])

    def test_set_status_wrong(self):
        # An unknown status name must leave the current status untouched.
        user = factory(User).create(status=User.statuses['idle'])
        user.set_status('status_that_does_not_exist')
        self.assertEqual(user.status, User.statuses['idle'])

    def test_friend(self):
        # A freshly created user has no friend.
        user = factory(User).create()
        self.assertEqual(user.friend(), None)

    def test_add_friend_with_one_user(self):
        # With no other user available, pairing fails.
        user_1 = factory(User).create()
        self.assertFalse(user_1.add_friend())

    def test_add_friend_with_two_users(self):
        # Two same-language users get paired symmetrically.
        user_1 = factory(User).create(lang='ru')
        user_2 = factory(User).create(lang='ru')
        user_1.add_friend()
        self.assertEqual(user_1.friend().id, user_2.id)
        self.assertEqual(user_2.friend().id, user_1.id)

    def test_add_friend_with_different_users(self):
        # Only 'active' users are eligible; idle/not_active users (made but
        # not persisted via .make) must be skipped.
        active_user_1 = factory(User).create(status=User.statuses['active'], lang='ru')
        active_user_2 = factory(User).create(status=User.statuses['active'], lang='ru')
        factory(User).make(status=User.statuses['idle'], lang='ru')
        factory(User).make(status=User.statuses['not_active'], lang='ru')
        active_user_1.add_friend()
        self.assertEqual(active_user_1.friend().id, active_user_2.id)

    def test_del_friend(self):
        user_1 = factory(User).create()
        user_2 = factory(User).create()
        self.__make_friends(user_1, user_2)
        user_1.del_friend()
        self.assertEqual(user_1.friend(), None)

    @staticmethod
    def __make_friends(user_1, user_2):
        # Pair two users directly by creating the underlying Chat record.
        factory(Chat).create(
            user_1=user_1.id,
            user_2=user_2.id
        )
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "820a6b02e964292f8f111a1b8c867e3b",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 87,
"avg_line_length": 30.053333333333335,
"alnum_prop": 0.6051464063886424,
"repo_name": "erjanmx/salam-bot",
"id": "da4e50d3864f6ca98975b51973aeade6d65fc16f",
"size": "2254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29313"
}
],
"symlink_target": ""
} |
from src.game.location import Location
from src.game.place import Place
# Module-level handle to the Location under test, shared between tests.
l = None


def test_init():
    """Location stores its constructor arguments unchanged."""
    global l
    l = Location(None, 10, 10, "A testing location")
    # NOTE(review): `== None` works here but `is None` is the idiomatic check.
    assert l.place == None
    assert l.x == 10
    assert l.y == 10
    assert l.name == "A testing location"
| {
"content_hash": "e75fef46723a810487cde9392f205d4d",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 52,
"avg_line_length": 19.928571428571427,
"alnum_prop": 0.6415770609318996,
"repo_name": "lexwraith/ThisIsBob",
"id": "331fafc382a658cd748defb780c09659a1129cdf",
"size": "279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_location.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14378"
}
],
"symlink_target": ""
} |
import oauth2
from bind import bind_method
from models import MediaShortcode, Media, User, Location, Tag, Comment, Relationship
# Query parameters shared by the paginated media endpoints below.
MEDIA_ACCEPT_PARAMETERS = ["count", "max_id"]
# Query parameters shared by the search endpoints below.
SEARCH_ACCEPT_PARAMETERS = ["q", "count"]
# Response formats the client accepts (only json is implemented).
SUPPORTED_FORMATS = ['json']
class InstagramAPI(oauth2.OAuth2API):
host = "api.instagram.com"
base_path = "/v1"
access_token_field = "access_token"
authorize_url = "https://api.instagram.com/oauth/authorize"
access_token_url = "https://api.instagram.com/oauth/access_token"
protocol = "https"
api_name = "Instagram"
x_ratelimit_remaining = None
x_ratelimit = None
def __init__(self, *args, **kwargs):
format = kwargs.get('format', 'json')
if format in SUPPORTED_FORMATS:
self.format = format
else:
raise Exception("Unsupported format")
super(InstagramAPI, self).__init__(*args, **kwargs)
media_popular = bind_method(
path="/media/popular",
accepts_parameters=MEDIA_ACCEPT_PARAMETERS,
root_class=Media)
media_search = bind_method(
path="/media/search",
accepts_parameters=SEARCH_ACCEPT_PARAMETERS + ['lat', 'lng', 'min_timestamp', 'max_timestamp', 'distance'],
root_class=Media)
media_shortcode = bind_method(
path="/media/shortcode/{shortcode}",
accepts_parameters=['shortcode'],
response_type="entry",
root_class=MediaShortcode)
media_likes = bind_method(
path="/media/{media_id}/likes",
accepts_parameters=['media_id'],
root_class=User)
like_media = bind_method(
path="/media/{media_id}/likes",
method="POST",
signature=True,
accepts_parameters=['media_id'],
response_type="empty")
unlike_media = bind_method(
path="/media/{media_id}/likes",
method="DELETE",
signature=True,
accepts_parameters=['media_id'],
response_type="empty")
create_media_comment = bind_method(
path="/media/{media_id}/comments",
method="POST",
signature=True,
accepts_parameters=['media_id', 'text'],
response_type="empty",
root_class=Comment)
delete_comment = bind_method(
path="/media/{media_id}/comments/{comment_id}",
method="DELETE",
signature=True,
accepts_parameters=['media_id', 'comment_id'],
response_type="empty")
media_comments = bind_method(
path="/media/{media_id}/comments",
method="GET",
accepts_parameters=['media_id'],
response_type="list",
root_class=Comment)
media = bind_method(
path="/media/{media_id}",
accepts_parameters=['media_id'],
response_type="entry",
root_class=Media)
user_media_feed = bind_method(
path="/users/self/feed",
accepts_parameters=MEDIA_ACCEPT_PARAMETERS,
root_class=Media,
paginates=True)
user_liked_media = bind_method(
path="/users/self/media/liked",
accepts_parameters=MEDIA_ACCEPT_PARAMETERS,
root_class=Media,
paginates=True)
user_recent_media = bind_method(
path="/users/{user_id}/media/recent",
accepts_parameters=MEDIA_ACCEPT_PARAMETERS + ['user_id'],
root_class=Media,
paginates=True)
user_search = bind_method(
path="/users/search",
accepts_parameters=SEARCH_ACCEPT_PARAMETERS,
root_class=User)
user_follows = bind_method(
path="/users/{user_id}/follows",
accepts_parameters=["user_id"],
paginates=True,
root_class=User)
user_followed_by = bind_method(
path="/users/{user_id}/followed-by",
accepts_parameters=["user_id"],
paginates=True,
root_class=User)
user = bind_method(
path="/users/{user_id}",
accepts_parameters=["user_id"],
root_class=User,
response_type="entry")
location_recent_media = bind_method(
path="/locations/{location_id}/media/recent",
accepts_parameters=MEDIA_ACCEPT_PARAMETERS + ['location_id'],
root_class=Media,
paginates=True)
location_search = bind_method(
path="/locations/search",
accepts_parameters=SEARCH_ACCEPT_PARAMETERS + ['lat', 'lng', 'foursquare_id', 'foursquare_v2_id'],
root_class=Location)
location = bind_method(
path="/locations/{location_id}",
accepts_parameters=["location_id"],
root_class=Location,
response_type="entry")
geography_recent_media = bind_method(
path="/geographies/{geography_id}/media/recent",
accepts_parameters=MEDIA_ACCEPT_PARAMETERS + ["geography_id"],
root_class=Media,
paginates=True)
tag_recent_media = bind_method(
path="/tags/{tag_name}/media/recent",
accepts_parameters=MEDIA_ACCEPT_PARAMETERS + ['tag_name'],
root_class=Media,
paginates=True)
tag_search = bind_method(
path="/tags/search",
accepts_parameters=SEARCH_ACCEPT_PARAMETERS,
root_class=Tag,
paginates=True)
tag = bind_method(
path="/tags/{tag_name}",
accepts_parameters=["tag_name"],
root_class=Tag,
response_type="entry")
user_incoming_requests = bind_method(
path="/users/self/requested-by",
root_class=User)
change_user_relationship = bind_method(
method="POST",
path="/users/{user_id}/relationship",
signature=True,
root_class=Relationship,
accepts_parameters=["user_id", "action"],
paginates=True,
requires_target_user=True,
response_type="entry")
user_relationship = bind_method(
method="GET",
path="/users/{user_id}/relationship",
root_class=Relationship,
accepts_parameters=["user_id"],
paginates=False,
requires_target_user=True,
response_type="entry")
def _make_relationship_shortcut(action):
    """Build a relationship method bound to one fixed *action*.

    The returned function delegates to ``change_user_relationship`` with the
    captured action; only the ``user_id`` keyword argument is honored
    (positional arguments are accepted but ignored).
    """
    def _relationship_action(self, *args, **kwargs):
        target_user_id = kwargs.get("user_id")
        return self.change_user_relationship(user_id=target_user_id,
                                             action=action)
    return _relationship_action
# Convenience wrappers over change_user_relationship, one per relationship
# action string accepted by the endpoint.
follow_user = _make_relationship_shortcut('follow')
unfollow_user = _make_relationship_shortcut('unfollow')
block_user = _make_relationship_shortcut('block')
unblock_user = _make_relationship_shortcut('unblock')
approve_user_request = _make_relationship_shortcut('approve')
ignore_user_request = _make_relationship_shortcut('ignore')
def _make_subscription_action(method, include=None, exclude=None):
    """Build a bound API method for the /subscriptions endpoint.

    :param method: HTTP method for the call ('POST', 'GET', 'DELETE').
    :param include: optional extra parameter names to accept.
    :param exclude: optional parameter names to drop from the default set.
    :return: the callable produced by bind_method.
    """
    accepts_parameters = [
        "object",
        "aspect",
        "object_id",      # Optional if subscribing to all users
        "callback_url",
        "lat",            # Geography
        "lng",            # Geography
        "radius",         # Geography
        "verify_token",
    ]
    if include:
        accepts_parameters.extend(include)
    if exclude:
        accepts_parameters = [p for p in accepts_parameters if p not in exclude]
    return bind_method(
        path="/subscriptions",
        method=method,
        accepts_parameters=accepts_parameters,
        include_secret=True,
        objectify_response=False,
        # Only the GET (listing) call is unsigned; all mutations are signed.
        signature=(method != 'GET'),
    )
# Real-time subscription management. DELETE swaps the 'object_id' parameter
# for an explicit subscription 'id'.
create_subscription = _make_subscription_action('POST')
list_subscriptions = _make_subscription_action('GET')
delete_subscriptions = _make_subscription_action('DELETE', exclude=['object_id'], include=['id'])
| {
"content_hash": "4fa122191e43114cc8af70cc6cdc0da0",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 123,
"avg_line_length": 36.39330543933055,
"alnum_prop": 0.5295470223039779,
"repo_name": "trevornelson/squeeklights",
"id": "ad479e27696e2bba285f12b0e9912977bce87397",
"size": "8698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "instagram/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "16857"
},
{
"name": "Batchfile",
"bytes": "141"
},
{
"name": "C",
"bytes": "106954"
},
{
"name": "CSS",
"bytes": "23426"
},
{
"name": "HTML",
"bytes": "34315"
},
{
"name": "JavaScript",
"bytes": "874395"
},
{
"name": "Python",
"bytes": "302892"
},
{
"name": "Shell",
"bytes": "1119"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.template import Template
from django.test import TestCase
from django_dynamic_fixture import N, G
from entity.models import Entity, EntityKind, EntityRelationship
from freezegun import freeze_time
from mock import patch, call, Mock
from six import text_type
from entity_event.models import (
Medium, Source, SourceGroup, Unsubscription, Subscription, Event, EventActor, EventSeen,
RenderingStyle, ContextRenderer, _unseen_event_ids, SubscriptionQuerySet,
EventQuerySet, EventManager
)
from entity_event.tests.models import TestFKModel
class EventRenderTest(TestCase):
    """
    Does an entire integration test for rendering events relative to mediums.
    """
    def _make_fixture(self, value, additional_context=None):
        """Build the common source/renderer/medium/event graph.

        :param value: value stored on the TestFKModel referenced by the event.
        :param additional_context: optional extra context attached to the
            Medium (omitted entirely when None so the model default applies).
        :return: (medium, fk_model, event) tuple.
        """
        rendering_style = G(RenderingStyle)
        source = G(Source)
        G(
            ContextRenderer, source=source, rendering_style=rendering_style,
            text_template_path='test_template.txt',
            html_template_path='test_template.html', context_hints={
                'fk_model': {
                    'app_name': 'tests',
                    'model_name': 'TestFKModel',
                }
            })
        medium_kwargs = {'rendering_style': rendering_style}
        if additional_context is not None:
            medium_kwargs['additional_context'] = additional_context
        medium = G(Medium, **medium_kwargs)
        fk_model = G(TestFKModel, value=value)
        event = G(Event, source=source, context={'fk_model': fk_model.id})
        return medium, fk_model, event

    def test_one_context_renderer_one_medium_w_additional_context(self):
        medium, _, _ = self._make_fixture(100, additional_context={'suppress_value': True})
        events = Event.objects.all().load_contexts_and_renderers(medium)
        txt, html = events[0].render(medium)
        self.assertEquals(txt, 'Test text template with value 100')
        # The medium's additional context suppresses the value in the html template
        self.assertEquals(html, 'Test html template with value suppressed')

    def test_one_context_renderer_one_medium(self):
        medium, _, _ = self._make_fixture(100)
        events = Event.objects.all().load_contexts_and_renderers(medium)
        txt, html = events[0].render(medium)
        self.assertEquals(txt, 'Test text template with value 100')
        self.assertEquals(html, 'Test html template with value 100')

    def test_wo_fetching_contexts(self):
        # render() requires load_contexts_and_renderers() to have been called
        medium, _, event = self._make_fixture(100)
        with self.assertRaises(RuntimeError):
            event.render(medium)

    def test_get_serialized_context(self):
        medium, fk_model, _ = self._make_fixture('100', additional_context={'suppress_value': True})
        event = Event.objects.all().load_contexts_and_renderers(medium)[0]
        # Call the method
        response = event.get_serialized_context(medium)
        # Assert we have a proper response
        self.assertEqual(
            response,
            {
                'suppress_value': True,
                'fk_model': {
                    'id': fk_model.id,
                    'value': fk_model.value
                }
            }
        )

    def test_get_serialized_context_wo_fetching_context(self):
        # Serialization also requires contexts to have been loaded first
        medium, _, event = self._make_fixture('100', additional_context={'suppress_value': True})
        with self.assertRaises(RuntimeError):
            event.get_serialized_context(medium)
class EventManagerCreateEventTest(TestCase):
    """
    Tests for Event.objects.create_event and the bulk create_events method,
    including uuid-based duplicate suppression.
    """
    def test_create_event_no_actors(self):
        source = G(Source)
        e = Event.objects.create_event(context={'hi': 'hi'}, source=source)
        self.assertEqual(e.source, source)
        self.assertEqual(e.context, {'hi': 'hi'})
        self.assertEqual(e.uuid, '')
        # No actors supplied, so no EventActor rows should exist
        self.assertFalse(EventActor.objects.exists())

    def test_create_event_multiple_actor_pks(self):
        # Actors may be passed as primary keys
        source = G(Source)
        actors = [G(Entity), G(Entity)]
        e = Event.objects.create_event(context={'hi': 'hi'}, source=source, actors=[a.id for a in actors], uuid='hi')
        self.assertEqual(e.source, source)
        self.assertEqual(e.context, {'hi': 'hi'})
        self.assertEqual(e.uuid, 'hi')
        self.assertEqual(
            set(EventActor.objects.filter(event=e).values_list('entity', flat=True)), {a.id for a in actors})

    def test_create_event_multiple_actors(self):
        # Actors may also be passed as model instances
        source = G(Source)
        actors = [G(Entity), G(Entity)]
        e = Event.objects.create_event(context={'hi': 'hi'}, source=source, actors=actors, uuid='hi')
        self.assertEqual(e.source, source)
        self.assertEqual(e.context, {'hi': 'hi'})
        self.assertEqual(e.uuid, 'hi')
        self.assertEqual(
            set(EventActor.objects.filter(event=e).values_list('entity', flat=True)), {a.id for a in actors})

    def test_ignore_duplicates_w_uuid_doesnt_already_exist(self):
        source = G(Source)
        e = Event.objects.create_event(context={'hi': 'hi'}, source=source, uuid='1', ignore_duplicates=True)
        self.assertIsNotNone(e)

    def test_ignore_duplicates_w_uuid_already_exist(self):
        source = G(Source)
        Event.objects.create_event(context={'hi': 'hi'}, source=source, uuid='1', ignore_duplicates=True)
        # A second event with the same uuid is suppressed and returns None
        e = Event.objects.create_event(context={'hi': 'hi'}, source=source, uuid='1', ignore_duplicates=True)
        self.assertIsNone(e)

    def test_ignore_duplicates_wo_uuid_already_exist(self):
        source = G(Source)
        Event.objects.create_event(context={'hi': 'hi'}, source=source, ignore_duplicates=True)
        e = Event.objects.create_event(context={'hi': 'hi'}, source=source, ignore_duplicates=True)
        self.assertIsNone(e)

    def test_create_events(self):
        """
        Tests the bulk event creation to make sure all data gets set correctly
        """
        source = G(Source)
        Event.objects.create_event(context={'hi': 'hi'}, source=source, ignore_duplicates=True)
        actor1 = G(Entity)
        actor2 = G(Entity)
        actor3 = G(Entity)
        actor4 = G(Entity)
        event_kwargs = [{
            'context': {'one': 'one'},
            'source': source,
            'ignore_duplicates': True,
            'actors': [actor1, actor2],
            'uuid': '1'
        }, {
            'context': {'two': 'two'},
            'source': source,
            'ignore_duplicates': True,
            'actors': [actor2],
            'uuid': '2'
        }]
        events = Event.objects.create_events(event_kwargs)
        events.sort(key=lambda x: x.uuid)
        self.assertEqual(len(events), 2)
        self.assertEqual(events[0].uuid, '1')
        self.assertEqual(events[0].context['one'], 'one')
        self.assertEqual(events[0].source, source)
        self.assertEqual(
            {event_actor.entity_id for event_actor in events[0].eventactor_set.all()},
            {actor1.id, actor2.id}
        )
        self.assertEqual(events[1].uuid, '2')
        self.assertEqual(events[1].context['two'], 'two')
        self.assertEqual(events[1].source, source)
        self.assertEqual(
            {event_actor.entity_id for event_actor in events[1].eventactor_set.all()},
            {actor2.id}
        )
        # Add some events where one is a duplicate
        event_kwargs = [{
            'context': {'one': 'one'},
            'source': source,
            'ignore_duplicates': True,
            'actors': [actor3, actor4],
            'uuid': '1'
        }, {
            'context': {'three': 'three'},
            'source': source,
            'ignore_duplicates': True,
            'actors': [actor3],
            'uuid': '3'
        }]
        events = Event.objects.create_events(event_kwargs)
        self.assertEqual(len(events), 1)
        self.assertEqual(events[0].uuid, '3')
        self.assertEqual(events[0].context['three'], 'three')
        self.assertEqual(events[0].source, source)
        # The surviving uuid='3' event was created with only actor3.
        # (The previous expectation listed actor3 twice, which silently
        # deduplicated to this same set.)
        self.assertEqual(
            {event_actor.entity_id for event_actor in events[0].eventactor_set.all()},
            {actor3.id}
        )
        # All duplicates
        event_kwargs = [{
            'context': {'one': 'one'},
            'source': source,
            'ignore_duplicates': True,
            'actors': [actor3, actor4],
            'uuid': '1'
        }, {
            'context': {'three': 'three'},
            'source': source,
            'ignore_duplicates': True,
            'actors': [actor3],
            'uuid': '3'
        }]
        events = Event.objects.create_events(event_kwargs)
        self.assertEqual(len(events), 0)
class EventManagerQuerySetTest(TestCase):
    """Tests for the custom Event queryset and manager plumbing."""

    def setUp(self):
        super(EventManagerQuerySetTest, self).setUp()
        # References used by the tests below
        self.queryset = EventQuerySet()
        self.manager = EventManager()

    def test_mark_seen(self):
        event = G(Event, context={})
        medium = G(Medium)
        Event.objects.mark_seen(medium)
        # Exactly one seen-marker row for this event/medium pair
        self.assertEqual(EventSeen.objects.count(), 1)
        self.assertTrue(EventSeen.objects.filter(event=event, medium=medium).exists())

    @patch('entity_event.context_loader.load_contexts_and_renderers', spec_set=True)
    def test_load_contexts_and_renderers(self, mock_loader):
        event = G(Event, context={})
        medium = G(Medium)
        Event.objects.load_contexts_and_renderers(medium)
        self.assertEquals(mock_loader.call_count, 1)
        # First positional arg is the queryset of events, second the mediums
        first_call_args = mock_loader.call_args_list[0][0]
        self.assertEquals(list(first_call_args[0]), [event])
        self.assertEquals(first_call_args[1], [medium])

    @patch.object(EventManager, 'get_queryset', autospec=True)
    def test_cache_related(self, mock_get_queryset):
        # Setup some mock return values
        mock_get_queryset.return_value = Mock(EventQuerySet(), autospec=True)
        # Call the method
        self.manager.cache_related()
        # Assert that we called get queryset
        mock_get_queryset.assert_called_once_with(self.manager)
class MediumEventsInterfacesTest(TestCase):
    # Integration tests covering Medium.events, Medium.entity_events, and
    # Medium.events_targets against a small entity/relationship graph.
    def setUp(self):
        # Call the parent setup
        super(MediumEventsInterfacesTest, self).setUp()
        # Set Up Entities and Relationships
        everyone_kind = G(EntityKind, name='all', display_name='all')
        group_kind = G(EntityKind, name='group', display_name='Group')
        self.person_kind = G(EntityKind, name='person', display_name='Person')
        # Setup people entities
        self.p1 = G(Entity, entity_kind=self.person_kind, display_name='p1')
        self.p2 = G(Entity, entity_kind=self.person_kind, display_name='p2')
        self.p3 = G(Entity, entity_kind=self.person_kind, display_name='p3')
        p4 = G(Entity, entity_kind=self.person_kind, display_name='p4')
        # Setup group entities
        g1 = G(Entity, entity_kind=group_kind)
        g2 = G(Entity, entity_kind=group_kind)
        # Setup the global entity
        everyone = G(Entity, entity_kind=everyone_kind)
        # Assign entity relationships
        # p1 and p2 are in group1, p3 and p4 are in group2
        for sup, sub in [(g1, self.p1), (g1, self.p2), (g2, self.p3), (g2, p4)]:
            G(EntityRelationship, super_entity=sup, sub_entity=sub)
        # All people are in the everyone group
        for p in [self.p1, self.p2, self.p3, p4]:
            G(EntityRelationship, super_entity=everyone, sub_entity=p)
        # Set up Mediums, Sources, Subscriptions, Events
        # We are creating 4 events
        # 2 for source a, 1 has p1 as an actor, the other has no actors
        # 1 for source b, for p2
        # 1 for source c, for p2 and p3
        self.medium_x = G(Medium, name='x', display_name='x')
        self.medium_y = G(Medium, name='y', display_name='y')
        self.medium_z = G(Medium, name='z', display_name='z')
        self.source_a = G(Source, name='a', display_name='a')
        self.source_b = G(Source, name='b', display_name='b')
        self.source_c = G(Source, name='c', display_name='c')
        e1 = G(Event, source=self.source_a, context={})
        G(Event, source=self.source_a, context={})
        e3 = G(Event, source=self.source_b, context={})
        e4 = G(Event, source=self.source_c, context={})
        G(EventActor, event=e1, entity=self.p1)
        G(EventActor, event=e3, entity=self.p2)
        G(EventActor, event=e4, entity=self.p2)
        G(EventActor, event=e4, entity=self.p3)
        # Create subscriptions
        # Source a is subscribed to medium x, for everyone of person not following
        # source a is subscribed to medium y, for everyone of person, following
        # source c is subscribed to medium z, for group1, following
        G(
            Subscription,
            source=self.source_a,
            medium=self.medium_x,
            only_following=False,
            entity=everyone,
            sub_entity_kind=self.person_kind
        )
        G(
            Subscription,
            source=self.source_a,
            medium=self.medium_y,
            only_following=True,
            entity=everyone,
            sub_entity_kind=self.person_kind
        )
        G(
            Subscription,
            source=self.source_c,
            medium=self.medium_z,
            only_following=True,
            entity=g1,
            sub_entity_kind=self.person_kind
        )

    def test_events_basic(self):
        # Both source-a events are visible on medium x (not following-only)
        events = self.medium_x.events()
        self.assertEqual(events.count(), 2)

    def test_events_only_following(self):
        # Medium y is following-only; only the event with an actor qualifies
        events = self.medium_y.events()
        self.assertEqual(events.count(), 1)

    def test_entity_events_basic(self):
        events = self.medium_x.entity_events(entity=self.p1)
        self.assertEqual(len(events), 2)

    def test_entity_events_basic_mark_seen(self):
        events = self.medium_x.entity_events(
            entity=self.p1,
            seen=False,
            mark_seen=True
        )
        self.assertEqual(len(events), 2)
        # The other medium should also get marked as seen
        self.assertEqual(len(EventSeen.objects.all()), 4)

    def test_entity_events_basic_unsubscribed(self):
        # After unsubscribing p1 from source a, only source-b events remain
        G(Unsubscription, entity=self.p1, source=self.source_a, medium=self.medium_x)
        G(Event, source=self.source_b, context={})
        G(Subscription, source=self.source_b, medium=self.medium_x, only_following=False,
          entity=self.p1, sub_entity_kind=None)
        events = self.medium_x.entity_events(entity=self.p1)
        self.assertEqual(len(events), 2)
        for event in events:
            self.assertEqual(event.source, self.source_b)

    def test_entity_events_only_following(self):
        events = self.medium_z.entity_events(entity=self.p2)
        self.assertEqual(len(events), 1)

    def test_entity_targets_basic(self):
        events_targets = self.medium_x.events_targets()
        self.assertEqual(len(events_targets), 2)

    def test_entity_targets_target_count(self):
        # All four people are targets of the first source-a event
        events_targets = self.medium_x.events_targets(entity_kind=self.person_kind)
        self.assertEqual(len(events_targets[0][1]), 4)

    def test_entity_targets_only_following(self):
        events_targets = self.medium_z.events_targets(entity_kind=self.person_kind)
        self.assertEqual(len(events_targets[0][1]), 1)
class MediumTest(TestCase):
    def test_events_targets_start_time(self):
        """
        Makes sure that only events created after the medium are returned
        """
        entity = G(Entity)
        source1 = G(Source)
        source2 = G(Source)
        # Events created before either medium exists
        G(Event, uuid='1', source=source1, context={})
        G(Event, uuid='2', source=source1, context={})
        G(Event, uuid='3', source=source2, context={})
        G(Event, uuid='4', source=source2, context={})
        medium1 = G(Medium)
        medium2 = G(Medium)
        # Events created after the mediums
        G(Event, uuid='5', source=source1, context={})
        G(Event, uuid='6', source=source2, context={})
        # Subscribe the entity to source1 on both mediums
        G(Subscription, medium=medium1, source=source1, entity=entity, only_following=False)
        G(Subscription, medium=medium2, source=source1, entity=entity, only_following=False)
        # Collect every event medium 1 reports since its own creation time
        events = [
            event for event, targets in medium1.events_targets(start_time=medium1.time_created)
        ]
        # Only the single post-creation source1 event should be present
        self.assertEqual(len(events), 1)
class MediumRenderTest(TestCase):
    @patch('entity_event.context_loader.load_contexts_and_renderers', spec_set=True)
    def test_render(self, mock_load_contexts_and_renderers):
        medium = N(Medium)
        # Two fake events whose render() returns fixed (text, html) pairs
        first_event = Mock(render=Mock(return_value=('e1.txt', 'e1.html')))
        second_event = Mock(render=Mock(return_value=('e2.txt', 'e2.html')))
        events = [first_event, second_event]
        rendered = medium.render(events)
        # Contexts are loaded once for all events against this medium
        mock_load_contexts_and_renderers.assert_called_once_with(events, [medium])
        expected = {
            first_event: ('e1.txt', 'e1.html'),
            second_event: ('e2.txt', 'e2.html'),
        }
        self.assertEquals(rendered, expected)
        first_event.render.assert_called_once_with(medium)
        second_event.render.assert_called_once_with(medium)
class MediumSubsetSubscriptionsTest(TestCase):
    """Tests for Medium.subset_subscriptions filtering by entity."""

    def setUp(self):
        person_kind = G(EntityKind, name='person', display_name='Person')
        self.super_e = G(Entity)
        self.sub_e = G(Entity, entity_kind=person_kind)
        unrelated_entity = G(Entity)
        G(EntityRelationship, super_entity=self.super_e, sub_entity=self.sub_e)
        self.medium = G(Medium)
        # A group-level subscription, an individual one, and an unrelated one
        self.group_sub = G(Subscription, entity=self.super_e, sub_entity_kind=person_kind)
        self.indiv_sub = G(Subscription, entity=self.sub_e, sub_entity_kind=None)
        self.random_sub = G(Subscription, entity=unrelated_entity)

    def test_no_entity(self):
        # Without an entity the full queryset is returned untouched
        all_subs = Subscription.objects.all()
        self.assertEqual(self.medium.subset_subscriptions(all_subs), all_subs)

    def test_sub_entity(self):
        # The sub-entity matches both its own and its group's subscription
        all_subs = Subscription.objects.all()
        self.assertEqual(self.medium.subset_subscriptions(all_subs, self.sub_e).count(), 2)

    def test_super_not_included(self):
        # The super entity itself matches no subscriptions
        all_subs = Subscription.objects.all()
        self.assertEqual(self.medium.subset_subscriptions(all_subs, self.super_e).count(), 0)
class MediumGetFilteredEventsTest(TestCase):
    # Tests for Medium.get_filtered_events with the seen filter.
    def setUp(self):
        super(MediumGetFilteredEventsTest, self).setUp()
        self.entity = G(Entity)
        self.medium = G(Medium)
        self.source = G(Source)
        G(Subscription, medium=self.medium, source=self.source, entity=self.entity, only_following=False)

    def test_get_unseen_events_some_seen_some_not(self):
        seen_e = G(Event, context={}, source=self.source)
        G(EventSeen, event=seen_e, medium=self.medium)
        unseen_e = G(Event, context={}, source=self.source)
        # Only the event not marked seen by this medium is returned
        events = self.medium.get_filtered_events(seen=False)
        self.assertEquals(list(events), [unseen_e])

    def test_get_unseen_events_some_seen_from_other_mediums(self):
        # NOTE(review): the EventSeen rows below are created with
        # fixture-generated mediums (not self.medium), so all three events
        # are expected to remain unseen for self.medium — confirm intended.
        seen_from_other_medium_e = G(Event, context={})
        seen_from_medium_event = G(Event, context={}, source=self.source)
        unseen_e = G(Event, context={}, source=self.source)
        G(EventSeen, event=seen_from_other_medium_e)
        G(EventSeen, event=seen_from_medium_event)
        events = self.medium.get_filtered_events(seen=False)
        self.assertEquals(set(events), {unseen_e, seen_from_medium_event, seen_from_other_medium_e})
class MediumGetEventFiltersTest(TestCase):
    """
    Tests for Medium.get_filtered_events_queryset over two mediums with
    different subscriptions and a mix of seen/expired events.
    """
    def setUp(self):
        # Call the parent setup
        super(MediumGetEventFiltersTest, self).setUp()
        # Create some entities
        self.entity = G(Entity)
        self.entity2 = G(Entity)
        # Create some sources
        self.source = G(Source)
        self.source2 = G(Source)
        # Make a couple mediums
        self.medium = G(Medium)
        self.medium2 = G(Medium)
        # Make subscriptions to different sources and mediums
        G(Subscription, medium=self.medium, source=self.source, entity=self.entity, only_following=False)
        G(Subscription, medium=self.medium2, source=self.source2, entity=self.entity2, only_following=False)
        with freeze_time('2014-01-15'):
            self.event1 = G(Event, context={}, source=self.source)
            self.event2 = G(Event, context={}, source=self.source, time_expires=datetime(5000, 1, 1))
        with freeze_time('2014-01-17'):
            self.event3 = G(Event, context={}, source=self.source)
            self.event4 = G(Event, context={}, source=self.source2)
            self.event5 = G(Event, context={}, source=self.source2, time_expires=datetime(2014, 1, 17))
        # Mark one event as seen by the medium
        G(EventSeen, event=self.event1, medium=self.medium)
        self.actor = G(Entity)
        G(EventActor, event=self.event1, entity=self.actor)

    def _filtered(self, medium, **overrides):
        """Call medium.get_filtered_events_queryset with every filter defaulted
        to "no filtering" (all times, any seen state, expired included, any
        actor), then apply the per-test overrides."""
        filters = {
            'start_time': None,
            'end_time': None,
            'seen': None,
            'include_expired': True,
            'actor': None,
        }
        filters.update(overrides)
        return medium.get_filtered_events_queryset(**filters)

    def test_start_time(self):
        # Three of the five events were created on/after 2014-01-16
        self.assertEqual(self._filtered(self.medium, start_time=datetime(2014, 1, 16)).count(), 3)
        self.assertEqual(self._filtered(self.medium2, start_time=datetime(2014, 1, 16)).count(), 3)

    def test_end_time(self):
        # Two of the five events were created before 2014-01-16
        self.assertEqual(self._filtered(self.medium, end_time=datetime(2014, 1, 16)).count(), 2)
        self.assertEqual(self._filtered(self.medium2, end_time=datetime(2014, 1, 16)).count(), 2)

    def test_is_seen(self):
        # Only event1 is marked seen, and only for self.medium
        self.assertEqual(self._filtered(self.medium, seen=True).count(), 1)
        self.assertEqual(self._filtered(self.medium2, seen=True).count(), 0)

    def test_is_not_seen(self):
        events = self._filtered(self.medium, seen=False)
        self.assertEqual(events.count(), 4)
        # Make sure these are the events we expect
        event_ids = {event.id for event in events}
        expected_ids = {self.event2.id, self.event3.id, self.event4.id, self.event5.id}
        self.assertEqual(event_ids, expected_ids)
        # Mark these all as seen; medium1 then has no unseen events
        Event.objects.filter(id__in=expected_ids).mark_seen(self.medium)
        self.assertEqual(self._filtered(self.medium, seen=False).count(), 0)
        # Medium 2's events are all still unseen
        self.assertEqual(self._filtered(self.medium2, seen=False).count(), 5)
        # Deleting one seen-marker resurfaces exactly that event
        EventSeen.objects.filter(medium=self.medium, event=self.event3).delete()
        events = self._filtered(self.medium, seen=False)
        self.assertEqual(events.count(), 1)
        self.assertEqual(events[0].id, self.event3.id)
        # Re-mark it; nothing unseen again
        Event.objects.filter(id=self.event3.id).mark_seen(self.medium)
        self.assertEqual(self._filtered(self.medium, seen=False).count(), 0)
        # A newly-created event shows up as unseen for both mediums
        self.event6 = G(Event, context={}, source=self.source)
        events = self._filtered(self.medium, seen=False)
        self.assertEqual(events.count(), 1)
        self.assertEqual(events[0].id, self.event6.id)
        self.assertEqual(self._filtered(self.medium2, seen=False).count(), 6)

    def test_include_expires(self):
        self.assertEqual(self._filtered(self.medium, include_expired=True).count(), 5)
        self.assertEqual(self._filtered(self.medium2, include_expired=True).count(), 5)

    def test_dont_include_expires(self):
        # event5 expired on 2014-01-17 so it is filtered out
        self.assertEqual(self._filtered(self.medium, include_expired=False).count(), 4)
        self.assertEqual(self._filtered(self.medium2, include_expired=False).count(), 4)

    def test_actor(self):
        # Only event1 has self.actor attached
        self.assertEqual(self._filtered(self.medium, actor=self.actor).count(), 1)
class MediumFollowedByTest(TestCase):
    # Tests for Medium.followed_by: the set of entities an entity "follows"
    # (itself plus its super entities).
    def setUp(self):
        self.medium = N(Medium)
        self.superentity = G(Entity)
        self.sub1, self.sub2 = G(Entity), G(Entity)
        G(EntityRelationship, super_entity=self.superentity, sub_entity=self.sub1)
        G(EntityRelationship, super_entity=self.superentity, sub_entity=self.sub2)

    def test_self_in(self):
        # An entity follows itself
        followers = self.medium.followed_by(self.sub1)
        super_entity_in = followers.filter(id=self.sub1.id).exists()
        self.assertTrue(super_entity_in)

    def test_super_entities_in(self):
        # An entity follows its super entities
        followers = self.medium.followed_by(self.sub1)
        sub_entity_in = followers.filter(id=self.superentity.id).exists()
        self.assertTrue(sub_entity_in)

    def test_others_not_in(self):
        # Sibling entities are not followed
        followers = self.medium.followed_by(self.sub1)
        random_entity_in = followers.filter(id=self.sub2.id).exists()
        self.assertFalse(random_entity_in)

    def test_multiple_inputs_list(self):
        # Accepts a list of entity ids
        followers = self.medium.followed_by([self.sub1.id, self.sub2.id])
        self.assertEqual(followers.count(), 3)

    def test_multiple_inputs_qs(self):
        # Accepts an Entity queryset
        entities = Entity.objects.filter(id__in=[self.sub1.id, self.sub2.id])
        followers = self.medium.followed_by(entities)
        self.assertEqual(followers.count(), 3)
class MediumFollowersOfTest(TestCase):
    # Tests for Medium.followers_of: the set of entities following an entity
    # (itself plus its sub entities).
    def setUp(self):
        self.medium = N(Medium)
        self.superentity = G(Entity)
        self.sub1, self.sub2 = G(Entity), G(Entity)
        self.random_entity = G(Entity)
        G(EntityRelationship, super_entity=self.superentity, sub_entity=self.sub1)
        G(EntityRelationship, super_entity=self.superentity, sub_entity=self.sub2)

    def test_self_in(self):
        # An entity is a follower of itself
        followers = self.medium.followers_of(self.superentity)
        super_entity_in = followers.filter(id=self.superentity.id).exists()
        self.assertTrue(super_entity_in)

    def test_sub_entities_in(self):
        # Sub entities follow their super entity
        followers = self.medium.followers_of(self.superentity)
        sub_entity_in = followers.filter(id=self.sub1.id).exists()
        self.assertTrue(sub_entity_in)

    def test_others_not_in(self):
        # Unrelated entities are not followers
        followers = self.medium.followers_of(self.superentity)
        random_entity_in = followers.filter(id=self.random_entity.id).exists()
        self.assertFalse(random_entity_in)

    def test_multiple_inputs_list(self):
        # Accepts a list of entity ids
        followers = self.medium.followers_of([self.sub1.id, self.sub2.id])
        self.assertEqual(followers.count(), 2)

    def test_multiple_inputs_qs(self):
        # Accepts an Entity queryset
        entities = Entity.objects.filter(id__in=[self.sub1.id, self.sub2.id])
        followers = self.medium.followers_of(entities)
        self.assertEqual(followers.count(), 2)
class SubscriptionSubscribedEntitiesTest(TestCase):
    """Tests for Subscription.subscribed_entities with and without a
    sub_entity_kind (group vs individual subscriptions)."""

    def setUp(self):
        person_kind = G(EntityKind, name='person', display_name='person')
        parent = G(Entity)
        child_a = G(Entity, entity_kind=person_kind)
        child_b = G(Entity, entity_kind=person_kind)
        G(EntityRelationship, super_entity=parent, sub_entity=child_a)
        G(EntityRelationship, super_entity=parent, sub_entity=child_b)
        # Group subscription fans out to sub entities of the person kind;
        # individual subscription targets only the entity itself
        self.group_sub = N(Subscription, entity=parent, sub_entity_kind=person_kind)
        self.indiv_sub = N(Subscription, entity=parent, sub_entity_kind=None)

    def test_both_branches_return_queryset(self):
        group_result = self.group_sub.subscribed_entities()
        indiv_result = self.indiv_sub.subscribed_entities()
        # Both code paths must yield the same queryset type
        self.assertEqual(type(group_result), type(indiv_result))

    def test_length_group(self):
        self.assertEqual(self.group_sub.subscribed_entities().count(), 2)

    def test_length_indiv(self):
        self.assertEqual(self.indiv_sub.subscribed_entities().count(), 1)
class ContextRendererRenderTextOrHtmlTemplateTest(TestCase):
    # Tests for ContextRenderer.render_text_or_html_template. Template *paths*
    # are rendered through render_to_string, inline template strings through
    # django.template.Template, and a missing template yields ''.
    @patch('entity_event.models.render_to_string')
    def test_w_html_template_path(self, mock_render_to_string):
        cr = N(ContextRenderer, html_template_path='html_path')
        c = {'context': 'context'}
        cr.render_text_or_html_template(c, is_text=False)
        mock_render_to_string.assert_called_once_with('html_path', c)

    @patch('entity_event.models.render_to_string')
    def test_w_text_template_path(self, mock_render_to_string):
        cr = N(ContextRenderer, text_template_path='text_path')
        c = {'context': 'context'}
        cr.render_text_or_html_template(c, is_text=True)
        mock_render_to_string.assert_called_once_with('text_path', c)

    @patch.object(Template, '__init__', return_value=None)
    @patch.object(Template, 'render')
    def test_w_html_template(self, mock_render, mock_init):
        # Inline templates are compiled via Template(...) and then rendered
        cr = N(ContextRenderer, html_template='html_template')
        c = {'context': 'context'}
        cr.render_text_or_html_template(c, is_text=False)
        self.assertEqual(mock_render.call_count, 1)
        mock_init.assert_called_once_with('html_template')

    @patch.object(Template, '__init__', return_value=None)
    @patch.object(Template, 'render')
    def test_w_text_template(self, mock_render, mock_init):
        cr = N(ContextRenderer, text_template='text_template')
        c = {'context': 'context'}
        cr.render_text_or_html_template(c, is_text=True)
        self.assertEqual(mock_render.call_count, 1)
        mock_init.assert_called_once_with('text_template')

    def test_w_no_templates_text(self):
        # With no template configured, rendering returns an empty string
        cr = N(ContextRenderer)
        c = {'context': 'context'}
        self.assertEqual(cr.render_text_or_html_template(c, is_text=True), '')

    def test_w_no_templates_html(self):
        cr = N(ContextRenderer)
        c = {'context': 'context'}
        self.assertEqual(cr.render_text_or_html_template(c, is_text=False), '')
class ContextRendererRenderContextToTextHtmlTemplates(TestCase):
    """Tests that render_context_to_text_html_templates renders both variants
    and strips surrounding whitespace from each."""

    @patch.object(ContextRenderer, 'render_text_or_html_template', spec_set=True)
    def test_render_context_to_text_html_templates(self, mock_render):
        context = {'context': 'context'}
        rendered = ContextRenderer().render_context_to_text_html_templates(context)
        # Both members of the tuple are the stripped render result
        stripped = mock_render.return_value.strip()
        self.assertEqual(rendered, (stripped, stripped))
        # The text variant is rendered first, then the html variant
        expected_calls = [call(context, is_text=True), call(context, is_text=False)]
        self.assertEqual(mock_render.call_args_list, expected_calls)
class ContextRendererGetSerializedContextTests(TestCase):
    """Tests that get_serialized_context delegates to the default serializer."""

    @patch('entity_event.models.DefaultContextSerializer')
    def test_get_serialized_context(self, mock_serializer_cls):
        context = {'context': 'context'}
        result = ContextRenderer().get_serialized_context(context)
        # The serializer's .data attribute is passed straight through
        self.assertEqual(result, mock_serializer_cls.return_value.data)
        # The serializer is constructed exactly once, with the raw context
        mock_serializer_cls.assert_called_once_with(context)
class UnseenEventIdsTest(TestCase):
    # Tests for the _unseen_event_ids helper, which returns the ids of events
    # a given medium has not marked seen.
    def test_filters_seen(self):
        entity = G(Entity)
        medium = G(Medium)
        source = G(Source)
        G(Subscription, entity=entity, source=source, medium=medium)
        e1 = G(Event, context={}, source=source)
        e2 = G(Event, context={}, source=source)
        Event.objects.filter(id=e2.id).mark_seen(medium)
        # Only the event not marked seen remains
        unseen_ids = _unseen_event_ids(medium)
        self.assertEqual(set(unseen_ids), {e1.id})

    def test_multiple_mediums(self):
        entity1 = G(Entity)
        entity2 = G(Entity)
        source1 = G(Source)
        source2 = G(Source)
        medium1 = G(Medium)
        medium2 = G(Medium)
        G(Subscription, entity=entity1, source=source1, medium=medium1)
        G(Subscription, entity=entity1, source=source1, medium=medium2)
        G(Subscription, entity=entity2, source=source2, medium=medium2)
        event1 = G(Event, context={}, source=source1)
        event2 = G(Event, context={}, source=source1)
        event3 = G(Event, context={}, source=source2)
        event4 = G(Event, context={}, source=source2)
        # Seen-markers are tracked independently per medium
        Event.objects.filter(id=event2.id).mark_seen(medium1)
        Event.objects.filter(id=event2.id).mark_seen(medium2)
        Event.objects.filter(id=event3.id).mark_seen(medium2)
        unseen_ids = _unseen_event_ids(medium1)
        self.assertEqual(set(unseen_ids), {event1.id, event3.id, event4.id})
        unseen_ids = _unseen_event_ids(medium2)
        self.assertEqual(set(unseen_ids), {event1.id, event4.id})
class UnicodeTest(TestCase):
    """
    Verify the text representation of every model in the package.
    """

    def setUp(self):
        self.rendering_style = N(RenderingStyle, display_name='Test Render Group', name='test')
        self.context_renderer = N(ContextRenderer, name='Test Context Renderer')
        self.medium = G(Medium, display_name='Test Medium')
        self.source = G(Source, display_name='Test Source')
        self.source_group = G(SourceGroup, display_name='Test Source Group')
        self.entity = G(Entity, display_name='Test Entity')
        self.unsubscription = N(
            Unsubscription, entity=self.entity, medium=self.medium, source=self.source)
        self.subscription = N(
            Subscription, entity=self.entity, source=self.source, medium=self.medium)
        self.event = N(Event, source=self.source, context={}, id=1)
        self.event_actor = N(EventActor, event=self.event, entity=self.entity)
        self.event_seen = N(
            EventSeen, event=self.event, medium=self.medium, time_seen=datetime(2014, 1, 2))

    def test_RenderingStyle_formats(self):
        s = text_type(self.rendering_style)
        # Fixed: assertEquals is a deprecated alias of assertEqual
        self.assertEqual(s, 'Test Render Group test')

    def test_contextrenderer_formats(self):
        s = text_type(self.context_renderer)
        # Fixed: assertEquals is a deprecated alias of assertEqual
        self.assertEqual(s, 'Test Context Renderer')

    def test_medium_formats(self):
        s = text_type(self.medium)
        self.assertEqual(s, 'Test Medium')

    def test_source_formats(self):
        s = text_type(self.source)
        self.assertEqual(s, 'Test Source')

    def test_sourcegroup_formats(self):
        s = text_type(self.source_group)
        self.assertEqual(s, 'Test Source Group')

    def test_unsubscription_formats(self):
        s = text_type(self.unsubscription)
        self.assertEqual(s, '{0} from Test Source by Test Medium'.format(self.entity))

    def test_subscription_formats(self):
        s = text_type(self.subscription)
        self.assertEqual(s, '{0} to Test Source by Test Medium'.format(self.entity))

    def test_event_formats(self):
        s = text_type(self.event)
        # The timestamp suffix varies, so only the prefix is checked
        self.assertTrue(s.startswith('Test Source event at'))

    def test_eventactor_formats(self):
        s = text_type(self.event_actor)
        self.assertEqual(s, 'Event 1 - {0}'.format(self.entity))

    def test_event_seenformats(self):
        s = text_type(self.event_seen)
        self.assertEqual(s, 'Seen on Test Medium at 2014-01-02::00:00:00')
class SubscriptionQuerySetTest(TestCase):
    """
    Test the subscription query set class
    """

    def setUp(self):
        super(SubscriptionQuerySetTest, self).setUp()
        # Queryset instance under test
        self.queryset = SubscriptionQuerySet()

    @patch.object(SubscriptionQuerySet, 'select_related', autospec=True)
    def test_cache_related(self, select_related_mock):
        self.queryset.cache_related()

        # cache_related should delegate to select_related with every
        # foreign-key field of the subscription model
        select_related_mock.assert_called_once_with(
            self.queryset, 'medium', 'source', 'entity', 'sub_entity_kind')
| {
"content_hash": "920e6d4892dbc4a4cceb2eee3cb8db49",
"timestamp": "",
"source": "github",
"line_count": 1068,
"max_line_length": 117,
"avg_line_length": 37.6816479400749,
"alnum_prop": 0.6085876155451745,
"repo_name": "ambitioninc/django-entity-event",
"id": "80dfd436f6df73f795e6d80d967d691e8ad32bdf",
"size": "40244",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "entity_event/tests/model_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "112"
},
{
"name": "Python",
"bytes": "162059"
}
],
"symlink_target": ""
} |
import grpc
from grpc_testing import _common
class ServicerContext(grpc.ServicerContext):
    """Test-double implementation of grpc.ServicerContext.

    Wraps an internal test ``rpc`` state object driven by the grpc_testing
    framework. ``time`` is the framework's time abstraction and ``deadline``
    is an absolute deadline in that abstraction's units (or None).
    """

    def __init__(self, rpc, time, deadline):
        self._rpc = rpc
        self._time = time
        self._deadline = deadline

    def is_active(self):
        return self._rpc.is_active()

    def time_remaining(self):
        if self._rpc.is_active():
            if self._deadline is None:
                # No deadline set: unlimited time remaining.
                return None
            else:
                # Never report a negative remaining time.
                return max(0.0, self._deadline - self._time.time())
        else:
            # The RPC has terminated; no time remains.
            return 0.0

    def cancel(self):
        self._rpc.application_cancel()

    def add_callback(self, callback):
        return self._rpc.add_callback(callback)

    def invocation_metadata(self):
        return self._rpc.invocation_metadata()

    def peer(self):
        raise NotImplementedError()

    def peer_identities(self):
        raise NotImplementedError()

    def peer_identity_key(self):
        raise NotImplementedError()

    def auth_context(self):
        raise NotImplementedError()

    def set_compression(self, compression):
        # Fixed signature: grpc.ServicerContext.set_compression takes a
        # compression argument. Compression is still unsupported by the
        # testing framework, but the stub is now call-compatible with the
        # abstract interface.
        raise NotImplementedError()

    def send_initial_metadata(self, initial_metadata):
        initial_metadata_sent = self._rpc.send_initial_metadata(
            _common.fuss_with_metadata(initial_metadata))
        if not initial_metadata_sent:
            # Metadata can only be sent once, before any response message.
            raise ValueError(
                'ServicerContext.send_initial_metadata called too late!')

    def disable_next_message_compression(self):
        raise NotImplementedError()

    def set_trailing_metadata(self, trailing_metadata):
        self._rpc.set_trailing_metadata(
            _common.fuss_with_metadata(trailing_metadata))

    def abort(self, code, details):
        with self._rpc._condition:
            self._rpc._abort(code, details)

    def abort_with_status(self, status):
        raise NotImplementedError()

    def set_code(self, code):
        self._rpc.set_code(code)

    def set_details(self, details):
        self._rpc.set_details(details)
| {
"content_hash": "f88da657da74e84500628e190327da58",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 73,
"avg_line_length": 27.356164383561644,
"alnum_prop": 0.6204306459689535,
"repo_name": "pszemus/grpc",
"id": "6fa8c6b3ba894f15f40f0ca6d6b8e16983f07d46",
"size": "2575",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/grpcio_testing/grpc_testing/_server/_servicer_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "34391"
},
{
"name": "C",
"bytes": "2560265"
},
{
"name": "C#",
"bytes": "2049909"
},
{
"name": "C++",
"bytes": "31789862"
},
{
"name": "CMake",
"bytes": "626322"
},
{
"name": "CSS",
"bytes": "1519"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "Dockerfile",
"bytes": "158253"
},
{
"name": "Go",
"bytes": "34791"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "6907"
},
{
"name": "JavaScript",
"bytes": "62200"
},
{
"name": "M4",
"bytes": "51635"
},
{
"name": "Makefile",
"bytes": "1008745"
},
{
"name": "Mako",
"bytes": "5629"
},
{
"name": "Objective-C",
"bytes": "639187"
},
{
"name": "Objective-C++",
"bytes": "77720"
},
{
"name": "PHP",
"bytes": "474525"
},
{
"name": "PowerShell",
"bytes": "621"
},
{
"name": "Python",
"bytes": "2958642"
},
{
"name": "Ruby",
"bytes": "1037898"
},
{
"name": "Shell",
"bytes": "470586"
},
{
"name": "Swift",
"bytes": "3516"
},
{
"name": "TSQL",
"bytes": "4901"
},
{
"name": "XSLT",
"bytes": "9673"
}
],
"symlink_target": ""
} |
import numpy as np

from precipitation import read, sum_by_month
from precipitation import create_bargraph

# Calendar months 1..12 used as the x-axis categories.
month_numbers = np.arange(12) + 1

# Load the raw daily data for each year and aggregate it per month.
data_2012 = read("p12.dat")
data_2013 = read("p13.dat")
data_2014 = read("p14.dat")
monthly_2012 = sum_by_month(data_2012, month_numbers)
monthly_2013 = sum_by_month(data_2013, month_numbers)
monthly_2014 = sum_by_month(data_2014, month_numbers)

# Render the grouped bar chart comparing the three years.
create_bargraph(
    "out.png", month_numbers,
    ["2012", "2013", "2014"],
    monthly_2012, monthly_2013, monthly_2014)
| {
"content_hash": "1a5b25ac46506359465a855331912045",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 65,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.6634615384615384,
"repo_name": "gems-uff/noworkflow",
"id": "019391dd52371d64cb9611b0ccfa6cf21acfbb34",
"size": "416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "capture/noworkflow/resources/demo/annual_precipitation/step2/experiment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "176047"
},
{
"name": "HTML",
"bytes": "238"
},
{
"name": "JavaScript",
"bytes": "787748"
},
{
"name": "Jupyter Notebook",
"bytes": "5241520"
},
{
"name": "Prolog",
"bytes": "18527"
},
{
"name": "Python",
"bytes": "656680"
},
{
"name": "TypeScript",
"bytes": "122003"
}
],
"symlink_target": ""
} |
''' Wit.ai API-based Converters '''
import logging
import os
from abc import abstractproperty
from urllib.request import Request, urlopen
from urllib.error import HTTPError
from pliers.stimuli.text import ComplexTextStim
from pliers.utils import attempt_to_import, verify_dependencies
from pliers.converters.audio import AudioToTextConverter
from pliers.transformers.api import APITransformer
sr = attempt_to_import('speech_recognition', 'sr')
class SpeechRecognitionAPIConverter(APITransformer, AudioToTextConverter):
    ''' Uses the SpeechRecognition API, which interacts with several APIs,
    like Google and Wit, to run speech-to-text transcription on an audio file.

    Args:
        api_key (str): API key. Must be passed explicitly or stored in
            the environment variable specified in the _env_keys field.
        rate_limit (int): The minimum number of seconds required between
            transform calls on this Transformer.
    '''

    _log_attributes = ('api_key', 'recognize_method')
    VERSION = '1.0'

    @abstractproperty
    def recognize_method(self):
        # Name of the speech_recognition.Recognizer method to invoke
        # (e.g. 'recognize_wit'); must be supplied by subclasses.
        pass

    def __init__(self, api_key=None, rate_limit=None):
        verify_dependencies(['sr'])
        if api_key is None:
            try:
                # Fall back to the environment variable declared by the
                # subclass via _env_keys / env_keys.
                api_key = os.environ[self.env_keys[0]]
            except KeyError:
                raise ValueError("A valid API key must be passed when a"
                                 " SpeechRecognitionAPIConverter is initialized.")
        self.recognizer = sr.Recognizer()
        self.api_key = api_key
        super().__init__(rate_limit=rate_limit)

    def _convert(self, audio):
        # Transcribe the audio stimulus via the subclass-selected recognizer
        # method and wrap the resulting text in a ComplexTextStim.
        verify_dependencies(['sr'])
        with audio.get_filename() as filename:
            with sr.AudioFile(filename) as source:
                clip = self.recognizer.record(source)
        text = getattr(self.recognizer, self.recognize_method)(clip, self.api_key)
        return ComplexTextStim(text=text)
class WitTranscriptionConverter(SpeechRecognitionAPIConverter):
    ''' Speech-to-text transcription via the Wit.ai API. '''

    # NOTE(review): _env_keys is a plain string while the base class indexes
    # self.env_keys[0] -- presumably the Transformer base listifies _env_keys;
    # confirm against the base class.
    _env_keys = 'WIT_AI_API_KEY'
    recognize_method = 'recognize_wit'

    @property
    def api_keys(self):
        # All keys this converter needs; consumed by generic key validation.
        return [self.api_key]

    def check_valid_keys(self):
        ''' Return True if the configured API key authenticates against
        Wit.ai, False otherwise (the HTTP error is logged). '''
        url = "https://api.wit.ai/message?v=20160526&q=authenticate"
        request = Request(url, headers={
            "Authorization": "Bearer {}".format(self.api_key)
        })
        try:
            # Close the HTTP response explicitly instead of leaking the
            # underlying socket (urlopen results are context managers).
            with urlopen(request):
                return True
        except HTTPError as e:
            logging.warning(str(e))
            return False
| {
"content_hash": "cd7b952f10e8f25cd7554ba780b1c18d",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 82,
"avg_line_length": 32.425,
"alnum_prop": 0.6484194294525829,
"repo_name": "tyarkoni/pliers",
"id": "ff5fb3664ef82282a25fac8641b276377c894e18",
"size": "2594",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pliers/converters/api/wit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1051"
},
{
"name": "Python",
"bytes": "426286"
}
],
"symlink_target": ""
} |
from conans import ConanFile, CMake
class AversivePlusPlusProjectConan(ConanFile):
    """Conan recipe for the Aversive++ template test project."""

    name = "template-project"
    version = "0.1"
    settings = "os", "compiler", "build_type", "arch", "target"
    requires = "filter/0.1@AversivePlusPlus/dev"
    generators = "cmake"

    def build(self):
        """Configure and build the project with CMake."""
        cmake = CMake(self.settings)
        configure_cmd = 'cmake "%s" %s' % (self.conanfile_directory, cmake.command_line)
        build_cmd = 'cmake --build . %s' % cmake.build_config
        self.run(configure_cmd)
        self.run(build_cmd)
| {
"content_hash": "dae3d6c239b72b4a4bbcbac193a7b393",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 36.15384615384615,
"alnum_prop": 0.6531914893617021,
"repo_name": "AversivePlusPlus/AversivePlusPlus",
"id": "68d7b7b2ea6cf5f91774f26698c075fdc8dc5fc7",
"size": "470",
"binary": false,
"copies": "1",
"ref": "refs/heads/v17.02-tag",
"path": "modules/filter/test/conanfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arduino",
"bytes": "1292"
},
{
"name": "C",
"bytes": "147"
},
{
"name": "C++",
"bytes": "761305"
},
{
"name": "CMake",
"bytes": "47072"
},
{
"name": "Makefile",
"bytes": "29429"
},
{
"name": "Python",
"bytes": "39934"
}
],
"symlink_target": ""
} |
import os
import ast
import copy
import json
import logging
import textwrap
import calendar
import time
import six
import sys
from os.path import join as pjoin
from st2client import models
from st2client.commands import resource
from st2client.commands.resource import add_auth_token_to_kwargs_from_cli
from st2client.exceptions.operations import OperationFailureException
from st2client.formatters import table
from st2client.formatters import execution as execution_formatter
from st2client.utils import jsutil
from st2client.utils.date import format_isodate_for_user_timezone
from st2client.utils.date import parse as parse_isotime
from st2client.utils.color import format_status
LOG = logging.getLogger(__name__)
# Liveaction status constants, mirrored locally so the client does not
# depend on server-side st2 modules.
LIVEACTION_STATUS_REQUESTED = 'requested'
LIVEACTION_STATUS_SCHEDULED = 'scheduled'
LIVEACTION_STATUS_DELAYED = 'delayed'
LIVEACTION_STATUS_RUNNING = 'running'
LIVEACTION_STATUS_SUCCEEDED = 'succeeded'
LIVEACTION_STATUS_FAILED = 'failed'
LIVEACTION_STATUS_TIMED_OUT = 'timeout'
LIVEACTION_STATUS_ABANDONED = 'abandoned'
LIVEACTION_STATUS_CANCELING = 'canceling'
LIVEACTION_STATUS_CANCELED = 'canceled'

# Statuses which represent a finished (terminal) execution.
LIVEACTION_COMPLETED_STATES = [
    LIVEACTION_STATUS_SUCCEEDED,
    LIVEACTION_STATUS_FAILED,
    LIVEACTION_STATUS_TIMED_OUT,
    LIVEACTION_STATUS_CANCELED,
    LIVEACTION_STATUS_ABANDONED
]

# Parameters which should be masked when displaying action execution output
PARAMETERS_TO_MASK = [
    'password',
    'private_key'
]

# A list of environment variables which are never inherited when using run
# --inherit-env flag
ENV_VARS_BLACKLIST = [
    'pwd',
    'mail',
    'username',
    'user',
    'path',
    'home',
    'ps1',
    'shell',
    'pythonpath',
    'ssh_tty',
    'ssh_connection',
    'lang',
    'ls_colors',
    'logname',
    'oldpwd',
    'term',
    'xdg_session_id'
]

# Runner types whose executions are workflows (i.e. have child tasks).
WORKFLOW_RUNNER_TYPES = [
    'action-chain',
    'mistral-v2',
]
def format_parameters(value):
    """
    Mask the values of sensitive parameters (see PARAMETERS_TO_MASK) in an
    action parameters dict before it is displayed to the user.

    Note: the dict is modified in place and also returned. Non-dict values
    are returned unchanged.
    """
    # Mask sensitive parameters
    if not isinstance(value, dict):
        # No parameters, leave it as it is
        return value

    # Iterate over keys only -- the values are not needed for the check
    # (replacing values of existing keys during iteration is safe).
    for param_name in value:
        if param_name in PARAMETERS_TO_MASK:
            value[param_name] = '********'

    return value
# String for indenting etc.
WF_PREFIX = '+ '  # marks an execution that is a workflow (has children)
NON_WF_PREFIX = ' '  # padding so non-workflow ids stay aligned with workflow ids
INDENT_CHAR = ' '  # one unit of indentation per nesting level in the task tree
def format_wf_instances(instances):
    """
    Adds identification characters to a workflow and appropriately shifts
    the non-workflow instances. If no workflows are found does nothing.
    """
    # Only decorate ids when at least one instance is a workflow
    # (i.e. has a non-empty "children" attribute).
    if not any(getattr(instance, 'children', None) for instance in instances):
        return instances

    # Prefix workflow ids with WF_PREFIX and pad the others so they align.
    for instance in instances:
        if getattr(instance, 'children', None):
            prefix = WF_PREFIX
        else:
            prefix = NON_WF_PREFIX
        instance.id = prefix + instance.id
    return instances
def format_execution_statuses(instances):
    """Apply format_execution_status to every instance and return the list."""
    return [format_execution_status(instance) for instance in instances]
def format_execution_status(instance):
    """
    Augment instance "status" attribute with number of seconds which have elapsed for all the
    executions which are in running state and execution total run time for all the executions
    which have finished.
    """
    def to_epoch(isotime_string):
        # Convert an ISO8601 timestamp string into UTC epoch seconds.
        return calendar.timegm(parse_isotime(isotime_string).timetuple())

    start_timestamp = getattr(instance, 'start_timestamp', None)
    end_timestamp = getattr(instance, 'end_timestamp', None)

    if instance.status == LIVEACTION_STATUS_RUNNING and start_timestamp:
        # Still running: report elapsed time since the start.
        elapsed_seconds = int(time.time()) - to_epoch(instance.start_timestamp)
        instance.status = '%s (%ss elapsed)' % (instance.status, elapsed_seconds)
    elif instance.status in LIVEACTION_COMPLETED_STATES and start_timestamp and end_timestamp:
        # Finished: report total run time.
        elapsed_seconds = to_epoch(end_timestamp) - to_epoch(start_timestamp)
        instance.status = '%s (%ss elapsed)' % (instance.status, elapsed_seconds)

    return instance
class ActionBranch(resource.ResourceBranch):
    # CLI branch which wires up all "st2 action" subcommands.

    def __init__(self, description, app, subparsers, parent_parser=None):
        super(ActionBranch, self).__init__(
            models.Action, description, app, subparsers,
            parent_parser=parent_parser,
            commands={
                'list': ActionListCommand,
                'get': ActionGetCommand,
                'update': ActionUpdateCommand,
                'delete': ActionDeleteCommand
            })

        # Registers extended commands
        self.commands['enable'] = ActionEnableCommand(self.resource, self.app, self.subparsers)
        self.commands['disable'] = ActionDisableCommand(self.resource, self.app, self.subparsers)
        # "execute" manages its own help output, hence add_help=False.
        self.commands['execute'] = ActionRunCommand(
            self.resource, self.app, self.subparsers,
            add_help=False)
class ActionListCommand(resource.ContentPackResourceListCommand):
    # Columns shown by "st2 action list".
    display_attributes = ['ref', 'pack', 'description']
class ActionGetCommand(resource.ContentPackResourceGetCommand):
    # Show all attributes by default, in the order listed below.
    display_attributes = ['all']
    attribute_display_order = ['id', 'uid', 'ref', 'pack', 'name', 'description',
                               'enabled', 'entry_point', 'runner_type',
                               'parameters']
class ActionUpdateCommand(resource.ContentPackResourceUpdateCommand):
    # Inherits all behavior from the generic content pack update command.
    pass
class ActionEnableCommand(resource.ContentPackResourceEnableCommand):
    # Show all attributes of the enabled action, in the order listed below.
    display_attributes = ['all']
    attribute_display_order = ['id', 'ref', 'pack', 'name', 'description',
                               'enabled', 'entry_point', 'runner_type',
                               'parameters']
class ActionDisableCommand(resource.ContentPackResourceDisableCommand):
    # Show all attributes of the disabled action, in the order listed below.
    display_attributes = ['all']
    attribute_display_order = ['id', 'ref', 'pack', 'name', 'description',
                               'enabled', 'entry_point', 'runner_type',
                               'parameters']
class ActionDeleteCommand(resource.ContentPackResourceDeleteCommand):
    # Inherits all behavior from the generic content pack delete command.
    pass
class ActionRunCommandMixin(object):
"""
Mixin class which contains utility functions related to action execution.
"""
display_attributes = ['id', 'action.ref', 'context.user', 'parameters', 'status',
'start_timestamp', 'end_timestamp', 'result']
attribute_display_order = ['id', 'action.ref', 'context.user', 'parameters', 'status',
'start_timestamp', 'end_timestamp', 'result']
attribute_transform_functions = {
'start_timestamp': format_isodate_for_user_timezone,
'end_timestamp': format_isodate_for_user_timezone,
'parameters': format_parameters,
'status': format_status
}
poll_interval = 2 # how often to poll for execution completion when using sync mode
def get_resource(self, ref_or_id, **kwargs):
return self.get_resource_by_ref_or_id(ref_or_id=ref_or_id, **kwargs)
@add_auth_token_to_kwargs_from_cli
def run_and_print(self, args, **kwargs):
if self._print_help(args, **kwargs):
return
execution = self.run(args, **kwargs)
if args.async:
self.print_output('To get the results, execute:\n st2 execution get %s' %
(execution.id), six.text_type)
else:
self._print_execution_details(execution=execution, args=args, **kwargs)
if execution.status == 'failed':
# Exit with non zero if the action has failed
sys.exit(1)
def _add_common_options(self):
root_arg_grp = self.parser.add_mutually_exclusive_group()
# Display options
task_list_arg_grp = root_arg_grp.add_argument_group()
task_list_arg_grp.add_argument('--raw', action='store_true',
help='Raw output, don\'t shot sub-tasks for workflows.')
task_list_arg_grp.add_argument('--show-tasks', action='store_true',
help='Whether to show sub-tasks of an execution.')
task_list_arg_grp.add_argument('--depth', type=int, default=-1,
help='Depth to which to show sub-tasks. \
By default all are shown.')
task_list_arg_grp.add_argument('-w', '--width', nargs='+', type=int, default=None,
help='Set the width of columns in output.')
execution_details_arg_grp = root_arg_grp.add_mutually_exclusive_group()
detail_arg_grp = execution_details_arg_grp.add_mutually_exclusive_group()
detail_arg_grp.add_argument('--attr', nargs='+',
default=['id', 'status', 'parameters', 'result'],
help=('List of attributes to include in the '
'output. "all" or unspecified will '
'return all attributes.'))
detail_arg_grp.add_argument('-d', '--detail', action='store_true',
help='Display full detail of the execution in table format.')
result_arg_grp = execution_details_arg_grp.add_mutually_exclusive_group()
result_arg_grp.add_argument('-k', '--key',
help=('If result is type of JSON, then print specific '
'key-value pair; dot notation for nested JSON is '
'supported.'))
return root_arg_grp
def _print_execution_details(self, execution, args, **kwargs):
"""
Print the execution detail to stdout.
This method takes into account if an executed action was workflow or not
and formats the output accordingly.
"""
runner_type = execution.action.get('runner_type', 'unknown')
is_workflow_action = runner_type in WORKFLOW_RUNNER_TYPES
show_tasks = getattr(args, 'show_tasks', False)
raw = getattr(args, 'raw', False)
detail = getattr(args, 'detail', False)
key = getattr(args, 'key', None)
attr = getattr(args, 'attr', [])
if show_tasks and not is_workflow_action:
raise ValueError('--show-tasks option can only be used with workflow actions')
if not raw and not detail and (show_tasks or is_workflow_action):
self._run_and_print_child_task_list(execution=execution, args=args, **kwargs)
else:
instance = execution
if detail:
formatter = table.PropertyValueTable
else:
formatter = execution_formatter.ExecutionResult
if detail:
options = {'attributes': copy.copy(self.display_attributes)}
elif key:
options = {'attributes': ['result.%s' % (key)], 'key': key}
else:
options = {'attributes': attr}
options['json'] = args.json
options['attribute_transform_functions'] = self.attribute_transform_functions
self.print_output(instance, formatter, **options)
def _run_and_print_child_task_list(self, execution, args, **kwargs):
action_exec_mgr = self.app.client.managers['LiveAction']
instance = execution
options = {'attributes': ['id', 'action.ref', 'parameters', 'status', 'start_timestamp',
'end_timestamp']}
options['json'] = args.json
options['attribute_transform_functions'] = self.attribute_transform_functions
formatter = execution_formatter.ExecutionResult
kwargs['depth'] = args.depth
child_instances = action_exec_mgr.get_property(execution.id, 'children', **kwargs)
child_instances = self._format_child_instances(child_instances, execution.id)
child_instances = format_execution_statuses(child_instances)
if not child_instances:
# No child error, there might be a global error, include result in the output
options['attributes'].append('result')
# On failure we also want to include error message and traceback at the top level
if instance.status == 'failed':
status_index = options['attributes'].index('status')
if isinstance(instance.result, dict):
tasks = instance.result.get('tasks', [])
else:
tasks = []
top_level_error, top_level_traceback = self._get_top_level_error(live_action=instance)
if len(tasks) >= 1:
task_error, task_traceback = self._get_task_error(task=tasks[-1])
else:
task_error, task_traceback = None, None
if top_level_error:
# Top-level error
instance.error = top_level_error
instance.traceback = top_level_traceback
instance.result = 'See error and traceback.'
options['attributes'].insert(status_index + 1, 'error')
options['attributes'].insert(status_index + 2, 'traceback')
elif task_error:
# Task error
instance.error = task_error
instance.traceback = task_traceback
instance.result = 'See error and traceback.'
instance.failed_on = tasks[-1].get('name', 'unknown')
options['attributes'].insert(status_index + 1, 'error')
options['attributes'].insert(status_index + 2, 'traceback')
options['attributes'].insert(status_index + 3, 'failed_on')
# print root task
self.print_output(instance, formatter, **options)
# print child tasks
if child_instances:
self.print_output(child_instances, table.MultiColumnTable,
attributes=['id', 'status', 'task', 'action', 'start_timestamp'],
widths=args.width, json=args.json,
yaml=args.yaml,
attribute_transform_functions=self.attribute_transform_functions)
def _get_execution_result(self, execution, action_exec_mgr, args, **kwargs):
pending_statuses = [
LIVEACTION_STATUS_REQUESTED,
LIVEACTION_STATUS_SCHEDULED,
LIVEACTION_STATUS_RUNNING,
LIVEACTION_STATUS_CANCELING
]
if not args.async:
while execution.status in pending_statuses:
time.sleep(self.poll_interval)
if not args.json and not args.yaml:
sys.stdout.write('.')
sys.stdout.flush()
execution = action_exec_mgr.get_by_id(execution.id, **kwargs)
sys.stdout.write('\n')
if execution.status == LIVEACTION_STATUS_CANCELED:
return execution
return execution
def _get_top_level_error(self, live_action):
"""
Retrieve a top level workflow error.
:return: (error, traceback)
"""
if isinstance(live_action.result, dict):
error = live_action.result.get('error', None)
traceback = live_action.result.get('traceback', None)
else:
error = "See result"
traceback = "See result"
return error, traceback
def _get_task_error(self, task):
"""
Retrieve error message from the provided task.
:return: (error, traceback)
"""
if not task:
return None, None
result = task['result']
if isinstance(result, dict):
stderr = result.get('stderr', None)
error = result.get('error', None)
traceback = result.get('traceback', None)
error = error if error else stderr
else:
stderr = None
error = None
traceback = None
return error, traceback
def _get_action_parameters_from_args(self, action, runner, args):
"""
Build a dictionary with parameters which will be passed to the action by
parsing parameters passed to the CLI.
:param args: CLI argument.
:type args: ``object``
:rtype: ``dict``
"""
action_ref_or_id = action.ref
def read_file(file_path):
if not os.path.exists(file_path):
raise ValueError('File "%s" doesn\'t exist' % (file_path))
if not os.path.isfile(file_path):
raise ValueError('"%s" is not a file' % (file_path))
with open(file_path, 'rb') as fp:
content = fp.read()
return content
def transform_object(value):
# Also support simple key1=val1,key2=val2 syntax
if value.startswith('{'):
# Assume it's JSON
result = value = json.loads(value)
else:
pairs = value.split(',')
result = {}
for pair in pairs:
split = pair.split('=', 1)
if len(split) != 2:
continue
key, value = split
result[key] = value
return result
def transform_array(value):
try:
result = json.loads(value)
except ValueError:
result = [v.strip() for v in value.split(',')]
return result
transformer = {
'array': transform_array,
'boolean': (lambda x: ast.literal_eval(x.capitalize())),
'integer': int,
'number': float,
'object': transform_object,
'string': str
}
def normalize(name, value):
if name in runner.runner_parameters:
param = runner.runner_parameters[name]
if 'type' in param and param['type'] in transformer:
return transformer[param['type']](value)
if name in action.parameters:
param = action.parameters[name]
if 'type' in param and param['type'] in transformer:
return transformer[param['type']](value)
return value
result = {}
if not args.parameters:
return result
for idx in range(len(args.parameters)):
arg = args.parameters[idx]
if '=' in arg:
k, v = arg.split('=', 1)
# Attribute for files are prefixed with "@"
if k.startswith('@'):
k = k[1:]
is_file = True
else:
is_file = False
try:
if is_file:
# Files are handled a bit differently since we ship the content
# over the wire
file_path = os.path.normpath(pjoin(os.getcwd(), v))
file_name = os.path.basename(file_path)
content = read_file(file_path=file_path)
if action_ref_or_id == 'core.http':
# Special case for http runner
result['_file_name'] = file_name
result['file_content'] = content
else:
result[k] = content
else:
result[k] = normalize(k, v)
except Exception as e:
# TODO: Move transformers in a separate module and handle
# exceptions there
if 'malformed string' in str(e):
message = ('Invalid value for boolean parameter. '
'Valid values are: true, false')
raise ValueError(message)
else:
raise e
else:
result['cmd'] = ' '.join(args.parameters[idx:])
break
# Special case for http runner
if 'file_content' in result:
if 'method' not in result:
# Default to POST if a method is not provided
result['method'] = 'POST'
if 'file_name' not in result:
# File name not provided, use default file name
result['file_name'] = result['_file_name']
del result['_file_name']
if args.inherit_env:
result['env'] = self._get_inherited_env_vars()
return result
@add_auth_token_to_kwargs_from_cli
def _print_help(self, args, **kwargs):
# Print appropriate help message if the help option is given.
action_mgr = self.app.client.managers['Action']
action_exec_mgr = self.app.client.managers['LiveAction']
if args.help:
action_ref_or_id = getattr(args, 'ref_or_id', None)
action_exec_id = getattr(args, 'id', None)
if action_exec_id and not action_ref_or_id:
action_exec = action_exec_mgr.get_by_id(action_exec_id, **kwargs)
args.ref_or_id = action_exec.action
if action_ref_or_id:
try:
action = action_mgr.get_by_ref_or_id(args.ref_or_id, **kwargs)
if not action:
raise resource.ResourceNotFoundError('Action %s not found', args.ref_or_id)
runner_mgr = self.app.client.managers['RunnerType']
runner = runner_mgr.get_by_name(action.runner_type, **kwargs)
parameters, required, optional, _ = self._get_params_types(runner,
action)
print('')
print(textwrap.fill(action.description))
print('')
if required:
required = self._sort_parameters(parameters=parameters,
names=required)
print('Required Parameters:')
[self._print_param(name, parameters.get(name))
for name in required]
if optional:
optional = self._sort_parameters(parameters=parameters,
names=optional)
print('Optional Parameters:')
[self._print_param(name, parameters.get(name))
for name in optional]
except resource.ResourceNotFoundError:
print(('Action "%s" is not found. ' % args.ref_or_id) +
'Do "st2 action list" to see list of available actions.')
except Exception as e:
print('ERROR: Unable to print help for action "%s". %s' %
(args.ref_or_id, e))
else:
self.parser.print_help()
return True
return False
@staticmethod
def _print_param(name, schema):
if not schema:
raise ValueError('Missing schema for parameter "%s"' % (name))
wrapper = textwrap.TextWrapper(width=78)
wrapper.initial_indent = ' ' * 4
wrapper.subsequent_indent = wrapper.initial_indent
print(wrapper.fill(name))
wrapper.initial_indent = ' ' * 8
wrapper.subsequent_indent = wrapper.initial_indent
if 'description' in schema and schema['description']:
print(wrapper.fill(schema['description']))
if 'type' in schema and schema['type']:
print(wrapper.fill('Type: %s' % schema['type']))
if 'enum' in schema and schema['enum']:
print(wrapper.fill('Enum: %s' % ', '.join(schema['enum'])))
if 'default' in schema and schema['default'] is not None:
print(wrapper.fill('Default: %s' % schema['default']))
print('')
@staticmethod
def _get_params_types(runner, action):
runner_params = runner.runner_parameters
action_params = action.parameters
parameters = copy.copy(runner_params)
parameters.update(copy.copy(action_params))
required = set([k for k, v in six.iteritems(parameters) if v.get('required')])
def is_immutable(runner_param_meta, action_param_meta):
# If runner sets a param as immutable, action cannot override that.
if runner_param_meta.get('immutable', False):
return True
else:
return action_param_meta.get('immutable', False)
immutable = set()
for param in parameters.keys():
if is_immutable(runner_params.get(param, {}),
action_params.get(param, {})):
immutable.add(param)
required = required - immutable
optional = set(parameters.keys()) - required - immutable
return parameters, required, optional, immutable
def _format_child_instances(self, children, parent_id):
'''
The goal of this method is to add an indent at every level. This way the
WF is represented as a tree structure while in a list. For the right visuals
representation the list must be a DF traversal else the idents will end up
looking strange.
'''
# apply basic WF formating first.
children = format_wf_instances(children)
# setup a depth lookup table
depth = {parent_id: 0}
result = []
# main loop that indents each entry correctly
for child in children:
# make sure child.parent is in depth and while at it compute the
# right depth for indentation purposes.
if child.parent not in depth:
parent = None
for instance in children:
if WF_PREFIX in instance.id:
instance_id = instance.id[instance.id.index(WF_PREFIX) + len(WF_PREFIX):]
else:
instance_id = instance.id
if instance_id == child.parent:
parent = instance
if parent and parent.parent and parent.parent in depth:
depth[child.parent] = depth[parent.parent] + 1
else:
depth[child.parent] = 0
# now ident for the right visuals
child.id = INDENT_CHAR * depth[child.parent] + child.id
result.append(self._format_for_common_representation(child))
return result
def _format_for_common_representation(self, task):
'''
Formats a task for common representation between mistral and action-chain.
'''
# This really needs to be better handled on the back-end but that would be a bigger
# change so handling in cli.
context = getattr(task, 'context', None)
if context and 'chain' in context:
task_name_key = 'context.chain.name'
elif context and 'mistral' in context:
task_name_key = 'context.mistral.task_name'
# NOTE(review): if context contains neither 'chain' nor 'mistral',
# task_name_key is never bound and the expression below raises
# NameError -- confirm callers only pass chain/mistral tasks.
# Use LiveAction as the object so that the formatter lookup does not change.
# AKA HACK!
return models.action.LiveAction(**{
'id': task.id,
'status': task.status,
'task': jsutil.get_value(vars(task), task_name_key),
'action': task.action.get('ref', None),
'start_timestamp': task.start_timestamp,
'end_timestamp': getattr(task, 'end_timestamp', None)
})
def _sort_parameters(self, parameters, names):
    """Return *names* ordered by each parameter's sort value.

    The per-parameter ordering is delegated to
    :meth:`_get_parameter_sort_value`.

    :type parameters: ``dict`` of parameter metadata
    :type names: ``list`` or ``set``
    """
    def sort_key(param_name):
        return self._get_parameter_sort_value(parameters=parameters,
                                              name=param_name)

    return sorted(names, key=sort_key)
def _get_parameter_sort_value(self, parameters, name):
"""
Return a value which determines sort order for a particular parameter.
By default, parameters are sorted using "position" parameter attribute.
If this attribute is not available, parameter is sorted based on the
name.
"""
parameter = parameters.get(name, None)
if not parameter:
return None
sort_value = parameter.get('position', name)
return sort_value
def _get_inherited_env_vars(self):
    """Return a copy of the CLI process environment with every
    blacklisted variable removed (both lower- and upper-case forms).
    """
    env_vars = os.environ.copy()
    for name in ENV_VARS_BLACKLIST:
        env_vars.pop(name.lower(), None)
        env_vars.pop(name.upper(), None)
    return env_vars
class ActionRunCommand(ActionRunCommandMixin, resource.ResourceCommand):
# CLI command behind "run" / "execute": builds a LiveAction from the
# parsed arguments, schedules it and hands off to the mixin's result
# helper (which honours --async) before printing.
def __init__(self, resource, *args, **kwargs):
super(ActionRunCommand, self).__init__(
resource, kwargs.pop('name', 'execute'),
'A command to invoke an action manually.',
*args, **kwargs)
self.parser.add_argument('ref_or_id', nargs='?',
metavar='ref-or-id',
help='Action reference (pack.action_name) ' +
'or ID of the action.')
self.parser.add_argument('parameters', nargs='*',
help='List of keyword args, positional args, '
'and optional args for the action.')
self.parser.add_argument('-h', '--help',
action='store_true', dest='help',
help='Print usage for the given action.')
self._add_common_options()
# Trace / async / env flags only apply to the top-level run and
# execute commands, not to subclasses reusing this constructor.
if self.name in ['run', 'execute']:
self.parser.add_argument('--trace-tag', '--trace_tag',
help='A trace tag string to track execution later.',
dest='trace_tag', required=False)
self.parser.add_argument('--trace-id',
help='Existing trace id for this execution.',
dest='trace_id', required=False)
self.parser.add_argument('-a', '--async',
action='store_true', dest='async',
help='Do not wait for action to finish.')
self.parser.add_argument('-e', '--inherit-env',
action='store_true', dest='inherit_env',
help='Pass all the environment variables '
'which are accessible to the CLI as "env" '
'parameter to the action. Note: Only works '
'with python, local and remote runners.')
self.parser.add_argument('-u', '--user', type=str, default=None,
help='User under which to run the action (admins only).')
# "run" blocks until completion by default; "execute" does not.
if self.name == 'run':
self.parser.set_defaults(async=False)
else:
self.parser.set_defaults(async=True)
@add_auth_token_to_kwargs_from_cli
def run(self, args, **kwargs):
if not args.ref_or_id:
self.parser.error('Missing action reference or id')
action = self.get_resource(args.ref_or_id, **kwargs)
if not action:
raise resource.ResourceNotFoundError('Action "%s" cannot be found.'
% (args.ref_or_id))
runner_mgr = self.app.client.managers['RunnerType']
runner = runner_mgr.get_by_name(action.runner_type, **kwargs)
if not runner:
raise resource.ResourceNotFoundError('Runner type "%s" for action "%s" cannot be found.'
% (action.runner_type, action.name))
action_ref = '.'.join([action.pack, action.name])
action_parameters = self._get_action_parameters_from_args(action=action, runner=runner,
args=args)
execution = models.LiveAction()
execution.action = action_ref
execution.parameters = action_parameters
execution.user = args.user
# An explicit trace id takes precedence over a tag-only context.
if not args.trace_id and args.trace_tag:
execution.context = {'trace_context': {'trace_tag': args.trace_tag}}
if args.trace_id:
execution.context = {'trace_context': {'id_': args.trace_id}}
action_exec_mgr = self.app.client.managers['LiveAction']
execution = action_exec_mgr.create(execution, **kwargs)
execution = self._get_execution_result(execution=execution,
action_exec_mgr=action_exec_mgr,
args=args, **kwargs)
return execution
class ActionExecutionBranch(resource.ResourceBranch):
# Read-only resource branch for executions: list/get come from the
# generic branch machinery; re-run and cancel are registered manually.
def __init__(self, description, app, subparsers, parent_parser=None):
super(ActionExecutionBranch, self).__init__(
models.LiveAction, description, app, subparsers,
parent_parser=parent_parser, read_only=True,
commands={'list': ActionExecutionListCommand,
'get': ActionExecutionGetCommand})
# Register extended commands
self.commands['re-run'] = ActionExecutionReRunCommand(self.resource, self.app,
self.subparsers, add_help=False)
self.commands['cancel'] = ActionExecutionCancelCommand(self.resource, self.app,
self.subparsers, add_help=False)
POSSIBLE_ACTION_STATUS_VALUES = ('succeeded', 'running', 'scheduled', 'failed', 'canceled')
class ActionExecutionReadCommand(resource.ResourceCommand):
    """
    Base class for read / view commands (list and get).
    """

    @classmethod
    def _get_exclude_attributes(cls, args):
        """
        Retrieve a list of exclude attributes for particular command line arguments.
        """
        # startswith() so a child attribute selection (e.g. result.stdout,
        # trigger_instance.id) counts as requesting the parent attribute.
        result_requested = any(
            attr.startswith('result') for attr in args.attr)
        trigger_instance_requested = any(
            attr.startswith('trigger_instance') for attr in args.attr)

        exclude_attributes = []
        if not result_requested:
            exclude_attributes.append('result')
        if not trigger_instance_requested:
            exclude_attributes.append('trigger_instance')
        return exclude_attributes
class ActionExecutionListCommand(ActionExecutionReadCommand):
# Default columns rendered by the list command.
display_attributes = ['id', 'action.ref', 'context.user', 'status', 'start_timestamp',
'end_timestamp']
# Per-attribute formatters applied before display.
attribute_transform_functions = {
'start_timestamp': format_isodate_for_user_timezone,
'end_timestamp': format_isodate_for_user_timezone,
'parameters': format_parameters,
'status': format_status
}
def __init__(self, resource, *args, **kwargs):
super(ActionExecutionListCommand, self).__init__(
resource, 'list', 'Get the list of the 50 most recent %s.' %
resource.get_plural_display_name().lower(),
*args, **kwargs)
self.group = self.parser.add_argument_group()
self.parser.add_argument('-n', '--last', type=int, dest='last',
default=50,
help=('List N most recent %s.' %
resource.get_plural_display_name().lower()))
self.parser.add_argument('-s', '--sort', type=str, dest='sort_order',
default='descending',
help=('Sort %s by start timestamp, '
'asc|ascending (earliest first) '
'or desc|descending (latest first)' %
resource.get_plural_display_name().lower()))
# Filter options
self.group.add_argument('--action', help='Action reference to filter the list.')
self.group.add_argument('--status', help=('Only return executions with the provided status.'
' Possible values are \'%s\', \'%s\', \'%s\','
'\'%s\' or \'%s\''
'.' % POSSIBLE_ACTION_STATUS_VALUES))
self.group.add_argument('--trigger_instance',
help='Trigger instance id to filter the list.')
self.parser.add_argument('-tg', '--timestamp-gt', type=str, dest='timestamp_gt',
default=None,
help=('Only return executions with timestamp '
'greater than the one provided. '
'Use time in the format "2000-01-01T12:00:00.000Z".'))
self.parser.add_argument('-tl', '--timestamp-lt', type=str, dest='timestamp_lt',
default=None,
help=('Only return executions with timestamp '
'lower than the one provided. '
'Use time in the format "2000-01-01T12:00:00.000Z".'))
self.parser.add_argument('-l', '--showall', action='store_true',
help='')
# Display options
self.parser.add_argument('-a', '--attr', nargs='+',
default=self.display_attributes,
help=('List of attributes to include in the '
'output. "all" will return all '
'attributes.'))
self.parser.add_argument('-w', '--width', nargs='+', type=int,
default=None,
help=('Set the width of columns in output.'))
@add_auth_token_to_kwargs_from_cli
def run(self, args, **kwargs):
# Translate parsed CLI flags into API query parameters.
# Filtering options
if args.action:
kwargs['action'] = args.action
if args.status:
kwargs['status'] = args.status
if args.trigger_instance:
kwargs['trigger_instance'] = args.trigger_instance
if not args.showall:
# null is the magic string that translates to does not exist.
kwargs['parent'] = 'null'
if args.timestamp_gt:
kwargs['timestamp_gt'] = args.timestamp_gt
if args.timestamp_lt:
kwargs['timestamp_lt'] = args.timestamp_lt
if args.sort_order:
if args.sort_order in ['asc', 'ascending']:
kwargs['sort_asc'] = True
elif args.sort_order in ['desc', 'descending']:
kwargs['sort_desc'] = True
# We exclude "result" and "trigger_instance" attributes which can contain a lot of data
# since they are not displayed nor used which speeds the common operation substantially.
exclude_attributes = self._get_exclude_attributes(args=args)
exclude_attributes = ','.join(exclude_attributes)
kwargs['exclude_attributes'] = exclude_attributes
return self.manager.query(limit=args.last, **kwargs)
def run_and_print(self, args, **kwargs):
instances = format_wf_instances(self.run(args, **kwargs))
if not args.json and not args.yaml:
# Include elapsed time for running executions
instances = format_execution_statuses(instances)
self.print_output(reversed(instances), table.MultiColumnTable,
attributes=args.attr, widths=args.width,
json=args.json,
yaml=args.yaml,
attribute_transform_functions=self.attribute_transform_functions)
class ActionExecutionGetCommand(ActionRunCommandMixin, ActionExecutionReadCommand):
# Fetch and display a single execution by id.
display_attributes = ['id', 'action.ref', 'context.user', 'parameters', 'status',
'start_timestamp', 'end_timestamp', 'result', 'liveaction']
def __init__(self, resource, *args, **kwargs):
super(ActionExecutionGetCommand, self).__init__(
resource, 'get',
'Get individual %s.' % resource.get_display_name().lower(),
*args, **kwargs)
self.parser.add_argument('id',
help=('ID of the %s.' %
resource.get_display_name().lower()))
self._add_common_options()
@add_auth_token_to_kwargs_from_cli
def run(self, args, **kwargs):
# We exclude "result" and / or "trigger_instance" attribute if it's not explicitly
# requested by user either via "--attr" flag or by default.
exclude_attributes = self._get_exclude_attributes(args=args)
exclude_attributes = ','.join(exclude_attributes)
kwargs['params'] = {'exclude_attributes': exclude_attributes}
execution = self.get_resource_by_id(id=args.id, **kwargs)
return execution
@add_auth_token_to_kwargs_from_cli
def run_and_print(self, args, **kwargs):
try:
execution = self.run(args, **kwargs)
if not args.json and not args.yaml:
# Include elapsed time for running executions
execution = format_execution_status(execution)
except resource.ResourceNotFoundError:
self.print_not_found(args.id)
raise OperationFailureException('Execution %s not found.' % (args.id))
return self._print_execution_details(execution=execution, args=args, **kwargs)
class ActionExecutionCancelCommand(resource.ResourceCommand):
# Cancel one or more executions; prints one status line per id.
def __init__(self, resource, *args, **kwargs):
super(ActionExecutionCancelCommand, self).__init__(
resource, 'cancel', 'Cancel %s.' %
resource.get_plural_display_name().lower(),
*args, **kwargs)
self.parser.add_argument('ids',
nargs='+',
help=('IDs of the %ss to cancel.' %
resource.get_display_name().lower()))
def run(self, args, **kwargs):
# Collect (id, response) pairs; the API uses DELETE for cancellation.
responses = []
for execution_id in args.ids:
response = self.manager.delete_by_id(execution_id)
responses.append([execution_id, response])
return responses
@add_auth_token_to_kwargs_from_cli
def run_and_print(self, args, **kwargs):
responses = self.run(args, **kwargs)
for execution_id, response in responses:
self._print_result(execution_id=execution_id, response=response)
def _print_result(self, execution_id, response):
# A response with a faultstring means the request was accepted but
# reported an error; a truthy response without one means success;
# a falsy response means the cancel could not be performed.
if response and 'faultstring' in response:
message = response.get('faultstring', 'Cancellation requested for %s with id %s.' %
(self.resource.get_display_name().lower(), execution_id))
elif response:
message = '%s with id %s canceled.' % (self.resource.get_display_name().lower(),
execution_id)
else:
message = 'Cannot cancel %s with id %s.' % (self.resource.get_display_name().lower(),
execution_id)
print(message)
class ActionExecutionReRunCommand(ActionRunCommandMixin, resource.ResourceCommand):
# Re-run an existing execution, optionally overriding parameters and
# restricting the re-run to specific workflow tasks.
def __init__(self, resource, *args, **kwargs):
super(ActionExecutionReRunCommand, self).__init__(
resource, kwargs.pop('name', 're-run'),
'A command to re-run a particular action.',
*args, **kwargs)
self.parser.add_argument('id', nargs='?',
metavar='id',
help='ID of action execution to re-run ')
self.parser.add_argument('parameters', nargs='*',
help='List of keyword args, positional args, '
'and optional args for the action.')
self.parser.add_argument('--tasks', nargs='*',
help='Name of the workflow tasks to re-run.')
self.parser.add_argument('--no-reset', dest='no_reset', nargs='*',
help='Name of the with-items tasks to not reset. This only '
'applies to Mistral workflows. By default, all iterations '
'for with-items tasks is rerun. If no reset, only failed '
' iterations are rerun.')
self.parser.add_argument('-a', '--async',
action='store_true', dest='async',
help='Do not wait for action to finish.')
self.parser.add_argument('-e', '--inherit-env',
action='store_true', dest='inherit_env',
help='Pass all the environment variables '
'which are accessible to the CLI as "env" '
'parameter to the action. Note: Only works '
'with python, local and remote runners.')
self.parser.add_argument('-h', '--help',
action='store_true', dest='help',
help='Print usage for the given action.')
self._add_common_options()
@add_auth_token_to_kwargs_from_cli
def run(self, args, **kwargs):
# Resolve the original execution first so parameter parsing can use
# the action's and runner's schemas.
existing_execution = self.manager.get_by_id(args.id, **kwargs)
if not existing_execution:
raise resource.ResourceNotFoundError('Action execution with id "%s" cannot be found.' %
(args.id))
action_mgr = self.app.client.managers['Action']
runner_mgr = self.app.client.managers['RunnerType']
action_exec_mgr = self.app.client.managers['LiveAction']
action_ref = existing_execution.action['ref']
action = action_mgr.get_by_ref_or_id(action_ref)
runner = runner_mgr.get_by_name(action.runner_type)
action_parameters = self._get_action_parameters_from_args(action=action, runner=runner,
args=args)
execution = action_exec_mgr.re_run(execution_id=args.id,
parameters=action_parameters,
tasks=args.tasks,
no_reset=args.no_reset,
**kwargs)
execution = self._get_execution_result(execution=execution,
action_exec_mgr=action_exec_mgr,
args=args, **kwargs)
return execution
| {
"content_hash": "41f3bc97aa20e966082b7391adae8cab",
"timestamp": "",
"source": "github",
"line_count": 1166,
"max_line_length": 100,
"avg_line_length": 41.38421955403088,
"alnum_prop": 0.545923653997596,
"repo_name": "lakshmi-kannan/st2",
"id": "03b43c34c2874fa25f243d82ebe1f8f2c7639afb",
"size": "49034",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "st2client/st2client/commands/action.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "41834"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "3895413"
},
{
"name": "Shell",
"bytes": "40304"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
from django.contrib import messages
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from core.spaces.models import Space
from e_cidadania import settings
def index_view(request):
"""
Main view for the index page. It's separated from the urls.py file
because using direct_to_template in urls.py doesn't refresh the content
(it's loaded only once).
"""
extra_context = {
'version': settings.__version__,
'status': settings.__status__,
'debug_mode': settings.DEBUG,
#'cache_timeout': 500,
}
# Anonymous visitors see the public landing page with a sign-up hint;
# authenticated users are bounced straight to their profile overview.
# NOTE(review): is_anonymous is called as a method here (old Django
# style) -- confirm against the Django version this project pins.
if request.user.is_anonymous():
messages.warning(request, _("Hi! It seems that it's your first time \
here. Maybe you want to <a href=\"/accounts/register\">register</a> \
or <a href=\"/accounts/login/\">login</a> if you have an account."))
return render_to_response('site_index.html', extra_context,
context_instance=RequestContext(request))
else:
return HttpResponseRedirect(reverse('profile_overview'))
| {
"content_hash": "9910eba2a343f8370d50876ccc74d10b",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 77,
"avg_line_length": 36.088235294117645,
"alnum_prop": 0.6797066014669927,
"repo_name": "cidadania/ecidadania-ng",
"id": "1eaf1667ff68dbf80a4dcf9a95a5dfbc0d0651a8",
"size": "1890",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/ecidadania/views/index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "41262"
},
{
"name": "HTML",
"bytes": "85966"
},
{
"name": "JavaScript",
"bytes": "3818"
},
{
"name": "Python",
"bytes": "148480"
},
{
"name": "Ruby",
"bytes": "946"
}
],
"symlink_target": ""
} |
import errno
import os
import shutil
import warnings
def mkdir_p(path):
    """Create *path* (including missing parents), like ``mkdir -p``.

    A directory that already exists is not an error; every other
    OSError (permissions, existing non-directory, ...) propagates.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Re-raise unless this is exactly the "already a directory" case.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def rm_rf(path):
    """Remove *path* like ``rm -rf``: directories recursively, plain
    files directly, and silently do nothing if it does not exist.
    """
    if os.path.isdir(path):
        shutil.rmtree(path)
        return
    if os.path.exists(path):
        os.remove(path)
def deprecated(func):
    """This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
    when the function is used.

    The warning is attributed to the *caller* of the deprecated
    function (stacklevel=2) so the report points at the code that
    needs updating rather than at this wrapper.
    """
    def new_func(*args, **kwargs):
        # stacklevel=2 was missing before: the warning used to be
        # reported against this wrapper's own frame, which is useless
        # for locating the offending call site.
        warnings.warn("Call to deprecated function {}.".format(func.__name__),
                      category=DeprecationWarning,
                      stacklevel=2)
        return func(*args, **kwargs)
    # Preserve the wrapped function's identity for introspection.
    new_func.__name__ = func.__name__
    new_func.__doc__ = func.__doc__
    new_func.__dict__.update(func.__dict__)
    return new_func
| {
"content_hash": "04715e3de94bf3e90ff8467445085f47",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 78,
"avg_line_length": 25.685714285714287,
"alnum_prop": 0.6006674082313682,
"repo_name": "inodb/revmut",
"id": "4553d66a57f30d54510bbd8962627ba78c009299",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "revmut/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "102820"
}
],
"symlink_target": ""
} |
"""Tests for the SSH syslog plugin."""
import unittest
from tests.parsers.syslog_plugins import test_lib
class SSHSyslogPluginTest(test_lib.SyslogPluginTestCase):
"""Tests for the SSH syslog plugin."""
def testParse(self):
"""Tests the Parse function."""
# Parse the fixture and check container counts first, then spot-check
# individual event data entries by index.
storage_writer = self._ParseFileWithPlugin(['syslog_ssh.log'], 'ssh')
number_of_event_data = storage_writer.GetNumberOfAttributeContainers(
'event_data')
self.assertEqual(number_of_event_data, 9)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 9)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
# Entry 0: generic syslog line (no SSH-specific event data).
expected_event_values = {
'data_type': 'syslog:line',
'last_written_time': '0000-03-11T00:00:00'}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 0)
self.CheckEventData(event_data, expected_event_values)
# Entry 1: successful public-key login.
expected_event_values = {
'address': '192.168.0.1',
'body': (
'Accepted publickey for plaso from 192.168.0.1 port 59229 ssh2: '
'RSA 00:aa:bb:cc:dd:ee:ff:11:22:33:44:55:66:77:88:99'),
'data_type': 'syslog:ssh:login',
'fingerprint': 'RSA 00:aa:bb:cc:dd:ee:ff:11:22:33:44:55:66:77:88:99',
'last_written_time': '0000-03-11T19:26:39'}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 1)
self.CheckEventData(event_data, expected_event_values)
# Entry 3: failed connection from an IPv6 address.
expected_event_values = {
'address': '001:db8:a0b:12f0::1',
'data_type': 'syslog:ssh:failed_connection',
'last_written_time': '0000-03-11T22:55:30',
'port': '8759'}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 3)
self.CheckEventData(event_data, expected_event_values)
# Entry 4: opened connection.
expected_event_values = {
'address': '188.124.3.41',
'data_type': 'syslog:ssh:opened_connection',
'last_written_time': '0000-03-11T22:55:31'}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 4)
self.CheckEventData(event_data, expected_event_values)
# Allow running this test module directly.
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "f43ca889cde05da8ace74c0f7b6e8f38",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 77,
"avg_line_length": 35.13235294117647,
"alnum_prop": 0.6688991209711176,
"repo_name": "log2timeline/plaso",
"id": "7061bc9f631961c06ae700e61cf08aeb21c39a2a",
"size": "2436",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/parsers/syslog_plugins/ssh.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4301"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1305"
},
{
"name": "Python",
"bytes": "5345186"
},
{
"name": "Shell",
"bytes": "27279"
},
{
"name": "YARA",
"bytes": "507"
}
],
"symlink_target": ""
} |
import datetime
from haystack import indexes
from .models import Ticket
class TicketIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over Ticket objects.

    The document field is an edge n-gram built from the index template,
    so partial-word queries can match. The remaining fields are stored
    but not indexed: they exist purely so result rendering does not
    need to hit the database.
    """

    # text = indexes.CharField(document=True, use_template=True)
    text = indexes.EdgeNgramField(indexed=True, document=True, use_template=True)
    name = indexes.CharField(stored=True, indexed=False, model_attr='name')
    detail = indexes.CharField(stored=True, indexed=False, model_attr='detail')
    price = indexes.CharField(stored=True, indexed=False, model_attr='price')
    expire_date = indexes.DateField(stored=True, indexed=False, model_attr='expire_date')
    currency = indexes.CharField(stored=True, indexed=False, model_attr='currency')
    ticket_image_url = indexes.CharField(stored=True, indexed=False, model_attr='ticket_image_url')
    available = indexes.BooleanField(stored=True, indexed=False, model_attr='available')
    # name = indexes.EdgeNgramField(model_attr='name')
    # detail = indexes.EdgeNgramField(model_attr='detail')

    def get_model(self):
        """Model class covered by this index."""
        return Ticket

    def index_queryset(self, using=None):
        """Restrict indexing to tickets currently marked available."""
        return self.get_model().objects.filter(available=True)
"content_hash": "3e97f64cb2237cf43d4d8fd2c8ad1a18",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 99,
"avg_line_length": 46.04,
"alnum_prop": 0.7341442224152911,
"repo_name": "Go-In/go-coup",
"id": "4e4262c787cda313df3dac43c5913e8ce64f6758",
"size": "1151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/storemanage/search_indexes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "92272"
},
{
"name": "Go",
"bytes": "4452"
},
{
"name": "HTML",
"bytes": "81576"
},
{
"name": "JavaScript",
"bytes": "46370"
},
{
"name": "Python",
"bytes": "95161"
},
{
"name": "Shell",
"bytes": "172"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import copy
import cPickle
import gzip
import os
import urllib
import random
import stat
import subprocess
import sys
import timeit
import numpy
import theano
from theano import tensor as T
# Otherwise the deepcopy fails
import sys
sys.setrecursionlimit(1500)
# Root directory of the ATIS data files; overridable via the ATISDATA
# environment variable, defaults to <repo root>/data.
PREFIX = os.getenv(
'ATISDATA',
os.path.join(os.path.split(os.path.abspath(os.path.dirname(__file__)))[0],
'data'))
# utils functions
def shuffle(lol, seed):
    '''
    Shuffle every list in *lol* in place, all with the same ordering.

    lol :: list of lists
    seed :: RNG seed

    Re-seeding with the identical *seed* before each shuffle gives every
    list the exact same permutation, so parallel lists (e.g. word and
    label sequences) stay aligned with one another.
    '''
    for sequence in lol:
        random.seed(seed)
        random.shuffle(sequence)
# start-snippet-1
def contextwin(l, win):
    '''
    win :: odd int >= 1, size of the context window
    l :: array containing the word indexes

    Return one window of *win* indexes per word, centred on that word;
    positions that fall outside the sentence are padded with -1.
    '''
    assert (win % 2) == 1
    assert win >= 1
    indexes = list(l)
    pad = [-1] * (win // 2)
    padded = pad + indexes + pad
    windows = [padded[start:start + win] for start in range(len(indexes))]
    assert len(windows) == len(indexes)
    return windows
# end-snippet-1
# data loading functions
def atisfold(fold):
    """Load ATIS cross-validation fold *fold* (0-4).

    Reads the gzipped pickle under PREFIX and returns the stored
    ``(train_set, valid_set, test_set, dicts)`` tuple.
    """
    assert fold in range(5)
    filename = os.path.join(PREFIX, 'atis.fold' + str(fold) + '.pkl.gz')
    f = gzip.open(filename, 'rb')
    try:
        # Fix: the gzip handle was previously never closed (leaked).
        train_set, valid_set, test_set, dicts = cPickle.load(f)
    finally:
        f.close()
    return train_set, valid_set, test_set, dicts
# metrics function using conlleval.pl
def conlleval(p, g, w, filename, script_path):
    '''
    INPUT:
    p :: predictions
    g :: groundtruth
    w :: corresponding words

    OUTPUT:
    filename :: name of the file where the predictions
    are written. it will be the input of conlleval.pl script
    for computing the performance in terms of precision
    recall and f1 score

    OTHER:
    script_path :: path to the directory containing the
    conlleval.pl script
    '''
    out = ''
    for sl, sp, sw in zip(g, p, w):
        out += 'BOS O O\n'
        # Fix: the inner loop used to rebind the outer parameter "w";
        # distinct names avoid that shadowing.
        for word_label, word_pred, word in zip(sl, sp, sw):
            out += word + ' ' + word_label + ' ' + word_pred + '\n'
        out += 'EOS O O\n\n'
    f = open(filename, 'w')
    try:
        # Fix: writelines() on a str wrote one character per call;
        # write() emits the identical content in a single call.
        f.write(out)
    finally:
        f.close()
    return get_perf(filename, script_path)
def download(origin, destination):
'''
download the corresponding atis file
from http://www-etud.iro.umontreal.ca/~mesnilgr/atis/
'''
# Python 2 print statement: announce progress before the blocking fetch.
print 'Downloading data from %s' % origin
urllib.urlretrieve(origin, destination)
def get_perf(filename, folder):
''' run conlleval.pl perl script to obtain
precision/recall and F1 score '''
_conlleval = os.path.join(folder, 'conlleval.pl')
# Fetch the scoring script on first use and make it executable.
if not os.path.isfile(_conlleval):
url = 'http://www-etud.iro.umontreal.ca/~mesnilgr/atis/conlleval.pl'
download(url, _conlleval)
os.chmod(_conlleval, stat.S_IRWXU) # give the execute permissions
proc = subprocess.Popen(["perl",
_conlleval],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
# Feed the predictions file to the script on stdin.
stdout, _ = proc.communicate(''.join(open(filename).readlines()))
# NOTE(review): if no output line contains 'accuracy', "out" is never
# bound and the float() calls below raise NameError -- confirm the
# script always emits an accuracy line.
for line in stdout.split('\n'):
if 'accuracy' in line:
out = line.split()
break
precision = float(out[6][:-2])
recall = float(out[8][:-2])
f1score = float(out[10])
return {'p': precision, 'r': recall, 'f1': f1score}
# start-snippet-2
class RNNSLU(object):
''' elman neural net model '''
def __init__(self, nh, nc, ne, de, cs):
'''
nh :: dimension of the hidden layer
nc :: number of classes
ne :: number of word embeddings in the vocabulary
de :: dimension of the word embeddings
cs :: word window context size
'''
# parameters of the model
self.emb = theano.shared(name='embeddings',
value=0.2 * numpy.random.uniform(-1.0, 1.0,
(ne+1, de))
# add one for padding at the end
.astype(theano.config.floatX))
self.wx = theano.shared(name='wx',
value=0.2 * numpy.random.uniform(-1.0, 1.0,
(de * cs, nh))
.astype(theano.config.floatX))
self.wh = theano.shared(name='wh',
value=0.2 * numpy.random.uniform(-1.0, 1.0,
(nh, nh))
.astype(theano.config.floatX))
self.w = theano.shared(name='w',
value=0.2 * numpy.random.uniform(-1.0, 1.0,
(nh, nc))
.astype(theano.config.floatX))
self.bh = theano.shared(name='bh',
value=numpy.zeros(nh,
dtype=theano.config.floatX))
self.b = theano.shared(name='b',
value=numpy.zeros(nc,
dtype=theano.config.floatX))
self.h0 = theano.shared(name='h0',
value=numpy.zeros(nh,
dtype=theano.config.floatX))
# bundle
self.params = [self.emb, self.wx, self.wh, self.w,
self.bh, self.b, self.h0]
# end-snippet-2
# as many columns as context window size
# as many lines as words in the sentence
# start-snippet-3
idxs = T.imatrix()
x = self.emb[idxs].reshape((idxs.shape[0], de*cs))
y_sentence = T.ivector('y_sentence') # labels
# end-snippet-3 start-snippet-4
# One Elman step: new hidden state from current input window and
# previous hidden state, plus the softmax class distribution.
def recurrence(x_t, h_tm1):
h_t = T.nnet.sigmoid(T.dot(x_t, self.wx)
+ T.dot(h_tm1, self.wh) + self.bh)
s_t = T.nnet.softmax(T.dot(h_t, self.w) + self.b)
return [h_t, s_t]
[h, s], _ = theano.scan(fn=recurrence,
sequences=x,
outputs_info=[self.h0, None],
n_steps=x.shape[0])
p_y_given_x_sentence = s[:, 0, :]
y_pred = T.argmax(p_y_given_x_sentence, axis=1)
# end-snippet-4
# cost and gradients and learning rate
# start-snippet-5
lr = T.scalar('lr')
# Negative log-likelihood of the correct label at every time step.
sentence_nll = -T.mean(T.log(p_y_given_x_sentence)
[T.arange(x.shape[0]), y_sentence])
sentence_gradients = T.grad(sentence_nll, self.params)
sentence_updates = OrderedDict((p, p - lr*g)
for p, g in
zip(self.params, sentence_gradients))
# end-snippet-5
# theano functions to compile
# start-snippet-6
self.classify = theano.function(inputs=[idxs], outputs=y_pred)
self.sentence_train = theano.function(inputs=[idxs, y_sentence, lr],
outputs=sentence_nll,
updates=sentence_updates)
# end-snippet-6 start-snippet-7
# Keep every embedding vector on the unit sphere after each update.
self.normalize = theano.function(inputs=[],
updates={self.emb:
self.emb /
T.sqrt((self.emb**2)
.sum(axis=1))
.dimshuffle(0, 'x')})
# end-snippet-7
def train(self, x, y, window_size, learning_rate):
# Expand the sentence into context windows, do one SGD step, then
# re-normalize the embeddings.
cwords = contextwin(x, window_size)
words = map(lambda x: numpy.asarray(x).astype('int32'), cwords)
labels = y
self.sentence_train(words, labels, learning_rate)
self.normalize()
def save(self, folder):
# Persist each shared parameter as <name>.npy inside *folder*.
for param in self.params:
numpy.save(os.path.join(folder,
param.name + '.npy'), param.get_value())
def load(self, folder):
for param in self.params:
param.set_value(numpy.load(os.path.join(folder,
param.name + '.npy')))
def main(param=None):
"""Train the Elman RNN slot-filling model on ATIS with early stopping.

*param* is a dict of hyper-parameters; when omitted, the defaults
below (fold, learning rate, window size, ...) are used.
"""
if not param:
param = {
'fold': 3,
# 5 folds 0,1,2,3,4
'data': 'atis',
'lr': 0.0970806646812754,
'verbose': 1,
'decay': True,
# decay on the learning rate if improvement stops
'win': 7,
# number of words in the context window
'nhidden': 200,
# number of hidden units
'seed': 345,
'emb_dimension': 50,
# dimension of word embedding
'nepochs': 60,
# 60 is recommended
'savemodel': False}
print param
folder_name = os.path.basename(__file__).split('.')[0]
folder = os.path.join(os.path.dirname(__file__), folder_name)
if not os.path.exists(folder):
os.mkdir(folder)
# load the dataset
train_set, valid_set, test_set, dic = atisfold(param['fold'])
idx2label = dict((k, v) for v, k in dic['labels2idx'].iteritems())
idx2word = dict((k, v) for v, k in dic['words2idx'].iteritems())
train_lex, train_ne, train_y = train_set
valid_lex, valid_ne, valid_y = valid_set
test_lex, test_ne, test_y = test_set
vocsize = len(set(reduce(lambda x, y: list(x) + list(y),
train_lex + valid_lex + test_lex)))
nclasses = len(set(reduce(lambda x, y: list(x)+list(y),
train_y + test_y + valid_y)))
nsentences = len(train_lex)
groundtruth_valid = [map(lambda x: idx2label[x], y) for y in valid_y]
words_valid = [map(lambda x: idx2word[x], w) for w in valid_lex]
groundtruth_test = [map(lambda x: idx2label[x], y) for y in test_y]
words_test = [map(lambda x: idx2word[x], w) for w in test_lex]
# instanciate the model
numpy.random.seed(param['seed'])
random.seed(param['seed'])
rnn = RNNSLU(nh=param['nhidden'],
nc=nclasses,
ne=vocsize,
de=param['emb_dimension'],
cs=param['win'])
# train with early stopping on validation set
best_f1 = -numpy.inf
param['clr'] = param['lr']
for e in xrange(param['nepochs']):
# shuffle
shuffle([train_lex, train_ne, train_y], param['seed'])
param['ce'] = e
tic = timeit.default_timer()
for i, (x, y) in enumerate(zip(train_lex, train_y)):
# NOTE(review): leftover debug output -- prints every sentence
# length each epoch; consider removing.
print x.shape[0]
rnn.train(x, y, param['win'], param['clr'])
print '[learning] epoch %i >> %2.2f%%' % (
e, (i + 1) * 100. / nsentences),
print 'completed in %.2f (sec) <<\r' % (timeit.default_timer() - tic),
sys.stdout.flush()
# evaluation // back into the real world : idx -> words
predictions_test = [map(lambda x: idx2label[x],
rnn.classify(numpy.asarray(
contextwin(x, param['win'])).astype('int32')))
for x in test_lex]
predictions_valid = [map(lambda x: idx2label[x],
rnn.classify(numpy.asarray(
contextwin(x, param['win'])).astype('int32')))
for x in valid_lex]
# evaluation // compute the accuracy using conlleval.pl
res_test = conlleval(predictions_test,
groundtruth_test,
words_test,
folder + '/current.test.txt',
folder)
res_valid = conlleval(predictions_valid,
groundtruth_valid,
words_valid,
folder + '/current.valid.txt',
folder)
# Keep the best model (by validation F1) and its scores.
if res_valid['f1'] > best_f1:
if param['savemodel']:
rnn.save(folder)
best_rnn = copy.deepcopy(rnn)
best_f1 = res_valid['f1']
if param['verbose']:
print('NEW BEST: epoch', e,
'valid F1', res_valid['f1'],
'best test F1', res_test['f1'])
param['vf1'], param['tf1'] = res_valid['f1'], res_test['f1']
param['vp'], param['tp'] = res_valid['p'], res_test['p']
param['vr'], param['tr'] = res_valid['r'], res_test['r']
param['be'] = e
subprocess.call(['mv', folder + '/current.test.txt',
folder + '/best.test.txt'])
subprocess.call(['mv', folder + '/current.valid.txt',
folder + '/best.valid.txt'])
else:
if param['verbose']:
print ''
# learning rate decay if no improvement in 10 epochs
if param['decay'] and abs(param['be']-param['ce']) >= 10:
param['clr'] *= 0.5
# fall back to the best model seen so far when decaying
rnn = best_rnn
if param['clr'] < 1e-5:
break
print('BEST RESULT: epoch', param['be'],
'valid F1', param['vf1'],
'best test F1', param['tf1'],
'with the model', folder)
# Allow running this module directly as a training script.
if __name__ == '__main__':
main()
| {
"content_hash": "66286645b55ff6f98a0987f7f042276c",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 82,
"avg_line_length": 34.01776649746193,
"alnum_prop": 0.5023502200999777,
"repo_name": "webeng/DeepLearningTutorials",
"id": "936486ce01ffde057ba23f6ec382e365a73d2379",
"size": "13403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/rnnslu.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl6",
"bytes": "320"
},
{
"name": "Python",
"bytes": "775985"
},
{
"name": "Shell",
"bytes": "2777"
}
],
"symlink_target": ""
} |
# Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Huan Yu <huanyu@tencent.com>
# Feng Chen <phongchen@tencent.com>
# Yi Wang <yiwang@tencent.com>
# Chong Peng <michaelpeng@tencent.com>
# Date: October 20, 2011
"""
This is the scons rules genearator module which invokes all
the builder objects or scons objects to generate scons rules.
"""
import os
import socket
import subprocess
import string
import time
import configparse
import console
from blade_platform import CcFlagsManager
def _incs_list_to_string(incs):
    """Render a list of include directories as gcc-style ``-I`` flags.

    ['thirdparty', 'include'] -> -I thirdparty -I include
    """
    flags = []
    for inc_dir in incs:
        flags.append('-I ' + inc_dir)
    return ' '.join(flags)
class SconsFileHeaderGenerator(object):
    """Generates the common header part of the SConstruct file.

    The header covers everything emitted before the per-target rules:
    imports, the top level scons environment, version file rules,
    builders and compilation flags.
    """
    def __init__(self, options, build_dir, gcc_version,
                 python_inc, cuda_inc, build_environment, svn_roots):
        """Init method. """
        self.rules_buf = []
        self.options = options
        self.build_dir = build_dir
        self.gcc_version = gcc_version
        self.python_inc = python_inc
        self.cuda_inc = cuda_inc
        self.build_environment = build_environment
        self.ccflags_manager = CcFlagsManager(options)
        self.env_list = ['env_with_error', 'env_no_warning']
        self.svn_roots = svn_roots
        # Maps source root dir -> escaped version control info text.
        self.svn_info_map = {}
        self.version_cpp_compile_template = string.Template("""
env_version = Environment(ENV = os.environ)
env_version.Append(SHCXXCOMSTR = console.erasable('%s$updateinfo%s' % (colors('cyan'), colors('end'))))
env_version.Append(CPPFLAGS = '-m$m')
version_obj = env_version.SharedObject('$filename')
""")
        self.blade_config = configparse.blade_config
        self.distcc_enabled = self.blade_config.get_config(
            'distcc_config').get('enabled', False)
        self.dccc_enabled = self.blade_config.get_config(
            'link_config').get('enable_dccc', False)

    def _add_rule(self, rule):
        """Append one rule to buffer. """
        self.rules_buf.append('%s\n' % rule)

    def _append_prefix_to_building_var(
            self,
            prefix='',
            building_var='',
            condition=False):
        """A helper method: append prefix to building var if condition is True."""
        if condition:
            return '%s %s' % (prefix, building_var)
        else:
            return building_var

    def _exec_get_version_info(self, cmd, cwd, dirname):
        """Run cmd in cwd and record its stdout into svn_info_map[dirname].

        Returns True on success, False if the command failed.
        """
        # Copy the environment: mutating os.environ directly would leak
        # LC_ALL=POSIX into the whole blade process and every other child
        # process it spawns afterwards.
        lc_all_env = os.environ.copy()
        lc_all_env['LC_ALL'] = 'POSIX'
        p = subprocess.Popen(cmd,
                             env=lc_all_env,
                             cwd=cwd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             shell=True)
        std_out, std_err = p.communicate()
        if p.returncode:
            return False
        else:
            # Escape newlines so the text can be embedded in a C string.
            self.svn_info_map[dirname] = std_out.replace('\n', '\\n\\\n')
            return True

    def _get_version_info(self):
        """Gets svn root dir info. """
        blade_root_dir = self.build_environment.blade_root_dir
        if os.path.exists("%s/.git" % blade_root_dir):
            cmd = "git log -n 1"
            self._exec_get_version_info(cmd, None, os.path.dirname(blade_root_dir))
            return
        for root_dir in self.svn_roots:
            root_dir_realpath = os.path.realpath(root_dir)
            svn_working_dir = os.path.dirname(root_dir_realpath)
            svn_dir = os.path.basename(root_dir_realpath)
            cmd = 'svn info %s' % svn_dir
            cwd = svn_working_dir
            if not self._exec_get_version_info(cmd, cwd, root_dir):
                # Not a svn checkout, fall back to git.
                cmd = 'git ls-remote --get-url && git branch | grep "*" && git log -n 1'
                cwd = root_dir_realpath
                if not self._exec_get_version_info(cmd, cwd, root_dir):
                    console.warning('failed to get version control info in %s' % root_dir)

    def generate_version_file(self):
        """Generate version information files. """
        self._get_version_info()
        svn_info_len = len(self.svn_info_map)
        if not os.path.exists(self.build_dir):
            os.makedirs(self.build_dir)
        version_cpp = open('%s/version.cpp' % self.build_dir, 'w')
        print >>version_cpp, '/* This file was generated by blade */'
        print >>version_cpp, 'extern "C" {'
        print >>version_cpp, 'namespace binary_version {'
        print >>version_cpp, 'extern const int kSvnInfoCount = %d;' % svn_info_len
        # Join instead of quadratic += concatenation; values() iterates in
        # the same order the original keys()[idx] lookup walked the dict.
        svn_info_array = '{%s}' % ','.join(
            '"%s"' % info for info in self.svn_info_map.values())
        print >>version_cpp, 'extern const char* const kSvnInfo[%d] = %s;' % (
            svn_info_len, svn_info_array)
        print >>version_cpp, 'extern const char kBuildType[] = "%s";' % self.options.profile
        print >>version_cpp, 'extern const char kBuildTime[] = "%s";' % time.asctime()
        print >>version_cpp, 'extern const char kBuilderName[] = "%s";' % os.getenv('USER')
        print >>version_cpp, (
            'extern const char kHostName[] = "%s";' % socket.gethostname())
        compiler = 'GCC %s' % self.gcc_version
        print >>version_cpp, 'extern const char kCompiler[] = "%s";' % compiler
        print >>version_cpp, '}}'
        version_cpp.close()
        self._add_rule('VariantDir("%s", ".", duplicate=0)' % self.build_dir)
        self._add_rule(self.version_cpp_compile_template.substitute(
            updateinfo='Updating version information',
            m=self.options.m,
            filename='%s/version.cpp' % self.build_dir))

    def generate_imports_functions(self, blade_path):
        """Generates imports and functions. """
        self._add_rule(
            r"""
import sys
sys.path.insert(0, '%s')
""" % blade_path)
        self._add_rule(
            r"""
import os
import subprocess
import signal
import time
import socket
import glob
import blade_util
import console
import scons_helper
from build_environment import ScacheManager
from console import colors
from scons_helper import MakeAction
from scons_helper import create_fast_link_builders
from scons_helper import echospawn
from scons_helper import error_colorize
from scons_helper import generate_python_binary
from scons_helper import generate_resource_file
from scons_helper import generate_resource_index
""")
        if getattr(self.options, 'verbose', False):
            self._add_rule('scons_helper.option_verbose = True')
        self._add_rule((
            """if not os.path.exists('%s'):
    os.mkdir('%s')""") % (self.build_dir, self.build_dir))

    def generate_top_level_env(self):
        """generates top level environment. """
        self._add_rule('os.environ["LC_ALL"] = "C"')
        self._add_rule('top_env = Environment(ENV=os.environ)')
        # Optimization options, see http://www.scons.org/wiki/GoFastButton
        self._add_rule('top_env.Decider("MD5-timestamp")')
        self._add_rule('top_env.SetOption("implicit_cache", 1)')
        self._add_rule('top_env.SetOption("max_drift", 1)')

    def generate_compliation_verbose(self):
        """Generates color and verbose message. """
        self._add_rule('console.color_enabled=%s' % console.color_enabled)
        if not getattr(self.options, 'verbose', False):
            self._add_rule('top_env["SPAWN"] = echospawn')
        self._add_rule(
            """
compile_proto_cc_message = console.erasable('%sCompiling %s$SOURCE%s to cc source%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_proto_java_message = console.erasable('%sCompiling %s$SOURCE%s to java source%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_proto_php_message = console.erasable('%sCompiling %s$SOURCE%s to php source%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_proto_python_message = console.erasable('%sCompiling %s$SOURCE%s to python source%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_thrift_cc_message = console.erasable('%sCompiling %s$SOURCE%s to cc source%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_thrift_java_message = console.erasable('%sCompiling %s$SOURCE%s to java source%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_thrift_python_message = console.erasable( '%sCompiling %s$SOURCE%s to python source%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_fbthrift_cpp_message = console.erasable('%sCompiling %s$SOURCE%s to cpp source%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_fbthrift_cpp2_message = console.erasable('%sCompiling %s$SOURCE%s to cpp2 source%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_resource_index_message = console.erasable('%sGenerating resource index for %s$SOURCE_PATH/$TARGET_NAME%s%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_resource_message = console.erasable('%sCompiling %s$SOURCE%s as resource file%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_source_message = console.erasable('%sCompiling %s$SOURCE%s%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
assembling_source_message = console.erasable('%sAssembling %s$SOURCE%s%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
link_program_message = console.inerasable('%sLinking Program %s$TARGET%s%s' % \
    (colors('green'), colors('purple'), colors('green'), colors('end')))
link_library_message = console.inerasable('%sCreating Static Library %s$TARGET%s%s' % \
    (colors('green'), colors('purple'), colors('green'), colors('end')))
ranlib_library_message = console.inerasable('%sRanlib Library %s$TARGET%s%s' % \
    (colors('green'), colors('purple'), colors('green'), colors('end')))
link_shared_library_message = console.inerasable('%sLinking Shared Library %s$TARGET%s%s' % \
    (colors('green'), colors('purple'), colors('green'), colors('end')))
compile_java_jar_message = console.inerasable('%sGenerating java jar %s$TARGET%s%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_python_binary_message = console.erasable('%sGenerating python binary %s$TARGET%s%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_yacc_message = console.erasable('%sYacc %s$SOURCE%s to $TARGET%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_swig_python_message = console.erasable('%sCompiling %s$SOURCE%s to python source%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_swig_java_message = console.erasable('%sCompiling %s$SOURCE%s to java source%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
compile_swig_php_message = console.erasable('%sCompiling %s$SOURCE%s to php source%s' % \
    (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
""")
        if not getattr(self.options, 'verbose', False):
            self._add_rule(
                r"""
top_env.Append(
    CXXCOMSTR = compile_source_message,
    CCCOMSTR = compile_source_message,
    ASCOMSTR = assembling_source_message,
    SHCCCOMSTR = compile_source_message,
    SHCXXCOMSTR = compile_source_message,
    ARCOMSTR = link_library_message,
    RANLIBCOMSTR = ranlib_library_message,
    SHLINKCOMSTR = link_shared_library_message,
    LINKCOMSTR = link_program_message,
    JAVACCOMSTR = compile_source_message
)""")

    def _generate_fast_link_builders(self):
        """Generates fast link builders if it is specified in blade bash. """
        link_config = configparse.blade_config.get_config('link_config')
        enable_dccc = link_config['enable_dccc']
        if link_config['link_on_tmp']:
            if (not enable_dccc) or (
                    enable_dccc and not self.build_environment.dccc_env_prepared):
                self._add_rule('create_fast_link_builders(top_env)')

    def generate_builders(self):
        """Generates common builders. """
        # Generates builders specified in blade bash at first
        self._generate_fast_link_builders()
        proto_config = configparse.blade_config.get_config('proto_library_config')
        protoc_bin = proto_config['protoc']
        protobuf_path = proto_config['protobuf_path']
        protobuf_incs_str = _incs_list_to_string(proto_config['protobuf_incs'])
        protobuf_php_path = proto_config['protobuf_php_path']
        protoc_php_plugin = proto_config['protoc_php_plugin']
        # Generates common builders now
        builder_list = []
        self._add_rule('time_value = Value("%s")' % time.asctime())
        self._add_rule(
            'proto_bld = Builder(action = MakeAction("%s --proto_path=. -I. %s'
            ' -I=`dirname $SOURCE` --cpp_out=%s $SOURCE", '
            'compile_proto_cc_message))' % (
                protoc_bin, protobuf_incs_str, self.build_dir))
        builder_list.append('BUILDERS = {"Proto" : proto_bld}')
        self._add_rule(
            'proto_java_bld = Builder(action = MakeAction("%s --proto_path=. '
            '--proto_path=%s --java_out=%s/`dirname $SOURCE` $SOURCE", '
            'compile_proto_java_message))' % (
                protoc_bin, protobuf_path, self.build_dir))
        builder_list.append('BUILDERS = {"ProtoJava" : proto_java_bld}')
        self._add_rule(
            'proto_php_bld = Builder(action = MakeAction("%s '
            '--proto_path=. --plugin=protoc-gen-php=%s '
            '-I. %s -I%s -I=`dirname $SOURCE` '
            '--php_out=%s/`dirname $SOURCE` '
            '$SOURCE", compile_proto_php_message))' % (
                protoc_bin, protoc_php_plugin, protobuf_incs_str,
                protobuf_php_path, self.build_dir))
        builder_list.append('BUILDERS = {"ProtoPhp" : proto_php_bld}')
        self._add_rule(
            'proto_python_bld = Builder(action = MakeAction("%s '
            '--proto_path=. '
            '-I. %s -I=`dirname $SOURCE` '
            '--python_out=%s '
            '$SOURCE", compile_proto_python_message))' % (
                protoc_bin, protobuf_incs_str, self.build_dir))
        builder_list.append('BUILDERS = {"ProtoPython" : proto_python_bld}')
        # Generate thrift library builders.
        thrift_config = configparse.blade_config.get_config('thrift_config')
        thrift_incs_str = _incs_list_to_string(thrift_config['thrift_incs'])
        thrift_bin = thrift_config['thrift']
        # A '//path:name' target reference points at a thrift compiler
        # built inside this workspace; translate it to its output path.
        if thrift_bin.startswith('//'):
            thrift_bin = thrift_bin.replace('//', self.build_dir + '/')
            thrift_bin = thrift_bin.replace(':', '/')
        # Generates common builders now
        self._add_rule(
            'thrift_bld = Builder(action = MakeAction("%s '
            '--gen cpp:include_prefix,pure_enums -I . %s -I `dirname $SOURCE` '
            '-out %s/`dirname $SOURCE` $SOURCE", compile_thrift_cc_message))' % (
                thrift_bin, thrift_incs_str, self.build_dir))
        builder_list.append('BUILDERS = {"Thrift" : thrift_bld}')
        self._add_rule(
            'thrift_java_bld = Builder(action = MakeAction("%s '
            '--gen java -I . %s -I `dirname $SOURCE` -out %s/`dirname $SOURCE` '
            '$SOURCE", compile_thrift_java_message))' % (
                thrift_bin, thrift_incs_str, self.build_dir))
        builder_list.append('BUILDERS = {"ThriftJava" : thrift_java_bld}')
        self._add_rule(
            'thrift_python_bld = Builder(action = MakeAction("%s '
            '--gen py -I . %s -I `dirname $SOURCE` -out %s/`dirname $SOURCE` '
            '$SOURCE", compile_thrift_python_message))' % (
                thrift_bin, thrift_incs_str, self.build_dir))
        builder_list.append('BUILDERS = {"ThriftPython" : thrift_python_bld}')
        fbthrift_config = configparse.blade_config.get_config('fbthrift_config')
        fbthrift1_bin = fbthrift_config['fbthrift1']
        fbthrift2_bin = fbthrift_config['fbthrift2']
        fbthrift_incs_str = _incs_list_to_string(fbthrift_config['fbthrift_incs'])
        self._add_rule(
            'fbthrift1_bld = Builder(action = MakeAction("%s '
            '--gen cpp:templates,cob_style,include_prefix,enum_strict -I . %s -I `dirname $SOURCE` '
            '-o %s/`dirname $SOURCE` $SOURCE", compile_fbthrift_cpp_message))' % (
                fbthrift1_bin, fbthrift_incs_str, self.build_dir))
        builder_list.append('BUILDERS = {"FBThrift1" : fbthrift1_bld}')
        self._add_rule(
            'fbthrift2_bld = Builder(action = MakeAction("%s '
            '--gen=cpp2:cob_style,include_prefix,future -I . %s -I `dirname $SOURCE` '
            '-o %s/`dirname $SOURCE` $SOURCE", compile_fbthrift_cpp2_message))' % (
                fbthrift2_bin, fbthrift_incs_str, self.build_dir))
        builder_list.append('BUILDERS = {"FBThrift2" : fbthrift2_bld}')
        self._add_rule(
            r"""
blade_jar_bld = Builder(action = MakeAction('jar cf $TARGET -C `dirname $SOURCE` .',
                                            compile_java_jar_message))
yacc_bld = Builder(action = MakeAction('bison $YACCFLAGS -d -o $TARGET $SOURCE',
                                       compile_yacc_message))
resource_index_bld = Builder(action = MakeAction(generate_resource_index,
                                                 compile_resource_index_message))
resource_file_bld = Builder(action = MakeAction(generate_resource_file,
                                                compile_resource_message))
python_binary_bld = Builder(action = MakeAction(generate_python_binary,
                                                compile_python_binary_message))
""")
        builder_list.append('BUILDERS = {"BladeJar" : blade_jar_bld}')
        builder_list.append('BUILDERS = {"Yacc" : yacc_bld}')
        builder_list.append('BUILDERS = {"ResourceIndex" : resource_index_bld}')
        builder_list.append('BUILDERS = {"ResourceFile" : resource_file_bld}')
        builder_list.append('BUILDERS = {"PythonBinary" : python_binary_bld}')
        for builder in builder_list:
            self._add_rule('top_env.Append(%s)' % builder)

    def generate_compliation_flags(self):
        """Generates compilation flags. """
        toolchain_dir = os.environ.get('TOOLCHAIN_DIR', '')
        if toolchain_dir and not toolchain_dir.endswith('/'):
            toolchain_dir += '/'
        cpp_str = toolchain_dir + os.environ.get('CPP', 'cpp')
        cc_str = toolchain_dir + os.environ.get('CC', 'gcc')
        cxx_str = toolchain_dir + os.environ.get('CXX', 'g++')
        nvcc_str = toolchain_dir + os.environ.get('NVCC', 'nvcc')
        ld_str = toolchain_dir + os.environ.get('LD', 'g++')
        console.info('CPP=%s' % cpp_str)
        console.info('CC=%s' % cc_str)
        console.info('CXX=%s' % cxx_str)
        console.info('NVCC=%s' % nvcc_str)
        console.info('LD=%s' % ld_str)
        self.ccflags_manager.set_cpp_str(cpp_str)
        # To modify CC, CXX, LD according to the building environment and
        # project configuration
        build_with_distcc = (self.distcc_enabled and
                             self.build_environment.distcc_env_prepared)
        cc_str = self._append_prefix_to_building_var(
                prefix='distcc',
                building_var=cc_str,
                condition=build_with_distcc)
        cxx_str = self._append_prefix_to_building_var(
                prefix='distcc',
                building_var=cxx_str,
                condition=build_with_distcc)
        build_with_ccache = self.build_environment.ccache_installed
        cc_str = self._append_prefix_to_building_var(
                prefix='ccache',
                building_var=cc_str,
                condition=build_with_ccache)
        cxx_str = self._append_prefix_to_building_var(
                prefix='ccache',
                building_var=cxx_str,
                condition=build_with_ccache)
        build_with_dccc = (self.dccc_enabled and
                           self.build_environment.dccc_env_prepared)
        ld_str = self._append_prefix_to_building_var(
                prefix='dccc',
                building_var=ld_str,
                condition=build_with_dccc)
        cc_env_str = 'CC="%s", CXX="%s"' % (cc_str, cxx_str)
        ld_env_str = 'LINK="%s"' % ld_str
        nvcc_env_str = 'NVCC="%s"' % nvcc_str
        cc_config = configparse.blade_config.get_config('cc_config')
        extra_incs = cc_config['extra_incs']
        extra_incs_str = ', '.join(['"%s"' % inc for inc in extra_incs])
        if not extra_incs_str:
            extra_incs_str = '""'
        (cppflags_except_warning, linkflags) = self.ccflags_manager.get_flags_except_warning()
        builder_list = []
        cuda_incs_str = ' '.join(['-I%s' % inc for inc in self.cuda_inc])
        self._add_rule(
            'nvcc_object_bld = Builder(action = MakeAction("%s -ccbin g++ %s '
            '$NVCCFLAGS -o $TARGET -c $SOURCE", compile_source_message))' % (
                nvcc_str, cuda_incs_str))
        builder_list.append('BUILDERS = {"NvccObject" : nvcc_object_bld}')
        self._add_rule(
            'nvcc_binary_bld = Builder(action = MakeAction("%s %s '
            '$NVCCFLAGS -o $TARGET ", link_program_message))' % (
                nvcc_str, cuda_incs_str))
        builder_list.append('BUILDERS = {"NvccBinary" : nvcc_binary_bld}')
        for builder in builder_list:
            self._add_rule('top_env.Append(%s)' % builder)
        self._add_rule('top_env.Replace(%s, %s, '
                       'CPPPATH=[%s, "%s", "%s"], '
                       'CPPFLAGS=%s, CFLAGS=%s, CXXFLAGS=%s, '
                       '%s, LINKFLAGS=%s)' %
                       (cc_env_str, nvcc_env_str,
                        extra_incs_str, self.build_dir, self.python_inc,
                        cc_config['cppflags'] + cppflags_except_warning,
                        cc_config['cflags'],
                        cc_config['cxxflags'],
                        ld_env_str, linkflags))
        self._setup_cache()
        if build_with_distcc:
            self.build_environment.setup_distcc_env()
        for rule in self.build_environment.get_rules():
            self._add_rule(rule)
        self._setup_warnings()

    def _setup_warnings(self):
        # Clone the configured environments first, then enable the warning
        # flags only on the "env_with_error" environment.
        for env in self.env_list:
            self._add_rule('%s = top_env.Clone()' % env)
        (warnings, cxx_warnings, c_warnings) = self.ccflags_manager.get_warning_flags()
        self._add_rule('%s.Append(CPPFLAGS=%s, CFLAGS=%s, CXXFLAGS=%s)' % (
            self.env_list[0],
            warnings, c_warnings, cxx_warnings))

    def _setup_cache(self):
        # Prefer ccache when installed; otherwise fall back to the scons
        # CacheDir mechanism managed by ScacheManager.
        if self.build_environment.ccache_installed:
            self.build_environment.setup_ccache_env()
        else:
            cache_dir = os.path.expanduser('~/.bladescache')
            cache_size = 4 * 1024 * 1024 * 1024
            if hasattr(self.options, 'cache_dir'):
                if not self.options.cache_dir:
                    return
                cache_dir = self.options.cache_dir
            else:
                console.info('using default cache dir: %s' % cache_dir)
            if hasattr(self.options, 'cache_size') and (self.options.cache_size != -1):
                cache_size = self.options.cache_size
            self._add_rule('CacheDir("%s")' % cache_dir)
            self._add_rule('scache_manager = ScacheManager("%s", cache_limit=%d)' % (
                cache_dir, cache_size))
            self._add_rule('Progress(scache_manager, interval=100)')
            self._add_rule('console.info("using cache directory %s")' % cache_dir)
            self._add_rule('console.info("scache size %d")' % cache_size)

    def generate(self, blade_path):
        """Generates all rules. """
        self.generate_imports_functions(blade_path)
        self.generate_top_level_env()
        self.generate_compliation_verbose()
        self.generate_version_file()
        self.generate_builders()
        self.generate_compliation_flags()
        return self.rules_buf
class SconsRulesGenerator(object):
    """The main class to generate scons rules and outputs rules to SConstruct. """
    def __init__(self, scons_path, blade_path, blade):
        """Init method. """
        self.scons_path = scons_path
        self.blade_path = blade_path
        self.blade = blade
        self.scons_platform = self.blade.get_scons_platform()
        output_dir = self.blade.get_build_path()
        self.scons_file_header_generator = SconsFileHeaderGenerator(
            self.blade.get_options(),
            output_dir,
            self.scons_platform.get_gcc_version(),
            self.scons_platform.get_python_include(),
            self.scons_platform.get_cuda_include(),
            self.blade.build_environment,
            self.blade.svn_root_dirs)
        # Refresh the 'blade-bin' convenience symlink so it always points
        # at the current build output directory.
        try:
            os.remove('blade-bin')
        except os.error:
            pass
        os.symlink(os.path.abspath(output_dir), 'blade-bin')

    def generate_scons_script(self):
        """Generates SConstruct script. """
        rules = self.scons_file_header_generator.generate(self.blade_path)
        rules += self.blade.gen_targets_rules()
        # Write the accumulated rules to the SConstruct file.
        self.scons_file_fd = open(self.scons_path, 'w')
        self.scons_file_fd.writelines(rules)
        self.scons_file_fd.close()
        return rules
| {
"content_hash": "5a21f6a24af97f22a266e08147ced15d",
"timestamp": "",
"source": "github",
"line_count": 618,
"max_line_length": 119,
"avg_line_length": 42.35598705501618,
"alnum_prop": 0.5859183985330073,
"repo_name": "Lunewcome/typhoon-blade",
"id": "8288fc9cf906108e01d4fc56be96d3aa0ab71733",
"size": "26176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/blade/rules_generator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "293"
},
{
"name": "C++",
"bytes": "2267"
},
{
"name": "Cuda",
"bytes": "5412"
},
{
"name": "Objective-C",
"bytes": "83"
},
{
"name": "Protocol Buffer",
"bytes": "351"
},
{
"name": "Python",
"bytes": "463043"
},
{
"name": "Shell",
"bytes": "16669"
},
{
"name": "Thrift",
"bytes": "6217"
},
{
"name": "VimL",
"bytes": "7375"
}
],
"symlink_target": ""
} |
def autoretry_datastore_timeouts(attempts=5.0, interval=0.1, exponent=2.0):
    """
    Copyright (C) 2009 twitter.com/rcb

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation
    files (the "Software"), to deal in the Software without
    restriction, including without limitation the rights to use,
    copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following
    conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
    OTHER DEALINGS IN THE SOFTWARE.

    ======================================================================

    This function wraps the AppEngine Datastore API to autoretry
    datastore timeouts at the lowest accessible level.

    The benefits of this approach are:

    1. Small Footprint:  Does not monkey with Model internals
                         which may break in future releases.
    2. Max Performance:  Retrying at this lowest level means
                         serialization and key formatting is not
                         needlessly repeated on each retry.
    At initialization time, execute this:

    >>> autoretry_datastore_timeouts()

    Should only be called once, subsequent calls have no effect.

    >>> autoretry_datastore_timeouts() # no effect

    Default (5) attempts: .1, .2, .4, .8, 1.6 seconds

    Parameters can each be specified as floats.

    :param attempts: maximum number of times to retry.
    :param interval: base seconds to sleep between retries.
    :param exponent: rate of exponential back-off.
    """
    import time
    import logging
    from google.appengine.api import apiproxy_stub_map
    from google.appengine.runtime import apiproxy_errors
    from google.appengine.datastore import datastore_pb
    attempts = float(attempts)
    interval = float(interval)
    exponent = float(exponent)
    wrapped = apiproxy_stub_map.MakeSyncCall
    # Only these two datastore errors are transient and worth retrying.
    errors = {datastore_pb.Error.TIMEOUT: 'Timeout',
              datastore_pb.Error.CONCURRENT_TRANSACTION: 'TransactionFailedError'}

    def wrapper(*args, **kwargs):
        count = 0.0
        while True:
            try:
                return wrapped(*args, **kwargs)
            # 'except X, err' is removed in Python 3; 'as' works on 2.6+.
            except apiproxy_errors.ApplicationError as err:
                errno = err.application_error
                if errno not in errors:
                    raise
                sleep = (exponent ** count) * interval
                count += 1.0
                if count > attempts:
                    raise
                msg = "Datastore %s: retry #%d in %s seconds.\n%s"
                vals = ''
                if count == 1.0:
                    # Log the call arguments only on the first retry.
                    vals = '\n'.join([str(a) for a in args])
                logging.warning(msg % (errors[errno], count, sleep, vals))
                time.sleep(sleep)

    # Tag the replacement so a second call sees the marker on the current
    # MakeSyncCall and leaves it alone (idempotent installation).
    setattr(wrapper, '_autoretry_datastore_timeouts', False)
    if getattr(wrapped, '_autoretry_datastore_timeouts', True):
        apiproxy_stub_map.MakeSyncCall = wrapper
| {
"content_hash": "534524006baf26a9e2f59b732b538b32",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 75,
"avg_line_length": 42.43023255813954,
"alnum_prop": 0.6404494382022472,
"repo_name": "limscoder/amfast",
"id": "c6efdfb15610ef3516e57eaa87a40ef21c6c8ace",
"size": "3649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/gae/autoretry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "10167"
},
{
"name": "C",
"bytes": "184296"
},
{
"name": "HTML",
"bytes": "1738"
},
{
"name": "Python",
"bytes": "365979"
}
],
"symlink_target": ""
} |
"""Utilities for with-statement contexts. See PEP 343."""
import sys
from collections import deque
from functools import wraps
__all__ = ["contextmanager", "closing", "ContextDecorator", "ExitStack",
"redirect_stdout", "redirect_stderr", "suppress"]
class ContextDecorator(object):
    "A base class or mixin that enables context managers to work as decorators."

    def _recreate_cm(self):
        """Return a context manager to use for one decorated invocation.

        One-shot managers (e.g. _GeneratorContextManager) override this to
        build a fresh instance each time the decorated function runs; the
        default simply reuses *self*.

        This is a private interface just for _GeneratorContextManager.
        See issue #11647 for details.
        """
        return self

    def __call__(self, func):
        @wraps(func)
        def wrapper(*args, **kwds):
            # Recreate the CM per call so one-shot managers stay usable.
            with self._recreate_cm():
                return func(*args, **kwds)
        return wrapper
class _GeneratorContextManager(ContextDecorator):
    """Helper for @contextmanager decorator.

    Wraps a generator function: __enter__ runs it to the first yield,
    __exit__ resumes it (or throws the active exception into it).
    """
    def __init__(self, func, *args, **kwds):
        # Create the generator immediately; func/args/kwds are kept so
        # _recreate_cm() can build a fresh one for decorator reuse.
        self.gen = func(*args, **kwds)
        self.func, self.args, self.kwds = func, args, kwds
        # Issue 19330: ensure context manager instances have good docstrings
        doc = getattr(func, "__doc__", None)
        if doc is None:
            doc = type(self).__doc__
        self.__doc__ = doc
        # Unfortunately, this still doesn't provide good help output when
        # inspecting the created context manager instances, since pydoc
        # currently bypasses the instance docstring and shows the docstring
        # for the class instead.
        # See http://bugs.python.org/issue19404 for more details.
    def _recreate_cm(self):
        # _GCM instances are one-shot context managers, so the
        # CM must be recreated each time a decorated function is
        # called
        return self.__class__(self.func, *self.args, **self.kwds)
    def __enter__(self):
        # Advance the generator to its first yield; the yielded value is
        # what the with-statement binds.
        try:
            return next(self.gen)
        except StopIteration:
            raise RuntimeError("generator didn't yield") from None
    def __exit__(self, type, value, traceback):
        # Returns True only when the generator suppressed the exception
        # (i.e. it swallowed the throw and finished normally).
        if type is None:
            try:
                next(self.gen)
            except StopIteration:
                return
            else:
                raise RuntimeError("generator didn't stop")
        else:
            if value is None:
                # Need to force instantiation so we can reliably
                # tell if we get the same exception back
                value = type()
            try:
                self.gen.throw(type, value, traceback)
                raise RuntimeError("generator didn't stop after throw()")
            except StopIteration as exc:
                # Suppress StopIteration *unless* it's the same exception that
                # was passed to throw().  This prevents a StopIteration
                # raised inside the "with" statement from being suppressed.
                return exc is not value
            except RuntimeError as exc:
                # Likewise, avoid suppressing if a StopIteration exception
                # was passed to throw() and later wrapped into a RuntimeError
                # (see PEP 479).
                if exc.__cause__ is value:
                    return False
                raise
            except:
                # only re-raise if it's *not* the exception that was
                # passed to throw(), because __exit__() must not raise
                # an exception unless __exit__() itself failed.  But throw()
                # has to raise the exception to signal propagation, so this
                # fixes the impedance mismatch between the throw() protocol
                # and the __exit__() protocol.
                #
                if sys.exc_info()[1] is not value:
                    raise
def contextmanager(func):
    """@contextmanager decorator.

    Typical usage:

        @contextmanager
        def some_generator(<arguments>):
            <setup>
            try:
                yield <value>
            finally:
                <cleanup>

    This makes this:

        with some_generator(<arguments>) as <variable>:
            <body>

    equivalent to this:

        <setup>
        try:
            <variable> = <value>
            <body>
        finally:
            <cleanup>
    """
    @wraps(func)
    def make_context_manager(*args, **kwds):
        return _GeneratorContextManager(func, *args, **kwds)
    return make_context_manager
class closing(object):
    """Context manager that calls ``close()`` on an object at block exit.

    Code like this:

        with closing(<module>.open(<arguments>)) as f:
            <block>

    is equivalent to this:

        f = <module>.open(<arguments>)
        try:
            <block>
        finally:
            f.close()
    """

    def __init__(self, thing):
        self.thing = thing

    def __enter__(self):
        # Bind the wrapped object itself, not this manager.
        return self.thing

    def __exit__(self, *exc_details):
        self.thing.close()
class _RedirectStream:
    """Shared machinery for redirect_stdout/redirect_stderr.

    Subclasses set ``_stream`` to the name of the ``sys`` attribute
    to patch.
    """

    _stream = None

    def __init__(self, new_target):
        self._new_target = new_target
        # A stack of saved targets makes this manager re-entrant.
        self._old_targets = []

    def __enter__(self):
        self._old_targets.append(getattr(sys, self._stream))
        setattr(sys, self._stream, self._new_target)
        return self._new_target

    def __exit__(self, exctype, excinst, exctb):
        # Restore the most recently saved target, even on error.
        setattr(sys, self._stream, self._old_targets.pop())
class redirect_stdout(_RedirectStream):
    """Temporarily rebind ``sys.stdout`` to another file-like object.

        # How to send help() to stderr
        with redirect_stdout(sys.stderr):
            help(dir)

        # How to write help() to a file
        with open('help.txt', 'w') as f:
            with redirect_stdout(f):
                help(pow)
    """

    _stream = "stdout"
class redirect_stderr(_RedirectStream):
    """Temporarily rebind ``sys.stderr`` to another file-like object."""

    _stream = "stderr"
class suppress:
    """Context manager to suppress specified exceptions

    After the exception is suppressed, execution proceeds with the next
    statement following the with statement.

        with suppress(FileNotFoundError):
            os.remove(somefile)
        # Execution still resumes here if the file was already removed
    """

    def __init__(self, *exceptions):
        self._exceptions = exceptions

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, tb):
        # Unlike isinstance and issubclass, CPython exception handling
        # currently only looks at the concrete type hierarchy (ignoring
        # the instance and subclass checking hooks).  Mirroring plain
        # issubclass() here gives simpler semantics than a real except
        # clause would; see http://bugs.python.org/issue12029.
        if exc_type is None:
            return False
        return issubclass(exc_type, self._exceptions)
# Inspired by discussions on http://bugs.python.org/issue13585
class ExitStack(object):
"""Context manager for dynamic management of a stack of exit callbacks
For example:
with ExitStack() as stack:
files = [stack.enter_context(open(fname)) for fname in filenames]
# All opened files will automatically be closed at the end of
# the with statement, even if attempts to open files later
# in the list raise an exception
"""
    def __init__(self):
        # LIFO stack of exit callbacks; deque gives O(1) append/pop.
        self._exit_callbacks = deque()
    def pop_all(self):
        """Preserve the context stack by transferring it to a new instance"""
        # The new stack adopts our callbacks; ours is emptied so this
        # stack's own __exit__/close() becomes a no-op.
        new_stack = type(self)()
        new_stack._exit_callbacks = self._exit_callbacks
        self._exit_callbacks = deque()
        return new_stack
def _push_cm_exit(self, cm, cm_exit):
"""Helper to correctly register callbacks to __exit__ methods"""
def _exit_wrapper(*exc_details):
return cm_exit(cm, *exc_details)
_exit_wrapper.__self__ = cm
self.push(_exit_wrapper)
def push(self, exit):
"""Registers a callback with the standard __exit__ method signature
Can suppress exceptions the same way __exit__ methods can.
Also accepts any object with an __exit__ method (registering a call
to the method instead of the object itself)
"""
# We use an unbound method rather than a bound method to follow
# the standard lookup behaviour for special methods
_cb_type = type(exit)
try:
exit_method = _cb_type.__exit__
except AttributeError:
# Not a context manager, so assume its a callable
self._exit_callbacks.append(exit)
else:
self._push_cm_exit(exit, exit_method)
return exit # Allow use as a decorator
def callback(self, callback, *args, **kwds):
"""Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
"""
def _exit_wrapper(exc_type, exc, tb):
callback(*args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
# setting __wrapped__ may still help with introspection
_exit_wrapper.__wrapped__ = callback
self.push(_exit_wrapper)
return callback # Allow use as a decorator
def enter_context(self, cm):
"""Enters the supplied context manager
If successful, also pushes its __exit__ method as a callback and
returns the result of the __enter__ method.
"""
# We look up the special methods on the type to match the with statement
_cm_type = type(cm)
_exit = _cm_type.__exit__
result = _cm_type.__enter__(cm)
self._push_cm_exit(cm, _exit)
return result
def close(self):
"""Immediately unwind the context stack"""
self.__exit__(None, None, None)
def __enter__(self):
return self
def __exit__(self, *exc_details):
received_exc = exc_details[0] is not None
# We manipulate the exception state so it behaves as though
# we were actually nesting multiple with statements
frame_exc = sys.exc_info()[1]
def _fix_exception_context(new_exc, old_exc):
# Context may not be correct, so find the end of the chain
while 1:
exc_context = new_exc.__context__
if exc_context is old_exc:
# Context is already set correctly (see issue 20317)
return
if exc_context is None or exc_context is frame_exc:
break
new_exc = exc_context
# Change the end of the chain to point to the exception
# we expect it to reference
new_exc.__context__ = old_exc
# Callbacks are invoked in LIFO order to match the behaviour of
# nested context managers
suppressed_exc = False
pending_raise = False
while self._exit_callbacks:
cb = self._exit_callbacks.pop()
try:
if cb(*exc_details):
suppressed_exc = True
pending_raise = False
exc_details = (None, None, None)
except:
new_exc_details = sys.exc_info()
# simulate the stack of exceptions by setting the context
_fix_exception_context(new_exc_details[1], exc_details[1])
pending_raise = True
exc_details = new_exc_details
if pending_raise:
try:
# bare "raise exc_details[1]" replaces our carefully
# set-up context
fixed_ctx = exc_details[1].__context__
raise exc_details[1]
except BaseException:
exc_details[1].__context__ = fixed_ctx
raise
return received_exc and suppressed_exc
| {
"content_hash": "6b01c6ae8ce9cf9b231842d98a3a7a1b",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 80,
"avg_line_length": 33.988919667590025,
"alnum_prop": 0.5801955990220049,
"repo_name": "munyirik/python",
"id": "379c2515d50b60ec31e3ea6f6beb95c2aa6e672a",
"size": "12270",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "cpython/Lib/contextlib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "470920"
},
{
"name": "Batchfile",
"bytes": "35551"
},
{
"name": "C",
"bytes": "17872871"
},
{
"name": "C#",
"bytes": "1231"
},
{
"name": "C++",
"bytes": "356072"
},
{
"name": "CSS",
"bytes": "2839"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "Groff",
"bytes": "254942"
},
{
"name": "HTML",
"bytes": "130698"
},
{
"name": "JavaScript",
"bytes": "10616"
},
{
"name": "Makefile",
"bytes": "25026"
},
{
"name": "Objective-C",
"bytes": "33182"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Prolog",
"bytes": "557"
},
{
"name": "Python",
"bytes": "24911704"
},
{
"name": "R",
"bytes": "5378"
},
{
"name": "Shell",
"bytes": "437386"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
from gii.core.model import *
from PropertyEditor import FieldEditor, registerSimpleFieldEditorFactory
from gii.SearchView import requestSearchView
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
##----------------------------------------------------------------##
class CollectionFieldButton( QtGui.QToolButton ):
	"""Compact tool button used by CollectionFieldEditor inside the property grid."""
	def sizeHint( self ):
		# Fixed small hint keeps the button from stretching the editor row.
		return QtCore.QSize( 20, 20)
##----------------------------------------------------------------##
class CollectionFieldEditor( FieldEditor ):
	"""Field editor that edits a collection value via the scene search view."""

	def setTarget( self, parent, field ):
		super( CollectionFieldEditor, self ).setTarget( parent, field )
		fieldType = field.getType()
		self.targetType = fieldType.itemType
		self.targetContext = None #TODO
		self.value = None

	def get( self ):
		#TODO
		pass

	def set( self, value ):
		# Display a non-empty marker when a collection is assigned.
		self.value = value
		self.button.setText( '[...]' if value else '[]' )

	def setValue( self, value ):
		self.set( value )
		self.notifyChanged( value )

	def initEditor( self, container ):
		# Build the single button widget that opens the search view.
		button = CollectionFieldButton( container )
		button.setSizePolicy(
			QtGui.QSizePolicy.Expanding,
			QtGui.QSizePolicy.Expanding
		)
		button.setText( '[]' )
		if self.getOption( 'readonly', False ):
			button.setEnabled( False )
		button.clicked.connect( self.openSearchView )
		self.button = button
		return button

	def openSearchView( self ):
		# Multiple selection: the result is the whole collection.
		requestSearchView(
			context            = 'scene',
			type               = self.targetType,
			multiple_selection = True,
			on_selection       = self.onSearchSelection,
			on_cancel          = self.onSearchCancel,
			initial            = self.value
		)

	def onSearchSelection( self, value ):
		self.setValue( value )
		self.setFocus()

	def onSearchCancel( self ):
		self.setFocus()

	def setFocus( self ):
		self.button.setFocus()
# Register this editor for all CollectionType fields in the property grid.
registerSimpleFieldEditorFactory( CollectionType, CollectionFieldEditor )
| {
"content_hash": "cbc0c967feeddb4ef199cd70a2abfe7a",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 73,
"avg_line_length": 26.014084507042252,
"alnum_prop": 0.6556578234975636,
"repo_name": "tommo/gii",
"id": "31b96dcb456913413ac54223bebc6def9f614b47",
"size": "1847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/gii/qt/controls/PropertyEditor/CollectionFieldEditor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "398"
},
{
"name": "C",
"bytes": "1118982"
},
{
"name": "C++",
"bytes": "743466"
},
{
"name": "CSS",
"bytes": "5956"
},
{
"name": "HTML",
"bytes": "126233"
},
{
"name": "JavaScript",
"bytes": "129855"
},
{
"name": "Lua",
"bytes": "1290198"
},
{
"name": "Makefile",
"bytes": "652"
},
{
"name": "Objective-C",
"bytes": "28896"
},
{
"name": "Objective-C++",
"bytes": "129214"
},
{
"name": "Python",
"bytes": "2676186"
},
{
"name": "Shell",
"bytes": "11215"
}
],
"symlink_target": ""
} |
"""
WSGI config for lab7 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lab7.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "5d82916c0bbaff840f3ae5c1a8572323",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.357142857142858,
"alnum_prop": 0.7702349869451697,
"repo_name": "alejo8591/angular-labs",
"id": "ae6f52f7b801909ce3bce529a064d4fdfae553b2",
"size": "383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lab15/lab7/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12824"
},
{
"name": "HTML",
"bytes": "27802"
},
{
"name": "JavaScript",
"bytes": "5528"
},
{
"name": "PLpgSQL",
"bytes": "1643"
},
{
"name": "Python",
"bytes": "35867"
}
],
"symlink_target": ""
} |
"""Module containing the Event base class."""
class Event:
    """Base class representing a model event.

    A model event can fire in several generic situations -- for example
    when a model object is loaded from a data connector, created, or
    updated.  Each such situation gets its own subclass of Event that
    defines the event's behaviour and expected arguments.

    Note that this base class deliberately defines neither '__init__'
    nor '__call__'; subclasses must provide both.
    """
| {
"content_hash": "b46a5f418fce4fa1875b5f6669753485",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 71,
"avg_line_length": 31.944444444444443,
"alnum_prop": 0.7026086956521739,
"repo_name": "v-legoff/pa-poc3",
"id": "f04c6504e86018a8c2d297d2b26daedc4c9ee30c",
"size": "2115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/model/events/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "12354"
},
{
"name": "Python",
"bytes": "643635"
},
{
"name": "Shell",
"bytes": "6471"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: re-declares Annotation.image as a
    # cascade-deleting FK with the 'annotations' reverse accessor on
    # images.Image.  Do not hand-edit the operations.

    dependencies = [
        ('annotations', '0008_auto_20170826_1533'),
    ]

    operations = [
        migrations.AlterField(
            model_name='annotation',
            name='image',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='annotations', to='images.Image'),
        ),
    ]
| {
"content_hash": "a00150a5faeb7199c0a56c24fb5919f4",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 128,
"avg_line_length": 26.105263157894736,
"alnum_prop": 0.6451612903225806,
"repo_name": "bit-bots/imagetagger",
"id": "d4ede40898283be926f9fb664a0935dc174d3e4b",
"size": "569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/imagetagger/annotations/migrations/0009_auto_20170826_1535.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12288"
},
{
"name": "Dockerfile",
"bytes": "2049"
},
{
"name": "HTML",
"bytes": "273837"
},
{
"name": "JavaScript",
"bytes": "234939"
},
{
"name": "Python",
"bytes": "252248"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
} |
import collections
import os
import re
from xml.etree import ElementTree
from util import build_utils
from util import resource_utils
# One R.txt symbol line, e.g. "int string foo 0x7f010001".
_TextSymbolEntry = collections.namedtuple(
    'RTextEntry', ('java_type', 'resource_type', 'name', 'value'))

# Placeholder ID/index values: the generated R.txt is used only for its
# symbol listing, not for real resource IDs.
_DUMMY_RTXT_ID = '0x7f010001'
_DUMMY_RTXT_INDEX = '1'
def _ResourceNameToJavaSymbol(resource_name):
return re.sub('[\.:]', '_', resource_name)
class RTxtGenerator:
  """Generates an R.txt-style symbol listing directly from res/ directories.

  The symbols are parsed from the resource files themselves (no aapt run);
  all IDs are dummy placeholders, so the output is only useful as a list of
  symbol names and types.
  """

  def __init__(self,
               res_dirs,
               ignore_pattern=resource_utils.AAPT_IGNORE_PATTERN):
    # Directories to scan for resources.
    self.res_dirs = res_dirs
    # aapt-style ignore pattern used to skip non-resource files.
    self.ignore_pattern = ignore_pattern

  def _ParseDeclareStyleable(self, node):
    """Returns the set of symbols declared by a <declare-styleable/> node."""
    ret = set()
    stylable_name = _ResourceNameToJavaSymbol(node.attrib['name'])
    ret.add(
        _TextSymbolEntry('int[]', 'styleable', stylable_name,
                         '{{{}}}'.format(_DUMMY_RTXT_ID)))
    for child in node:
      if child.tag == 'eat-comment':
        continue
      if child.tag != 'attr':
        # This parser expects everything inside <declare-stylable/> to be either
        # an attr or an eat-comment. If new resource xml files are added that do
        # not conform to this, this parser needs updating.
        raise Exception('Unexpected tag {} inside <delcare-stylable/>'.format(
            child.tag))
      entry_name = '{}_{}'.format(
          stylable_name, _ResourceNameToJavaSymbol(child.attrib['name']))
      ret.add(
          _TextSymbolEntry('int', 'styleable', entry_name, _DUMMY_RTXT_INDEX))
      if not child.attrib['name'].startswith('android:'):
        # Framework attrs ('android:') are not re-declared in the app's R.
        resource_name = _ResourceNameToJavaSymbol(child.attrib['name'])
        ret.add(_TextSymbolEntry('int', 'attr', resource_name, _DUMMY_RTXT_ID))
      for entry in child:
        if entry.tag not in ('enum', 'flag'):
          # This parser expects everything inside <attr/> to be either an
          # <enum/> or an <flag/>. If new resource xml files are added that do
          # not conform to this, this parser needs updating.
          raise Exception('Unexpected tag {} inside <attr/>'.format(entry.tag))
        resource_name = _ResourceNameToJavaSymbol(entry.attrib['name'])
        ret.add(_TextSymbolEntry('int', 'id', resource_name, _DUMMY_RTXT_ID))
    return ret

  def _ExtractNewIdsFromNode(self, node):
    """Recursively collects '@+id/...' declarations from an XML node tree."""
    ret = set()
    # Sometimes there are @+id/ in random attributes (not just in android:id)
    # and apparently that is valid. See:
    # https://developer.android.com/reference/android/widget/RelativeLayout.LayoutParams.html
    for value in node.attrib.values():
      if value.startswith('@+id/'):
        resource_name = value[5:]
        ret.add(_TextSymbolEntry('int', 'id', resource_name, _DUMMY_RTXT_ID))
    for child in node:
      ret.update(self._ExtractNewIdsFromNode(child))
    return ret

  def _ParseXml(self, xml_path):
    """Parses an XML file, re-raising with the file path on failure."""
    try:
      return ElementTree.parse(xml_path).getroot()
    except Exception as e:
      raise RuntimeError('Failure parsing {}:\n'.format(xml_path)) from e

  def _ExtractNewIdsFromXml(self, xml_path):
    """Collects '@+id/...' declarations from an XML file."""
    return self._ExtractNewIdsFromNode(self._ParseXml(xml_path))

  def _ParseValuesXml(self, xml_path):
    """Returns the set of symbols declared by a values/*.xml file."""
    ret = set()
    root = self._ParseXml(xml_path)
    assert root.tag == 'resources'
    for child in root:
      if child.tag == 'eat-comment':
        # eat-comment is just a dummy documentation element.
        continue
      if child.tag == 'skip':
        # skip is just a dummy element.
        continue
      if child.tag == 'declare-styleable':
        ret.update(self._ParseDeclareStyleable(child))
      else:
        if child.tag == 'item':
          resource_type = child.attrib['type']
        elif child.tag in ('array', 'integer-array', 'string-array'):
          # All array flavours map to the single 'array' resource type.
          resource_type = 'array'
        else:
          resource_type = child.tag
        name = _ResourceNameToJavaSymbol(child.attrib['name'])
        ret.add(_TextSymbolEntry('int', resource_type, name, _DUMMY_RTXT_ID))
    return ret

  def _CollectResourcesListFromDirectory(self, res_dir):
    """Walks one res/ directory and returns every symbol it declares."""
    ret = set()
    # NOTE(review): uses a private helper of resource_utils — confirm it is
    # intended for external use.
    globs = resource_utils._GenerateGlobs(self.ignore_pattern)
    for root, _, files in os.walk(res_dir):
      # Directory name encodes the type, e.g. 'drawable-hdpi' -> 'drawable'.
      resource_type = os.path.basename(root)
      if '-' in resource_type:
        resource_type = resource_type[:resource_type.index('-')]
      for f in files:
        if build_utils.MatchesGlob(f, globs):
          continue
        if resource_type == 'values':
          ret.update(self._ParseValuesXml(os.path.join(root, f)))
        else:
          # File-based resource: symbol name is the file name sans extension.
          if '.' in f:
            resource_name = f[:f.index('.')]
          else:
            resource_name = f
          ret.add(
              _TextSymbolEntry('int', resource_type, resource_name,
                               _DUMMY_RTXT_ID))
          # Other types not just layouts can contain new ids (eg: Menus and
          # Drawables). Just in case, look for new ids in all files.
          if f.endswith('.xml'):
            ret.update(self._ExtractNewIdsFromXml(os.path.join(root, f)))
    return ret

  def _CollectResourcesListFromDirectories(self):
    """Unions the symbols from every configured res/ directory."""
    ret = set()
    for res_dir in self.res_dirs:
      ret.update(self._CollectResourcesListFromDirectory(res_dir))
    return ret

  def WriteRTxtFile(self, rtxt_path):
    """Writes the collected symbols to rtxt_path (atomically replaced)."""
    resources = self._CollectResourcesListFromDirectories()
    with build_utils.AtomicOutput(rtxt_path, mode='w') as f:
      for resource in resources:
        line = '{0.java_type} {0.resource_type} {0.name} {0.value}\n'.format(
            resource)
        f.write(line)
| {
"content_hash": "86b98534b2a6edacf0d369e3cc01db89",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 93,
"avg_line_length": 38.083333333333336,
"alnum_prop": 0.6267323121808899,
"repo_name": "scheib/chromium",
"id": "6d2621a41eefe8cef1bfec455940e7d4c00bfb9c",
"size": "5647",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "build/android/gyp/util/resources_parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
GitHub OAuth support.
This contribution adds support for GitHub OAuth service. The settings
GITHUB_APP_ID and GITHUB_API_SECRET must be defined with the values
given by GitHub application registration process.
GITHUB_ORGANIZATION is an optional setting that will allow you to constrain
authentication to a given GitHub organization.
Extended permissions are supported by defining GITHUB_EXTENDED_PERMISSIONS
setting, it must be a list of values to request.
By default account id and token expiration time are stored in extra_data
field, check OAuthBackend class for details on how to extend it.
"""
from urllib import urlencode
from urllib2 import HTTPError
from django.utils import simplejson
from django.conf import settings
from social_auth.utils import dsa_urlopen
from social_auth.backends import BaseOAuth2, OAuthBackend
# GitHub configuration
GITHUB_AUTHORIZATION_URL = 'https://github.com/login/oauth/authorize'
GITHUB_ACCESS_TOKEN_URL = 'https://github.com/login/oauth/access_token'
GITHUB_USER_DATA_URL = 'https://api.github.com/user'
# GitHub organization configuration
GITHUB_ORGANIZATION_MEMBER_OF_URL = \
'https://api.github.com/orgs/{org}/members/{username}'
GITHUB_SERVER = 'github.com'
class GithubBackend(OAuthBackend):
    """GitHub OAuth authentication backend."""
    name = 'github'
    # Extra token metadata persisted on the social-auth association.
    EXTRA_DATA = [
        ('id', 'id'),
        ('expires', 'expires')
    ]

    def get_user_details(self, response):
        """Map the GitHub API user payload onto the common details dict."""
        details = {
            'username': response.get('login'),
            'email': response.get('email') or '',
            'first_name': response.get('name'),
        }
        return details
class GithubAuth(BaseOAuth2):
    """Github OAuth2 mechanism"""
    AUTHORIZATION_URL = GITHUB_AUTHORIZATION_URL
    ACCESS_TOKEN_URL = GITHUB_ACCESS_TOKEN_URL
    AUTH_BACKEND = GithubBackend
    SETTINGS_KEY_NAME = 'GITHUB_APP_ID'
    SETTINGS_SECRET_NAME = 'GITHUB_API_SECRET'
    SCOPE_SEPARATOR = ','
    # Look at http://developer.github.com/v3/oauth/
    SCOPE_VAR_NAME = 'GITHUB_EXTENDED_PERMISSIONS'
    # Optional org constraint; when set, only members may authenticate.
    GITHUB_ORGANIZATION = getattr(settings, 'GITHUB_ORGANIZATION', None)

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service.

        Returns the decoded GitHub user payload, or None when the payload
        cannot be decoded or the user fails the organization membership
        check (when GITHUB_ORGANIZATION is configured).
        """
        url = GITHUB_USER_DATA_URL + '?' + urlencode({
            'access_token': access_token
        })
        try:
            data = simplejson.load(dsa_urlopen(url))
        except ValueError:
            # Response body was not valid JSON.
            data = None

        # If we have a github organization defined, test that the current
        # user is a member of that organization.
        if data and self.GITHUB_ORGANIZATION:
            member_url = GITHUB_ORGANIZATION_MEMBER_OF_URL.format(
                org=self.GITHUB_ORGANIZATION,
                username=data.get('login')
            )

            try:
                response = dsa_urlopen(member_url)
            except HTTPError:
                # GitHub answers 404 (an HTTPError) for non-members.
                data = None
            else:
                # if the user is a member of the organization, response code
                # will be 204, see:
                # http://developer.github.com/v3/orgs/members/#response-if-requester-is-an-organization-member-and-user-is-a-member
                if not response.code == 204:
                    data = None

        return data
# Backend definition
# Maps the backend name (as used in settings and auth URLs) to its class.
BACKENDS = {
    'github': GithubAuth,
}
| {
"content_hash": "9ab9f2215882b8632100570b7d1119ca",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 133,
"avg_line_length": 32.99029126213592,
"alnum_prop": 0.659211300765156,
"repo_name": "gugu/django-social-auth-1",
"id": "eb92e03cfef7d41db2483bf46af0824446cdef8e",
"size": "3398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "social_auth/backends/contrib/github.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "307601"
}
],
"symlink_target": ""
} |
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
It is possible to perform policy checks on the following user
attributes (obtained through the token): user_id, domain_id or
project_id::
domain_id:<some_value>
Attributes sent along with API calls can be used by the policy engine
(on the right side of the expression), by using the following syntax::
<some_value>:user.id
Contextual attributes of objects identified by their IDs are loaded
from the database. They are also available to the policy engine and
can be checked through the `target` keyword::
<some_value>:target.role.name
All these attributes (related to users, API calls, and context) can be
checked against each other or against constants, be it literals (True,
<a_number>) or strings.
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import ast
import re
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest
from ceilometer.openstack.common import fileutils
from ceilometer.openstack.common.gettextutils import _, _LE
from ceilometer.openstack.common import jsonutils
from ceilometer.openstack.common import log as logging
# Configuration options controlling which policy file is loaded and which
# rule is applied when a requested rule is missing.
policy_opts = [
    cfg.StrOpt('policy_file',
               default='policy.json',
               help=_('The JSON file that defines policies.')),
    cfg.StrOpt('policy_default_rule',
               default='default',
               help=_('Default rule. Enforced when a requested rule is not '
                      'found.')),
]

CONF = cfg.CONF
CONF.register_opts(policy_opts)

LOG = logging.getLogger(__name__)

# Registry of check implementations keyed by rule "kind"; a None key acts
# as a catch-all handler (see _parse_check).
_checks = {}
class PolicyNotAuthorized(Exception):
    """Raised when a policy check refuses the requested action."""

    def __init__(self, rule):
        # The rule name is interpolated into the translated message.
        super(PolicyNotAuthorized, self).__init__(
            _("Policy doesn't allow %s to be performed.") % rule)
class Rules(dict):
    """A store for rules. Handles the default_rule setting directly."""

    @classmethod
    def load_json(cls, data, default_rule=None):
        """Allow loading of JSON rule data.

        :param data: A JSON string mapping rule names to rule expressions.
        :param default_rule: Rule applied when a lookup key is missing.
        """

        # Suck in the JSON data and parse the rules
        rules = dict((k, parse_rule(v)) for k, v in
                     jsonutils.loads(data).items())

        return cls(rules, default_rule)

    def __init__(self, rules=None, default_rule=None):
        """Initialize the Rules store."""

        super(Rules, self).__init__(rules or {})
        self.default_rule = default_rule

    def __missing__(self, key):
        """Implements the default rule handling."""
        # NOTE(review): a dict-valued default_rule is treated as "no
        # default" here — presumably to guard against a Rules instance
        # being passed; confirm upstream intent.
        if isinstance(self.default_rule, dict):
            raise KeyError(key)

        # If the default rule isn't actually defined, do something
        # reasonably intelligent
        if not self.default_rule:
            raise KeyError(key)

        if isinstance(self.default_rule, BaseCheck):
            return self.default_rule

        # We need to check this or we can get infinite recursion
        if self.default_rule not in self:
            raise KeyError(key)

        elif isinstance(self.default_rule, six.string_types):
            return self[self.default_rule]

    def __str__(self):
        """Dumps a string representation of the rules."""

        # Start by building the canonical strings for the rules
        out_rules = {}
        for key, value in self.items():
            # Use empty string for singleton TrueCheck instances
            if isinstance(value, TrueCheck):
                out_rules[key] = ''
            else:
                out_rules[key] = str(value)

        # Dump a pretty-printed JSON representation
        return jsonutils.dumps(out_rules, indent=4)
class Enforcer(object):
    """Responsible for loading and enforcing rules.

    :param policy_file: Custom policy file to use, if none is
                        specified, `CONF.policy_file` will be
                        used.
    :param rules: Default dictionary / Rules to use. It will be
                  considered just in the first instantiation. If
                  `load_rules(True)`, `clear()` or `set_rules(True)`
                  is called this will be overwritten.
    :param default_rule: Default rule to use, CONF.default_rule will
                         be used if none is specified.
    :param use_conf: Whether to load rules from cache or config file.
    """

    def __init__(self, policy_file=None, rules=None,
                 default_rule=None, use_conf=True):
        self.rules = Rules(rules, default_rule)
        self.default_rule = default_rule or CONF.policy_default_rule

        # Resolved lazily by _get_policy_path() on the first load_rules().
        self.policy_path = None
        self.policy_file = policy_file or CONF.policy_file
        self.use_conf = use_conf

    def set_rules(self, rules, overwrite=True, use_conf=False):
        """Create a new Rules object based on the provided dict of rules.

        :param rules: New rules to use. It should be an instance of dict.
        :param overwrite: Whether to overwrite current rules or update them
                          with the new rules.
        :param use_conf: Whether to reload rules from cache or config file.
        """

        if not isinstance(rules, dict):
            raise TypeError(_("Rules must be an instance of dict or Rules, "
                            "got %s instead") % type(rules))
        self.use_conf = use_conf
        if overwrite:
            self.rules = Rules(rules, self.default_rule)
        else:
            self.rules.update(rules)

    def clear(self):
        """Clears Enforcer rules, policy's cache and policy's path."""
        self.set_rules({})
        fileutils.delete_cached_file(self.policy_path)
        self.default_rule = None
        self.policy_path = None

    def load_rules(self, force_reload=False):
        """Loads policy_path's rules.

        Policy file is cached and will be reloaded if modified.

        :param force_reload: Whether to overwrite current rules.
        """

        if force_reload:
            self.use_conf = force_reload

        if self.use_conf:
            if not self.policy_path:
                self.policy_path = self._get_policy_path()

            # read_cached_file only re-reads the file when its mtime changed.
            reloaded, data = fileutils.read_cached_file(
                self.policy_path, force_reload=force_reload)
            if reloaded or not self.rules:
                rules = Rules.load_json(data, self.default_rule)
                self.set_rules(rules)
                LOG.debug("Rules successfully reloaded")

    def _get_policy_path(self):
        """Locate the policy json data file.

        :param policy_file: Custom policy file to locate.

        :returns: The policy path

        :raises: ConfigFilesNotFoundError if the file couldn't
                 be located.
        """
        policy_file = CONF.find_file(self.policy_file)

        if policy_file:
            return policy_file

        raise cfg.ConfigFilesNotFoundError((self.policy_file,))

    def enforce(self, rule, target, creds, do_raise=False,
                exc=None, *args, **kwargs):
        """Checks authorization of a rule against the target and credentials.

        :param rule: A string or BaseCheck instance specifying the rule
                    to evaluate.
        :param target: As much information about the object being operated
                    on as possible, as a dictionary.
        :param creds: As much information about the user performing the
                    action as possible, as a dictionary.
        :param do_raise: Whether to raise an exception or not if check
                        fails.
        :param exc: Class of the exception to raise if the check fails.
                    Any remaining arguments passed to check() (both
                    positional and keyword arguments) will be passed to
                    the exception class. If not specified, PolicyNotAuthorized
                    will be used.

        :return: Returns False if the policy does not allow the action and
                exc is not provided; otherwise, returns a value that
                evaluates to True.  Note: for rules using the "case"
                expression, this True value will be the specified string
                from the expression.
        """

        # Rules may have changed on disk since the last call.
        self.load_rules()

        # Allow the rule to be a Check tree
        if isinstance(rule, BaseCheck):
            result = rule(target, creds, self)
        elif not self.rules:
            # No rules to reference means we're going to fail closed
            result = False
        else:
            try:
                # Evaluate the rule
                result = self.rules[rule](target, creds, self)
            except KeyError:
                LOG.debug("Rule [%s] doesn't exist" % rule)
                # If the rule doesn't exist, fail closed
                result = False

        # If it is False, raise the exception if requested
        if do_raise and not result:
            if exc:
                raise exc(*args, **kwargs)

            raise PolicyNotAuthorized(rule)

        return result
@six.add_metaclass(abc.ABCMeta)
class BaseCheck(object):
    """Abstract base class for Check classes.

    Concrete subclasses must implement both __str__ and __call__.
    """

    @abc.abstractmethod
    def __str__(self):
        """String representation of the Check tree rooted at this node."""

        pass

    @abc.abstractmethod
    def __call__(self, target, cred, enforcer):
        """Triggers if instance of the class is called.

        Performs the check.  Returns False to reject the access or a
        true value (not necessarily True) to accept the access.
        """

        pass
class FalseCheck(BaseCheck):
    """A policy check that always returns False (disallow).

    Written as "!" in the policy language.
    """

    def __str__(self):
        """Return a string representation of this check."""

        return "!"

    def __call__(self, target, cred, enforcer):
        """Check the policy.  Always rejects the access."""

        return False
class TrueCheck(BaseCheck):
    """A policy check that always returns True (allow).

    Written as "@" in the policy language.
    """

    def __str__(self):
        """Return a string representation of this check."""

        return "@"

    def __call__(self, target, cred, enforcer):
        """Check the policy.  Always accepts the access."""

        return True
class Check(BaseCheck):
    """A base class to allow for user-defined policy checks."""

    def __init__(self, kind, match):
        """Initialize a Check from the two halves of a "kind:match" rule.

        :param kind: The kind of the check, i.e., the field before the
                     ':'.
        :param match: The match of the check, i.e., the field after
                      the ':'.
        """
        self.kind = kind
        self.match = match

    def __str__(self):
        """Return a string representation of this check."""
        return "{0}:{1}".format(self.kind, self.match)
class NotCheck(BaseCheck):
    """Implements the "not" logical operator.

    A policy check that inverts the result of another policy check.
    """

    def __init__(self, rule):
        """Initialize the 'not' check.

        :param rule: The rule to negate.  Must be a Check.
        """
        self.rule = rule

    def __str__(self):
        """Return a string representation of this check."""
        return "not {0}".format(self.rule)

    def __call__(self, target, cred, enforcer):
        """Check the policy.

        Returns the logical inverse of the wrapped check.
        """
        inner_result = self.rule(target, cred, enforcer)
        return not inner_result
class AndCheck(BaseCheck):
    """Implements the "and" logical operator.

    A policy check that requires that a list of other checks all return True.
    """

    def __init__(self, rules):
        """Initialize the 'and' check.

        :param rules: A list of rules that will be tested.
        """
        self.rules = rules

    def __str__(self):
        """Return a string representation of this check."""
        return "(%s)" % ' and '.join(str(rule) for rule in self.rules)

    def __call__(self, target, cred, enforcer):
        """Check the policy.

        Requires that all rules accept in order to return True.
        """
        # all() short-circuits on the first failing rule, matching the
        # behaviour of an explicit loop with an early return.
        return all(rule(target, cred, enforcer) for rule in self.rules)

    def add_check(self, rule):
        """Adds rule to be tested.

        Allows addition of another rule to the list of rules that will
        be tested.  Returns the AndCheck object for convenience.
        """
        self.rules.append(rule)
        return self
class OrCheck(BaseCheck):
    """Implements the "or" operator.

    A policy check that requires that at least one of a list of other
    checks returns True.
    """

    def __init__(self, rules):
        """Initialize the 'or' check.

        :param rules: A list of rules that will be tested.
        """
        self.rules = rules

    def __str__(self):
        """Return a string representation of this check."""
        return "(%s)" % ' or '.join(str(rule) for rule in self.rules)

    def __call__(self, target, cred, enforcer):
        """Check the policy.

        Requires that at least one rule accept in order to return True.
        """
        # any() short-circuits on the first accepting rule, matching the
        # behaviour of an explicit loop with an early return.
        return any(rule(target, cred, enforcer) for rule in self.rules)

    def add_check(self, rule):
        """Adds rule to be tested.

        Allows addition of another rule to the list of rules that will
        be tested.  Returns the OrCheck object for convenience.
        """
        self.rules.append(rule)
        return self
def _parse_check(rule):
    """Parse a single base check rule into an appropriate Check object.

    :param rule: A string of the form "kind:match", or one of the special
                 checks "!" (always deny) and "@" (always allow).
    :returns: A BaseCheck instance.  Unparsable rules and rules with no
              registered handler fail closed (FalseCheck).
    """

    # Handle the special checks
    if rule == '!':
        return FalseCheck()
    elif rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        # Pass the rule as a lazy logging argument instead of eagerly
        # interpolating with %, per the logging best practice.
        LOG.exception(_LE("Failed to understand rule %s"), rule)
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        # A handler registered under None acts as a catch-all for any kind.
        return _checks[None](kind, match)
    else:
        LOG.error(_LE("No handler for matches of kind %s"), kind)
        return FalseCheck()
def _parse_list_rule(rule):
    """Translates the old list-of-lists syntax into a tree of Check objects.

    Provided for backwards compatibility.
    """
    # An empty rule always accepts.
    if not rule:
        return TrueCheck()

    # The outer list is joined by "or"; each inner list by "and".
    or_terms = []
    for inner in rule:
        # Elide empty inner lists entirely.
        if not inner:
            continue

        # A bare string behaves like a one-element inner list.
        if isinstance(inner, six.string_types):
            inner = [inner]

        # Parse the inner rules into Check objects.
        and_terms = [_parse_check(check) for check in inner]

        # Collapse single-element conjunctions instead of wrapping them.
        if len(and_terms) == 1:
            or_terms.append(and_terms[0])
        else:
            or_terms.append(AndCheck(and_terms))

    # Collapse degenerate results rather than wrapping in an OrCheck.
    if not or_terms:
        return FalseCheck()
    if len(or_terms) == 1:
        return or_terms[0]
    return OrCheck(or_terms)
# Used for tokenizing the policy language (tokens are whitespace-separated)
_tokenize_re = re.compile(r'\s+')


def _parse_tokenize(rule):
    """Tokenizer for the policy language.

    Most of the single-character tokens are specified in the
    _tokenize_re; however, parentheses need to be handled specially,
    because they can appear inside a check string.  Thankfully, those
    parentheses that appear inside a check string can never occur at
    the very beginning or end ("%(variable)s" is the correct syntax).

    Yields (token_type, token_value) pairs, where token_type is one of
    '(', ')', 'and', 'or', 'not', 'string', or 'check'.
    """
    for tok in _tokenize_re.split(rule):
        # Skip empty tokens
        if not tok or tok.isspace():
            continue

        # Handle leading parens on the token
        clean = tok.lstrip('(')
        for i in range(len(tok) - len(clean)):
            yield '(', '('

        # If it was only parentheses, continue
        if not clean:
            continue
        else:
            tok = clean

        # Handle trailing parens on the token
        clean = tok.rstrip(')')
        trail = len(tok) - len(clean)

        # Yield the cleaned token
        lowered = clean.lower()
        if lowered in ('and', 'or', 'not'):
            # Special tokens
            yield lowered, clean
        elif clean:
            # Not a special token, but not composed solely of ')'
            # NOTE(review): this quote test inspects 'tok', which still
            # carries any trailing ')' characters, so a quoted string
            # immediately followed by ')' is parsed as a check instead
            # of a string — confirm whether that is intended.
            if len(tok) >= 2 and ((tok[0], tok[-1]) in
                                  [('"', '"'), ("'", "'")]):
                # It's a quoted string
                yield 'string', tok[1:-1]
            else:
                yield 'check', _parse_check(clean)

        # Yield the trailing parens
        for i in range(trail):
            yield ')', ')'
class ParseStateMeta(type):
    """Metaclass for the ParseState class.

    Facilitates identifying reduction methods.
    """

    def __new__(mcs, name, bases, cls_dict):
        """Create the class.

        Injects the 'reducers' list, a list of tuples matching token
        sequences to the names of the corresponding reduction methods.
        """
        # Collect every (token_sequence, method_name) pair advertised
        # by methods carrying a 'reducers' attribute (set by @reducer).
        collected = [
            (sequence, attr_name)
            for attr_name, attr in cls_dict.items()
            if hasattr(attr, 'reducers')
            for sequence in attr.reducers
        ]
        cls_dict['reducers'] = collected
        return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
    """Decorator for reduction methods.

    Arguments are a sequence of tokens, in order, which should trigger
    running this reduction method.  May be stacked to register several
    trigger sequences on one method.
    """
    def decorator(func):
        # Lazily create the per-function list of trigger sequences so
        # stacked @reducer decorations accumulate on the same list.
        if not hasattr(func, 'reducers'):
            func.reducers = []
        func.reducers += [list(tokens)]
        return func

    return decorator
@six.add_metaclass(ParseStateMeta)
class ParseState(object):
    """Implement the core of parsing the policy language.

    Uses a greedy reduction algorithm to reduce a sequence of tokens into
    a single terminal, the value of which will be the root of the Check tree.

    Note: error reporting is rather lacking.  The best we can get with
    this parser formulation is an overall "parse failed" error.
    Fortunately, the policy language is simple enough that this
    shouldn't be that big a problem.
    """

    def __init__(self):
        """Initialize the ParseState."""
        # Parallel stacks: token types and their associated values.
        self.tokens = []
        self.values = []

    def reduce(self):
        """Perform a greedy reduction of the token stream.

        If a reducer method matches, it will be executed, then the
        reduce() method will be called recursively to search for any more
        possible reductions.
        """
        # 'reducers' is injected by ParseStateMeta from the @reducer
        # decorations below: (token_sequence, method_name) pairs.  Note
        # this method intentionally shadows the builtin 'reduce' name.
        for reduction, methname in self.reducers:
            # Match when the top of the token stack equals the sequence.
            if (len(self.tokens) >= len(reduction) and
                    self.tokens[-len(reduction):] == reduction):
                # Get the reduction method
                meth = getattr(self, methname)

                # Reduce the token stream
                results = meth(*self.values[-len(reduction):])

                # Update the tokens and values
                self.tokens[-len(reduction):] = [r[0] for r in results]
                self.values[-len(reduction):] = [r[1] for r in results]

                # Check for any more reductions
                return self.reduce()

    def shift(self, tok, value):
        """Adds one more token to the state.  Calls reduce()."""
        self.tokens.append(tok)
        self.values.append(value)

        # Do a greedy reduce...
        self.reduce()

    @property
    def result(self):
        """Obtain the final result of the parse.

        Raises ValueError if the parse failed to reduce to a single result.
        """
        if len(self.values) != 1:
            raise ValueError("Could not parse rule")
        return self.values[0]

    @reducer('(', 'check', ')')
    @reducer('(', 'and_expr', ')')
    @reducer('(', 'or_expr', ')')
    def _wrap_check(self, _p1, check, _p2):
        """Turn parenthesized expressions into a 'check' token."""
        return [('check', check)]

    @reducer('check', 'and', 'check')
    def _make_and_expr(self, check1, _and, check2):
        """Create an 'and_expr'.

        Join two checks by the 'and' operator.
        """
        return [('and_expr', AndCheck([check1, check2]))]

    @reducer('and_expr', 'and', 'check')
    def _extend_and_expr(self, and_expr, _and, check):
        """Extend an 'and_expr' by adding one more check."""
        return [('and_expr', and_expr.add_check(check))]

    @reducer('check', 'or', 'check')
    def _make_or_expr(self, check1, _or, check2):
        """Create an 'or_expr'.

        Join two checks by the 'or' operator.
        """
        return [('or_expr', OrCheck([check1, check2]))]

    @reducer('or_expr', 'or', 'check')
    def _extend_or_expr(self, or_expr, _or, check):
        """Extend an 'or_expr' by adding one more check."""
        return [('or_expr', or_expr.add_check(check))]

    @reducer('not', 'check')
    def _make_not_expr(self, _not, check):
        """Invert the result of another check."""
        return [('check', NotCheck(check))]
def _parse_text_rule(rule):
    """Parses policy to the tree.

    Translates a policy written in the policy language into a tree of
    Check objects.

    :param rule: The policy string.
    :returns: The root Check of the parsed tree; FalseCheck when the
              string cannot be parsed, so malformed policy fails closed.
    """
    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule.  Pass the rule as a lazy argument so
        # the logging machinery performs the interpolation (avoids
        # eager %-formatting).
        LOG.exception(_LE("Failed to understand rule %r"), rule)

        # Fail closed
        return FalseCheck()
def parse_rule(rule):
    """Parses a policy rule into a tree of Check objects."""
    # Strings are written in the policy language; anything else is the
    # legacy list-of-lists syntax.
    if not isinstance(rule, six.string_types):
        return _parse_list_rule(rule)
    return _parse_text_rule(rule)
def register(name, func=None):
    """Register a function or Check class as a policy check.

    :param name: Gives the name of the check type, e.g., 'rule',
                 'role', etc.  If name is None, a default check type
                 will be registered.
    :param func: If given, provides the function or class to register.
                 If not given, returns a function taking one argument
                 to specify the function or class to register,
                 allowing use as a decorator.
    """
    def decorator(check_impl):
        # Record the implementation under the requested name and hand
        # it back unchanged, per the decorator protocol.
        _checks[name] = check_impl
        return check_impl

    # Direct-call form registers immediately; otherwise act as a
    # decorator factory.
    return decorator(func) if func else decorator
@register("rule")
class RuleCheck(Check):
    # Delegates to another named rule in the enforcer's rule set, e.g.
    # "rule:admin_required".

    def __call__(self, target, creds, enforcer):
        """Recursively checks credentials based on the defined rules."""
        try:
            return enforcer.rules[self.match](target, creds, enforcer)
        except KeyError:
            # We don't have any matching rule; fail closed
            # NOTE(review): this try also swallows a KeyError raised
            # *inside* the referenced rule's own evaluation, not only a
            # missing rule name — confirm that masking is intended
            # before narrowing the try block.
            return False
@register("role")
class RoleCheck(Check):
    # Accepts when the credentials carry the named role,
    # case-insensitively.

    def __call__(self, target, creds, enforcer):
        """Check that there is a matching role in the cred dict."""
        wanted = self.match.lower()
        return any(wanted == role.lower() for role in creds['roles'])
@register('http')
class HttpCheck(Check):
    def __call__(self, target, creds, enforcer):
        """Check http: rules by calling to a remote server.

        This example implementation simply verifies that the response
        is exactly 'True'.
        """
        # The match portion holds the URL minus the scheme, with
        # %(...)s substitutions filled in from the target dict.
        url = ('http:' + self.match) % target
        # POST the target and credentials as JSON-encoded form fields.
        data = {'target': jsonutils.dumps(target),
                'credentials': jsonutils.dumps(creds)}
        post_data = urlparse.urlencode(data)
        f = urlrequest.urlopen(url, post_data)
        # NOTE(review): compares against the native str "True"; under
        # Python 3 urlopen().read() returns bytes, so this would always
        # be False there — confirm the intended runtime is Python 2.
        return f.read() == "True"
@register(None)
class GenericCheck(Check):
    # Default handler: compares a literal or a credentials entry (the
    # "kind") against a %-rendered value from the target (the "match").

    def __call__(self, target, creds, enforcer):
        """Check an individual match.

        Matches look like:

            tenant:%(tenant_id)s
            role:compute:admin
            True:%(user.enabled)s
            'Member':%(role.name)s

        """
        # TODO(termie): do dict inspection via dot syntax
        try:
            rendered = self.match % target
        except KeyError:
            # While doing GenericCheck, if a substitution key is not
            # present in the target, return False.
            return False

        try:
            # The left side may be a Python literal (True, 42, 'Member').
            left = ast.literal_eval(self.kind)
        except ValueError:
            # Otherwise it names an entry in the credentials.
            try:
                left = creds[self.kind]
            except KeyError:
                return False
        # Compare as text, matching the %-rendered right-hand side.
        return rendered == six.text_type(left)
| {
"content_hash": "c8d119c83938c788b9be73de216f0b11",
"timestamp": "",
"source": "github",
"line_count": 880,
"max_line_length": 78,
"avg_line_length": 30.2125,
"alnum_prop": 0.6003686011960733,
"repo_name": "luogangyi/Ceilometer-oVirt",
"id": "c2b5189c8ee5924fd838c23a62f8788b3d581099",
"size": "27228",
"binary": false,
"copies": "5",
"ref": "refs/heads/stable/juno",
"path": "ceilometer/openstack/common/policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5438675"
},
{
"name": "Shell",
"bytes": "1304"
}
],
"symlink_target": ""
} |
'''
FanFilm Add-on
Copyright (C) 2015 mrknow
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse,datetime, urllib,zipfile
import os,base64,StringIO,time
from resources.lib.libraries import client
from resources.lib.libraries import cleantitle
from resources.lib.libraries import workers
from resources.lib.libraries import control
from resources.lib.resolvers import cloudzilla
from resources.lib.resolvers import openload
from resources.lib.resolvers import uptobox
from resources.lib.resolvers import zstream
from resources.lib.resolvers import videomega
try:
from sqlite3 import dbapi2 as database
except:
from pysqlite2 import dbapi2 as database
from resources.lib import resolvers
class source:
    """Scraper for the 'Serieswatch' provider.

    Looks up movie/episode links in a SQLite database that is downloaded
    (as a zip archive) from a base64-obfuscated GitHub URL and cached in
    the add-on data path for up to a week.
    """

    def __init__(self):
        # Base64-encoded URL of the zipped SQLite database; decoded on
        # demand in get_sources().
        self.data_link = 'aHR0cHM6Ly9yYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tL21ya25vdy9kYXRhYmFzZS9tYXN0ZXIvc2VyaWVzd2F0Y2guemlw'
        self.sources = []

    def get_movie(self, imdb, title, year):
        # NOTE(review): the unconditional 'return None' below disables
        # movie lookups for this provider; everything after it is
        # unreachable dead code kept for reference.
        return None
        try:
            # Movie lookup key is "<title> <year>".
            url = '%s %s' % (title, year)
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def get_show(self, imdb, tvdb, tvshowtitle, year):
        # Show lookup key is the HTML-unescaped, UTF-8 encoded title.
        try:
            url = tvshowtitle
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        # Episode lookup key appends ' SxxExx' to the show key.
        try:
            if url == None: return
            url = '%s S%02dE%02d' % (url, int(season), int(episode))
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def get_sources(self, url, hosthdDict, hostDict, locDict):
        # Queries the cached DB for rows matching *url* and returns a
        # list of {'source', 'quality', 'provider', 'url'} dicts.
        try:
            sources = []

            if url == None: return sources

            data = os.path.join(control.dataPath, 'serieswatch.db')

            # Re-download when the cached DB is older than 7 days; if
            # getmtime raises (file missing), 'download' stays True.
            download = True

            try: download = abs(datetime.datetime.fromtimestamp(os.path.getmtime(data)) - (datetime.datetime.now())) > datetime.timedelta(days=7)
            except: pass

            if download == True:
                # Fetch and unpack the zipped DB into the data path.
                result = client.request(base64.b64decode(self.data_link))
                print(len(result))
                control.log(">>>>>>>>>>>>>>> ONEC Downloading" )
                zip = zipfile.ZipFile(StringIO.StringIO(result))
                zip.extractall(control.dataPath)
                zip.close()

            dbcon = database.connect(data)
            dbcur = dbcon.cursor()

            # Episode keys end in 'SxxEyy'; anything else is a movie key.
            content = re.compile('(.+?)\sS\d*E\d*$').findall(url)

            if len(content) == 0:
                title, year = re.compile('(.+?) (\d{4})$').findall(url)[0]
                title = cleantitle.movie(title)
                # NOTE(review): SQL is built by string concatenation; a
                # title containing a quote breaks the query and this is
                # an injection vector — prefer parameterized queries.
                dbcur.execute("SELECT * FROM movies WHERE title like '%"+title+"%' and title like '%"+year+"%'" )
                result = dbcur.fetchall()
            else:
                tvshowtitle, season, episode = re.compile('(.+?)\sS(\d*)E(\d*)$').findall(url)[0]
                tvshowtitle = cleantitle.movie(tvshowtitle)
                myses = 's%se%s' % (season,episode)
                control.log(">>>>>>>>>>>>>>> ONEC %s season |%s|" % (tvshowtitle,myses))
                # Same string-concatenation caveat as above.
                mysql = "SELECT * FROM movies WHERE title like '%"+tvshowtitle+"%' and title like '%"+myses+"%'"
                control.log(">>>>>>>>>>>>>>> ONEC SQL |%s|" % (mysql))
                dbcur.execute(mysql)
                result = dbcur.fetchall()

            for myurl in result:
                # NOTE(review): rebinds 'result' (the list being
                # iterated) to the row's title; iteration continues over
                # the original list object, but the shadowing is fragile.
                result = myurl[1]
                # Derive a quality label from markers in the row title.
                if any(word in result.lower() for word in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'ts']):
                    quality = 'CAM'
                elif '1080' in result:
                    quality = '1080p'
                elif '720p' in result:
                    quality = 'HD'
                else:
                    quality = 'SD'
                links = myurl[0]
                #links = [i for i in links if i.startswith('http')]
                # Skip rows whose link text carries unwanted markers.
                if not any(word in links.lower() for word in ['mp3', 'farsi', 'ganool']):
                    #print("Mamy", links)
                    sources.append({'source': 'Serieswatch', 'quality': quality, 'provider': 'Serieswatch', 'url': links})

            return sources
        except:
            return

    def resolve(self, url):
        # Resolve a provider link into a playable URL via the shared
        # resolvers dispatcher.
        try:
            url = resolvers.request(url)
            return url
        except:
            return
| {
"content_hash": "51ed870f1911dc940c8077dd1b2ff70e",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 145,
"avg_line_length": 34.29801324503311,
"alnum_prop": 0.5663255454720989,
"repo_name": "rysson/filmkodi",
"id": "5fc3bb7a43d2b6c2029d08d8b6fd5da13b8d57d9",
"size": "5204",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plugin.video.fanfilm/resources/lib/sources/disabled/serieswatch_mv_tv.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7510"
},
{
"name": "Python",
"bytes": "8058464"
},
{
"name": "Shell",
"bytes": "18531"
}
],
"symlink_target": ""
} |
"""Tests for lite.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.contrib.lite.python import lite
from tensorflow.contrib.lite.python import lite_constants
from tensorflow.contrib.lite.python.interpreter import Interpreter
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.variables import global_variables_initializer as _global_variables_initializer
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training.training_util import write_graph
class FromConstructor(test_util.TensorFlowTestCase):
  """Tests argument validation in the TFLiteConverter constructor."""

  # Tests invalid constructors using a dummy value for the GraphDef.
  def testInvalidConstructor(self):
    """Both fallbacks must be given when the tensor lists are None."""
    message = ('If input_tensors and output_tensors are None, both '
               'input_arrays_with_shape and output_arrays must be defined.')

    # `output_arrays` is not defined.
    with self.assertRaises(ValueError) as error:
      lite.TFLiteConverter(
          None, None, [], input_arrays_with_shape=[('input', [3, 9])])
    self.assertEqual(message, str(error.exception))

    # `input_arrays_with_shape` is not defined.
    with self.assertRaises(ValueError) as error:
      lite.TFLiteConverter(None, [], None, output_arrays=['output'])
    self.assertEqual(message, str(error.exception))

  # Tests valid constructors using a dummy value for the GraphDef.
  def testValidConstructor(self):
    """Array/shape fallbacks yield a converter without valid tensors."""
    converter = lite.TFLiteConverter(
        None,
        None,
        None,
        input_arrays_with_shape=[('input', [3, 9])],
        output_arrays=['output'])
    self.assertFalse(converter._has_valid_tensors())
    self.assertEqual(converter.get_input_arrays(), ['input'])

    # Batch size cannot be set when tensors are supplied via arrays.
    with self.assertRaises(ValueError) as error:
      converter._set_batch_size(1)
    self.assertEqual(
        'The batch size cannot be set for this model. Please use '
        'input_shapes parameter.', str(error.exception))

    # Supplying tensor lists makes the tensors valid.
    converter = lite.TFLiteConverter(None, ['input_tensor'], ['output_tensor'])
    self.assertTrue(converter._has_valid_tensors())
class FromSessionTest(test_util.TensorFlowTestCase):
  """End-to-end tests for TFLiteConverter.from_session.

  Each test builds a small TF graph, converts it, and (where possible)
  inspects the converted model through the TFLite Interpreter.
  """

  def testFloat(self):
    """Converts a float add model and checks tensor details."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

  def testQuantization(self):
    """Quantized conversion with per-input (mean, std_dev) stats."""
    in_tensor_1 = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
    in_tensor_2 = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
    out_tensor = array_ops.fake_quant_with_min_max_args(
        in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(
        sess, [in_tensor_1, in_tensor_2], [out_tensor])
    converter.inference_type = lite_constants.QUANTIZED_UINT8
    converter.quantized_input_stats = {
        'inputA': (0., 1.),
        'inputB': (0., 1.)
    }  # mean, std_dev
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('inputA', input_details[0]['name'])
    self.assertEqual(np.uint8, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((1., 0.),
                     input_details[0]['quantization'])  # scale, zero_point

    self.assertEqual('inputB', input_details[1]['name'])
    self.assertEqual(np.uint8, input_details[1]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
    self.assertEqual((1., 0.),
                     input_details[1]['quantization'])  # scale, zero_point

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('output', output_details[0]['name'])
    self.assertEqual(np.uint8, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertTrue(output_details[0]['quantization'][0] > 0)  # scale

  def testQuantizationInvalid(self):
    """Missing stats for one input must raise a ValueError."""
    in_tensor_1 = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
    in_tensor_2 = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
    out_tensor = array_ops.fake_quant_with_min_max_args(
        in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(
        sess, [in_tensor_1, in_tensor_2], [out_tensor])
    converter.inference_type = lite_constants.QUANTIZED_UINT8
    converter.quantized_input_stats = {'inputA': (0., 1.)}  # mean, std_dev
    with self.assertRaises(ValueError) as error:
      converter.convert()
    self.assertEqual(
        'Quantization input stats are not available for input tensors '
        '\'inputB\'.', str(error.exception))

  def testSizeNoneInvalid(self):
    """A fully unknown input shape must be rejected."""
    in_tensor = array_ops.placeholder(dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Test invalid shape. None after 1st dimension.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    with self.assertRaises(ValueError) as error:
      converter.convert()
    self.assertEqual('Provide an input shape for input array \'Placeholder\'.',
                     str(error.exception))

  def testBatchSizeInvalid(self):
    """None is only allowed in the first (batch) dimension."""
    in_tensor = array_ops.placeholder(
        shape=[1, None, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Test invalid shape. None after 1st dimension.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    with self.assertRaises(ValueError) as error:
      converter.convert()
    self.assertEqual(
        'None is only supported in the 1st dimension. Tensor '
        '\'Placeholder\' has invalid shape \'[1, None, 16, 3]\'.',
        str(error.exception))

  def testBatchSizeValid(self):
    """A None batch dimension converts with a default batch size of 1."""
    in_tensor = array_ops.placeholder(
        shape=[None, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

  def testFreezeGraph(self):
    """A graph with variables is frozen during conversion."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    var = variable_scope.get_variable(
        'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + var
    sess = session.Session()
    sess.run(_global_variables_initializer())

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

  # TODO(nupurgarg): Verify value of contents in GraphViz.
  def testGraphviz(self):
    """GRAPHVIZ_DOT output format produces non-empty output."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    converter.output_format = lite_constants.GRAPHVIZ_DOT
    graphviz_output = converter.convert()
    self.assertTrue(graphviz_output)

  # TODO(nupurgarg): Verify value of contents in GraphViz.
  def testDumpGraphviz(self):
    """dump_graphviz_dir writes files; the video flag writes more."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    graphviz_dir = self.get_temp_dir()
    converter.dump_graphviz_dir = graphviz_dir
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure interpreter is able to allocate and check graphviz data.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    num_items_graphviz = len(os.listdir(graphviz_dir))
    self.assertTrue(num_items_graphviz)

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    graphviz_dir = self.get_temp_dir()
    converter.dump_graphviz_dir = graphviz_dir
    converter.dump_graphviz_video = True
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure graphviz folder has more data after using video flag.
    num_items_graphviz_video = len(os.listdir(graphviz_dir))
    self.assertTrue(num_items_graphviz_video > num_items_graphviz)

  def testInferenceInputType(self):
    """Quantized input with float inference: uint8 in, float32 out."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    converter.inference_input_type = lite_constants.QUANTIZED_UINT8
    converter.quantized_input_stats = {'Placeholder': (0., 1.)}  # mean, std_dev
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.uint8, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((1., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())

  def testDefaultRangesStats(self):
    """default_ranges_stats supplies (min, max) for unquantized ops."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    converter.inference_type = lite_constants.QUANTIZED_UINT8
    converter.quantized_input_stats = {'Placeholder': (0., 1.)}  # mean, std_dev
    converter.default_ranges_stats = (0, 6)  # min, max
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.uint8, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((1., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.uint8, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertTrue(output_details[0]['quantization'][0] > 0)  # scale

  def testPostTrainingQuantize(self):
    """Weight quantization produces a smaller model than float."""
    np.random.seed(0)
    # We need the tensor to have more than 1024 elements for quantize_weights
    # to kick in. Thus, the [33, 33] shape.
    in_tensor_1 = array_ops.placeholder(
        shape=[33, 33], dtype=dtypes.float32, name='inputA')
    in_tensor_2 = constant_op.constant(
        np.random.uniform(low=-10., high=10., size=(33, 33)),
        shape=[33, 33],
        dtype=dtypes.float32,
        name='inputB')
    out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
    sess = session.Session()

    # Convert float model.
    float_converter = lite.TFLiteConverter.from_session(sess, [in_tensor_1],
                                                        [out_tensor])
    float_tflite = float_converter.convert()
    self.assertTrue(float_tflite)

    # Convert quantized weights model.
    quantized_converter = lite.TFLiteConverter.from_session(
        sess, [in_tensor_1], [out_tensor])
    quantized_converter.post_training_quantize = True
    quantized_tflite = quantized_converter.convert()
    self.assertTrue(quantized_tflite)

    # Ensure that the quantized weights tflite model is smaller.
    self.assertTrue(len(quantized_tflite) < len(float_tflite))

  def testFlexMode(self):
    """TOCO_FLEX_ALL emits TF ops the plain interpreter rejects."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    converter.converter_mode = lite.ConverterMode.TOCO_FLEX_ALL
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensures the model contains TensorFlow ops.
    # TODO(nupurgarg): Check values once there is a Python delegate interface.
    interpreter = Interpreter(model_content=tflite_model)
    with self.assertRaises(RuntimeError) as error:
      interpreter.allocate_tensors()
    self.assertIn(
        'Regular TensorFlow ops are not supported by this interpreter. Make '
        'sure you invoke the Flex delegate before inference.',
        str(error.exception))

  def testFloatTocoConverter(self):
    """Tests deprecated test TocoConverter."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure the interpreter is able to load.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
class FromFrozenGraphFile(test_util.TensorFlowTestCase):
def testFloat(self):
    """Converts a float model from a serialized frozen GraphDef file."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + in_tensor
    sess = session.Session()

    # Write graph to file.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
                                                       ['Placeholder'], ['add'])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFloatWithShapesArray(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(
graph_def_file, ['Placeholder'], ['add'],
input_shapes={'Placeholder': [1, 16, 16, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
def testFreezeGraph(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + var
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Ensure the graph with variables cannot be converted.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual('Please freeze the graph using freeze_graph.py.',
str(error.exception))
def testPbtxt(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
write_graph(sess.graph_def, '', graph_def_file, True)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testInvalidFileNotFound(self):
with self.assertRaises(IOError) as error:
lite.TFLiteConverter.from_frozen_graph('invalid_file', ['Placeholder'],
['add'])
self.assertEqual('File \'invalid_file\' does not exist.',
str(error.exception))
def testInvalidFileBadData(self):
graph_def_file = os.path.join(self.get_temp_dir(), 'invalid_file')
with gfile.Open(graph_def_file, 'wb') as temp_file:
temp_file.write('bad data')
temp_file.flush()
# Attempts to convert the invalid model.
with self.assertRaises(IOError) as error:
lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual(
'Unable to parse input file \'{}\'.'.format(graph_def_file),
str(error.exception))
# TODO(nupurgarg): Test model loading in open source.
def _initObjectDetectionArgs(self):
# Initializes the arguments required for the object detection model.
self._graph_def_file = resource_loader.get_path_to_datafile(
'testdata/tflite_graph.pb')
self._input_arrays = ['normalized_input_image_tensor']
self._output_arrays = [
'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1',
'TFLite_Detection_PostProcess:2', 'TFLite_Detection_PostProcess:3'
]
self._input_shapes = {'normalized_input_image_tensor': [1, 300, 300, 3]}
def testTFLiteGraphDef(self):
# Tests the object detection model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
converter = lite.TFLiteConverter.from_frozen_graph(
self._graph_def_file, self._input_arrays, self._output_arrays,
self._input_shapes)
converter.allow_custom_ops = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('normalized_input_image_tensor', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 300, 300, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(4, len(output_details))
self.assertEqual('TFLite_Detection_PostProcess', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 10, 4] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
self.assertEqual('TFLite_Detection_PostProcess:1',
output_details[1]['name'])
self.assertTrue(([1, 10] == output_details[1]['shape']).all())
self.assertEqual('TFLite_Detection_PostProcess:2',
output_details[2]['name'])
self.assertTrue(([1, 10] == output_details[2]['shape']).all())
self.assertEqual('TFLite_Detection_PostProcess:3',
output_details[3]['name'])
self.assertTrue(([1] == output_details[3]['shape']).all())
def testTFLiteGraphDefMissingShape(self):
# Tests invalid cases for the model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
# Missing `input_shapes`.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(
self._graph_def_file, self._input_arrays, self._output_arrays)
self.assertEqual('input_shapes must be defined for this model.',
str(error.exception))
def testTFLiteGraphDefInvalidShape(self):
# Tests invalid cases for the model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
# `input_shapes` does not contain the names in `input_arrays`.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(
self._graph_def_file,
self._input_arrays,
self._output_arrays,
input_shapes={'invalid-value': [1, 19]})
self.assertEqual(
'input_shapes must contain a value for each item in input_array.',
str(error.exception))
def testFloatTocoConverter(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the model is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
class FromSavedModelTest(test_util.TensorFlowTestCase):
  """Tests TFLiteConverter.from_saved_model."""

  def _createSavedModel(self, shape):
    """Create a simple SavedModel.

    Note: tensor names ('inputA'/'inputB') deliberately sort opposite to the
    dict insertion order so the ordering tests below are meaningful.
    """
    saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
    with session.Session() as sess:
      in_tensor_1 = array_ops.placeholder(
          shape=shape, dtype=dtypes.float32, name='inputB')
      in_tensor_2 = array_ops.placeholder(
          shape=shape, dtype=dtypes.float32, name='inputA')
      out_tensor = in_tensor_1 + in_tensor_2
      inputs = {'x': in_tensor_1, 'y': in_tensor_2}
      outputs = {'z': out_tensor}
      saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    return saved_model_dir

  def testSimpleModel(self):
    """Test a SavedModel."""
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    # Inputs come back sorted by tensor name: inputA before inputB.
    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('inputA', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertEqual('inputB', input_details[1]['name'])
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

  def testNoneBatchSize(self):
    """Test a SavedModel, with None in input tensor's shape."""
    saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])

    converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    # The None batch dimension is replaced with 1 in the converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('inputA', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertEqual('inputB', input_details[1]['name'])
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

  def testOrderInputArrays(self):
    """Test a SavedModel ordering of input arrays."""
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    # The order passed in input_arrays does not affect the resulting order;
    # inputs are still reported sorted by name.
    converter = lite.TFLiteConverter.from_saved_model(
        saved_model_dir, input_arrays=['inputB', 'inputA'])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('inputA', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertEqual('inputB', input_details[1]['name'])
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

  def testSubsetInputArrays(self):
    """Test a SavedModel with a subset of the input array names of the model."""
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    # Check case where input shape is given.
    converter = lite.TFLiteConverter.from_saved_model(
        saved_model_dir,
        input_arrays=['inputA'],
        input_shapes={'inputA': [1, 16, 16, 3]})
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check case where input shape is None.
    converter = lite.TFLiteConverter.from_saved_model(
        saved_model_dir, input_arrays=['inputA'], input_shapes={'inputA': None})
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

  def testSimpleModelTocoConverter(self):
    """Test a SavedModel with deprecated TocoConverter."""
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    # Convert model and ensure model is not None.
    converter = lite.TocoConverter.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure the model is able to load.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
class FromKerasFile(test_util.TensorFlowTestCase):
  """Tests TFLiteConverter.from_keras_model_file with saved .h5 models."""

  def setUp(self):
    # Start each test with a clean Keras session/graph so auto-generated
    # tensor names ('dense_input', 'dense_1/BiasAdd', ...) are deterministic.
    keras.backend.clear_session()

  def _getSequentialModel(self):
    """Builds, trains and saves a small Sequential model.

    Returns:
      Path to the saved HDF5 model file. The caller is responsible for
      removing the file.
    """
    with session.Session().as_default():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.RMSprop(),
          metrics=[keras.metrics.categorical_accuracy],
          sample_weight_mode='temporal')
      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)
      model.predict(x)

      # Fix: mkstemp() must run *before* the try block. Previously it was
      # inside the try, so a failure in mkstemp() left `fd` unbound and the
      # finally clause masked the real error with a NameError. This now
      # matches the pattern used in testFunctionalModel.
      fd, keras_file = tempfile.mkstemp('.h5')
      try:
        keras.models.save_model(model, keras_file)
      finally:
        os.close(fd)
    return keras_file

  def testSequentialModel(self):
    """Test a Sequential tf.keras model with default inputs."""
    keras_file = self._getSequentialModel()

    converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check tensor details of converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('dense_input', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    # Check inference of converted model.
    input_data = np.array([[1, 2, 3]], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    tflite_result = interpreter.get_tensor(output_details[0]['index'])

    keras_model = keras.models.load_model(keras_file)
    keras_result = keras_model.predict(input_data)

    np.testing.assert_almost_equal(tflite_result, keras_result, 5)
    os.remove(keras_file)

  def testSequentialModelInputArray(self):
    """Test a Sequential tf.keras model testing input arrays argument."""
    keras_file = self._getSequentialModel()

    # Invalid input array raises error.
    with self.assertRaises(ValueError) as error:
      lite.TFLiteConverter.from_keras_model_file(
          keras_file, input_arrays=['invalid-input'])
    self.assertEqual("Invalid tensors 'invalid-input' were found.",
                     str(error.exception))

    # Valid input array.
    converter = lite.TFLiteConverter.from_keras_model_file(
        keras_file, input_arrays=['dense_input'])
    tflite_model = converter.convert()
    os.remove(keras_file)
    self.assertTrue(tflite_model)

  def testSequentialModelInputShape(self):
    """Test a Sequential tf.keras model testing input shapes argument."""
    keras_file = self._getSequentialModel()

    # Passing in shape of invalid input array has no impact as long as all
    # input arrays have a shape.
    converter = lite.TFLiteConverter.from_keras_model_file(
        keras_file, input_shapes={'invalid-input': [2, 3]})
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Passing in shape of valid input array overrides the model's batch size.
    converter = lite.TFLiteConverter.from_keras_model_file(
        keras_file, input_shapes={'dense_input': [2, 3]})
    tflite_model = converter.convert()
    os.remove(keras_file)
    self.assertTrue(tflite_model)

    # Check input shape from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('dense_input', input_details[0]['name'])
    self.assertTrue(([2, 3] == input_details[0]['shape']).all())

  def testSequentialModelOutputArray(self):
    """Test a Sequential tf.keras model testing output arrays argument."""
    keras_file = self._getSequentialModel()

    # Invalid output array raises error.
    with self.assertRaises(ValueError) as error:
      lite.TFLiteConverter.from_keras_model_file(
          keras_file, output_arrays=['invalid-output'])
    self.assertEqual("Invalid tensors 'invalid-output' were found.",
                     str(error.exception))

    # Valid output array.
    converter = lite.TFLiteConverter.from_keras_model_file(
        keras_file, output_arrays=['time_distributed/Reshape_1'])
    tflite_model = converter.convert()
    os.remove(keras_file)
    self.assertTrue(tflite_model)

  def testFunctionalModel(self):
    """Test a Functional tf.keras model with default inputs."""
    with session.Session().as_default():
      inputs = keras.layers.Input(shape=(3,), name='input')
      x = keras.layers.Dense(2)(inputs)
      output = keras.layers.Dense(3)(x)

      model = keras.models.Model(inputs, output)
      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.RMSprop(),
          metrics=[keras.metrics.categorical_accuracy])
      x = np.random.random((1, 3))
      y = np.random.random((1, 3))
      model.train_on_batch(x, y)

      model.predict(x)
      fd, keras_file = tempfile.mkstemp('.h5')
      try:
        keras.models.save_model(model, keras_file)
      finally:
        os.close(fd)

    # Convert to TFLite model.
    converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check tensor details of converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('input', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    # Check inference of converted model.
    input_data = np.array([[1, 2, 3]], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    tflite_result = interpreter.get_tensor(output_details[0]['index'])

    keras_model = keras.models.load_model(keras_file)
    keras_result = keras_model.predict(input_data)

    np.testing.assert_almost_equal(tflite_result, keras_result, 5)
    os.remove(keras_file)

  def testFunctionalModelMultipleInputs(self):
    """Test a Functional tf.keras model with multiple inputs and outputs."""
    with session.Session().as_default():
      a = keras.layers.Input(shape=(3,), name='input_a')
      b = keras.layers.Input(shape=(3,), name='input_b')
      dense = keras.layers.Dense(4, name='dense')
      c = dense(a)
      d = dense(b)
      e = keras.layers.Dropout(0.5, name='dropout')(c)

      model = keras.models.Model([a, b], [d, e])
      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.RMSprop(),
          metrics=[keras.metrics.mae],
          loss_weights=[1., 0.5])

      input_a_np = np.random.random((10, 3))
      input_b_np = np.random.random((10, 3))
      output_d_np = np.random.random((10, 4))
      output_e_np = np.random.random((10, 4))
      model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])

      model.predict([input_a_np, input_b_np], batch_size=5)
      fd, keras_file = tempfile.mkstemp('.h5')
      try:
        keras.models.save_model(model, keras_file)
      finally:
        os.close(fd)

    # Convert to TFLite model.
    converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    os.remove(keras_file)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('input_a', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertEqual('input_b', input_details[1]['name'])
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertTrue(([1, 3] == input_details[1]['shape']).all())
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(2, len(output_details))
    self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 4] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    self.assertEqual('dropout/Identity', output_details[1]['name'])
    self.assertEqual(np.float32, output_details[1]['dtype'])
    self.assertTrue(([1, 4] == output_details[1]['shape']).all())
    self.assertEqual((0., 0.), output_details[1]['quantization'])

  def testFunctionalSequentialModel(self):
    """Test a Functional tf.keras model containing a Sequential model."""
    with session.Session().as_default():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
      model = keras.models.Model(model.input, model.output)

      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.RMSprop(),
          metrics=[keras.metrics.categorical_accuracy],
          sample_weight_mode='temporal')
      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)
      # Fix: a second, redundant model.predict(x) call was removed here --
      # predict() has no persistent side effects beyond building the predict
      # function, which the first call already does.
      model.predict(x)

      fd, keras_file = tempfile.mkstemp('.h5')
      try:
        keras.models.save_model(model, keras_file)
      finally:
        os.close(fd)

    # Convert to TFLite model.
    converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check tensor details of converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('dense_input', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    # Check inference of converted model.
    input_data = np.array([[1, 2, 3]], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    tflite_result = interpreter.get_tensor(output_details[0]['index'])

    keras_model = keras.models.load_model(keras_file)
    keras_result = keras_model.predict(input_data)

    np.testing.assert_almost_equal(tflite_result, keras_result, 5)
    os.remove(keras_file)

  def testSequentialModelTocoConverter(self):
    """Test a Sequential tf.keras model with deprecated TocoConverter."""
    keras_file = self._getSequentialModel()

    converter = lite.TocoConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure the model is able to load.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
# Run the converter test suite when executed directly.
if __name__ == '__main__':
  test.main()
| {
"content_hash": "8ead12911f95696b9dfaef0957f3cbe2",
"timestamp": "",
"source": "github",
"line_count": 1150,
"max_line_length": 105,
"avg_line_length": 41.133043478260866,
"alnum_prop": 0.6626852419508277,
"repo_name": "kobejean/tensorflow",
"id": "d243a494f6e57d92864a988cd8ffa3cd87587db6",
"size": "47992",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/lite/python/lite_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2867"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "341894"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49273038"
},
{
"name": "CMake",
"bytes": "195712"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1253646"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "836009"
},
{
"name": "Jupyter Notebook",
"bytes": "2604741"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52734"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41122917"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "466896"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
""" Enter and verify config mode with netmiko """
from netmiko import ConnectHandler
from test_devices import *
# Device dicts (host/credentials) imported from test_devices via star import.
devices = [pynet1, pynet2, juniper_srx]

# SECURITY NOTE(review): password is hard-coded in source control; consider
# reading it from an environment variable or a getpass prompt instead.
password = '88newclass'


def main():
    """Enter config mode on each device and report the verification result."""
    for device in devices:
        # NOTE: mutates the shared device dict imported from test_devices.
        device['password'] = password
        net_connect = ConnectHandler(**device)
        net_connect.config_mode()
        print "\n==========================\n\nChecking " + device['ip'] + " is in config mode..."
        print "\nConfig mode: {}".format(net_connect.check_config_mode())
        print "\nCurrent prompt: {}".format(net_connect.find_prompt())
        print "\n==========================\n"


if __name__ == "__main__":
    main()
| {
"content_hash": "d6ed4a2d0850dbc8c58d0701f98f4957",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 98,
"avg_line_length": 33.15,
"alnum_prop": 0.5761689291101055,
"repo_name": "astir-py/pynet-class",
"id": "f61a66934cd7d07d9245c2ce67cf93a4d62ff93c",
"size": "685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "class4/ex5netmiko_cfgmode.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10164"
},
{
"name": "Roff",
"bytes": "1678"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from .base import *
import django_heroku
# Disable debug mode
DEBUG = False

# --- HTTPS / host configuration ---
SECURE_SSL_REDIRECT = True
PREPEND_WWW = False # TODO: Set to true for production

ALLOWED_HOSTS = [get_env_variable('ALLOWED_HOSTS')]

# --- AWS S3 storage (django-storages / boto) ---
AWS_ACCESS_KEY_ID = get_env_variable('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = get_env_variable('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = get_env_variable("AWS_STORAGE_BUCKET_NAME")
S3_USE_SIGV4 = True
AWS_S3_HOST = get_env_variable('AWS_S3_HOST')

DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'

STATIC_URL = get_env_variable('STATIC_URL')
# NOTE(review): MEDIA_URL is read from the STATIC_URL env var, so media and
# static assets share a URL prefix -- confirm this is intentional.
MEDIA_URL = get_env_variable('STATIC_URL')
AWS_S3_CUSTOM_DOMAIN = get_env_variable('AWS_S3_CUSTOM_DOMAIN')

# Compress static files offline
# http://django-compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_OFFLINE
COMPRESS_OFFLINE = True
COMPRESS_STORAGE = STATICFILES_STORAGE
COMPRESS_URL = STATIC_URL

# --- Sentry error reporting (raven) ---
RAVEN_CONFIG = {
    'dsn': get_env_variable('RAVEN_DSN'),
}

# Extends the INSTALLED_APPS inherited from base settings.
INSTALLED_APPS = INSTALLED_APPS + (
    'raven.contrib.django.raven_compat',
    # 'interactives_content',
    'caching',
)

# --- Wagtail front-end cache invalidation via Cloudflare ---
WAGTAILFRONTENDCACHE = {
    'cloudflare': {
        'BACKEND': 'wagtail.contrib.frontend_cache.backends.CloudflareBackend',
        'EMAIL': get_env_variable('CLOUDFLARE_EMAIL'),
        'TOKEN': get_env_variable('CLOUDFLARE_TOKEN'),
        'ZONEID': get_env_variable('CLOUDFLARE_ZONE_ID'),
    },
}

FAVICON_PATH = STATIC_URL + 'img/favicon.png'

# --- Redis cache backend ---
CACHES = {
    'default': {
        'BACKEND': 'django_redis.cache.RedisCache',
        'LOCATION': get_env_variable('REDIS_URL'),
        'OPTIONS': {
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
        }
    }
}

IS_PRODUCTION = True

# --- Logging: warnings and above go to Sentry; internal logger noise stays
# on the console. ---
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'root': {
        'level': 'WARNING',
        'handlers': ['sentry'],
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        'sentry': {
            'level': 'WARNING',
            'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        }
    },
    'loggers': {
        'django.template': {
            'level': 'WARNING',
            'handlers': ['sentry', 'console'],
            'propagate': False,
        },
        'django.db.backends': {
            'level': 'ERROR',
            'handlers': ['console'],
            'propagate': False,
        },
        'raven': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'sentry.errors': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
    },
}

ADMIN_ENABLED = False

# Apply Heroku-specific settings last, opting out of the pieces configured
# explicitly above (hosts, logging, secret key, staticfiles).
django_heroku.settings(
    locals(),
    allowed_hosts=False,
    logging=False,
    secret_key=False,
    staticfiles=False,
    test_runner=False
)
| {
"content_hash": "43bd4689fc8c93f87f5bb486a4db0c14",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 100,
"avg_line_length": 26.26829268292683,
"alnum_prop": 0.5855772206747137,
"repo_name": "OpenCanada/website",
"id": "8b619453219e2923ce7d5c2ea6104af8e87d72a1",
"size": "3231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opencanada/settings/production.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "231211"
},
{
"name": "HTML",
"bytes": "400148"
},
{
"name": "JavaScript",
"bytes": "61896"
},
{
"name": "Python",
"bytes": "417101"
},
{
"name": "Shell",
"bytes": "985"
}
],
"symlink_target": ""
} |
import json
class Device:
def __init__( self, protocol, dataInput ):
self.__protocol = protocol
self.__rawData = dataInput
debugOutput = json.dumps( dataInput )
if not 'label' in dataInput.keys():
raise ValueError('No device name found: ' + debugOutput )
self.__label = dataInput['label']
if not 'controllableName' in dataInput.keys():
raise ValueError('No control label name found: ' + debugOutput )
self.__type = dataInput['controllableName']
if not 'deviceURL' in dataInput.keys():
raise ValueError('No control URL: ' + debugOutput )
self.__url = dataInput['deviceURL']
### Parse definitions
if not 'definition' in dataInput.keys():
raise ValueError('No device definition found: ' + debugOutput )
self.__definitions = {
'commands' : [],
'states' : []
}
definition = dataInput['definition']
if 'commands' in definition.keys():
for command in definition['commands']:
if command['commandName'] in self.__definitions['commands']:
raise ValueError("Command '" + command['commandName'] + "' double defined - " + debugOutput)
self.__definitions['commands'].append(command['commandName'])
if 'states' in definition.keys():
for state in definition['states']:
if state['qualifiedName'] in self.__definitions['states']:
raise ValueError("State '" + state['qualifiedName'] + "' double defined - " + debugOutput)
self.__definitions['states'].append(state['qualifiedName'])
### Parse active states
# calculate the amount of known active states
activeStatesAmount = 0
if 'states' in dataInput.keys():
for state in dataInput['states']:
activeStatesAmount += 1
# make sure there are not more active states than definitions
if activeStatesAmount > len(self.stateDefinitions):
raise ValueError(
'Missmatch of state definition and active states (' + str(len(self.stateDefinitions)) + '/' + str(
activeStatesAmount) + '): ' + debugOutput)
if len(self.stateDefinitions) > 0:
if not 'states' in dataInput.keys():
raise ValueError("No active states given.")
self.__activeStates = {}
for state in dataInput['states']:
if not state['name'] in self.stateDefinitions:
raise ValueError("Active state '" + state['name'] + "' has not been defined: " + debugOutput)
if state['name'] in self.__activeStates.keys():
raise ValueError("Active state '" + state['name'] + "' has been double defined: " + debugOutput)
self.__activeStates[state['name']] = state['value']
@property
def label(self):
    # Human-readable device name; presumably parsed from the "label" entry
    # of the API payload in __init__ (assignment is outside this excerpt —
    # TODO confirm against the full source).
    return self.__label
@property
def commandDefinitions(self):
    # Names of the commands declared in the device's "definition" block
    # (list of 'commandName' strings collected in __init__).
    return self.__definitions['commands']
@property
def stateDefinitions(self):
    # Names of the states declared in the device's "definition" block
    # (list of 'qualifiedName' strings collected in __init__).
    return self.__definitions['states']
@property
def activeStates(self):
    # Mapping of state name -> current value.
    # NOTE(review): __init__ only assigns self.__activeStates when at least
    # one state definition exists; accessing this property otherwise raises
    # AttributeError — confirm whether that is intended.
    return self.__activeStates
def setActiveState(self, name, value):
    """Update a single active state.

    String payloads are converted to the numeric type of the value
    currently stored under ``name`` (int or float); any other combination
    is stored as-is.

    Raises:
        ValueError: if ``name`` is not a known active state.
    """
    states = self.__activeStates
    if name not in states:
        raise ValueError("Can not set unknown state '" + name + "'")
    current = states[name]
    if isinstance(value, str) and isinstance(current, int):
        # Update arrives as a string but the stored value is an int.
        states[name] = int(value)
    elif isinstance(value, str) and isinstance(current, float):
        # Update arrives as a string but the stored value is a float.
        states[name] = float(value)
    else:
        states[name] = value
def setActiveStates(self, states):
    """Apply a batch of state updates.

    Each entry must be a mapping providing 'name' and 'value'; the update
    itself is delegated to :meth:`setActiveState`.
    """
    for entry in states:
        self.setActiveState(entry['name'], entry['value'])
@property
def type(self):
    # Device type identifier, taken from the 'controllableName' field of
    # the input data in __init__.
    return self.__type
@property
def url(self):
    # Unique device address, taken from the 'deviceURL' field of the input
    # data in __init__.
    return self.__url
def executeAction(self, action):
self.__protocol
| {
"content_hash": "f74499a7208571949b0437921b481dbe",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 116,
"avg_line_length": 32.92307692307692,
"alnum_prop": 0.5810747663551402,
"repo_name": "bpannier/TahomaProtocol",
"id": "aa3fc02be48e0f1c53102e42bc01da2459f619d7",
"size": "4282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tahoma/device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "546"
},
{
"name": "Python",
"bytes": "90951"
}
],
"symlink_target": ""
} |
"""Tests for builder.py."""
import subprocess
from unittest import mock
import pytest
from common import new_process
from experiment.build import gcb_build
# pylint: disable=protected-access
# A ProcessResult representing a failing command (first field is nonzero —
# presumably the return code; confirm against common.new_process).
FAIL_RESULT = new_process.ProcessResult(1, '', False)


@mock.patch('common.new_process.execute', return_value=FAIL_RESULT)
@mock.patch('experiment.build.build_utils.store_build_logs')
def test_build_error(mocked_store_build_logs, _):
    """Tests that on error, _build raises subprocess.CalledProcessError and
    calls store_build_logs."""
    config_name = 'config'
    with pytest.raises(subprocess.CalledProcessError):
        gcb_build._build({}, config_name)
    # Build logs must be stored even when the build fails.
    mocked_store_build_logs.assert_called_with(config_name, FAIL_RESULT)
# A ProcessResult representing a successful command (first field 0 —
# presumably the return code; confirm against common.new_process).
SUCCESS_RESULT = new_process.ProcessResult(0, '', False)


@mock.patch('common.new_process.execute', return_value=SUCCESS_RESULT)
@mock.patch('experiment.build.build_utils.store_build_logs')
def test_build_success_store_logs(mocked_store_build_logs, _):
    """Tests that on success _build stores build logs."""
    config_name = 'config'
    gcb_build._build({}, config_name)
    mocked_store_build_logs.assert_called_with(config_name, SUCCESS_RESULT)
| {
"content_hash": "01769b90aa6b5633f482e1e37baa034f",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 75,
"avg_line_length": 33.083333333333336,
"alnum_prop": 0.7371956339210747,
"repo_name": "google/fuzzbench",
"id": "5b3fb0a4a1448cdf55aec2fccb68fef705e038b3",
"size": "1766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiment/build/test_gcb_build.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "17334"
},
{
"name": "C++",
"bytes": "37645"
},
{
"name": "Dockerfile",
"bytes": "337043"
},
{
"name": "HTML",
"bytes": "25840"
},
{
"name": "Jupyter Notebook",
"bytes": "578996"
},
{
"name": "Makefile",
"bytes": "2810"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1222236"
},
{
"name": "Shell",
"bytes": "86157"
}
],
"symlink_target": ""
} |
"""
This module provides objects for extracting timing data from the ABINIT output files
It also provides tools to analye and to visualize the parallel efficiency.
"""
from __future__ import unicode_literals, division
import sys
import collections
import numpy as np
from six.moves import zip
from monty.string import is_string, list_strings
from pymatgen.util.num_utils import minloc
from pymatgen.util.plotting_utils import add_fig_kwargs, get_ax_fig_plt
import logging
logger = logging.getLogger(__name__)
def alternate(*iterables):
    """
    Interleave the elements of several iterables (truncating to the shortest):
    [a[0], b[0], ... , a[1], b[1], ..., a[n], b[n] ...]

    >>> alternate([1,4], [2,5], [3,6])
    [1, 2, 3, 4, 5, 6]
    """
    return [element for group in zip(*iterables) for element in group]
class AbinitTimerParserError(Exception):
    """Errors raised by AbinitTimerParser while parsing a TIMER section."""
class AbinitTimerParser(collections.Iterable):
    """
    Responsible for parsing a list of output files, and managing the parsed database.

    Typical usage: create an instance, call :meth:`parse` with one or more
    ABINIT output filenames, then inspect the data through :meth:`timers`,
    :meth:`pefficiency`, :meth:`summarize` or the ``plot_*`` methods.

    NOTE(review): the base class ``collections.Iterable`` was removed from the
    ``collections`` namespace in Python 3.10 (use ``collections.abc.Iterable``);
    kept as-is to preserve Python 2 compatibility of this file.
    """
    # The markers enclosing the data.
    BEGIN_TAG = "-<BEGIN_TIMER"
    END_TAG = "-<END_TIMER>"

    Error = AbinitTimerParserError

    #DEFAULT_MPI_RANK = "0"

    def __init__(self):
        # List of files that have been parsed.
        self._filenames = []

        # timers[filename][mpi_rank]
        # contains the timer extracted from the file filename associated to the MPI rank mpi_rank.
        self._timers = collections.OrderedDict()

    def __iter__(self):
        return self._timers.__iter__()

    def __len__(self):
        return len(self._timers)

    def parse(self, filenames):
        """
        Read and parse a filename or a list of filenames.
        Files that cannot be opened are ignored. A single filename may also be given.
        Return list of successfully read files.
        """
        filenames = list_strings(filenames)

        read_ok = []
        for fname in filenames:
            try:
                fh = open(fname)
            except IOError:
                logger.warning("Cannot open file %s" % fname)
                continue

            try:
                self._read(fh, fname)
                read_ok.append(fname)
            except self.Error as e:
                logger.warning("exception while parsing file %s:\n%s" % (fname, str(e)))
                continue
            finally:
                fh.close()

        # Add read_ok to the list of files that have been parsed.
        self._filenames.extend(read_ok)
        return read_ok

    def _read(self, fh, fname):
        """Parse the TIMER section of the open file ``fh`` (named ``fname``).

        Raises:
            AbinitTimerParserError: if the file was already parsed, contains
                no timer section, or a header line is malformed.
        """
        if fname in self._timers:
            raise self.Error("Cannot overwrite timer associated to: %s " % fname)

        data = {}

        def parse_line(line):
            # The section name occupies the first 25 columns; the remaining
            # columns hold the six numeric entries of the section.
            name, vals = line[:25], line[25:].split()
            ctime, cfract, wtime, wfract, ncalls, gflops = vals
            return AbinitTimerSection(name, ctime, cfract, wtime, wfract, ncalls, gflops)

        # `inside` counts lines read after BEGIN_TAG:
        #   1 -> tag line, 2 -> line with cpu_time/wall_time,
        #   3-5 -> separator lines (expected non-parseable), > 5 -> section data.
        inside, has_timer = 0, False
        for line in fh:
            #print(line.strip())
            if line.startswith(self.BEGIN_TAG):
                has_timer = True
                sections = []
                info = {}
                inside = 1
                # Strip the tag and the trailing ">", then parse "key = value" pairs.
                line = line[len(self.BEGIN_TAG):].strip()[:-1]

                info["fname"] = fname
                for tok in line.split(","):
                    (key, val) = [s.strip() for s in tok.split("=")]
                    info[key] = val

            elif line.startswith(self.END_TAG):
                inside = 0
                timer = AbinitTimer(sections, info, cpu_time, wall_time)
                mpi_rank = info["mpi_rank"]
                data[mpi_rank] = timer

            elif inside:
                inside += 1
                line = line[1:].strip()

                if inside == 2:
                    d = dict()
                    for tok in line.split(","):
                        (key, val) = [s.strip() for s in tok.split("=")]
                        d[key] = float(val)
                    cpu_time, wall_time = d["cpu_time"], d["wall_time"]

                elif inside > 5:
                    sections.append(parse_line(line))

                else:
                    # Header separator lines must NOT be parseable as section
                    # data; raise if one of them unexpectedly parses.
                    # BUGFIX: parser_failed must be initialized for every
                    # line — the original code raised UnboundLocalError
                    # whenever parse_line succeeded here.
                    parser_failed = False
                    try:
                        parse_line(line)
                    except Exception:
                        parser_failed = True

                    if not parser_failed:
                        raise self.Error("line should be empty: " + str(inside) + line)

        if not has_timer:
            raise self.Error("%s: No timer section found" % fname)

        # Add it to the dict
        self._timers[fname] = data

    #def set_default_mpi_rank(mpi_rank): self._default_mpi_rank = mpi_rank
    #def get_default_mpi_rank(mpi_rank): return self._default_mpi_rank

    def timers(self, filename=None, mpi_rank="0"):
        """Return the list of timers associated to the given filename and MPI rank mpi_rank."""
        if filename is not None:
            timers = [self._timers[filename][mpi_rank]]
        else:
            # Avoid shadowing the `filename` parameter in the comprehension.
            timers = [self._timers[fname][mpi_rank] for fname in self._filenames]
        return timers

    def section_names(self, ordkey="wall_time"):
        """Return the names of sections ordered by ordkey (taken from the first parsed timer)."""
        section_names = []  # Avoid UnboundLocalError
        # FIXME this is not trivial
        for idx, timer in enumerate(self.timers()):
            if idx == 0:
                section_names = [s.name for s in timer.order_sections(ordkey)]
            #check = section_names
            #else:
            #    new_set = set( [s.name for s in timer.order_sections(ordkey)])
            #    section_names.intersection_update(new_set)
            #    check = check.union(new_set)

        #if check != section_names:
        #    print("sections", section_names)
        #    print("check",check)

        return section_names

    def get_sections(self, section_name):
        """
        Return the list of sections stored in self.timers() whose name is section_name
        A fake section is returned if the timer does not have section_name.
        """
        sections = []
        for timer in self.timers():
            for sect in timer.sections:
                if sect.name == section_name:
                    sections.append(sect)
                    break
            else:
                # No section with that name in this timer: use a placeholder.
                sections.append(AbinitTimerSection.fake())

        return sections

    def pefficiency(self):
        """
        Analyze the parallel efficiency.

        Returns:
            :class:`ParallelEfficiency` mapping section names to efficiency data.
        """
        timers = self.timers()

        # Number of CPUs employed in each calculation.
        ncpus = [timer.ncpus for timer in timers]

        # Find the minimum number of cpus used and its index in timers.
        min_idx = minloc(ncpus)
        min_ncpus = ncpus[min_idx]

        # Reference timer
        ref_t = timers[min_idx]

        # Compute the parallel efficiency (total efficiency and the efficiency of each section)
        peff = {}
        # NOTE(review): here ctime_peff is computed from wall_time and
        # wtime_peff from cpu_time — the opposite of the per-section loop
        # below. Kept as-is to preserve the original output; confirm upstream.
        ctime_peff = [(min_ncpus * ref_t.wall_time) / (t.wall_time * ncp) for (t, ncp) in zip(timers, ncpus)]
        wtime_peff = [(min_ncpus * ref_t.cpu_time) / (t.cpu_time * ncp) for (t, ncp) in zip(timers, ncpus)]

        n = len(timers)
        peff["total"] = {}
        peff["total"]["cpu_time"] = ctime_peff
        peff["total"]["wall_time"] = wtime_peff
        peff["total"]["cpu_fract"] = n * [100]
        peff["total"]["wall_fract"] = n * [100]

        for sect_name in self.section_names():
            #print(sect_name)
            ref_sect = ref_t.get_section(sect_name)
            sects = [t.get_section(sect_name) for t in timers]
            try:
                ctime_peff = [(min_ncpus * ref_sect.cpu_time) / (s.cpu_time * ncp) for (s, ncp) in zip(sects, ncpus)]
                wtime_peff = [(min_ncpus * ref_sect.wall_time) / (s.wall_time * ncp) for (s, ncp) in zip(sects, ncpus)]
            except ZeroDivisionError:
                # Section absent (fake section with zero times): mark with -1.
                ctime_peff = n * [-1]
                wtime_peff = n * [-1]

            assert sect_name not in peff
            peff[sect_name] = {}
            peff[sect_name]["cpu_time"] = ctime_peff
            peff[sect_name]["wall_time"] = wtime_peff
            peff[sect_name]["cpu_fract"] = [s.cpu_fract for s in sects]
            peff[sect_name]["wall_fract"] = [s.wall_fract for s in sects]

        return ParallelEfficiency(self._filenames, min_idx, peff)

    def summarize(self, **kwargs):
        """
        Return a pandas DataFrame with one row per parsed timer, plus a
        "tot_ncpus" column and a "peff" column with the parallel efficiency
        normalized to the run with the smallest number of CPUs.
        """
        import pandas as pd
        colnames = ["fname", "wall_time", "cpu_time", "mpi_nprocs", "omp_nthreads", "mpi_rank"]

        frame = pd.DataFrame(columns=colnames)
        for i, timer in enumerate(self.timers()):
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # kept for compatibility with the pandas version of this codebase.
            frame = frame.append({k: getattr(timer, k) for k in colnames}, ignore_index=True)
        frame["tot_ncpus"] = frame["mpi_nprocs"] * frame["omp_nthreads"]

        # Compute parallel efficiency (use the run with min number of cpus to normalize).
        i = frame["tot_ncpus"].idxmin()
        # BUGFIX: .ix was deprecated and then removed from pandas; .loc is
        # equivalent here because the frame has a default integer index.
        ref_wtime = frame.loc[i]["wall_time"]
        ref_ncpus = frame.loc[i]["tot_ncpus"]
        frame["peff"] = (ref_ncpus * ref_wtime) / (frame["wall_time"] * frame["tot_ncpus"])

        return frame

    @add_fig_kwargs
    def plot_efficiency(self, key="wall_time", what="gb", nmax=5, ax=None, **kwargs):
        """
        Plot the parallel efficiency of the most/least efficient sections.

        Args:
            key: Quantity used to compute the efficiency ("wall_time" or "cpu_time").
            what: "g" to plot the good sections, "b" for the bad ones, "gb" for both.
            nmax: Maximum number of sections shown per category.
            ax: matplotlib Axes or None if a new figure should be created.
        """
        ax, fig, plt = get_ax_fig_plt(ax=ax)

        timers = self.timers()
        peff = self.pefficiency()

        # Table with the parallel efficiency for all the sections.
        #pprint_table(peff.totable())

        n = len(timers)
        xx = np.arange(n)

        # NOTE(review): Axes.set_color_cycle was removed in matplotlib 2.x
        # (replaced by set_prop_cycle); this call fails with recent versions.
        ax.set_color_cycle(['g', 'b', 'c', 'm', 'y', 'k'])

        legend_entries = []

        # Plot sections with good efficiency.
        lines = []
        if "g" in what:
            good = peff.good_sections(key=key, nmax=nmax)
            for g in good:
                #print(g, peff[g])
                yy = peff[g][key]
                line, = ax.plot(xx, yy, "-->", linewidth=3.0, markersize=10)
                lines.append(line)
                legend_entries.append(g)

        # Plot sections with bad efficiency.
        if "b" in what:
            bad = peff.bad_sections(key=key, nmax=nmax)
            for b in bad:
                #print(b, peff[b])
                yy = peff[b][key]
                line, = ax.plot(xx, yy, "-.<", linewidth=3.0, markersize=10)
                lines.append(line)
                legend_entries.append(b)

        # Always show the total efficiency.
        if "total" not in legend_entries:
            yy = peff["total"][key]
            total_line, = ax.plot(xx, yy, "r", linewidth=3.0, markersize=10)
            lines.append(total_line)
            legend_entries.append("total")

        ax.legend(lines, legend_entries, loc="best", shadow=True)

        #ax.set_title(title)
        ax.set_xlabel('Total_NCPUs')
        ax.set_ylabel('Efficiency')
        ax.grid(True)

        # Set xticks and labels.
        labels = ["MPI = %d, OMP = %d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
        ax.set_xticks(xx)
        ax.set_xticklabels(labels, fontdict=None, minor=False, rotation=15)

        return fig

    @add_fig_kwargs
    def plot_pie(self, key="wall_time", minfract=0.05, ax=None, **kwargs):
        """Pie charts of the different timers (one subplot per parsed timer)."""
        ax, fig, plt = get_ax_fig_plt(ax=ax)

        timers = self.timers()
        n = len(timers)

        # Make square figures and axes
        the_grid = plt.GridSpec(n, 1)
        fig = plt.figure(1, figsize=(6, 6))

        for idx, timer in enumerate(timers):
            plt.subplot(the_grid[idx, 0])
            plt.title(str(timer))
            timer.pie(key=key, minfract=minfract)

        return fig

    @add_fig_kwargs
    def plot_stacked_hist(self, key="wall_time", nmax=5, ax=None, **kwargs):
        """Stacked histogram of the nmax most important sections of the different timers."""
        ax, fig, plt = get_ax_fig_plt(ax=ax)

        mpi_rank = "0"
        timers = self.timers(mpi_rank=mpi_rank)
        n = len(timers)

        names, values = [], []
        rest = np.zeros(n)

        # Keep the nmax most important sections, lump the others together.
        for idx, sname in enumerate(self.section_names(ordkey=key)):
            sections = self.get_sections(sname)
            svals = np.asarray([s.__dict__[key] for s in sections])
            if idx < nmax:
                names.append(sname)
                values.append(svals)
            else:
                rest += svals

        names.append("others (nmax = %d)" % nmax)
        values.append(rest)

        #for (n, vals) in zip(names, values): print(n, vals)

        # The dataset is stored in values.
        # Now create the stacked histogram.
        ind = np.arange(n)  # the locations for the groups
        width = 0.35  # the width of the bars

        # this does not work with matplotlib < 1.0
        #plt.rcParams['axes.color_cycle'] = ['r', 'g', 'b', 'c']
        colors = nmax * ['r', 'g', 'b', 'c', 'k', 'y', 'm']

        bars = []
        bottom = np.zeros(n)
        for idx, vals in enumerate(values):
            color = colors[idx]
            bar = plt.bar(ind, vals, width, color=color, bottom=bottom)
            bars.append(bar)
            bottom += vals

        ax.set_ylabel(key)
        #ax.title("Stacked histogram for the %d most important sections" % nmax)

        labels = ["MPI = %d, OMP = %d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
        plt.xticks(ind + width / 2.0, labels, rotation=15)
        #plt.yticks(np.arange(0,81,10))

        ax.legend([bar[0] for bar in bars], names, loc="best")

        return fig

    def plot_all(self, **kwargs):
        """Produce and return all the plots (efficiency, pie, stacked histogram)."""
        figs = []; app = figs.append
        app(self.plot_efficiency())
        app(self.plot_pie())
        app(self.plot_stacked_hist())
        return figs
class ParallelEfficiency(dict):
    """
    Dictionary mapping section names to their parallel-efficiency data,
    together with bookkeeping about the files the data came from.
    """

    def __init__(self, filenames, ref_idx, *args, **kwargs):
        """
        Args:
            filenames: Names of the parsed files.
            ref_idx: Index of the run used as reference (smallest CPU count).
        """
        self.update(*args, **kwargs)
        self.filenames = filenames
        self._ref_idx = ref_idx

    def _order_by_peff(self, key, criterion, reverse=True):
        """Return section names sorted by the efficiency estimator `criterion` applied to values of `key`."""
        # Map the criterion name onto the corresponding reduction function.
        estimators = {
            "min": min,
            "max": max,
            "mean": lambda items: sum(items) / len(items),
        }
        self.estimator = estimators[criterion]

        scored = []
        for sect_name, peff in self.items():
            # Ignore values where we had a division by zero.
            if all(v != -1 for v in peff[key]):
                values = list(peff[key])
                if len(values) > 1:
                    # Drop the reference run (its efficiency is 1 by construction).
                    ref_value = values.pop(self._ref_idx)
                    assert ref_value == 1.0
                scored.append((sect_name, self.estimator(values)))

        scored.sort(key=lambda pair: pair[1], reverse=reverse)
        return tuple(name for name, _score in scored)

    def totable(self, stop=None, reverse=True):
        """Return a table (list of string lists): section names with alternating efficiency/fraction columns."""
        ordered = self._order_by_peff("wall_time", criterion="mean", reverse=reverse)

        n = len(self.filenames)
        rows = [["AbinitTimerSection"] + alternate(self.filenames, n * ["%"])]
        for name in ordered:
            entry = self[name]
            cells = alternate(entry["wall_time"], entry["wall_fract"])
            rows.append([name] + ["%.2f" % c for c in cells])

        return rows

    def good_sections(self, key="wall_time", criterion="mean", nmax=5):
        """Names of the (at most) nmax sections with the best parallel efficiency."""
        return self._order_by_peff(key, criterion=criterion)[:nmax]

    def bad_sections(self, key="wall_time", criterion="mean", nmax=5):
        """Names of the (at most) nmax sections with the worst parallel efficiency."""
        return self._order_by_peff(key, criterion=criterion, reverse=False)[:nmax]
class AbinitTimerSection(object):
    """Record with the timing results associated to a section of code."""

    # Field holding a string value.
    STR_FIELDS = [
        "name"
    ]

    # Fields holding numeric values (floats, except ncalls which is an int).
    NUMERIC_FIELDS = [
        "wall_time",
        "wall_fract",
        "cpu_time",
        "cpu_fract",
        "ncalls",
        "gflops",
    ]

    FIELDS = tuple(STR_FIELDS + NUMERIC_FIELDS)

    @classmethod
    def fake(cls):
        """Return a placeholder section (ncalls == -1) used when a timer lacks a given section."""
        return AbinitTimerSection("fake", 0.0, 0.0, 0.0, 0.0, -1, 0.0)

    def __init__(self, name, cpu_time, cpu_fract, wall_time, wall_fract, ncalls, gflops):
        """
        Args:
            name: Name of the section (leading/trailing whitespace is stripped).
            cpu_time: CPU time in seconds.
            cpu_fract: Percentage of total CPU time.
            wall_time: Wall time in seconds.
            wall_fract: Percentage of total wall time.
            ncalls: Number of calls.
            gflops: Gigaflops.
        """
        self.name = name.strip()
        self.cpu_time = float(cpu_time)
        self.cpu_fract = float(cpu_fract)
        self.wall_time = float(wall_time)
        self.wall_fract = float(wall_fract)
        self.ncalls = int(ncalls)
        self.gflops = float(gflops)

    def to_tuple(self):
        """Return the section values as a tuple, ordered as in FIELDS."""
        return tuple([self.__dict__[at] for at in AbinitTimerSection.FIELDS])

    def to_dict(self):
        """Return the section values as a dict keyed by FIELDS."""
        return {at: self.__dict__[at] for at in AbinitTimerSection.FIELDS}

    def to_csvline(self, with_header=False):
        """Return a string with data in CSV format"""
        string = ""

        if with_header:
            string += "# " + " ".join(at for at in AbinitTimerSection.FIELDS) + "\n"

        string += ", ".join(str(v) for v in self.to_tuple()) + "\n"
        return string

    def __str__(self):
        string = ""
        for a in AbinitTimerSection.FIELDS:
            # BUGFIX: the numeric attributes must be converted with str()
            # before concatenation — the original code raised TypeError.
            string += a + " = " + str(self.__dict__[a]) + ","
        return string[:-1]
class AbinitTimer(object):
    """Container class storing the timing results."""

    def __init__(self, sections, info, cpu_time, wall_time):
        """
        Args:
            sections: Iterable of AbinitTimerSection objects.
            info: Dict with metadata from the timer header; must provide
                "mpi_nprocs", "omp_nthreads", "mpi_rank" and "fname".
            cpu_time: Total CPU time in seconds.
            wall_time: Total wall time in seconds.
        """
        # Store sections and names
        self.sections = tuple(sections)
        self.section_names = tuple([s.name for s in self.sections])
        self.info = info
        self.cpu_time = float(cpu_time)
        self.wall_time = float(wall_time)
        self.mpi_nprocs = int(info["mpi_nprocs"])
        self.omp_nthreads = int(info["omp_nthreads"])
        self.mpi_rank = info["mpi_rank"].strip()
        self.fname = info["fname"].strip()

    def __str__(self):
        string = "file = %s, wall_time = %.1f, mpi_nprocs = %d, omp_nthreads = %d" % (
            self.fname, self.wall_time, self.mpi_nprocs, self.omp_nthreads )
        #string += ", rank = " + self.mpi_rank
        return string

    def __cmp__(self, other):
        # NOTE(review): Python 2 only — the builtin cmp() and the __cmp__
        # protocol do not exist in Python 3; this method is never invoked
        # there and would raise NameError if called directly.
        return cmp(self.wall_time, other.wall_time)

    @property
    def ncpus(self):
        """Total number of CPUs employed."""
        return self.mpi_nprocs * self.omp_nthreads

    def get_section(self, section_name):
        """Return the section with the given name; raise ValueError (from .index) if not present."""
        try:
            idx = self.section_names.index(section_name)
        except:
            raise
        sect = self.sections[idx]
        assert sect.name == section_name
        return sect

    def to_csv(self, fileobj=sys.stdout):
        """Write data on file fileobj using CSV format."""
        # Accept either an open file-like object or a filename.
        openclose = is_string(fileobj)

        if openclose:
            fileobj = open(fileobj, "w")

        for (idx, section) in enumerate(self.sections):
            # Emit the header only for the first section.
            fileobj.write(section.to_csvline(with_header=(idx == 0)))
        fileobj.flush()

        if openclose:
            fileobj.close()

    def to_table(self, sort_key="wall_time", stop=None):
        """Return a table (list of lists) with timer data"""
        table = [list(AbinitTimerSection.FIELDS), ]
        ord_sections = self.order_sections(sort_key)

        if stop is not None:
            ord_sections = ord_sections[:stop]

        for osect in ord_sections:
            row = [str(item) for item in osect.to_tuple()]
            table.append(row)

        return table

    # Maintain old API
    totable = to_table

    def get_dataframe(self, sort_key="wall_time", **kwargs):
        """
        Return pandas DataFrame with one row per section, sorted by sort_key.
        """
        import pandas as pd
        frame = pd.DataFrame(columns=AbinitTimerSection.FIELDS)

        for osect in self.order_sections(sort_key):
            # NOTE(review): DataFrame.append was removed in pandas 2.0 —
            # verify the pandas version this codebase targets.
            frame = frame.append(osect.to_dict(), ignore_index=True)

        # Monkey patch
        frame.info = self.info
        frame.cpu_time = self.cpu_time
        frame.wall_time = self.wall_time
        frame.mpi_nprocs = self.mpi_nprocs
        frame.omp_nthreads = self.omp_nthreads
        frame.mpi_rank = self.mpi_rank
        frame.fname = self.fname

        return frame

    def get_values(self, keys):
        """Return a list of values associated to a particular list of keys"""
        # A single key returns a flat list; a list of keys returns a list of lists.
        if is_string(keys):
            return [s.__dict__[keys] for s in self.sections]
        else:
            values = []
            for k in keys:
                values.append([s.__dict__[k] for s in self.sections])
            return values

    def names_and_values(self, key, minval=None, minfract=None, sorted=True):
        """
        Select the entries whose value[key] is >= minval or whose fraction[key] is >= minfract
        Return the names of the sections and the corresponding values.

        Entries below the threshold are lumped into a single "below ..." entry.
        NOTE: the `sorted` parameter shadows the builtin of the same name.
        """
        values = self.get_values(key)
        names = self.get_values("name")

        new_names, new_values = [], []
        other_val = 0.0

        if minval is not None:
            assert minfract is None

            for n, v in zip(names, values):
                if v >= minval:
                    new_names.append(n)
                    new_values.append(v)
                else:
                    other_val += v

            new_names.append("below minval " + str(minval))
            new_values.append(other_val)

        elif minfract is not None:
            assert minval is None

            # Total used to compute the fractions.
            total = self.sum_sections(key)

            for n, v in zip(names, values):
                if v / total >= minfract:
                    new_names.append(n)
                    new_values.append(v)
                else:
                    other_val += v

            new_names.append("below minfract " + str(minfract))
            new_values.append(other_val)

        else:
            # all values
            new_names, new_values = names, values

        if sorted:
            # Sort new_values and rearrange new_names.
            fsort = lambda t: t[1]
            nandv = [nv for nv in zip(new_names, new_values)]
            nandv.sort(key=fsort)
            new_names, new_values = [n[0] for n in nandv], [n[1] for n in nandv]

        return new_names, new_values

    def _reduce_sections(self, keys, operator):
        # Helper: apply `operator` to the values extracted for `keys`.
        return operator(self.get_values(keys))

    def sum_sections(self, keys):
        """Sum the values of keys over all sections."""
        return self._reduce_sections(keys, sum)

    def order_sections(self, key, reverse=True):
        """Sort sections according to the value of key."""
        fsort = lambda s: s.__dict__[key]
        return sorted(self.sections, key=fsort, reverse=reverse)

    @add_fig_kwargs
    def cpuwall_histogram(self, ax=None, **kwargs):
        """Bar chart comparing the CPU time and wall time of each section."""
        ax, fig, plt = get_ax_fig_plt(ax=ax)

        nk = len(self.sections)
        ind = np.arange(nk)  # the x locations for the groups
        width = 0.35  # the width of the bars

        cpu_times = self.get_values("cpu_time")
        rects1 = plt.bar(ind, cpu_times, width, color='r')

        wall_times = self.get_values("wall_time")
        rects2 = plt.bar(ind + width, wall_times, width, color='y')

        # Add ylabel and title
        ax.set_ylabel('Time (s)')

        #if title:
        #    plt.title(title)
        #else:
        #    plt.title('CPU-time and Wall-time for the different sections of the code')

        ticks = self.get_values("name")
        # NOTE(review): the second positional argument of set_xticks is the
        # tick labels only in matplotlib >= 3.5 (earlier versions interpret
        # it as `minor`) — verify the target matplotlib version.
        ax.set_xticks(ind + width, ticks)
        ax.legend((rects1[0], rects2[0]), ('CPU', 'Wall'), loc="best")

        return fig

    #def hist2(self, key1="wall_time", key2="cpu_time"):
    #    labels = self.get_values("name")
    #    vals1, vals2 = self.get_values([key1, key2])
    #    N = len(vals1)
    #    assert N == len(vals2)
    #    plt.figure(1)
    #    plt.subplot(2, 1, 1) # 2 rows, 1 column, figure 1
    #    n1, bins1, patches1 = plt.hist(vals1, N, facecolor="m")
    #    plt.xlabel(labels)
    #    plt.ylabel(key1)
    #    plt.subplot(2, 1, 2)
    #    n2, bins2, patches2 = plt.hist(vals2, N, facecolor="y")
    #    plt.xlabel(labels)
    #    plt.ylabel(key2)
    #    plt.show()

    def pie(self, key="wall_time", minfract=0.05, title=None):
        """Pie chart of the sections; entries whose fraction of the key total is below minfract are lumped together.

        NOTE(review): the `title` parameter is accepted but unused.
        """
        import matplotlib.pyplot as plt
        # Don't show section whose value is less that minfract
        labels, vals = self.names_and_values(key, minfract=minfract)
        return plt.pie(vals, explode=None, labels=labels, autopct='%1.1f%%', shadow=True)

    def scatter_hist(self, ax=None, **kwargs):
        """Scatter plot of cpu_time vs wall_time per section, with marginal histograms."""
        import matplotlib.pyplot as plt
        from mpl_toolkits.axes_grid1 import make_axes_locatable

        ax, fig, plt = get_ax_fig_plt(ax=ax)

        #title = kwargs.pop("title", None)
        #show = kwargs.pop("show", True)
        #savefig = kwargs.pop("savefig", None)
        #fig = plt.figure(1, figsize=(5.5, 5.5))

        x = np.asarray(self.get_values("cpu_time"))
        y = np.asarray(self.get_values("wall_time"))

        # the scatter plot:
        axScatter = plt.subplot(1, 1, 1)
        axScatter.scatter(x, y)
        axScatter.set_aspect("auto")

        # create new axes on the right and on the top of the current axes
        # The first argument of the new_vertical(new_horizontal) method is
        # the height (width) of the axes to be created in inches.
        divider = make_axes_locatable(axScatter)
        axHistx = divider.append_axes("top", 1.2, pad=0.1, sharex=axScatter)
        axHisty = divider.append_axes("right", 1.2, pad=0.1, sharey=axScatter)

        # make some labels invisible
        plt.setp(axHistx.get_xticklabels() + axHisty.get_yticklabels(), visible=False)

        # now determine nice limits by hand:
        binwidth = 0.25
        xymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])
        lim = (int(xymax / binwidth) + 1) * binwidth

        bins = np.arange(-lim, lim + binwidth, binwidth)
        axHistx.hist(x, bins=bins)
        axHisty.hist(y, bins=bins, orientation='horizontal')

        # the xaxis of axHistx and yaxis of axHisty are shared with axScatter,
        # thus there is no need to manually adjust the xlim and ylim of these axis.
        #axHistx.axis["bottom"].major_ticklabels.set_visible(False)
        for tl in axHistx.get_xticklabels():
            tl.set_visible(False)
        axHistx.set_yticks([0, 50, 100])

        #axHisty.axis["left"].major_ticklabels.set_visible(False)
        for tl in axHisty.get_yticklabels():
            tl.set_visible(False)
        axHisty.set_xticks([0, 50, 100])

        plt.draw()
        return fig
| {
"content_hash": "729cf18f8047be8d8892b3aa995a734c",
"timestamp": "",
"source": "github",
"line_count": 798,
"max_line_length": 119,
"avg_line_length": 33.042606516290725,
"alnum_prop": 0.5494538834951457,
"repo_name": "ndardenne/pymatgen",
"id": "02784a4b1dbd099d41af4eeda2a7d41bef2fb568",
"size": "26477",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymatgen/io/abinit/abitimer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "5203893"
},
{
"name": "Roff",
"bytes": "868"
}
],
"symlink_target": ""
} |
from rdflib.namespace import DefinedNamespace, Namespace
from rdflib.term import URIRef
class OWL(DefinedNamespace):
    """
    The OWL 2 Schema vocabulary (OWL 2)

    This ontology partially describes the built-in classes and properties that together form the basis of
    the RDF/XML syntax of OWL 2. The content of this ontology is based on Tables 6.1 and 6.2 in Section 6.4
    of the OWL 2 RDF-Based Semantics specification, available at http://www.w3.org/TR/owl2-rdf-based-
    semantics/. Please note that those tables do not include the different annotations (labels, comments and
    rdfs:isDefinedBy links) used in this file. Also note that the descriptions provided in this ontology do not
    provide a complete and correct formal description of either the syntax or the semantics of the introduced
    terms (please see the OWL 2 recommendations for the complete and normative specifications). Furthermore,
    the information provided by this ontology may be misleading if not used with care. This ontology SHOULD NOT
    be imported into OWL ontologies. Importing this file into an OWL 2 DL ontology will cause it to become
    an OWL 2 Full ontology and may have other, unexpected, consequences.

    Generated from: http://www.w3.org/2002/07/owl#
    Date: 2020-05-26 14:20:03.193795
    """

    # NOTE(review): presumably makes DefinedNamespace raise AttributeError for
    # terms not declared below, rather than silently minting new URIs — confirm
    # against the rdflib DefinedNamespace documentation.
    _fail = True

    # Terms typed http://www.w3.org/1999/02/22-rdf-syntax-ns#Property
    allValuesFrom: URIRef  # The property that determines the class that a universal property restriction refers to.
    annotatedProperty: URIRef  # The property that determines the predicate of an annotated axiom or annotated annotation.
    annotatedSource: URIRef  # The property that determines the subject of an annotated axiom or annotated annotation.
    annotatedTarget: URIRef  # The property that determines the object of an annotated axiom or annotated annotation.
    assertionProperty: URIRef  # The property that determines the predicate of a negative property assertion.
    cardinality: URIRef  # The property that determines the cardinality of an exact cardinality restriction.
    complementOf: URIRef  # The property that determines that a given class is the complement of another class.
    datatypeComplementOf: URIRef  # The property that determines that a given data range is the complement of another data range with respect to the data domain.
    differentFrom: URIRef  # The property that determines that two given individuals are different.
    disjointUnionOf: URIRef  # The property that determines that a given class is equivalent to the disjoint union of a collection of other classes.
    disjointWith: URIRef  # The property that determines that two given classes are disjoint.
    distinctMembers: URIRef  # The property that determines the collection of pairwise different individuals in a owl:AllDifferent axiom.
    equivalentClass: URIRef  # The property that determines that two given classes are equivalent, and that is used to specify datatype definitions.
    equivalentProperty: URIRef  # The property that determines that two given properties are equivalent.
    hasKey: URIRef  # The property that determines the collection of properties that jointly build a key.
    hasSelf: URIRef  # The property that determines the property that a self restriction refers to.
    hasValue: URIRef  # The property that determines the individual that a has-value restriction refers to.
    intersectionOf: URIRef  # The property that determines the collection of classes or data ranges that build an intersection.
    inverseOf: URIRef  # The property that determines that two given properties are inverse.
    maxCardinality: URIRef  # The property that determines the cardinality of a maximum cardinality restriction.
    maxQualifiedCardinality: URIRef  # The property that determines the cardinality of a maximum qualified cardinality restriction.
    members: URIRef  # The property that determines the collection of members in either a owl:AllDifferent, owl:AllDisjointClasses or owl:AllDisjointProperties axiom.
    minCardinality: URIRef  # The property that determines the cardinality of a minimum cardinality restriction.
    minQualifiedCardinality: URIRef  # The property that determines the cardinality of a minimum qualified cardinality restriction.
    onClass: URIRef  # The property that determines the class that a qualified object cardinality restriction refers to.
    onDataRange: URIRef  # The property that determines the data range that a qualified data cardinality restriction refers to.
    onDatatype: URIRef  # The property that determines the datatype that a datatype restriction refers to.
    onProperties: URIRef  # The property that determines the n-tuple of properties that a property restriction on an n-ary data range refers to.
    onProperty: URIRef  # The property that determines the property that a property restriction refers to.
    oneOf: URIRef  # The property that determines the collection of individuals or data values that build an enumeration.
    propertyChainAxiom: URIRef  # The property that determines the n-tuple of properties that build a sub property chain of a given property.
    propertyDisjointWith: URIRef  # The property that determines that two given properties are disjoint.
    qualifiedCardinality: URIRef  # The property that determines the cardinality of an exact qualified cardinality restriction.
    sameAs: URIRef  # The property that determines that two given individuals are equal.
    someValuesFrom: URIRef  # The property that determines the class that an existential property restriction refers to.
    sourceIndividual: URIRef  # The property that determines the subject of a negative property assertion.
    targetIndividual: URIRef  # The property that determines the object of a negative object property assertion.
    targetValue: URIRef  # The property that determines the value of a negative data property assertion.
    unionOf: URIRef  # The property that determines the collection of classes or data ranges that build a union.
    withRestrictions: URIRef  # The property that determines the collection of facet-value pairs that define a datatype restriction.

    # Terms typed http://www.w3.org/2000/01/rdf-schema#Class
    AllDifferent: URIRef  # The class of collections of pairwise different individuals.
    AllDisjointClasses: URIRef  # The class of collections of pairwise disjoint classes.
    AllDisjointProperties: URIRef  # The class of collections of pairwise disjoint properties.
    Annotation: URIRef  # The class of annotated annotations for which the RDF serialization consists of an annotated subject, predicate and object.
    AnnotationProperty: URIRef  # The class of annotation properties.
    AsymmetricProperty: URIRef  # The class of asymmetric properties.
    Axiom: URIRef  # The class of annotated axioms for which the RDF serialization consists of an annotated subject, predicate and object.
    Class: URIRef  # The class of OWL classes.
    DataRange: URIRef  # The class of OWL data ranges, which are special kinds of datatypes. Note: The use of the IRI owl:DataRange has been deprecated as of OWL 2. The IRI rdfs:Datatype SHOULD be used instead.
    DatatypeProperty: URIRef  # The class of data properties.
    DeprecatedClass: URIRef  # The class of deprecated classes.
    DeprecatedProperty: URIRef  # The class of deprecated properties.
    FunctionalProperty: URIRef  # The class of functional properties.
    InverseFunctionalProperty: URIRef  # The class of inverse-functional properties.
    IrreflexiveProperty: URIRef  # The class of irreflexive properties.
    NamedIndividual: URIRef  # The class of named individuals.
    NegativePropertyAssertion: URIRef  # The class of negative property assertions.
    ObjectProperty: URIRef  # The class of object properties.
    Ontology: URIRef  # The class of ontologies.
    OntologyProperty: URIRef  # The class of ontology properties.
    ReflexiveProperty: URIRef  # The class of reflexive properties.
    Restriction: URIRef  # The class of property restrictions.
    SymmetricProperty: URIRef  # The class of symmetric properties.
    TransitiveProperty: URIRef  # The class of transitive properties.

    # Terms typed http://www.w3.org/2002/07/owl#AnnotationProperty
    backwardCompatibleWith: URIRef  # The annotation property that indicates that a given ontology is backward compatible with another ontology.
    deprecated: URIRef  # The annotation property that indicates that a given entity has been deprecated.
    incompatibleWith: URIRef  # The annotation property that indicates that a given ontology is incompatible with another ontology.
    priorVersion: URIRef  # The annotation property that indicates the predecessor ontology of a given ontology.
    versionInfo: URIRef  # The annotation property that provides version information for an ontology or another OWL construct.

    # Terms typed http://www.w3.org/2002/07/owl#Class
    Nothing: URIRef  # This is the empty class.
    Thing: URIRef  # The class of OWL individuals.

    # Terms typed http://www.w3.org/2002/07/owl#DatatypeProperty
    bottomDataProperty: URIRef  # The data property that does not relate any individual to any data value.
    topDataProperty: URIRef  # The data property that relates every individual to every data value.

    # Terms typed http://www.w3.org/2002/07/owl#ObjectProperty
    bottomObjectProperty: URIRef  # The object property that does not relate any two individuals.
    topObjectProperty: URIRef  # The object property that relates every two individuals.

    # Terms typed http://www.w3.org/2002/07/owl#OntologyProperty
    imports: URIRef  # The property that is used for importing other ontologies into a given ontology.
    versionIRI: URIRef  # The property that identifies the version IRI of an ontology.

    # Terms typed http://www.w3.org/2000/01/rdf-schema#Datatype
    # NOTE: the following two elements don't appear in the OWL RDF documents but are defined in the OWL2 Recommendation
    # at https://www.w3.org/TR/owl2-syntax/#Datatype_Maps
    rational: URIRef  # The value space is the set of all rational numbers. The lexical form is numerator '/' denominator, where both are integers.
    real: URIRef  # The value space is the set of all real numbers. Does not directly provide any lexical forms.

    _NS = Namespace("http://www.w3.org/2002/07/owl#")
| {
"content_hash": "334a0fac68ebcb776413c3f6334c64ee",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 210,
"avg_line_length": 83.48387096774194,
"alnum_prop": 0.7718315301391035,
"repo_name": "RDFLib/rdflib",
"id": "98083a0198b53fe8b8592c32e7148ec155eb2fbb",
"size": "10352",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "rdflib/namespace/_OWL.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "635"
},
{
"name": "HTML",
"bytes": "41303"
},
{
"name": "Python",
"bytes": "2828721"
},
{
"name": "Ruby",
"bytes": "31777"
},
{
"name": "Shell",
"bytes": "6030"
},
{
"name": "XSLT",
"bytes": "1588"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from . import views
# Route table for the users app.  url(regex, view, name=...) in positional
# form is equivalent to the keyword (regex=/view=/name=) form.
urlpatterns = [
    # User list view.
    url(r'^$', views.UserListView.as_view(), name='list'),
    # User redirect view.
    url(r'^~redirect/$', views.UserRedirectView.as_view(), name='redirect'),
    # User detail view, keyed by <username> captured from the path.
    url(r'^(?P<username>[\w.@+-]+)/$', views.UserDetailView.as_view(), name='detail'),
    # User update view.
    url(r'^~update/$', views.UserUpdateView.as_view(), name='update'),
]
app_name = 'users' | {
"content_hash": "7c49f20708b817ace775b476003c344b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 56,
"avg_line_length": 20.266666666666666,
"alnum_prop": 0.5361842105263158,
"repo_name": "amstart/demo",
"id": "ddfc7dd5342798093f3e47192383e6605c8dc4ee",
"size": "632",
"binary": false,
"copies": "1",
"ref": "refs/heads/test_one_vote_model",
"path": "demoslogic/users/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3826"
},
{
"name": "HTML",
"bytes": "41694"
},
{
"name": "JavaScript",
"bytes": "3922"
},
{
"name": "Jupyter Notebook",
"bytes": "22630"
},
{
"name": "Python",
"bytes": "174579"
},
{
"name": "Shell",
"bytes": "4188"
}
],
"symlink_target": ""
} |
import sys
import os

# Sphinx configuration for the graphene-custom-directives documentation.
#
# The project root (the parent of this docs directory) is inserted first on
# sys.path so the in-tree package — and therefore its version — is the one
# that gets imported when building the docs.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)

# BUG FIX: the original line was `import graphene-custom-directives`, which is
# a SyntaxError — hyphens are not legal in Python identifiers.  The importable
# package name uses underscores (the PyPI/distribution name keeps the hyphens).
import graphene_custom_directives

# -- General configuration ---------------------------------------------

extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames and the master toctree document.
source_suffix = '.rst'
master_doc = 'index'

# General information about the project.
project = u'Graphene Custom Directives'
copyright = u'2016, Eran Kampf'

# The short X.Y version and the full version (including alpha/beta/rc tags)
# are both taken directly from the package.
version = graphene_custom_directives.__version__
release = graphene_custom_directives.__version__

# Patterns, relative to the source directory, to ignore when looking for
# source files.
exclude_patterns = ['_build']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output -------------------------------------------

html_theme = 'default'

# Paths that contain custom static files (such as style sheets), copied
# after the builtin static files.
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'graphene-custom-directivesdoc'

# -- Options for LaTeX output ------------------------------------------

# No LaTeX overrides (papersize, pointsize, preamble) are customized.
latex_elements = {
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'graphene-custom-directives.tex',
     u'Graphene Custom Directives Documentation',
     u'Eran Kampf', 'manual'),
]

# -- Options for manual page output ------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'graphene-custom-directives',
     u'Graphene Custom Directives Documentation',
     [u'Eran Kampf'], 1)
]

# -- Options for Texinfo output ----------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    ('index', 'graphene-custom-directives',
     u'Graphene Custom Directives Documentation',
     u'Eran Kampf',
     'graphene-custom-directives',
     'One line description of project.',
     'Miscellaneous'),
]
| {
"content_hash": "3dccda9813c7d4b4415cfd5b90cdf5ca",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 76,
"avg_line_length": 31.403846153846153,
"alnum_prop": 0.7070422535211267,
"repo_name": "ekampf/graphene-custom-directives",
"id": "40634ca549b1a38e771847d6d7fa6f7ecd59a80d",
"size": "8626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2642"
},
{
"name": "Python",
"bytes": "15523"
}
],
"symlink_target": ""
} |
from argparse import Namespace
from datetime import datetime
from typing import List, Tuple, Optional
from fhirtordf.rdfsupport.fhirgraphutils import value
from fhirtordf.rdfsupport.uriutils import parse_fhir_resource_uri
from rdflib import Graph, RDF, URIRef
from i2fhirb2.fhir.fhirobservationfact import FHIRObservationFactFactory
from i2fhirb2.fhir.fhirpatientdimension import FHIRPatientDimension
from i2fhirb2.fhir.fhirpatientmapping import FHIRPatientMapping
from i2fhirb2.fhir.fhirresourcemap import FHIR_RESOURCE_MAP, FHIR_Infrastructure_type, FHIR_Observation_Fact_type, \
FHIR_Visit_Dimension_type, FHIR_Provider_Dimension_type, FHIR_Patient_Dimension_type, FHIR_Bundle_type, \
FHIR_Resource_type
from i2fhirb2.fhir.fhirspecific import FHIR
from i2fhirb2.fhir.fhirvisitdimension import FHIRVisitDimension
from i2b2model.data.i2b2encountermapping import EncounterMapping
from i2b2model.data.i2b2observationfact import ObservationFactKey, ObservationFact
from i2b2model.data.i2b2patientdimension import PatientDimension
from i2b2model.data.i2b2patientmapping import PatientMapping
from i2b2model.data.i2b2visitdimension import VisitDimension
from i2b2model.shared.i2b2core import I2B2Core
from i2b2model.sqlsupport.dbconnection import I2B2Tables
from i2b2model.sqlsupport.i2b2tables import change_column_length
from i2fhirb2.tsv_support.tsvwriter import write_tsv
from dynprops import heading
# TODO: Handle continuation pages in queries and bundles
class I2B2GraphMap:
    """Map the FHIR resource instances in an RDF graph to i2b2 star-schema
    records (observation facts, patient/visit dimensions, and mappings)."""

    def __init__(self, g: Graph, opts: Namespace) -> None:
        """
        Iterate over the resources in the graph mapping them to their i2b2 equivalent
        :param g: graph
        :param opts: input options
        """
        self._opts = opts
        self._g = g
        # Counters for resources that are recognized but not loaded.
        self.num_infrastructure = 0     # Number of infrastructure resources encountered (and not loaded)
        self.num_visit = 0              # Number of visit resources (to be implemented)
        self.num_provider = 0           # Number of provider resources
        self.num_bundle = 0             # Number of bundler resources (we should be unwrapping these?)
        self.num_unmapped = 0           # Number of untyped resources encountered (need classifying)
        # Accumulated output records, flushed later by generate_tsv_files /
        # load_i2b2_tables.
        self.observation_facts = []         # type: List[ObservationFact]
        self.patient_dimensions = []        # type: List[PatientDimension]
        self.patient_mappings = []          # type: List[PatientMapping]
        self.visit_dimensions = []          # type: List[VisitDimension]
        self.encounter_mappings = []        # type: List[EncounterMapping]
        self.tables = opts.tables  # type: I2B2Tables

        nresources = 0
        # Walk every typed subject in the graph and dispatch on its mapped
        # i2b2 category.
        for subj, subj_type in g.subject_objects(RDF.type):
            if isinstance(subj, URIRef) and subj_type in FHIR_RESOURCE_MAP:
                action = f"{nresources}: ({str(subj_type).split('/')[-1]}) - {subj}"
                nresources += 1
                mapped_type = FHIR_RESOURCE_MAP[subj_type]
                if isinstance(mapped_type, FHIR_Infrastructure_type):
                    self.num_infrastructure += 1
                    rslt = "Skipped"
                elif isinstance(mapped_type, FHIR_Observation_Fact_type):
                    # pm is None when the resource carries no patient key --
                    # in that case no facts are generated.
                    pm, vd, start_date = self.process_resource_instance(subj, mapped_type)
                    if pm is not None:
                        obsfactory = \
                            FHIRObservationFactFactory(g, ObservationFactKey(pm.patient_num,
                                                                             vd.visit_dimension_entry.encounter_num,
                                                                             opts.providerid, start_date), subj)
                        # TODO: Decide what to do with the other mappings in the observation factory
                        self.observation_facts += obsfactory.observation_facts
                    rslt = f"pnum: {pm.patient_num if pm is not None else 'NONE'} " \
                           f"enum:{vd.visit_dimension_entry.encounter_num if vd is not None else 'NONE'}"
                elif isinstance(mapped_type, FHIR_Visit_Dimension_type):
                    self.num_visit += 1
                    rslt = "Not Implemented"
                elif isinstance(mapped_type, FHIR_Provider_Dimension_type):
                    self.num_provider += 1
                    rslt = "Not Implemented"
                elif isinstance(mapped_type, FHIR_Patient_Dimension_type):
                    pd = FHIRPatientDimension(self._g, self.tables, subj)
                    self.patient_dimensions.append(pd.patient_dimension_entry)
                    self.patient_mappings += pd.patient_mappings.patient_mapping_entries
                    rslt = f"pnum: {pd.patient_dimension_entry.patient_num}"
                elif isinstance(mapped_type, FHIR_Bundle_type):
                    self.num_bundle += 1
                    rslt = "Skipped"
                else:
                    self.num_unmapped += 1
                    rslt = "Unmapped"
                print(f"{action} ({rslt})")
        print("---> Graph map phase complete")

    def process_resource_instance(self, subj: URIRef, mapped_type: FHIR_Resource_type) \
            -> Tuple[Optional[FHIRPatientMapping], Optional[FHIRVisitDimension], Optional[datetime]]:
        """Create patient/visit mapping entries for one resource instance.

        :param subj: resource instance subject URI
        :param mapped_type: its i2b2 mapping type (supplies the fact key)
        :return: (patient mapping, visit dimension, start date) or
            (None, None, None) when the resource has no patient key
        """
        patient_id_uri, encounter_id_uri, provider_id = mapped_type.fact_key_for(self._g, subj)
        if patient_id_uri is not None:
            parsed_resource = parse_fhir_resource_uri(patient_id_uri)
            pm = FHIRPatientMapping(self.tables, parsed_resource.resource, str(parsed_resource.namespace))
            self.patient_mappings += pm.patient_mapping_entries
            # Fall back to "now" when the resource carries no effective date.
            start_date = value(self._g, subj, FHIR.Observation.effectiveDateTime)
            if not start_date:
                start_date = datetime.now()
            vd = FHIRVisitDimension(subj, pm.patient_num, parsed_resource.resource, str(parsed_resource.namespace),
                                    start_date)
            self.visit_dimensions.append(vd.visit_dimension_entry)
            self.encounter_mappings += vd.encounter_mappings.encounter_mapping_entries
            return pm, vd, start_date
        else:
            # Just a reference to a resource instance -- drop it
            return None, None, None

    def generate_tsv_files(self) -> None:
        """Write every accumulated record type to its TSV file in outdir."""
        self._generate_tsv_file("observation_fact.tsv", ObservationFact, self.observation_facts)
        self._generate_tsv_file("patient_dimension.tsv", PatientDimension, self.patient_dimensions)
        self._generate_tsv_file("patient_mapping.tsv", PatientMapping, self.patient_mappings)
        self._generate_tsv_file("visit_dimension.tsv", VisitDimension, self.visit_dimensions)
        self._generate_tsv_file("encounter_mapping.tsv", EncounterMapping, self.encounter_mappings)

    def _generate_tsv_file(self, fname: str, cls, values: List[I2B2Core]) -> None:
        """Write one TSV file with cls's heading followed by values."""
        write_tsv(self._opts.outdir, fname, heading(cls), values)

    @staticmethod
    def clear_i2b2_tables(tables: I2B2Tables, uploadid: int) -> None:
        """
        Remove all entries in the i2b2 tables for uploadid.
        :param tables:
        :param uploadid:
        :return:
        """
        # This is a static function to support the removefacts operation
        print("Deleted {} patient_dimension records"
              .format(PatientDimension.delete_upload_id(tables, uploadid)))
        print("Deleted {} patient_mapping records"
              .format(PatientMapping.delete_upload_id(tables, uploadid)))
        print("Deleted {} observation_fact records"
              .format(ObservationFact.delete_upload_id(tables, uploadid)))
        print("Deleted {} visit_dimension records"
              .format(VisitDimension.delete_upload_id(tables, uploadid)))
        print("Deleted {} encounter_mapping records"
              .format(EncounterMapping.delete_upload_id(tables, uploadid)))

    @staticmethod
    def clear_i2b2_sourcesystems(tables: I2B2Tables, sourcesystemcd: str) -> None:
        """Remove all entries in the i2b2 tables for one sourcesystem_cd."""
        print("Deleted {} patient_dimension records"
              .format(PatientDimension.delete_sourcesystem_cd(tables, sourcesystemcd)))
        print("Deleted {} patient_mapping records"
              .format(PatientMapping.delete_sourcesystem_cd(tables, sourcesystemcd)))
        print("Deleted {} observation_fact records"
              .format(ObservationFact.delete_sourcesystem_cd(tables, sourcesystemcd)))
        print("Deleted {} visit_dimension records"
              .format(VisitDimension.delete_sourcesystem_cd(tables, sourcesystemcd)))
        print("Deleted {} encounter_mapping records"
              .format(EncounterMapping.delete_sourcesystem_cd(tables, sourcesystemcd)))

    def load_i2b2_tables(self, check_dups=False) -> None:
        """Load every accumulated record into the live i2b2 tables.

        :param check_dups: when True, duplicate records are checked before
            insertion (passed through to I2B2Core).
        """
        # session = sessionmaker(bind=tables.crc_engine)()
        I2B2Core._check_dups = check_dups
        if self._opts.remove:
            # TODO: This should really be within a transaction boundary
            self.clear_i2b2_tables(self._opts.tables, self._opts.uploadid)
        # Widen concept_cd / modifier_cd to 200 chars to hold long FHIR codes.
        change_column_length(self._opts.tables.observation_fact, self._opts.tables.observation_fact.c.concept_cd,
                             200, self._opts.tables.crc_connection)
        change_column_length(self._opts.tables.observation_fact, self._opts.tables.observation_fact.c.modifier_cd,
                             200, self._opts.tables.crc_connection)
        print("{} / {} patient_dimension records added / modified"
              .format(*PatientDimension.add_or_update_records(self._opts.tables, self.patient_dimensions)))
        print("{} / {} patient_mapping records added / modified"
              .format(*PatientMapping.add_or_update_records(self._opts.tables, self.patient_mappings)))
        print("{} / {} visit_dimension records added / modified"
              .format(*VisitDimension.add_or_update_records(self._opts.tables, self.visit_dimensions)))
        print("{} / {} encounter_mapping records added / modified"
              .format(*EncounterMapping.add_or_update_records(self._opts.tables, self.encounter_mappings)))
        print("{} / {} observation_fact records added / modified"
              .format(*ObservationFact.add_or_update_records(self._opts.tables, self.observation_facts)))
        # session.close()

    def summary(self) -> str:
        """Return a human-readable summary of generated and skipped records."""
        summary_text = """Generated:
    {} Observation facts
    {} Patients
    {} Patient mappings
    """
        skip_text = """=== SKIPS ===
    {num_bundle} Bundled resources (shouldn't happen?)
    {num_visit} Visit resources
    {num_infrastructure} Infrastructure resources
    {num_provider} Provider resources
    {num_unmapped} Unmapped resources
    """
        num_skips = self.num_infrastructure + self.num_visit + self.num_provider + self.num_unmapped + self.num_bundle
        rval = summary_text.format(len(self.observation_facts),
                                   len(self.patient_dimensions),
                                   len(self.patient_mappings))
        if num_skips:
            # skip_text's named fields all exist as instance attributes.
            rval += skip_text.format(**self.__dict__)
        return rval
| {
"content_hash": "9ec9666a833e10b334e57a0fb78e51ad",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 118,
"avg_line_length": 57.58762886597938,
"alnum_prop": 0.6368600071607591,
"repo_name": "BD2KOnFHIR/i2FHIRb2",
"id": "bac19fe000aa705a244d342ea43a994b2f0a0f62",
"size": "11172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "i2fhirb2/loaders/i2b2graphmap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "263110"
}
],
"symlink_target": ""
} |
from gen import *
from dataset import *
import numpy as np
from fg import Foreground, FGTextureType
def save_to_file(npy_file_name, n_examples, dataset, use_patch_centers=False, e=16):
    """Convert up to n_examples from dataset to numpy arrays and save as .npy.

    Each dataset element is a tuple (data, target) or, when use_patch_centers
    is True, (data, target, patch_centers).  The saved file holds an object
    array of [data (float32), targets (uint8)] plus patch centers (int8) when
    requested.

    :param npy_file_name: output .npy path
    :param n_examples: maximum number of examples to convert
    :param dataset: iterable of example tuples
    :param use_patch_centers: also collect/save the third tuple element
    :param e: patch edge size (kept for interface compatibility; the arrays
        are now sized from the data itself)

    Fixes over the original implementation:
    - ``arr.dtype = np.float32`` reinterpreted the float64 buffer in place,
      doubling the column count and garbling every value; ``dtype=`` /
      ``astype`` conversion is what was intended.
    - rows are collected in lists and stacked once instead of O(n^2)
      ``np.vstack`` per example (which also required dummy first rows).
    - the container is built as an explicit object array, which modern numpy
      requires for ragged sub-arrays.
    """
    data_rows, target_rows, center_rows = [], [], []
    for count, example in enumerate(dataset):
        if count == n_examples:
            break
        data_rows.append(example[0])
        target_rows.append(example[1])
        if use_patch_centers:
            center_rows.append(example[2])

    np_data = np.asarray(data_rows, dtype=np.float32)
    np_targets = np.asarray(target_rows, dtype=np.uint8)

    if use_patch_centers:
        np_patch_centers = np.asarray(center_rows, dtype=np.int8)
        np_dataset = np.empty(3, dtype=object)
        np_dataset[:] = [np_data, np_targets, np_patch_centers]
    else:
        np_dataset = np.empty(2, dtype=object)
        np_dataset[:] = [np_data, np_targets]

    print("Converted %s to a numpy array." % npy_file_name)
    np.save(npy_file_name, np_dataset)
if __name__=="__main__":
# TETROMINO
tetromino_gen = lambda w, h: TwoGroups("tetrisi/tetriso/tetrist/tetrisl/tetrisj/tetriss/tetrisz",
1010, w, h,
n1 = 1, n2 = 2, rot = True, scale=True, task = 1)
fg = Foreground(size=(16, 16), texture_type=FGTextureType.PlainBin)
texture = fg.generate_texture()
# PENTOMINO
pentomino_gen = lambda w, h: TwoGroups("pentl/pentn/pentp/pentf/penty/pentj/pentn2/pentq/pentf2/penty2",
2020, w, h, use_patch_centers=True,
n1 = 1, n2 = 2, rot = True, texture=texture, scale=True, task = 1)
pentomino = lambda w, h: SpritePlacer(pentomino_gen(w, h), collision_check=True, enable_perlin=False)
pentomino64x64 = pentomino(64, 64)
pentomino_dir = "/data/lisa/data/pentomino/"
pentomino64x64_raw = pentomino_dir + "pentomino64x64_300_presence.npy"
print "Started saving pentomino64x64"
no_of_examples = 300
save_to_file(pentomino64x64_raw, no_of_examples, pentomino64x64,
use_patch_centers=True, e=64)
| {
"content_hash": "be60d4c2868a3b7efe8e5182502c7599",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 108,
"avg_line_length": 34.54545454545455,
"alnum_prop": 0.6013157894736842,
"repo_name": "caglar/Arcade-Universe",
"id": "c09e212579e241a2271c013d35599cdbd3245041",
"size": "2280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arcade_universe/premade_simple_pento64x64.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "99513"
},
{
"name": "Shell",
"bytes": "2819"
}
],
"symlink_target": ""
} |
"""Prepare dataset for keras model benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from official.utils.misc import model_helpers # pylint: disable=g-bad-import-order
# Default values for dataset.
_NUM_CHANNELS = 3
_NUM_CLASSES = 1000
def _get_default_image_size(model):
"""Provide default image size for each model."""
image_size = (224, 224)
if model in ["inceptionv3", "xception", "inceptionresnetv2"]:
image_size = (299, 299)
elif model in ["nasnetlarge"]:
image_size = (331, 331)
return image_size
def generate_synthetic_input_dataset(model, batch_size):
  """Generate a synthetic (random) dataset shaped like the model's input.

  Args:
    model: str, model name used to look up the default image size.
    batch_size: int, number of samples per batch.

  Returns:
    A dataset of synthetic images and labels, as produced by
    `model_helpers.generate_synthetic_data`.
  """
  height, width = _get_default_image_size(model)
  images_shape = tf.TensorShape((batch_size, height, width, _NUM_CHANNELS))
  labels_shape = tf.TensorShape((batch_size, _NUM_CLASSES))
  return model_helpers.generate_synthetic_data(
      input_shape=images_shape,
      label_shape=labels_shape,
  )
class Cifar10Dataset(object):
  """CIFAR10 dataset, including train and test set.

  Each sample consists of a 32x32 color image, and label is from 10 classes.
  """

  def __init__(self, batch_size):
    """Initializes train/test datasets.

    Args:
      batch_size: int, the number of batch size.
    """
    self.input_shape = (32, 32, 3)
    self.num_classes = 10

    (train_x, train_y), (test_x, test_y) = tf.keras.datasets.cifar10.load_data()

    # Scale pixel values to [0, 1] and one-hot encode the integer labels.
    train_x = train_x / 255.0
    test_x = test_x / 255.0
    train_y = tf.keras.utils.to_categorical(
        train_y.astype(np.int64), self.num_classes)
    test_y = tf.keras.utils.to_categorical(
        test_y.astype(np.int64), self.num_classes)

    def _as_dataset(images, labels):
      # Shuffle with a 2000-element buffer, batch, and repeat indefinitely.
      return tf.data.Dataset.from_tensor_slices(
          (images, labels)).shuffle(2000).batch(batch_size).repeat()

    self.train_dataset = _as_dataset(train_x, train_y)
    self.test_dataset = _as_dataset(test_x, test_y)
| {
"content_hash": "72c519d2599fdc38e40fb2efebef2553",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 83,
"avg_line_length": 33.86666666666667,
"alnum_prop": 0.6855314960629921,
"repo_name": "derekjchow/models",
"id": "1a35caeb3275ca427f709af538d11090d3512554",
"size": "2721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "official/keras_application_models/dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1523636"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33316"
},
{
"name": "Jupyter Notebook",
"bytes": "2831692"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "14201542"
},
{
"name": "Shell",
"bytes": "158255"
}
],
"symlink_target": ""
} |
"""
Data structures for the Buffer.
It holds the text, cursor position, history, etc...
"""
from __future__ import unicode_literals
from .auto_suggest import AutoSuggest
from .clipboard import ClipboardData
from .completion import Completer, Completion, CompleteEvent
from .document import Document
from .enums import IncrementalSearchDirection
from .filters import to_simple_filter
from .history import History, InMemoryHistory
from .search_state import SearchState
from .selection import SelectionType, SelectionState
from .utils import Event
from .cache import FastDictCache
from .validation import ValidationError
from six.moves import range
import os
import re
import six
import subprocess
import tempfile
__all__ = (
'EditReadOnlyBuffer',
'AcceptAction',
'Buffer',
'indent',
'unindent',
'reshape_text',
)
class EditReadOnlyBuffer(Exception):
    """
    Attempt editing of read-only :class:`.Buffer`.

    Raised by the ``text``/``document`` setters (and all editing operations
    built on them) when the buffer's ``read_only`` filter evaluates to True.
    """
class AcceptAction(object):
    """
    What to do when the input is accepted by the user.
    (When Enter was pressed in the command line.)

    :param handler: (optional) A callable which takes a
        :class:`~prompt_toolkit.interface.CommandLineInterface` and
        :class:`~prompt_toolkit.document.Document`. It is called when the user
        accepts input.
    """
    def __init__(self, handler=None):
        assert handler is None or callable(handler)
        self.handler = handler

    @classmethod
    def run_in_terminal(cls, handler, render_cli_done=False):
        """
        Create an :class:`.AcceptAction` that runs the given handler in the
        terminal.

        :param render_cli_done: When True, render the interface in the 'Done'
                state first, then execute the function. If False, erase the
                interface instead.
        """
        def wrapper(cli, buffer):
            # Defer to the CLI: the user handler runs with the terminal in a
            # plain state, outside the rendered interface.
            cli.run_in_terminal(lambda: handler(cli, buffer),
                                render_cli_done=render_cli_done)
        return AcceptAction(handler=wrapper)

    @property
    def is_returnable(self):
        """
        True when there is something handling accept.
        """
        return bool(self.handler)

    def validate_and_handle(self, cli, buffer):
        """
        Validate buffer and handle the accept action.
        """
        # Invalid input is neither handled nor added to the history.
        if not buffer.validate():
            return
        if self.handler:
            self.handler(cli, buffer)
        buffer.append_to_history()
def _return_document_handler(cli, buffer):
    # Default accept handler: hand the buffer's document back as the return
    # value of the CLI's run loop.
    cli.set_return_value(buffer.document)
# Predefined accept actions: RETURN_DOCUMENT returns the document from the
# event loop; IGNORE makes accepting input a no-op.
AcceptAction.RETURN_DOCUMENT = AcceptAction(_return_document_handler)
AcceptAction.IGNORE = AcceptAction(handler=None)
class CompletionState(object):
    """
    Immutable class that contains a completion state.

    :param original_document: The :class:`.Document` as it was when completion
        started.
    :param current_completions: List of :class:`.Completion` instances possible
        at this point.
    :param complete_index: Index into `current_completions`, or `None` for
        "no completion selected" (the original text).
    """
    def __init__(self, original_document, current_completions=None, complete_index=None):
        #: Document as it was when the completion started.
        self.original_document = original_document

        #: List of all the current Completion instances which are possible at
        #: this point.
        self.current_completions = current_completions or []

        #: Position in the `current_completions` array.
        #: This can be `None` to indicate "no completion", the original text.
        self.complete_index = complete_index  # Position in the `_completions` array.

    def __repr__(self):
        return '%s(%r, <%r> completions, index=%r)' % (
            self.__class__.__name__,
            self.original_document, len(self.current_completions), self.complete_index)

    def go_to_index(self, index):
        """
        Create a new :class:`.CompletionState` object with the new index.
        """
        return CompletionState(self.original_document, self.current_completions, complete_index=index)

    def new_text_and_position(self):
        """
        Return (new_text, new_cursor_position) for this completion.
        """
        if self.complete_index is None:
            return self.original_document.text, self.original_document.cursor_position
        else:
            original_text_before_cursor = self.original_document.text_before_cursor
            original_text_after_cursor = self.original_document.text_after_cursor

            c = self.current_completions[self.complete_index]
            if c.start_position == 0:
                before = original_text_before_cursor
            else:
                # Assumes start_position < 0 here (the number of characters
                # before the cursor that the completion replaces) — the
                # negative slice drops exactly those characters.
                before = original_text_before_cursor[:c.start_position]

            new_text = before + c.text + original_text_after_cursor
            new_cursor_position = len(before) + len(c.text)
            return new_text, new_cursor_position

    @property
    def current_completion(self):
        """
        Return the current completion, or return `None` when no completion is
        selected.
        """
        if self.complete_index is not None:
            return self.current_completions[self.complete_index]
class Buffer(object):
"""
The core data structure that holds the text and cursor position of the
current input line and implements all text manupulations on top of it. It
also implements the history, undo stack and the completion state.
:param completer: :class:`~prompt_toolkit.completion.Completer` instance.
:param history: :class:`~prompt_toolkit.history.History` instance.
:param tempfile_suffix: Suffix to be appended to the tempfile for the 'open
in editor' function.
Events:
:param on_text_changed: When the buffer text changes. (Callable on None.)
:param on_text_insert: When new text is inserted. (Callable on None.)
:param on_cursor_position_changed: When the cursor moves. (Callable on None.)
Filters:
:param is_multiline: :class:`~prompt_toolkit.filters.SimpleFilter` to
indicate whether we should consider this buffer a multiline input. If
so, key bindings can decide to insert newlines when pressing [Enter].
(Instead of accepting the input.)
:param complete_while_typing: :class:`~prompt_toolkit.filters.SimpleFilter`
instance. Decide whether or not to do asynchronous autocompleting while
typing.
:param enable_history_search: :class:`~prompt_toolkit.filters.SimpleFilter`
to indicate when up-arrow partial string matching is enabled. It is
adviced to not enable this at the same time as `complete_while_typing`,
because when there is an autocompletion found, the up arrows usually
browse through the completions, rather than through the history.
:param read_only: :class:`~prompt_toolkit.filters.SimpleFilter`. When True,
changes will not be allowed.
"""
    def __init__(self, completer=None, auto_suggest=None, history=None,
                 validator=None, tempfile_suffix='',
                 is_multiline=False, complete_while_typing=False,
                 enable_history_search=False, initial_document=None,
                 accept_action=AcceptAction.IGNORE, read_only=False,
                 on_text_changed=None, on_text_insert=None, on_cursor_position_changed=None):
        # Accept both filters and booleans as input.
        enable_history_search = to_simple_filter(enable_history_search)
        is_multiline = to_simple_filter(is_multiline)
        complete_while_typing = to_simple_filter(complete_while_typing)
        read_only = to_simple_filter(read_only)

        # Validate input.
        assert completer is None or isinstance(completer, Completer)
        assert auto_suggest is None or isinstance(auto_suggest, AutoSuggest)
        assert history is None or isinstance(history, History)
        assert on_text_changed is None or callable(on_text_changed)
        assert on_text_insert is None or callable(on_text_insert)
        assert on_cursor_position_changed is None or callable(on_cursor_position_changed)

        self.completer = completer
        self.auto_suggest = auto_suggest
        self.validator = validator
        self.tempfile_suffix = tempfile_suffix
        self.accept_action = accept_action

        # Filters. (Usually, used by the key bindings to drive the buffer.)
        self.is_multiline = is_multiline
        self.complete_while_typing = complete_while_typing
        self.enable_history_search = enable_history_search
        self.read_only = read_only

        # Text width. (For wrapping, used by the Vi 'gq' operator.)
        self.text_width = 0

        #: The command buffer history.
        # Note that we shouldn't use a lazy 'or' here. bool(history) could be
        # False when empty.
        self.history = InMemoryHistory() if history is None else history

        self.__cursor_position = 0

        # Events
        self.on_text_changed = Event(self, on_text_changed)
        self.on_text_insert = Event(self, on_text_insert)
        self.on_cursor_position_changed = Event(self, on_cursor_position_changed)

        # Document cache. (Avoid creating new Document instances.)
        self._document_cache = FastDictCache(Document, size=10)

        # All remaining mutable state is initialized in `reset()`, so the
        # buffer can be re-initialized later without reconstructing it.
        self.reset(initial_document=initial_document)
    def reset(self, initial_document=None, append_to_history=False):
        """
        Reset the buffer to a fresh (or given) document, clearing all derived
        state (selection, completion, validation, suggestion, undo/redo).

        :param initial_document: Optional :class:`.Document` to start from.
        :param append_to_history: Append current input to history first.
        """
        assert initial_document is None or isinstance(initial_document, Document)

        if append_to_history:
            self.append_to_history()

        initial_document = initial_document or Document()

        self.__cursor_position = initial_document.cursor_position

        # `ValidationError` instance. (Will be set when the input is wrong.)
        self.validation_error = None

        # State of the selection.
        self.selection_state = None

        # When doing consecutive up/down movements, prefer to stay at this column.
        self.preferred_column = None

        # State of complete browser
        self.complete_state = None  # For interactive completion through Ctrl-N/Ctrl-P.

        # Current suggestion.
        self.suggestion = None

        # The history search text. (Used for filtering the history when we
        # browse through it.)
        self.history_search_text = None

        # Undo/redo stacks
        self._undo_stack = []  # Stack of (text, cursor_position)
        self._redo_stack = []

        #: The working lines. Similar to history, except that this can be
        #: modified. The user can press arrow_up and edit previous entries.
        #: Ctrl-C should reset this, and copy the whole history back in here.
        #: Enter should process the current command and append to the real
        #: history.
        self._working_lines = self.history.strings[:]
        self._working_lines.append(initial_document.text)

        self.__working_index = len(self._working_lines) - 1
# <getters/setters>
def _set_text(self, value):
""" set text at current working_index. Return whether it changed. """
working_index = self.working_index
working_lines = self._working_lines
original_value = working_lines[working_index]
working_lines[working_index] = value
# Return True when this text has been changed.
if len(value) != len(original_value):
# For Python 2, it seems that when two strings have a different
# length and one is a prefix of the other, Python still scans
# character by character to see whether the strings are different.
# (Some benchmarking showed significant differences for big
# documents. >100,000 of lines.)
return True
elif value != original_value:
return True
return False
def _set_cursor_position(self, value):
""" Set cursor position. Return whether it changed. """
original_position = self.__cursor_position
self.__cursor_position = max(0, value)
return value != original_position
    @property
    def text(self):
        # The text being edited: the entry of `_working_lines` selected by
        # `working_index`.
        return self._working_lines[self.working_index]

    @text.setter
    def text(self, value):
        """
        Setting text. (When doing this, make sure that the cursor_position is
        valid for this text. text/cursor_position should be consistent at any time,
        otherwise set a Document instead.)
        """
        assert isinstance(value, six.text_type), 'Got %r' % value
        assert self.cursor_position <= len(value)

        # Don't allow editing of read-only buffers.
        if self.read_only():
            raise EditReadOnlyBuffer()

        changed = self._set_text(value)

        if changed:
            self._text_changed()

            # Reset history search text.
            self.history_search_text = None
@property
def cursor_position(self):
return self.__cursor_position
@cursor_position.setter
def cursor_position(self, value):
"""
Setting cursor position.
"""
assert isinstance(value, int)
assert value <= len(self.text)
changed = self._set_cursor_position(value)
if changed:
self._cursor_position_changed()
@property
def working_index(self):
return self.__working_index
@working_index.setter
def working_index(self, value):
if self.__working_index != value:
self.__working_index = value
self._text_changed()
    def _text_changed(self):
        # Invalidate all state that was derived from the previous text, then
        # notify listeners.
        # Remove any validation errors and complete state.
        self.validation_error = None
        self.complete_state = None
        self.selection_state = None
        self.suggestion = None
        self.preferred_column = None

        # fire 'on_text_changed' event.
        self.on_text_changed.fire()
    def _cursor_position_changed(self):
        # Invalidate state derived from the previous cursor position, then
        # notify listeners.
        # Remove any validation errors and complete state.
        self.validation_error = None
        self.complete_state = None

        # Unset preferred_column. (Will be set after the cursor movement, if
        # required.)
        self.preferred_column = None

        # Note that the cursor position can change if we have a selection:
        # the new position of the cursor determines the end of the selection.

        # fire 'on_cursor_position_changed' event.
        self.on_cursor_position_changed.fire()
    @property
    def document(self):
        """
        Return :class:`~prompt_toolkit.document.Document` instance from the
        current text, cursor position and selection state.
        """
        # Served through `_document_cache`, so repeated reads with unchanged
        # (text, cursor, selection) reuse the same Document instance.
        return self._document_cache[
            self.text, self.cursor_position, self.selection_state]

    @document.setter
    def document(self, value):
        """
        Set :class:`~prompt_toolkit.document.Document` instance.

        This will set both the text and cursor position at the same time, but
        atomically. (Change events will be triggered only after both have been set.)
        """
        self.set_document(value)
    def set_document(self, value, bypass_readonly=False):
        """
        Set :class:`~prompt_toolkit.document.Document` instance. Like the
        ``document`` property, but accept an ``bypass_readonly`` argument.

        :param value: The new :class:`.Document` (text + cursor position).
        :param bypass_readonly: When True, don't raise an
                                :class:`.EditReadOnlyBuffer` exception, even
                                when the buffer is read-only.
        """
        assert isinstance(value, Document)

        # Don't allow editing of read-only buffers.
        if not bypass_readonly and self.read_only():
            raise EditReadOnlyBuffer()

        # Set text and cursor position first.
        text_changed = self._set_text(value.text)
        cursor_position_changed = self._set_cursor_position(value.cursor_position)

        # Now handle change events. (We do this when text/cursor position is
        # both set and consistent.)
        if text_changed:
            self._text_changed()

        if cursor_position_changed:
            self._cursor_position_changed()
# End of <getters/setters>
    def save_to_undo_stack(self, clear_redo_stack=True):
        """
        Save current state (input text and cursor position), so that we can
        restore it by calling undo.

        :param clear_redo_stack: When True (the default for a fresh edit),
            drop any redoable states.
        """
        # Save if the text differs from the text at the top of the stack.
        # If the text is the same, just update the stored cursor position.
        if self._undo_stack and self._undo_stack[-1][0] == self.text:
            self._undo_stack[-1] = (self._undo_stack[-1][0], self.cursor_position)
        else:
            self._undo_stack.append((self.text, self.cursor_position))

        # Saving anything to the undo stack, clears the redo stack.
        if clear_redo_stack:
            self._redo_stack = []
    def transform_lines(self, line_index_iterator, transform_callback):
        """
        Transforms the text on a range of lines.
        When the iterator yield an index not in the range of lines that the
        document contains, it skips them silently.

        To uppercase some lines::

            new_text = transform_lines(range(5,10), lambda text: text.upper())

        :param line_index_iterator: Iterator of line numbers (int)
        :param transform_callback: callable that takes the original text of a
                                   line, and return the new text for this line.

        :returns: The new text.
        """
        # Split lines
        lines = self.text.split('\n')

        # Apply transformation
        for index in line_index_iterator:
            try:
                # NOTE(review): negative indexes address lines from the end
                # (normal list semantics), and an IndexError raised *inside*
                # the callback is swallowed as well — confirm both are
                # intended before tightening this.
                lines[index] = transform_callback(lines[index])
            except IndexError:
                pass

        return '\n'.join(lines)
def transform_current_line(self, transform_callback):
"""
Apply the given transformation function to the current line.
:param transform_callback: callable that takes a string and return a new string.
"""
document = self.document
a = document.cursor_position + document.get_start_of_line_position()
b = document.cursor_position + document.get_end_of_line_position()
self.text = (
document.text[:a] +
transform_callback(document.text[a:b]) +
document.text[b:])
def transform_region(self, from_, to, transform_callback):
"""
Transform a part of the input string.
:param from_: (int) start position.
:param to: (int) end position.
:param transform_callback: Callable which accepts a string and returns
the transformed string.
"""
assert from_ < to
self.text = ''.join([
self.text[:from_] +
transform_callback(self.text[from_:to]) +
self.text[to:]
])
    def cursor_left(self, count=1):
        # Move the cursor `count` positions to the left; the (negative) delta
        # is computed by the document, which knows the line boundaries.
        self.cursor_position += self.document.get_cursor_left_position(count=count)
    def cursor_right(self, count=1):
        # Move the cursor `count` positions to the right; the delta is
        # computed by the document, which knows the line boundaries.
        self.cursor_position += self.document.get_cursor_right_position(count=count)
def cursor_up(self, count=1):
""" (for multiline edit). Move cursor to the previous line. """
original_column = self.preferred_column or self.document.cursor_position_col
self.cursor_position += self.document.get_cursor_up_position(
count=count, preferred_column=original_column)
# Remember the original column for the next up/down movement.
self.preferred_column = original_column
def cursor_down(self, count=1):
""" (for multiline edit). Move cursor to the next line. """
original_column = self.preferred_column or self.document.cursor_position_col
self.cursor_position += self.document.get_cursor_down_position(
count=count, preferred_column=original_column)
# Remember the original column for the next up/down movement.
self.preferred_column = original_column
def auto_up(self, count=1):
"""
If we're not on the first line (of a multiline input) go a line up,
otherwise go back in history. (If nothing is selected.)
"""
if self.complete_state:
self.complete_previous(count=count)
elif self.document.cursor_position_row > 0:
self.cursor_up(count=count)
elif not self.selection_state:
self.history_backward(count=count)
def auto_down(self, count=1):
"""
If we're not on the last line (of a multiline input) go a line down,
otherwise go forward in history. (If nothing is selected.)
"""
if self.complete_state:
self.complete_next(count=count)
elif self.document.cursor_position_row < self.document.line_count - 1:
self.cursor_down(count=count)
elif not self.selection_state:
self.history_forward(count=count)
def delete_before_cursor(self, count=1):
"""
Delete character before cursor, return deleted character.
"""
assert count >= 0
deleted = ''
if self.cursor_position > 0:
deleted = self.text[self.cursor_position - count:self.cursor_position]
new_text = self.text[:self.cursor_position - count] + self.text[self.cursor_position:]
new_cursor_position = self.cursor_position - len(deleted)
# Set new Document atomically.
self.document = Document(new_text, new_cursor_position)
return deleted
def delete(self, count=1):
"""
Delete one character. Return deleted character.
"""
if self.cursor_position < len(self.text):
deleted = self.document.text_after_cursor[:count]
self.text = self.text[:self.cursor_position] + \
self.text[self.cursor_position + len(deleted):]
return deleted
else:
return ''
    def join_next_line(self, separator=' '):
        """
        Join the next line to the current one by deleting the line ending after
        the current line.

        :param separator: Text placed between the two joined lines.
        """
        if not self.document.on_last_line:
            # Delete the newline at the end of the current line.
            self.cursor_position += self.document.get_end_of_line_position()
            self.delete()

            # Remove spaces.
            self.text = (self.document.text_before_cursor + separator +
                         self.document.text_after_cursor.lstrip(' '))
    def join_selected_lines(self, separator=' '):
        """
        Join the selected lines.

        :param separator: Text placed between the joined lines.
        """
        assert self.selection_state

        # Get lines.
        from_, to = sorted([self.cursor_position, self.selection_state.original_cursor_position])

        before = self.text[:from_]
        lines = self.text[from_:to].splitlines()
        after = self.text[to:]

        # Replace leading spaces with just one space.
        lines = [l.lstrip(' ') + separator for l in lines]

        # Set new document.
        # (The cursor lands just before the separator appended to the
        # second-to-last joined line.)
        self.document = Document(text=before + ''.join(lines) + after,
                                 cursor_position=len(before + ''.join(lines[:-1])) - 1)
def swap_characters_before_cursor(self):
"""
Swap the last two characters before the cursor.
"""
pos = self.cursor_position
if pos >= 2:
a = self.text[pos - 2]
b = self.text[pos - 1]
self.text = self.text[:pos-2] + b + a + self.text[pos:]
def go_to_history(self, index):
"""
Go to this item in the history.
"""
if index < len(self._working_lines):
self.working_index = index
self.cursor_position = len(self.text)
    def complete_next(self, count=1, disable_wrap_around=False):
        """
        Browse to the next completions.
        (Does nothing if there are no completion.)

        :param disable_wrap_around: When on the last completion, don't wrap
            around to "no completion selected" (index `None`).
        """
        if self.complete_state:
            completions_count = len(self.complete_state.current_completions)

            # Index `None` means: no completion selected (the original text).
            if self.complete_state.complete_index is None:
                index = 0
            elif self.complete_state.complete_index == completions_count - 1:
                index = None

                if disable_wrap_around:
                    return
            else:
                index = min(completions_count-1, self.complete_state.complete_index + count)
            self.go_to_completion(index)
    def complete_previous(self, count=1, disable_wrap_around=False):
        """
        Browse to the previous completions.
        (Does nothing if there are no completion.)

        :param disable_wrap_around: When on the first completion, don't wrap
            around to "no completion selected" (index `None`).
        """
        if self.complete_state:
            # Index `None` means: no completion selected (the original text).
            if self.complete_state.complete_index == 0:
                index = None

                if disable_wrap_around:
                    return
            elif self.complete_state.complete_index is None:
                index = len(self.complete_state.current_completions) - 1
            else:
                index = max(0, self.complete_state.complete_index - count)

            self.go_to_completion(index)
def cancel_completion(self):
"""
Cancel completion, go back to the original text.
"""
if self.complete_state:
self.go_to_completion(None)
self.complete_state = None
    def set_completions(self, completions, go_to_first=True, go_to_last=False):
        """
        Start completions. (Generate list of completions and initialize.)

        :param completions: List of :class:`.Completion` instances, or `None`
            to query `self.completer` synchronously.
        :param go_to_first: Immediately select the first completion.
        :param go_to_last: Immediately select the last completion.
        """
        assert not (go_to_first and go_to_last)

        # Generate list of all completions.
        if completions is None:
            if self.completer:
                completions = list(self.completer.get_completions(
                    self.document,
                    CompleteEvent(completion_requested=True)
                ))
            else:
                completions = []

        # Set `complete_state`.
        if completions:
            self.complete_state = CompletionState(
                original_document=self.document,
                current_completions=completions)
            if go_to_first:
                self.go_to_completion(0)
            elif go_to_last:
                self.go_to_completion(len(completions) - 1)
            else:
                # Keep the original text selected (index `None`).
                self.go_to_completion(None)

        else:
            self.complete_state = None
    def start_history_lines_completion(self):
        """
        Start a completion based on all the other lines in the document and the
        history.
        """
        found_completions = set()
        completions = []

        # For every line of the whole history, find matches with the current line.
        current_line = self.document.current_line_before_cursor.lstrip()

        for i, string in enumerate(self._working_lines):
            for j, l in enumerate(string.split('\n')):
                l = l.strip()
                if l and l.startswith(current_line):
                    # When a new line has been found.
                    if l not in found_completions:
                        found_completions.add(l)

                        # Create completion.
                        if i == self.working_index:
                            display_meta = "Current, line %s" % (j+1)
                        else:
                            display_meta = "History %s, line %s" % (i+1, j+1)

                        completions.append(Completion(
                            l,
                            start_position=-len(current_line),
                            display_meta=display_meta))

        # Reverse the list, so that entries appearing later in the working
        # lines are proposed first.
        self.set_completions(completions=completions[::-1])
    def go_to_completion(self, index):
        """
        Select a completion from the list of current completions.

        :param index: Index into the completion list, or `None` to restore
            the original (uncompleted) text.
        """
        assert index is None or isinstance(index, int)
        assert self.complete_state

        # Set new completion
        state = self.complete_state.go_to_index(index)

        # Set text/cursor position
        new_text, new_cursor_position = state.new_text_and_position()
        self.document = Document(new_text, new_cursor_position)

        # (changing text/cursor position will unset complete_state.)
        self.complete_state = state
    def apply_completion(self, completion):
        """
        Insert a given completion.
        """
        assert isinstance(completion, Completion)

        # If there was already a completion active, cancel that one.
        if self.complete_state:
            self.go_to_completion(None)
            self.complete_state = None

        # Insert text from the given completion.
        # (`start_position` must be <= 0 here — the number of characters
        # before the cursor the completion replaces — since
        # `delete_before_cursor` asserts a non-negative count.)
        self.delete_before_cursor(-completion.start_position)
        self.insert_text(completion.text)
def _set_history_search(self):
""" Set `history_search_text`. """
if self.enable_history_search():
if self.history_search_text is None:
self.history_search_text = self.text
else:
self.history_search_text = None
def _history_matches(self, i):
"""
True when the current entry matches the history search.
(when we don't have history search, it's also True.)
"""
return (self.history_search_text is None or
self._working_lines[i].startswith(self.history_search_text))
    def history_forward(self, count=1):
        """
        Move forwards through the history.

        :param count: Amount of items to move forward.
        """
        self._set_history_search()

        # Go forward in history.
        found_something = False

        # Only entries matching the (optional) history search prefix count.
        for i in range(self.working_index + 1, len(self._working_lines)):
            if self._history_matches(i):
                self.working_index = i
                count -= 1
                found_something = True

            if count == 0:
                break

        # If we found an entry, move cursor to the end of the first line.
        if found_something:
            self.cursor_position = 0
            self.cursor_position += self.document.get_end_of_line_position()
    def history_backward(self, count=1):
        """
        Move backwards through history.

        :param count: Amount of items to move back.
        """
        self._set_history_search()

        # Go back in history.
        found_something = False

        # Only entries matching the (optional) history search prefix count.
        for i in range(self.working_index - 1, -1, -1):
            if self._history_matches(i):
                self.working_index = i
                count -= 1
                found_something = True

            if count == 0:
                break

        # If we move to another entry, move cursor to the end of the line.
        if found_something:
            self.cursor_position = len(self.text)
    def start_selection(self, selection_type=SelectionType.CHARACTERS):
        """
        Take the current cursor position as the start of this selection.

        :param selection_type: A :class:`.SelectionType` value.
        """
        self.selection_state = SelectionState(self.cursor_position, selection_type)
def copy_selection(self, _cut=False):
"""
Copy selected text and return :class:`.ClipboardData` instance.
"""
new_document, clipboard_data = self.document.cut_selection()
if _cut:
self.document = new_document
self.selection_state = None
return clipboard_data
    def cut_selection(self):
        """
        Delete selected text and return :class:`.ClipboardData` instance.
        """
        # Same as a copy, but with the selected text removed from the buffer.
        return self.copy_selection(_cut=True)
    def paste_clipboard_data(self, data, before=False, count=1):
        """
        Insert the data from the clipboard.

        :param data: :class:`.ClipboardData` instance.
        :param before: Passed through to `Document.paste_clipboard_data`
            (paste before instead of after the cursor).
        :param count: Number of times to paste.
        """
        assert isinstance(data, ClipboardData)
        self.document = self.document.paste_clipboard_data(data, before=before, count=count)
def newline(self, copy_margin=True):
"""
Insert a line ending at the current position.
"""
if copy_margin:
self.insert_text('\n' + self.document.leading_whitespace_in_current_line)
else:
self.insert_text('\n')
def insert_line_above(self, copy_margin=True):
"""
Insert a new line above the current one.
"""
if copy_margin:
insert = self.document.leading_whitespace_in_current_line + '\n'
else:
insert = '\n'
self.cursor_position += self.document.get_start_of_line_position()
self.insert_text(insert)
self.cursor_position -= 1
def insert_line_below(self, copy_margin=True):
"""
Insert a new line below the current one.
"""
if copy_margin:
insert = '\n' + self.document.leading_whitespace_in_current_line
else:
insert = '\n'
self.cursor_position += self.document.get_end_of_line_position()
self.insert_text(insert)
    def insert_text(self, data, overwrite=False, move_cursor=True, fire_event=True):
        """
        Insert characters at cursor position.

        :param data: Text to insert.
        :param overwrite: When True, replace (rather than shift) the text
            after the cursor, like Vi replace mode — but never across a
            newline.
        :param move_cursor: When True, place the cursor after the inserted text.
        :param fire_event: Fire `on_text_insert` event. This is mainly used to
             trigger autocompletion while typing.
        """
        # Original text & cursor position.
        otext = self.text
        ocpos = self.cursor_position

        # In insert/text mode.
        if overwrite:
            # Don't overwrite the newline itself. Just before the line ending,
            # it should act like insert mode.
            overwritten_text = otext[ocpos:ocpos + len(data)]
            if '\n' in overwritten_text:
                overwritten_text = overwritten_text[:overwritten_text.find('\n')]

            self.text = otext[:ocpos] + data + otext[ocpos + len(overwritten_text):]
        else:
            self.text = otext[:ocpos] + data + otext[ocpos:]

        if move_cursor:
            self.cursor_position += len(data)

        # Fire 'on_text_insert' event.
        if fire_event:
            self.on_text_insert.fire()
    def undo(self):
        """Restore the most recent different state from the undo stack."""
        # Pop from the undo-stack until we find a text that is different from
        # the current text. (The current logic of `save_to_undo_stack` will
        # cause that the top of the undo stack is usually the same as the
        # current text, so in that case we have to pop twice.)
        while self._undo_stack:
            text, pos = self._undo_stack.pop()

            if text != self.text:
                # Push current text to redo stack.
                self._redo_stack.append((self.text, self.cursor_position))

                # Set new text/cursor_position.
                self.document = Document(text, cursor_position=pos)
                break
def redo(self):
if self._redo_stack:
# Copy current state on undo stack.
self.save_to_undo_stack(clear_redo_stack=False)
# Pop state from redo stack.
text, pos = self._redo_stack.pop()
self.document = Document(text, cursor_position=pos)
    def validate(self):
        """
        Returns `True` if valid.

        Runs the configured validator (if any) against the current document;
        a failure is stored in `self.validation_error` and moves the cursor
        to the error position.
        """
        self.validation_error = None

        # Validate first. If not valid, set validation exception.
        if self.validator:
            try:
                self.validator.validate(self.document)
            except ValidationError as e:
                # Set cursor position (don't allow invalid values.)
                cursor_position = e.cursor_position
                self.cursor_position = min(max(0, cursor_position), len(self.text))

                self.validation_error = e
                return False

        return True
    def append_to_history(self):
        """
        Append the current input to the history.
        (Only if valid input.)
        """
        # Validate first. If not valid, set validation exception.
        if not self.validate():
            return

        # Save at the tail of the history. (But don't, if the last entry in
        # the history is already the same, and skip empty input.)
        if self.text and (not len(self.history) or self.history[-1] != self.text):
            self.history.append(self.text)
    def _search(self, search_state, include_current_position=False, count=1):
        """
        Execute search. Return (working_index, cursor_position) tuple when this
        search is applied. Returns `None` when this text cannot be found.

        :param search_state: :class:`.SearchState` carrying the text, the
            direction and the ignore-case setting.
        :param include_current_position: Allow a match at the current cursor
            position in the current entry.
        :param count: Number of consecutive matches to skip over (> 0).
        """
        assert isinstance(search_state, SearchState)
        assert isinstance(count, int) and count > 0

        text = search_state.text
        direction = search_state.direction
        ignore_case = search_state.ignore_case()

        def search_once(working_index, document):
            """
            Do search one time.
            Return (working_index, document) or `None`
            """
            if direction == IncrementalSearchDirection.FORWARD:
                # Try find at the current input.
                new_index = document.find(
                    text, include_current_position=include_current_position,
                    ignore_case=ignore_case)

                if new_index is not None:
                    return (working_index,
                            Document(document.text, document.cursor_position + new_index))
                else:
                    # No match, go forward in the history. (Include len+1 to wrap around.)
                    # (Here we should always include all cursor positions, because
                    # it's a different line.)
                    for i in range(working_index + 1, len(self._working_lines) + 1):
                        # The modulo makes `len(...)` wrap back to entry 0.
                        i %= len(self._working_lines)
                        document = Document(self._working_lines[i], 0)
                        new_index = document.find(text, include_current_position=True,
                                                  ignore_case=ignore_case)
                        if new_index is not None:
                            return (i, Document(document.text, new_index))
            else:
                # Try find at the current input.
                new_index = document.find_backwards(
                    text, ignore_case=ignore_case)

                if new_index is not None:
                    return (working_index,
                            Document(document.text, document.cursor_position + new_index))
                else:
                    # No match, go back in the history. (Include -1 to wrap around.)
                    for i in range(working_index - 1, -2, -1):
                        # The modulo makes `-1` wrap to the last entry.
                        i %= len(self._working_lines)
                        document = Document(self._working_lines[i], len(self._working_lines[i]))
                        new_index = document.find_backwards(
                            text, ignore_case=ignore_case)
                        if new_index is not None:
                            return (i, Document(document.text, len(document.text) + new_index))

        # Do 'count' search iterations.
        working_index = self.working_index
        document = self.document
        for _ in range(count):
            result = search_once(working_index, document)
            if result is None:
                return  # Nothing found.
            else:
                working_index, document = result

        return (working_index, document.cursor_position)
def document_for_search(self, search_state):
"""
Return a :class:`~prompt_toolkit.document.Document` instance that has
the text/cursor position for this search, if we would apply it.
"""
search_result = self._search(search_state, include_current_position=True)
if search_result is None:
return self.document
else:
working_index, cursor_position = search_result
return Document(self._working_lines[working_index], cursor_position)
def apply_search(self, search_state, include_current_position=True, count=1):
"""
Apply search. If something is found, set `working_index` and
`cursor_position`.
"""
search_result = self._search(
search_state, include_current_position=include_current_position, count=count)
if search_result is not None:
working_index, cursor_position = search_result
self.working_index = working_index
self.cursor_position = cursor_position
    def exit_selection(self):
        # Leave selection mode by discarding the current selection state.
        self.selection_state = None
def open_in_editor(self, cli):
"""
Open code in editor.
:param cli: :class:`~prompt_toolkit.interface.CommandLineInterface`
instance.
"""
if self.read_only():
raise EditReadOnlyBuffer()
# Write to temporary file
descriptor, filename = tempfile.mkstemp(self.tempfile_suffix)
os.write(descriptor, self.text.encode('utf-8'))
os.close(descriptor)
# Open in editor
# (We need to use `cli.run_in_terminal`, because not all editors go to
# the alternate screen buffer, and some could influence the cursor
# position.)
succes = cli.run_in_terminal(lambda: self._open_file_in_editor(filename))
# Read content again.
if succes:
with open(filename, 'rb') as f:
text = f.read().decode('utf-8')
# Drop trailing newline. (Editors are supposed to add it at the
# end, but we don't need it.)
if text.endswith('\n'):
text = text[:-1]
self.document = Document(
text=text,
cursor_position=len(text))
# Clean up temp file.
os.remove(filename)
def _open_file_in_editor(self, filename):
"""
Call editor executable.
Return True when we received a zero return code.
"""
# If the 'EDITOR' environment variable has been set, use that one.
# Otherwise, fall back to the first available editor that we can find.
editor = os.environ.get('EDITOR')
editors = [
editor,
# Order of preference.
'/usr/bin/editor',
'/usr/bin/nano',
'/usr/bin/pico',
'/usr/bin/vi',
'/usr/bin/emacs',
]
for e in editors:
if e:
try:
returncode = subprocess.call([e, filename])
return returncode == 0
except OSError:
# Executable does not exist, try the next one.
pass
return False
    def indent(buffer, from_row, to_row, count=1):
        """
        Indent text of a :class:`.Buffer` object.

        Lines in [from_row, to_row) are prefixed with `count` levels of
        indentation; the cursor ends on the first non-whitespace character
        of the current line.
        """
        current_row = buffer.document.cursor_position_row
        line_range = range(from_row, to_row)
        # Apply transformation.
        new_text = buffer.transform_lines(line_range, lambda l: '    ' * count + l)
        # Rebuild the document, keeping the cursor on the same row.
        buffer.document = Document(
            new_text,
            Document(new_text).translate_row_col_to_index(current_row, 0))
        # Go to the start of the line.
        buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=True)
    def unindent(buffer, from_row, to_row, count=1):
        """
        Unindent text of a :class:`.Buffer` object.

        Removes `count` levels of indentation from lines in
        [from_row, to_row); lines with less leading whitespace than a full
        level are stripped of all their leading whitespace.
        """
        current_row = buffer.document.cursor_position_row
        line_range = range(from_row, to_row)
        def transform(text):
            remove = '    ' * count
            if text.startswith(remove):
                return text[len(remove):]
            else:
                # Not enough indentation for a full level: drop what's there.
                return text.lstrip()
        # Apply transformation.
        new_text = buffer.transform_lines(line_range, transform)
        # Rebuild the document, keeping the cursor on the same row.
        buffer.document = Document(
            new_text,
            Document(new_text).translate_row_col_to_index(current_row, 0))
        # Go to the start of the line.
        buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=True)
    def reshape_text(buffer, from_row, to_row):
        """
        Reformat text, taking the width into account.
        `to_row` is included.
        (Vi 'gq' operator.)
        """
        lines = buffer.text.splitlines(True)
        lines_before = lines[:from_row]
        lines_after = lines[to_row + 1:]
        lines_to_reformat = lines[from_row:to_row + 1]
        if lines_to_reformat:
            # Take indentation from the first line.
            length = re.search(r'^\s*', lines_to_reformat[0]).end()
            indent = lines_to_reformat[0][:length].replace('\n', '')
            # Now, take all the 'words' from the lines to be reshaped.
            words = ''.join(lines_to_reformat).split()
            # And reshape.
            width = (buffer.text_width or 80) - len(indent)
            reshaped_text = [indent]
            current_width = 0
            for w in words:
                if current_width:
                    # Word would overflow the line: start a fresh,
                    # indented line; otherwise separate with a space.
                    if len(w) + current_width + 1 > width:
                        reshaped_text.append('\n')
                        reshaped_text.append(indent)
                        current_width = 0
                    else:
                        reshaped_text.append(' ')
                        current_width += 1
                reshaped_text.append(w)
                current_width += len(w)
            # Keep the reformatted region newline-terminated.
            if reshaped_text[-1] != '\n':
                reshaped_text.append('\n')
            # Apply result: cursor lands at the end of the reshaped region.
            buffer.document = Document(
                text=''.join(lines_before + reshaped_text + lines_after),
                cursor_position=len(''.join(lines_before + reshaped_text)))
| {
"content_hash": "157d7ba6a56ddabdbad5ed03c9cfc97f",
"timestamp": "",
"source": "github",
"line_count": 1254,
"max_line_length": 102,
"avg_line_length": 35.90829346092504,
"alnum_prop": 0.592529258922028,
"repo_name": "Sorsly/subtle",
"id": "739486a4982272237205419f244b48705edc828c",
"size": "45029",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/third_party/prompt_toolkit/buffer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1581"
},
{
"name": "CSS",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "4637"
},
{
"name": "JavaScript",
"bytes": "3037"
},
{
"name": "PHP",
"bytes": "4543"
},
{
"name": "Pascal",
"bytes": "31"
},
{
"name": "Python",
"bytes": "13243860"
},
{
"name": "Roff",
"bytes": "1050600"
},
{
"name": "Shell",
"bytes": "16136"
},
{
"name": "Smarty",
"bytes": "2484"
},
{
"name": "SourcePawn",
"bytes": "308"
}
],
"symlink_target": ""
} |
import os
import sys
# Make the project root importable when this test file is run directly.
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import unittest
from test_util import ScanPointGeneratorTest
from scanpointgenerator import Excluder
from pkg_resources import require
# Pin the 'mock' dependency before importing from it.
require("mock")
from mock import MagicMock
class ExcluderTest(unittest.TestCase):
    """Construction tests for the Excluder base class."""
    def test_init(self):
        # Excluder should accept a list of axis names without raising.
        Excluder(["x", "y"])
class SimpleFunctionsTest(unittest.TestCase):
    """Behaviour of the abstract methods on the Excluder base class."""
    def setUp(self):
        self.e = Excluder(["x", "y"])
    def test_create_mask(self):
        # The base class leaves mask creation to subclasses.
        with self.assertRaises(NotImplementedError):
            self.e.create_mask(MagicMock(), MagicMock())
class SerialisationTest(unittest.TestCase):
    """Serialisation round-trip tests for Excluder."""
    def setUp(self):
        self.e = Excluder(["x", "y"])
    def test_to_dict(self):
        # to_dict() should expose the axes exactly as configured.
        self.assertEqual({'axes': ["x", "y"]}, self.e.to_dict())
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "b8bdaf6ff4b4932d1e50146b83ae1ed1",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 62,
"avg_line_length": 20.955555555555556,
"alnum_prop": 0.6426299045599152,
"repo_name": "dls-controls/scanpointgenerator",
"id": "7bfc3db67a1ff6eeac8b2b10e5b1368c56f11f3b",
"size": "943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_core/test_excluder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1190"
},
{
"name": "Python",
"bytes": "270157"
}
],
"symlink_target": ""
} |
import re
from django.db import models, connection, transaction
from django.db.utils import IntegrityError
from django.core.exceptions import ValidationError
from django.forms.models import model_to_dict
from kobo.rpmlib import parse_nvra
from pdc.apps.common.models import get_cached_id
from pdc.apps.common.validators import validate_md5, validate_sha1, validate_sha256
from pdc.apps.common.hacks import add_returning, parse_epoch_version
from pdc.apps.common.constants import ARCH_SRC
from pdc.apps.release.models import Release
from pdc.apps.compose.models import ComposeAcceptanceTestingState
class RPM(models.Model):
    """An RPM package, uniquely identified by its NEVRA
    (name, epoch, version, release, arch)."""
    name = models.CharField(max_length=200, db_index=True)
    epoch = models.PositiveIntegerField()
    version = models.CharField(max_length=200, db_index=True)
    release = models.CharField(max_length=200, db_index=True)
    arch = models.CharField(max_length=200, db_index=True)  # nosrc
    srpm_name = models.CharField(max_length=200, db_index=True)  # package (name of srpm)
    srpm_nevra = models.CharField(max_length=200, null=True, blank=True, db_index=True)
    # Well behaved filenames are unique, but that is enforced by having unique NVRA.
    filename = models.CharField(max_length=4096)
    linked_releases = models.ManyToManyField('release.Release', related_name='linked_rpms')
    class Meta:
        unique_together = (
            ("name", "epoch", "version", "release", "arch"),
        )
    def __unicode__(self):
        return u"%s.rpm" % self.nevra
    def linked_composes(self):
        # Local import avoids a circular dependency with the compose app.
        from pdc.apps.compose.models import Compose
        # NOTE(review): the string below is placed after a statement, so it is
        # a no-op expression, not a docstring. Intended doc: "Return a set of
        # all composes that this RPM is linked to."
        """Return a set of all composes that this RPM is linked"""
        return Compose.objects.filter(variant__variantarch__composerpm__rpm=self).distinct()
    @property
    def nevra(self):
        # Canonical NEVRA string: name-epoch:version-release.arch
        return u"%s-%s:%s-%s.%s" % (self.name, self.epoch, self.version, self.release, self.arch)
    @staticmethod
    def check_srpm_nevra(srpm_nevra, arch):
        # srpm_nevra should be empty if and only if arch is src.
        if (arch == ARCH_SRC) == bool(srpm_nevra):
            raise ValidationError("RPM's srpm_nevra should be empty if and only if arch is src")
    def export(self, fields=None):
        """Serialize this RPM to a dict, restricted to `fields` when given."""
        _fields = (set(['name', 'epoch', 'version', 'release', 'arch', 'filename',
                        'srpm_name', 'srpm_nevra', 'linked_releases', 'dependencies'])
                   if fields is None else set(fields))
        # linked_releases and dependencies are not plain model fields, so
        # they are handled separately from model_to_dict.
        result = model_to_dict(self, fields=_fields - {'linked_releases'})
        if 'linked_releases' in _fields:
            result['linked_releases'] = []
            for linked_release in self.linked_releases.all():
                result['linked_releases'].append(linked_release.release_id)
        if 'dependencies' in _fields:
            result['dependencies'] = self.dependencies
        return result
    @staticmethod
    def default_filename(data):
        """
        Create default file name based on name, version, release and arch. If
        the data does not contain all these values, None is returned.
        """
        try:
            return '{name}-{version}-{release}.{arch}.rpm'.format(**data)
        except KeyError:
            return None
    def save(self, *args, **kwargs):
        # Enforce the srpm_nevra/arch invariant on every save.
        self.check_srpm_nevra(self.srpm_nevra, self.arch)
        super(RPM, self).save(*args, **kwargs)
    @staticmethod
    def bulk_insert(cursor, rpm_nevra, filename, srpm_nevra=None):
        """Insert an RPM via raw SQL; on conflict, return the existing row id."""
        nvra = parse_nvra(rpm_nevra)
        if srpm_nevra:
            srpm_name = parse_nvra(srpm_nevra)["name"]
        else:
            # Source RPMs carry no srpm_nevra; fall back to their own name.
            srpm_name = nvra["name"]
        sql = add_returning("""INSERT INTO %s (name, epoch, version, release, arch, srpm_nevra, srpm_name, filename)
                               VALUES (%%s, %%s, %%s, %%s, %%s, %%s, %%s, %%s)""" % RPM._meta.db_table)
        try:
            sid = transaction.savepoint()
            # NOTE(review): check_srpm_nevra expects (srpm_nevra, arch), but is
            # called here with (rpm_nevra, srpm_nevra) — the arguments look
            # swapped; confirm this should be (srpm_nevra, nvra["arch"]).
            RPM.check_srpm_nevra(rpm_nevra, srpm_nevra)
            cursor.execute(sql, [nvra["name"], nvra["epoch"], nvra["version"], nvra["release"],
                                 nvra["arch"], srpm_nevra, srpm_name, filename])
            if connection.features.can_return_id_from_insert:
                insert_id = connection.ops.fetch_returned_insert_id(cursor)
            else:
                insert_id = connection.ops.last_insert_id(cursor, RPM._meta.db_table, "id")
        except (IntegrityError, ValidationError):
            # The row already exists (or was invalid): roll back to the
            # savepoint and look up the id of the existing NEVRA instead.
            transaction.savepoint_rollback(sid)
            cursor.execute("""SELECT %s FROM %s WHERE name=%%s AND epoch=%%s AND
                           version=%%s and release=%%s AND arch=%%s""" % ("id", RPM._meta.db_table),
                           [nvra["name"], nvra["epoch"], nvra["version"], nvra["release"], nvra["arch"]])
            insert_id = int(cursor.fetchone()[0])
        transaction.savepoint_commit(sid)
        return insert_id
    @property
    def sort_key(self):
        # Sortable tuple: numeric epoch first, then parsed version/release.
        return (self.epoch, parse_epoch_version(self.version), parse_epoch_version(self.release))
    @property
    def dependencies(self):
        """
        Get a dict with all deps of the RPM. All types of dependencies are
        included.
        """
        result = {}
        choices = dict(Dependency.DEPENDENCY_TYPE_CHOICES)
        # Initialize every dependency type with an empty list so callers can
        # rely on all keys being present.
        for type in choices.values():
            result[type] = []
        for dep in Dependency.objects.filter(rpm=self):
            result[choices[dep.type]].append(unicode(dep))
        return result
class Dependency(models.Model):
    """A single RPM dependency (provides/requires/...) with an optional
    version constraint such as ``foo >= 1.2``."""
    PROVIDES = 1
    REQUIRES = 2
    OBSOLETES = 3
    CONFLICTS = 4
    RECOMMENDS = 5
    SUGGESTS = 6
    DEPENDENCY_TYPE_CHOICES = (
        (PROVIDES, 'provides'),
        (REQUIRES, 'requires'),
        (OBSOLETES, 'obsoletes'),
        (CONFLICTS, 'conflicts'),
        (RECOMMENDS, 'recommends'),
        (SUGGESTS, 'suggests'),
    )
    # Parses "name", "name = 1.0", "name >= 2" etc. into name/op/version groups.
    DEPENDENCY_PARSER = re.compile(r'^(?P<name>[^ <>=]+)( *(?P<op>=|>=|<=|<|>) *(?P<version>[^ <>=]+))?$')
    type = models.PositiveIntegerField(choices=DEPENDENCY_TYPE_CHOICES)
    name = models.CharField(max_length=200)
    version = models.CharField(max_length=200, blank=True, null=True)
    comparison = models.CharField(max_length=50, blank=True, null=True)
    rpm = models.ForeignKey(RPM)
    def __unicode__(self):
        # Render as "name" or "name <op> version".
        base_str = self.name
        if self.version:
            base_str += ' {comparison} {version}'.format(comparison=self.comparison,
                                                         version=self.version)
        return base_str
    def clean(self):
        """
        When version constraint is set, both a version and comparison type must
        be specified.
        """
        if (self.version is None) != (self.comparison is None):
            # This code should be unreachable based on user input, and only
            # programmer error can cause this to fail.
            raise ValidationError('Bad version constraint: both version and comparison must be specified.')
    @property
    def parsed_version(self):
        # Lazily parse and cache the version for repeated comparisons.
        if not hasattr(self, '_version'):
            self._version = parse_epoch_version(self.version)
        return self._version
    def is_satisfied_by(self, other):
        """
        Check if other version satisfies this dependency.
        :paramtype other: string
        """
        # Each comparator tests `other` (x) against this dep's version;
        # e.g. for "foo <= 2.0", other satisfies when other <= 2.0.
        funcs = {
            '=': lambda x: x == self.parsed_version,
            '<': lambda x: x < self.parsed_version,
            '<=': lambda x: x <= self.parsed_version,
            '>': lambda x: x > self.parsed_version,
            '>=': lambda x: x >= self.parsed_version,
        }
        return funcs[self.comparison](parse_epoch_version(other))
    def is_equal(self, other):
        """
        Return true if the other version is equal to version in this dep.
        :paramtype other: string
        """
        return self.parsed_version == parse_epoch_version(other)
    def is_higher(self, other):
        """
        Return true if version in this dep is higher than the other version.
        :paramtype other: string
        """
        return self.parsed_version > parse_epoch_version(other)
    def is_lower(self, other):
        """
        Return true if version in this dep is lower than the other version.
        :paramtype other: string
        """
        return self.parsed_version < parse_epoch_version(other)
class ImageFormat(models.Model):
    """Lookup table of image formats (e.g. iso, qcow2)."""
    name = models.CharField(max_length=30, db_index=True, unique=True)
    def __unicode__(self):
        return u"%s" % self.name
    # Per-class cache used by get_cached_id (see common.models.get_cached_id).
    CACHE = {}
    @classmethod
    def get_cached_id(cls, value):
        """cached `name` to `id`"""
        return get_cached_id(cls, "name", value)
class ImageType(models.Model):
    """Lookup table of image types."""
    name = models.CharField(max_length=30, db_index=True, unique=True)
    def __unicode__(self):
        return u"%s" % self.name
    # Per-class cache used by get_cached_id (see common.models.get_cached_id).
    CACHE = {}
    @classmethod
    def get_cached_id(cls, value):
        """cached `name` to `id`"""
        return get_cached_id(cls, "name", value)
class Image(models.Model):
    """A compose image file (e.g. an ISO), identified by file name + sha256."""
    file_name = models.CharField(max_length=200, db_index=True)
    image_format = models.ForeignKey(ImageFormat)
    image_type = models.ForeignKey(ImageType)
    disc_number = models.PositiveIntegerField()
    disc_count = models.PositiveIntegerField()
    arch = models.CharField(max_length=200, db_index=True)
    mtime = models.BigIntegerField()
    size = models.BigIntegerField()
    bootable = models.BooleanField(default=False)
    implant_md5 = models.CharField(max_length=32)
    volume_id = models.CharField(max_length=32, null=True, blank=True)
    # TODO: move checksums to a different table? need at least one mandatory checksum to identify ISOs
    md5 = models.CharField(max_length=32, null=True, blank=True, validators=[validate_md5])
    sha1 = models.CharField(max_length=40, null=True, blank=True, validators=[validate_sha1])
    sha256 = models.CharField(max_length=64, validators=[validate_sha256])
    class Meta:
        unique_together = (
            ("file_name", "sha256"),
        )
    def __unicode__(self):
        return u"%s" % self.file_name
    def composes(self):
        """Return a set of all composes that this image belongs to."""
        return set([ci.variant_arch.variant.compose for ci in self.composeimage_set.all()])
class Archive(models.Model):
    """A build archive, identified by its build NVR, file name and md5."""
    build_nvr = models.CharField(max_length=200, db_index=True)
    name = models.CharField(max_length=200, db_index=True)
    size = models.BigIntegerField()
    md5 = models.CharField(max_length=32, validators=[validate_md5])
    class Meta:
        unique_together = (
            ('build_nvr', 'name', 'md5'),
        )
    def __unicode__(self):
        return u"%s" % self.name
    def export(self, fields=None):
        """Serialize this archive to a dict, restricted to `fields` when given."""
        _fields = ['build_nvr', 'name', 'size', 'md5'] if fields is None else fields
        return model_to_dict(self, fields=_fields)
class BuildImage(models.Model):
    """A build image with its contained RPMs/archives and linked releases."""
    image_id = models.CharField(max_length=200)
    image_format = models.ForeignKey(ImageFormat)
    md5 = models.CharField(max_length=32, validators=[validate_md5])
    rpms = models.ManyToManyField(RPM)
    archives = models.ManyToManyField(Archive)
    releases = models.ManyToManyField(Release)
    # Acceptance testing state; new images start out untested.
    test_result = models.ForeignKey(ComposeAcceptanceTestingState,
                                    default=ComposeAcceptanceTestingState.get_untested)
    class Meta:
        unique_together = (
            ("image_id", "image_format"),
        )
    def __unicode__(self):
        return u"%s-%s" % (self.image_id, self.image_format)
    def export(self, fields=None):
        """Serialize this image to a dict, restricted to `fields` when given.

        Related objects (rpms, archives, releases) are exported recursively
        via their own export() methods.
        """
        _fields = ['image_id', 'image_format', 'md5',
                   'rpms', 'archives', 'releases', 'test_result'] if fields is None else fields
        result = dict()
        if 'image_id' in _fields:
            result['image_id'] = self.image_id
        if 'md5' in _fields:
            result['md5'] = self.md5
        if 'image_format' in _fields:
            result['image_format'] = self.image_format.name
        if 'test_result' in _fields:
            result['test_result'] = self.test_result.name
        for field in ('rpms', 'archives', 'releases'):
            if field in _fields:
                result[field] = []
                objects = getattr(self, field).all()
                for obj in objects:
                    result[field].append(obj.export())
        return result
| {
"content_hash": "67b26072b57153eea29e1680609a822b",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 116,
"avg_line_length": 37.98516320474778,
"alnum_prop": 0.5887821263963753,
"repo_name": "xychu/product-definition-center",
"id": "d41e0e86788f9174a3f751382d6c6af42646fe25",
"size": "12910",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pdc/apps/package/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1767"
},
{
"name": "HTML",
"bytes": "49433"
},
{
"name": "JavaScript",
"bytes": "6629"
},
{
"name": "Makefile",
"bytes": "2828"
},
{
"name": "Python",
"bytes": "1189218"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
} |
import os
import sys
import shlex
from setuptools import setup, Extension
from setuptools.command.test import test as TestCommand
# Determine if a base directory has been provided with the --basedir option
in_tree = False
# Add compiler flags if debug is set
compile_args = ['-Wno-unused-function']
# Iterate over a *copy* of sys.argv: the loop removes '--basedir=...' from
# sys.argv, and removing items from the list being iterated would silently
# skip the argument that follows each removed one.
for arg in list(sys.argv):
    if arg.startswith('--debug'):
        # Note from GCC manual:
        # If you use multiple -O options, with or without level numbers,
        # the last such option is the one that is effective.
        compile_args.extend('-Wall -O0 -g'.split())
    elif arg.startswith('--basedir='):
        basedir = arg.split('=')[1]
        sys.argv.remove(arg)
        in_tree = True
# If a base directory has been provided, we use it
if in_tree:
    netsnmp_libs = os.popen(basedir + '/net-snmp-config --libs').read()
    libdirs = os.popen('{0}/net-snmp-config --build-lib-dirs {1}'.format(basedir, basedir)).read()  # noqa
    incdirs = os.popen('{0}/net-snmp-config --build-includes {1}'.format(basedir, basedir)).read()  # noqa
    libs = [flag[2:] for flag in shlex.split(netsnmp_libs) if flag.startswith('-l')]  # noqa
    libdirs = [flag[2:] for flag in shlex.split(libdirs) if flag.startswith('-L')]  # noqa
    incdirs = [flag[2:] for flag in shlex.split(incdirs) if flag.startswith('-I')]  # noqa
# Otherwise, we use the system-installed SNMP libraries
else:
    netsnmp_libs = os.popen('net-snmp-config --libs').read()
    libs = [flag[2:] for flag in shlex.split(netsnmp_libs) if flag.startswith('-l')]  # noqa
    libdirs = [flag[2:] for flag in shlex.split(netsnmp_libs) if flag.startswith('-L')]  # noqa
    incdirs = []
# Setup the py.test class for use with the test command
class PyTest(TestCommand):
    """setuptools 'test' command that delegates to py.test."""
    user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]
    def initialize_options(self):
        TestCommand.initialize_options(self)
        # Extra arguments forwarded verbatim to pytest.main().
        self.pytest_args = []
    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # Import here, cause outside the eggs aren't loaded
        import pytest
        # Propagate pytest's exit status to the caller.
        errno = pytest.main(self.pytest_args)
        sys.exit(errno)
# Read the long description from setup.rst; tolerate its absence (e.g. when
# installing from an sdist that does not ship the file).
try:
    with open('setup.rst') as f:
        long_description = f.read()
except IOError:
    long_description = None
setup(
    name='easysnmp',
    version='3.0',
    description='A blazingly fast and Pythonic SNMP library based on the '
                'official Net-SNMP bindings',
    long_description=long_description,
    author='Fotis Gimian',
    author_email='fgimiansoftware@gmail.com',
    url='https://github.com/fgimian/easysnmp',
    license='BSD',
    packages=['easysnmp'],
    tests_require=['pytest-cov', 'pytest-flake8', 'pytest-sugar', 'pytest'],
    cmdclass={'test': PyTest},
    # C extension built against the Net-SNMP libraries located above.
    ext_modules=[
        Extension(
            'easysnmp.interface', ['easysnmp/interface.c'],
            library_dirs=libdirs, include_dirs=incdirs, libraries=libs,
            extra_compile_args=compile_args
        )
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: System :: Networking',
        'Topic :: System :: Networking :: Monitoring'
    ]
)
| {
"content_hash": "cf5fbc9917e6b0ff958d0a59c35b8403",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 106,
"avg_line_length": 35.82692307692308,
"alnum_prop": 0.6301663982823403,
"repo_name": "normanuber/ezsnmp",
"id": "e29a0b10636f604b98d47aff5eb219a8a2c7369e",
"size": "3726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "103527"
},
{
"name": "Python",
"bytes": "75250"
}
],
"symlink_target": ""
} |
import io
import os
import setuptools
# Package metadata.
name = "google-cloud-trace"
description = "Cloud Trace API client library"
version = "1.7.3"
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
# 'Development Status :: 5 - Production/Stable'
release_status = "Development Status :: 5 - Production/Stable"
# Runtime dependencies; exclusions pin out known-broken releases.
dependencies = [
    "google-api-core[grpc] >= 1.32.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*",
    "proto-plus >= 1.22.0, <2.0.0dev",
    "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5",
]
extras = {}
# Setup boilerplate below this line.
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
    readme = readme_file.read()
# Only include packages under the 'google' namespace. Do not include tests,
# benchmarks, etc.
packages = [
    package
    for package in setuptools.PEP420PackageFinder.find()
    if package.startswith("google")
]
# Determine which namespaces are needed.
namespaces = ["google"]
if "google.cloud" in packages:
    namespaces.append("google.cloud")
setuptools.setup(
    name=name,
    version=version,
    description=description,
    long_description=readme,
    author="Google LLC",
    author_email="googleapis-packages@google.com",
    license="Apache 2.0",
    url="https://github.com/googleapis/python-trace",
    classifiers=[
        release_status,
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Operating System :: OS Independent",
        "Topic :: Internet",
    ],
    platforms="Posix; MacOS X; Windows",
    packages=packages,
    namespace_packages=namespaces,
    install_requires=dependencies,
    extras_require=extras,
    python_requires=">=3.7",
    # Post-generation keyword fixup scripts shipped alongside the library.
    scripts=[
        "scripts/fixup_trace_v1_keywords.py",
        "scripts/fixup_trace_v2_keywords.py",
    ],
    include_package_data=True,
    zip_safe=False,
)
| {
"content_hash": "914cf2a57184020bfee170d8442a0b5d",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 113,
"avg_line_length": 29.54320987654321,
"alnum_prop": 0.6385290430422065,
"repo_name": "googleapis/python-trace",
"id": "16aa427ef7f53253483d08479a9b5bfb25703c57",
"size": "2968",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "404724"
},
{
"name": "Shell",
"bytes": "30657"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class AddToWatchList(Choreography):
    """Choreo wrapper for the eBay Trading AddToWatchList operation."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the AddToWatchList Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(AddToWatchList, self).__init__(temboo_session, '/Library/eBay/Trading/AddToWatchList')
    def new_input_set(self):
        """Return an empty input set for this Choreo."""
        return AddToWatchListInputSet()
    def _make_result_set(self, result, path):
        # Factory used by the framework to wrap raw results.
        return AddToWatchListResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Factory used by the framework for asynchronous executions.
        return AddToWatchListChoreographyExecution(session, exec_id, path)
class AddToWatchListInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the AddToWatchList
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_ItemID(self, value):
        """
        Set the value of the ItemID input for this Choreo. ((required, string) The ID of an item to add to a user's watch list. This can be a comma-separated list of item IDs.)
        """
        super(AddToWatchListInputSet, self)._set_input('ItemID', value)
    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
        """
        super(AddToWatchListInputSet, self)._set_input('ResponseFormat', value)
    def set_SandboxMode(self, value):
        """
        Set the value of the SandboxMode input for this Choreo. ((conditional, boolean) Indicates that the request should be made to the sandbox endpoint instead of the production endpoint. Set to 1 to enable sandbox mode.)
        """
        super(AddToWatchListInputSet, self)._set_input('SandboxMode', value)
    def set_SiteID(self, value):
        """
        Set the value of the SiteID input for this Choreo. ((optional, string) The eBay site ID that you want to access. Defaults to 0 indicating the US site.)
        """
        super(AddToWatchListInputSet, self)._set_input('SiteID', value)
    def set_UserToken(self, value):
        """
        Set the value of the UserToken input for this Choreo. ((required, string) A valid eBay Auth Token.)
        """
        super(AddToWatchListInputSet, self)._set_input('UserToken', value)
class AddToWatchListResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the AddToWatchList Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    # NOTE(review): the parameter name shadows the builtin `str`; kept as-is
    # for API compatibility (this is generated code).
    def getJSONFromString(self, str):
        """Parse a JSON string into Python objects."""
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from eBay.)
        """
        return self._output.get('Response', None)
class AddToWatchListChoreographyExecution(ChoreographyExecution):
    """Execution handle for an asynchronous AddToWatchList Choreo run."""
    def _make_result_set(self, response, path):
        # Wrap the raw response in the Choreo-specific result set.
        return AddToWatchListResultSet(response, path)
| {
"content_hash": "a67f5a0ca241903602ec018079ea6e26",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 223,
"avg_line_length": 43.71052631578947,
"alnum_prop": 0.695364238410596,
"repo_name": "jordanemedlock/psychtruths",
"id": "83b33c6ab18c45312bf1ad6e4a416c7712741a17",
"size": "4198",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "temboo/Library/eBay/Trading/AddToWatchList.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
} |
'''
Transparent use of prepared statements with Postgresql.
Usage example with psycopg2: We create a cursor that provides
both with dict-like field access and prepared statements.
from psycopg2.extensions import connection as _connection
from psycopg2.extras import RealDictCursor
from this_recipe import PrepCursorMixin
class Cursor(PrepCursorMixin, RealDictCursor):
pass
class Connection(_connection):
def cursor(self):
return super(Connection, self).cursor(cursor_factory=Cursor)
def connect(*a, **kw):
return Connection(*a, **kw)
'''
import re
class PrepCursorMixin(object):
    '''
    mix in with dbapi cursor class
    formatRe fishes out all format specifiers for a given paramstyle
    this one works with paramstyles 'format' or 'pyformat'
    '''
    # Matches '%s' and '%(name)s' placeholders so they can be rewritten into
    # PostgreSQL's positional '$N' form.
    formatRe = re.compile('(\%s|\%\([\w\.]+\)s)', re.DOTALL)
    def __init__(self, *a, **kw):
        super(PrepCursorMixin, self).__init__(*a, **kw)
        # preferably store prepd statements on connection
        # (cursors come and go; the prepared statements live per-connection)
        conn = getattr(self, 'connection', None)
        if conn:
            pc = getattr(conn, 'prepCache', {})
            self.prepCache = self.connection.prepCache = pc
        else:
            self.prepCache = {}
    def executeps(self, cmd, args=None):
        '''
        execute a command using a prepared statement.

        The first time a given SQL string is seen it is PREPAREd and cached;
        subsequent calls reuse the cached EXECUTE command.
        '''
        prepStmt = self.prepCache.get(cmd)
        if prepStmt is None:
            cmdId = "ps_%d" % (len(self.prepCache) + 1)
            # unique name for new prepared statement
            prepStmt = self.prepCache[cmd] = \
                self.prepareStatement(cmd, cmdId)
        self.execute(prepStmt, args)
    def prepareStatement(self, cmd, cmdId):
        '''
        translate a sql command into its corresponding
        prepared statement, and execute the declaration.
        '''
        specifiers = []
        def replaceSpec(mo):
            # Collect each placeholder and substitute PostgreSQL's $1, $2, ...
            specifiers.append(mo.group())
            return '$%d' % len(specifiers)
        replacedCmd = self.formatRe.sub(replaceSpec, cmd)
        prepCmd = 'prepare %s as %s' % (cmdId, replacedCmd)
        if len(specifiers) == 0:  # no variable arguments
            execCmd = 'execute %s' % cmdId
        else:  # set up argument slots in prep statement
            execCmd = 'execute %s(%s)' % (cmdId, ', '.join(specifiers))
        self.execute(prepCmd)
        return execCmd
    def executemanyps(self, cmd, seq_of_parameters):
        '''
        prepared statement version of executemany.
        '''
        for p in seq_of_parameters:
            self.executeps(cmd, p)
        # Don't want to leave the value of the last execute() call
        try:
            self.rowcount = -1
        except TypeError:  # fooks with psycopg
            pass
if __name__ == '__main__':
    # Demonstrate the string mangling that goes on, without a real database:
    # the dummy cursor just prints what would be sent to the server.
    # Fixed for Python 3: print statements converted to print() calls
    # (single pre-formatted string, so Python 2 output is unchanged too).

    class DummyBaseCursor(object):
        def __init__(self, conn):
            self.connection = conn

        def execute(self, cmd, args=None):
            print('execute')
            print('cmd: %s' % (cmd,))
            print('args: %s' % (args,))
            print('-' * 20)

    class DummyCursor(PrepCursorMixin, DummyBaseCursor):
        def executeps(self, cmd, args):
            print('executeps')
            print('cmd: %s' % (cmd,))
            print('args: %s' % (args,))
            super(DummyCursor, self).executeps(cmd, args)

    class DummyConnection(object):
        pass

    # Fixed: pass a connection *instance*; the original passed the class
    # itself, which made prepCache a class attribute of DummyConnection.
    dc = DummyCursor(DummyConnection())
    sql = [
        '''
        select * from dummies
        where name=%s
        and surname=%s
        ''',
        '''
        select * from dummies
        where name=%(name)s
        and surname=%(surname)s
        ''',
        'select * from dummies',
    ]
    theargs = [('Joe', 'Blow'), {'name': 'Joe', 'surname': 'Blow'}, None]
    for x in range(3):
        for y in range(2):  # second pass reuses the cached statement
            dc.executeps(sql[x], theargs[x])
| {
"content_hash": "883b1db91a18745b5b26dcb45e2ee3e5",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 71,
"avg_line_length": 28.115107913669064,
"alnum_prop": 0.5757420675537359,
"repo_name": "ActiveState/code",
"id": "8bc3b3bffecddd34d73fd4d25b2996248a34330a",
"size": "3908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/576698_Transparently_execute_SQL_queries_prepared/recipe-576698.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
import unittest
class TestCase(unittest.TestCase):
    """unittest.TestCase extended with a case-insensitive string
    comparison, plus assertIs/assertIsInstance for unittest versions
    that predate them."""

    def assertEqualCI(self, s1, s2, *args, **kwargs):
        """Assert that two strings are equal, ignoring case."""
        lowered_first = s1.lower()
        lowered_second = s2.lower()
        self.assertEqual(lowered_first, lowered_second, *args, **kwargs)

    def assertIs(self, item1, item2, *args, **kwargs):
        """Assert that item1 and item2 are the very same object."""
        same_object = item1 is item2
        self.assertTrue(same_object, *args, **kwargs)

    def assertIsInstance(self, item, klass, *args, **kwargs):
        """Assert that item is an instance of klass."""
        is_instance = isinstance(item, klass)
        self.assertTrue(is_instance, *args, **kwargs)
| {
"content_hash": "0afba46203ba73dcb010a6f88c90d352",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 65,
"avg_line_length": 32,
"alnum_prop": 0.6538461538461539,
"repo_name": "tjguk/winshell",
"id": "0a2570dc14378fb415e787f9e1758dbeca7b32e3",
"size": "439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "45626"
},
{
"name": "Python",
"bytes": "67006"
},
{
"name": "Shell",
"bytes": "422"
}
],
"symlink_target": ""
} |
'''
Created on Sept 28, 2012
@author: jspivey
Simple demo which will connect via ssh to vista.vainnovation.us
'''
import os
import sys
#apparently these are not needed... at least not on windows. Will need to retest this on linux
#sys.path = ['./FunctionalTest/RAS/lib'] + ['./lib/vista'] + sys.path
#sys.path = ['./'] + ['../lib/vista'] + sys.path
import ssh_connect_demo_suite
import TestHelper
def main():
    """Drive the ssh-connect demo test suite.

    Sets up TestHelper suite bookkeeping, runs the two demo tests, and
    routes success/failure through the standard exception/else/finally
    handling hooks.
    """
    test_suite_driver = TestHelper.TestSuiteDriver(__file__)
    test_suite_details = test_suite_driver.generate_test_suite_details()
    try:
        test_suite_driver.pre_test_suite_run(test_suite_details)
        # Begin Tests
        ssh_connect_demo_suite.dive_into_menus(test_suite_details)
        ssh_connect_demo_suite.demo_screen_man(test_suite_details)
        # End Tests
        test_suite_driver.post_test_suite_run(test_suite_details)
    # Fixed: "except Exception, e" is Python-2-only syntax; the "as" form
    # works on Python 2.6+ and Python 3.
    except Exception as e:
        test_suite_driver.exception_handling(test_suite_details, e)
    else:
        test_suite_driver.try_else_handling(test_suite_details)
    finally:
        test_suite_driver.finally_handling(test_suite_details)
        test_suite_driver.end_method_handling(test_suite_details)


if __name__ == '__main__':
    main()
| {
"content_hash": "6bacad7a1c9220c7ab70701c3ab4a4f2",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 94,
"avg_line_length": 31.710526315789473,
"alnum_prop": 0.6854771784232365,
"repo_name": "JimDeanSpivey/ATF-for-Vista-FOIA",
"id": "a9d667fef072f34c333d72deccfbfba8578ea7b1",
"size": "1205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FunctionalTest/RAS/VistA-FOIA/Packages/SSH Demo/ssh_connect_demo_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "5277"
},
{
"name": "Matlab",
"bytes": "36905"
},
{
"name": "Objective-C",
"bytes": "54582"
},
{
"name": "Python",
"bytes": "602251"
},
{
"name": "Shell",
"bytes": "11232"
}
],
"symlink_target": ""
} |
"""Fixtures for honeywell tests."""
from unittest.mock import create_autospec, patch
import pytest
import somecomfort
from homeassistant.components.honeywell.const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.common import MockConfigEntry
@pytest.fixture
def config_data():
    """Return the username/password mapping used by these tests."""
    credentials = {
        CONF_USERNAME: "fake",
        CONF_PASSWORD: "user",
    }
    return credentials
@pytest.fixture
def config_entry(config_data):
    """Build a mock honeywell config entry from the test credentials."""
    entry = MockConfigEntry(domain=DOMAIN, data=config_data, options={})
    return entry
def _mock_somecomfort_device(deviceid, name, **overrides):
    """Build a somecomfort.Device mock with the common test defaults.

    Keyword overrides are set as attributes on the mock, so the fixtures
    below only spell out what actually differs between them (the original
    code repeated this construction three times).
    """
    mock_device = create_autospec(somecomfort.Device, instance=True)
    mock_device.deviceid = deviceid
    mock_device._data = {
        "canControlHumidification": False,
        "hasFan": False,
    }
    mock_device.system_mode = "off"
    mock_device.name = name
    mock_device.current_temperature = 20
    mock_device.mac_address = "macaddress1"
    mock_device.outdoor_temperature = None
    mock_device.outdoor_humidity = None
    for attr, value in overrides.items():
        setattr(mock_device, attr, value)
    return mock_device


@pytest.fixture
def device():
    """Mock a somecomfort.Device."""
    return _mock_somecomfort_device(1234567, "device1")


@pytest.fixture
def device_with_outdoor_sensor():
    """Mock a somecomfort.Device that reports outdoor readings."""
    return _mock_somecomfort_device(
        1234567,
        "device1",
        temperature_unit="C",
        outdoor_temperature=5,
        outdoor_humidity=25,
    )


@pytest.fixture
def another_device():
    """Mock a second somecomfort.Device with a distinct id and name."""
    return _mock_somecomfort_device(7654321, "device2")
@pytest.fixture
def location(device):
    """Mock a somecomfort.Location holding the single mocked device."""
    devices = {device.deviceid: device}
    mock_location = create_autospec(somecomfort.Location, instance=True)
    # NOTE(review): this configures locationid as a *callable* returning
    # "location1", while the client fixture keys a dict on the attribute
    # itself (uncalled) -- presumably intentional; confirm against usage.
    mock_location.locationid.return_value = "location1"
    mock_location.devices_by_id = devices
    return mock_location
@pytest.fixture(autouse=True)
def client(location):
    """Mock a somecomfort.SomeComfort client wired to the mocked location."""
    mock_client = create_autospec(somecomfort.SomeComfort, instance=True)
    mock_client.locations_by_id = {location.locationid: location}
    patch_target = "homeassistant.components.honeywell.somecomfort.SomeComfort"
    with patch(patch_target) as sc_class_mock:
        sc_class_mock.return_value = mock_client
        yield mock_client
| {
"content_hash": "6e9d8b9abc77aae0bc17400339b3b336",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 73,
"avg_line_length": 28.83653846153846,
"alnum_prop": 0.6942314104701567,
"repo_name": "toddeye/home-assistant",
"id": "dcb8edb40159e5e0a94fbba75f30a342c82f835c",
"size": "2999",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/honeywell/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
from django.template import loader, Context, RequestContext
class ContentNotRenderedError(Exception):
    """Raised when a template response's content is accessed or iterated
    before render() has been called."""
class SimpleTemplateResponse(HttpResponse):
    """An HttpResponse whose body is rendered lazily from a template and
    context, rather than fixed at construction time.

    Content access or iteration before render() raises
    ContentNotRenderedError. Written for Python 2 (uses ``basestring``).
    """

    def __init__(self, template, context=None, mimetype=None, status=None,
                 content_type=None):
        # It would seem obvious to call these next two members 'template'
        # and 'context', but those names are reserved as part of the test
        # Client API. To avoid the name collision (a source of
        # tricky-to-debug problems), we use 'template_name' and
        # 'context_data' instead.
        self.template_name = template
        self.context_data = context

        # _is_rendered tracks whether the template and context has been baked
        # into a final response.
        self._is_rendered = False

        # content argument doesn't make sense here because it will be replaced
        # with rendered template so we always pass empty string in order to
        # prevent errors and provide shorter signature.
        super(SimpleTemplateResponse, self).__init__('', mimetype, status,
                                                     content_type)

    def resolve_template(self, template):
        "Accepts a template object, path-to-template or list of paths"
        if isinstance(template, (list, tuple)):
            return loader.select_template(template)
        elif isinstance(template, basestring):  # Python 2 only
            return loader.get_template(template)
        else:
            # Already a template object; use it as-is.
            return template

    def resolve_context(self, context):
        """Convert context data into a full Context object
        (assuming it isn't already a Context object).
        """
        if isinstance(context, Context):
            return context
        else:
            return Context(context)

    @property
    def rendered_content(self):
        """Returns the freshly rendered content for the template and context
        described by the TemplateResponse.

        This *does not* set the final content of the response. To set the
        response content, you must either call render(), or set the
        content explicitly using the value of this property.
        """
        template = self.resolve_template(self.template_name)
        context = self.resolve_context(self.context_data)
        content = template.render(context)
        return content

    def render(self):
        """Render (thereby finalizing) the content of the response.

        If the content has already been rendered, this is a no-op.

        Returns the baked response instance.
        """
        if not self._is_rendered:
            # _set_content also flips _is_rendered to True.
            self._set_content(self.rendered_content)
        return self

    # Read-only view of the rendered flag.
    is_rendered = property(lambda self: self._is_rendered)

    def __iter__(self):
        # Guard: iterating an unrendered response would emit an empty body.
        if not self._is_rendered:
            raise ContentNotRenderedError('The response content must be rendered before it can be iterated over.')
        return super(SimpleTemplateResponse, self).__iter__()

    def _get_content(self):
        # Guard: reading .content before render() would return ''.
        if not self._is_rendered:
            raise ContentNotRenderedError('The response content must be rendered before it can be accessed.')
        return super(SimpleTemplateResponse, self)._get_content()

    def _set_content(self, value):
        "Overrides rendered content, unless you later call render()"
        super(SimpleTemplateResponse, self)._set_content(value)
        self._is_rendered = True

    # Re-declare the property so HttpResponse's accessors are replaced by
    # the guarded versions above.
    content = property(_get_content, _set_content)
class TemplateResponse(SimpleTemplateResponse):
    """A SimpleTemplateResponse that renders with a RequestContext, so
    context processors run against the stored request."""

    def __init__(self, request, template, context=None, mimetype=None,
                 status=None, content_type=None, current_app=None):
        # self.request gets over-written by django.test.client.Client - and
        # unlike context_data and template_name the _request should not
        # be considered part of the public API.
        self._request = request
        # As a convenience we'll allow callers to provide current_app
        # without having to create the RequestContext directly.
        self._current_app = current_app
        super(TemplateResponse, self).__init__(
            template, context, mimetype, status, content_type)

    def resolve_context(self, context):
        """Convert context data into a full RequestContext object
        (assuming it isn't already a Context object).
        """
        if isinstance(context, Context):
            return context
        else:
            return RequestContext(self._request, context, current_app=self._current_app)
| {
"content_hash": "d7ba5853006d0f24c1ea8edb92b5ad30",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 114,
"avg_line_length": 41.45945945945946,
"alnum_prop": 0.6375488917861799,
"repo_name": "bernardokyotoku/skillplant",
"id": "d2d007bb8e29307e492fcf9c16603adea94df7e9",
"size": "4602",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/template/response.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "103281"
},
{
"name": "Python",
"bytes": "4219238"
},
{
"name": "Shell",
"bytes": "500"
}
],
"symlink_target": ""
} |
from snovault import (
CONNECTION,
upgrade_step,
)
@upgrade_step('mad_quality_metric', '1', '2')
def mad_quality_metric_1_2(value, system):
    """Backfill 'award' and 'lab' on the metric from the file it measures.

    http://redmine.encodedcc.org/issues/3897
    """
    connection = system['registry'][CONNECTION]
    # Get the attribution from the first file this metric belongs to.
    quality_file = connection.get_by_uuid(value['quality_metric_of'][0])
    award = connection.get_by_uuid(str(quality_file.properties['award']))
    lab = connection.get_by_uuid(str(quality_file.properties['lab']))
    value['award'] = '/awards/' + str(award.properties['name']) + '/'
    value['lab'] = '/labs/' + str(lab.properties['name']) + '/'
@upgrade_step('mad_quality_metric', '2', '3')
def mad_quality_metric_2_3(value, system):
    """No-op upgrade: schema version bump only, the data is unchanged."""
@upgrade_step('mad_quality_metric', '3', '4')
def mad_quality_metric_3_4(value, system):
    """Drop the retired assay_term_id field and tidy free-text notes.

    http://redmine.encodedcc.org/issues/2491
    """
    if 'assay_term_id' in value:
        del value['assay_term_id']
    if 'notes' in value:
        notes = value['notes']
        stripped = notes.strip() if notes else ''
        if stripped:
            value['notes'] = stripped
        else:
            # Fixed: whitespace-only notes previously survived as '' after
            # stripping; empty/falsy notes are meant to be removed entirely.
            del value['notes']
| {
"content_hash": "32e5e3deb70c275891b1c5882d7a89e9",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 65,
"avg_line_length": 30.72222222222222,
"alnum_prop": 0.6157323688969258,
"repo_name": "T2DREAM/t2dream-portal",
"id": "cdef51b502af91d76610a7be179d107447704e38",
"size": "1106",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/encoded/upgrade/mad_quality_metric.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AngelScript",
"bytes": "741"
},
{
"name": "CSS",
"bytes": "2874"
},
{
"name": "Gherkin",
"bytes": "16776"
},
{
"name": "HTML",
"bytes": "373076"
},
{
"name": "JavaScript",
"bytes": "1320205"
},
{
"name": "Makefile",
"bytes": "106"
},
{
"name": "Python",
"bytes": "1567328"
},
{
"name": "SCSS",
"bytes": "336182"
},
{
"name": "Shell",
"bytes": "4199"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.